Example no. 1
 def run(self, console, frames=None, times=4):
     if times > 1:
         print_result('Running %s' % self.__class__.__name__)
         while times > 0:
             self.run(console, frames, times=1)
             times -= 1
         print_result('')
         return
     if frames is None:
         frames = self.default_frames
     self.total_frames = 0
     self.tiles = 0
     console.clear()
     self.start_time = time.clock()
     while self.total_frames < frames:
         self.total_frames += 1
         self.test(console)
         for event in tdl.event.get():
             if event.type == 'QUIT':
                 raise SystemExit('Benchmark Canceled')
     self.total_time = time.clock() - self.start_time
     self.tiles_per_second = self.tiles / self.total_time
     print_result(
         '%i tiles drawn in %.2f seconds, %.2f characters/ms, %.2f FPS' %
         (self.tiles, self.total_time, self.tiles_per_second / 1000,
          self.total_frames / self.total_time))
 def recolorize(self):
     self.after_id = None
     if not self.delegate:
         if DEBUG: print("no delegate")
         return
     if not self.allow_colorizing:
         if DEBUG: print("auto colorizing is off")
         return
     if self.colorizing:
         if DEBUG: print("already colorizing")
         return
     try:
         self.stop_colorizing = False
         self.colorizing = True
         if DEBUG: print("colorizing...")
         t0 = time.clock()
         self.recolorize_main()
         t1 = time.clock()
         if DEBUG: print("%.3f seconds" % (t1-t0))
     finally:
         self.colorizing = False
     if self.allow_colorizing and self.tag_nextrange("TODO", "1.0"):
         if DEBUG: print("reschedule colorizing")
         self.after_id = self.after(1, self.recolorize)
     if self.close_when_done:
         top = self.close_when_done
         self.close_when_done = None
         top.destroy()
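Editor's note: `time.clock()` was deprecated in Python 3.3 and removed in Python 3.8, and it measured different things per platform (wall-clock time on Windows, CPU time on most Unix systems). A minimal sketch of the frame-timing pattern above using `time.perf_counter()` instead (function and parameter names here are illustrative, not from the original project):

import time

def run_benchmark(test, frames):
    # wall-clock timing that behaves the same on every platform
    start = time.perf_counter()
    total_frames = 0
    while total_frames < frames:
        total_frames += 1
        test()
    total_time = time.perf_counter() - start
    print('%i frames drawn in %.2f seconds, %.2f FPS'
          % (total_frames, total_time, total_frames / total_time))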
Example no. 3
 def __init__(self,fname):
     #dump to binary
     fndata=fname;
     if (not os.path.isfile(fndata)):
         fndatain=fndata.replace('/bin/','/');
         datain=Epix100a(fndatain);
         #write header
         binheader=np.zeros(16).astype(np.uint32);
         binheader[0:6]=[datain.nframes, datain.my*datain.mx, datain.my, datain.mx, datain.nblocks, datain.nbcols];
         binheader.tofile(fndata);    
         #write data
         dataout=np.memmap(fndata,dtype=np.int16,mode='r+', shape=(datain.nframes,datain.my,datain.mx),offset=64);
         t0=time.clock();
         for iframe in range(datain.nframes):
             dataout[iframe]=datain.frames(iframe);
             if (iframe%100==0):
                 #progress(iframe,nframes,iframe);
                 print str(iframe)+' - '+str(1000*(time.clock()-t0)/(iframe+1))+' ms. average frame: '+str(np.mean(datain.frames(iframe)));
         dataout.flush();
         
         del dataout;
         del datain;
     #get nr of frames
     data=np.memmap(fndata,dtype=np.uint32,mode='r',shape=((64)),offset=0); 
     self.nframes=data[0]; self.nframesize=data[1]; self.my=data[2]; self.mx=data[3]; self.nblocks=data[4]; self.nbcols=data[5];
     self.data=np.memmap(fndata,dtype=np.int16,mode='c',shape=(self.nframes,self.my,self.mx),offset=64);
def RunTest(testNum, p0, p1, p2, hasAnswer, p3):
    obj = JanuszInTheCasino()
    startTime = time.clock()
    answer = obj.findProbability(p0, p1, p2)
    endTime = time.clock()
    testTime.append(endTime - startTime)
    res = True
    if hasAnswer:
        res = answer == p3
    if res:
        print("Test #" + str(testNum) + ": Passed")
        return res
    print("Test #" + str(testNum) + ":")
    print("[" + str(p0) + "," + str(p1) + "," + str(p2) + "]")
    if hasAnswer:
        print("Expected:")
        print(str(p3))

    print("Received:")
    print(str(answer))
    print("Verdict:")
    if not res:
        print("Wrong answer!!")
    elif (endTime - startTime) >= 20:
        print("FAIL the timeout")
        res = False
    elif hasAnswer:
        print("OK!!")
    else:
        print("OK, but is it right?")
    print("Time: %.11f seconds" % (endTime - startTime))
    print("-----------------------------------------------------------")
    return res
Example no. 5
 def __init__(self, fndark, nblocksize):
     if (os.path.isfile(fndark+'-dark.npz')):
         npzfile=np.load(fndark+'-dark.npz');
         self.dmean=npzfile['dmean'];
         self.dstd=npzfile['dstd'];
         self.dbpm=npzfile['dbpm'];
     else:
         dark=Binary(fndark);
         nframes=dark.nframes; my=dark.my; mx=dark.mx;
         nblocks=nframes//nblocksize;
         
         bmed=np.zeros((nblocks,my,mx));
         bstd=np.zeros((nblocks,my,mx));
         for iblock in range(nblocks):
             t0=time.clock();
             a=dark.data[iblock*nblocksize:(iblock+1)*nblocksize];
             a,idx=dropbadframes(a);
             print '- read block, dropped bad, subtracted dark in '+str(time.clock()-t0)+'s';
             nfb=a.shape[0];                
             bmed[iblock,:,:]=np.median(a,axis=0);
             bstd[iblock,:,:]=np.std(a,axis=0);
         self.dmean=np.mean(bmed,axis=0);
         self.dstd=np.sqrt(np.sum((bstd)**2,axis=0));
         self.dbpm=self.dstd<(np.median(self.dstd)+5*np.std(self.dstd));
         self.dbpm=self.dstd<(np.median(self.dstd*self.dbpm)+5*np.std(self.dstd*self.dbpm));
         
         np.savez(fndark+'-dark',dmean=self.dmean,dstd=self.dstd,dbpm=self.dbpm);
         del dark;
Example no. 6
def main():
 #   n = raw_input("What's the N?")
    n = sys.argv[1]
    start = time.clock()
    tryAll(n)
    print "It took", (time.clock()-start),"seconds to complete"
    print "now:", time.clock(), "start:", start
    def kontrola(self, datadir=None):
        """
        Jednoduché vyhodnocení výsledků
        """

        obrazky, reseni = self.readImageDir(datadir)

        vysledky = []

        for i in range(0, len(obrazky)):
            cas1 = time.clock()
            im = skimage.io.imread(obrazky[i])
            result = self.rozpoznejZnacku(im)

            cas2 = time.clock()

            if((cas2 - cas1) >= 1.0):
                print "cas vyprsel"
                result = 0

            vysledky.append(result)

        hodnoceni = np.array(reseni) == np.array(vysledky)
        skore = np.sum(hodnoceni.astype(np.int)) / np.float(len(reseni))

        print skore
Example no. 8
    def wait_for_srq(self, timeout=25):
        """Wait for a serial request (SRQ) coming from the instrument.

        Note that this method does not return when *another* instrument
        signals an SRQ, only when *this* instrument does.

        :param timeout: the maximum waiting time in seconds.
                        Default: 25 (seconds).
                        None means waiting forever if necessary.
        """

        vpp43.enable_event(self.vi, VI_EVENT_SERVICE_REQ, VI_QUEUE)
        if timeout and not(0 <= timeout <= 4294967):
            raise ValueError("timeout value is invalid")
        starting_time = time.clock()
        while True:
            if timeout is None:
                adjusted_timeout = VI_TMO_INFINITE
            else:
                adjusted_timeout = int((starting_time + timeout - time.clock())
                                       * 1000)
                if adjusted_timeout < 0:
                    adjusted_timeout = 0
            event_type, context = \
                vpp43.wait_on_event(self.vi, VI_EVENT_SERVICE_REQ,
                                    adjusted_timeout)
            vpp43.close(context)
            if self.stb & 0x40:
                break
        vpp43.discard_events(self.vi, VI_EVENT_SERVICE_REQ, VI_QUEUE)
Example no. 9
 def timing(self):
     t0 = time.clock()
     try:
         yield
     finally:
         te = time.clock()
         self.laps.append(te - t0)
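The `timing` method above is the body of a generator-based context manager; the `@contextmanager` decorator and the `laps` attribute are implied but not shown in the excerpt. A self-contained sketch of the full pattern with `time.perf_counter()` (the `Stopwatch` class name is an assumption):

import time
from contextlib import contextmanager

class Stopwatch:
    def __init__(self):
        self.laps = []

    @contextmanager
    def timing(self):
        # record the wall-clock time of the with-block, even if it raises
        t0 = time.perf_counter()
        try:
            yield
        finally:
            self.laps.append(time.perf_counter() - t0)

sw = Stopwatch()
with sw.timing():
    sum(range(10 ** 6))
print(sw.laps)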
Example no. 10
def BENCH():
    print('\n\n\n--->BEGIN BENCHMARK')
    bt0 = time.clock()
    ###---MAKE A BIG LIST
    tsize = 25
    tlist = []
    for x in range(tsize):
        for y in range(tsize):
            for z in range(tsize):
                tlist.append((x,y,z))
                tlist.append((x,y,z))

    ###---FUNCTION TO TEST
    bt1 = time.clock()

    ll = deDupe(tlist)   # enable one of the candidate functions so the print below has a result
    #ll = f5(tlist)
    print('LENS - ', len(tlist), len(ll))

    bt2 = time.clock()
    btRUNb = bt2 - bt1
    btRUNa = bt1 - bt0
    print('--->SETUP TIME    : ', btRUNa)
    print('--->BENCHMARK TIME: ', btRUNb)
    print('--->GRIDSIZE: ', tsize, ' - ', tsize*tsize*tsize)
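`deDupe` and `f5` are not included in the excerpt (as originally scraped, both calls were commented out, leaving `ll` undefined). Their names suggest order-preserving de-duplication; a minimal sketch of such a function, under that assumption only:

def deDupe(items):
    # return a new list with duplicates removed, keeping first-seen order
    seen = set()
    out = []
    for item in items:
        if item not in seen:
            seen.add(item)
            out.append(item)
    return out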
Example no. 11
	def draw(self):
		self.screen.clear_pixel()
		
		start = time.clock() * 2.77 + math.sin(time.clock())
		end = time.clock() * 7.35 + 0.5 * math.pi
		distance = end - start
		start = start % (2 * math.pi)
		end = end % (2 * math.pi)

		invert = distance % (4 * math.pi) > 2 * math.pi
		if invert:
			buf = end
			end = start
			start = buf

		hue = (time.clock() * 0.01) % 1
		color = hsv_to_color(hue, 1, 1)

		for x in range(16):
			for y in range(16):
				r = ((x - 8)**2 + (y - 8)**2)**0.5
				if r == 0:
					r = 0.001
				angle = math.acos((x - 8) / r)
				if y - 8 < 0:
					angle = 2 * math.pi - angle
				if (angle > start and angle < end) or (end < start and (angle > start or angle < end)):
					self.screen.pixel[x][y] = color

		self.screen.update()
Example no. 12
 def saveBrain(self, filename):
     """Dump the contents of the bot's brain to a file on disk."""
     if self._verboseMode: print("Saving brain to %s..." % filename, end=' ')
     start = time.clock()
     self._brain.save(filename)
     if self._verboseMode:
         print("done (%.2f seconds)" % (time.clock() - start))
Example no. 13
    def learn(self, filename):
        """Load and learn the contents of the specified AIML file.

        If filename includes wildcard characters, all matching files
        will be loaded and learned.

        """
        for f in glob.glob(filename):
            if self._verboseMode: print("Loading %s..." % f, end=' ')
            start = time.clock()
            # Load and parse the AIML file.
            parser = AimlParser.create_parser()
            handler = parser.getContentHandler()
            handler.setEncoding(self._textEncoding)
            try:
                parser.parse(f)
            except xml.sax.SAXParseException as msg:
                err = "\nFATAL PARSE ERROR in file %s:\n%s\n" % (f, msg)
                sys.stderr.write(err)
                continue
            # store the pattern/template pairs in the PatternMgr.
            for key, tem in list(handler.categories.items()):
                self._brain.add(key, tem)
            # Parsing was successful.
            if self._verboseMode:
                print("done (%.2f seconds)" % (time.clock() - start))
Example no. 14
  def get_content(self, path, method='POST', body=None, params=''):
    uri= self.get_base_api_url() % (path, params)
    startTime = time.clock()
    response, content = http.request(
      uri,
      method=method, body=json.dumps(body) if body else None,
      headers={'Content-Type': 'application/json; charset=UTF-8'})
    contentLen = len(content)

    try:
      content = json.loads(content)
    except ValueError:
      logging.error('while requesting {}'.format(uri))
      logging.error('non-json api content %s' % content[:1000])
      raise ApiException('The API returned invalid JSON')

    if response.status >= 300:
      logging.error('error api response %s' % response)
      logging.error('error api content %s' % content)
      if 'error' in content:
        raise ApiException(content['error']['message'])
      else:
        raise ApiException('Something went wrong with the API call!')

    logging.info('get_content {}: {}kb {}s'.format(uri, contentLen/1024, time.clock() - startTime))
    return content
Example no. 15
def req():
    # Get URLs from a text file, remove white space.
    db = MySQLDatabase(DATABASE_HOST, DATABASE_USER, DATABASE_PASSWORD, DATABASE_NAME)
    db_worker_view = db.get_work_view()
    articles = db_worker_view.retrieve_all_articles()
    #articles = db_worker_view.retrieve_all_articles_questionmark()
    # measure time
    start = time.clock()
    start_time_iteration = start
    iteration_number = 483
    for i, article in enumerate(articles):
        # print some progress
        if i % 10000 == 0:
            #print time for the iteration
            seconds = time.clock() - start_time_iteration
            m, s = divmod(seconds, 60)
            h, m = divmod(m, 60)
            print "Number of crawled articles: %d. Total time for last iteration of 10000 articles: %d:%02d:%02d" % (i, h, m, s)
            start_time_iteration = time.clock()
            iteration_number += 1

        # Thread pool.
        # Blocks other threads (more than the set limit).
        pool.acquire(blocking=True)
        # Create a new thread.
        # Pass each URL (i.e. u parameter) to the worker function.
        t = threading.Thread(target=worker, args=(MEDIAWIKI_API_ENDPOINT+urllib.quote(article['title'])+'/'+str(article['rev_id']), article, iteration_number))

        # Start the newly create thread.
        t.start()
    seconds = time.clock() - start
    m, s = divmod(seconds, 60)
    h, m = divmod(m, 60)
    print "Total time: %d:%02d:%02d" % (h, m, s)
Example no. 16
 def apply(self, expr, evaluation):
     'Timing[expr_]'
     
     start = time.clock()
     result = expr.evaluate(evaluation)
     stop = time.clock()
     return Expression('List', Real(stop - start), result)
Example no. 17
def run(test):
    global test_name
    test_name = test

    t0 = time.clock()
    msg("setting up supervisor")
    exe = test + '.exe'
    proc = Popen(exe, bufsize=1<<20 , stdin=PIPE, stdout=PIPE, stderr=PIPE)
    done = multiprocessing.Value(ctypes.c_bool)
    queue = multiprocessing.Queue(maxsize=5)#(maxsize=1024)
    workers = []
    for n in range(NUM_WORKERS):
        worker = multiprocessing.Process(name='Worker-' + str(n + 1),
                                         target=init_worker,
                                         args=[test, MAILBOX, queue, done])
        workers.append(worker)
        child_processes.append(worker)
    for worker in workers:
        worker.start()
    msg("running test")
    interact(proc, queue)
    with done.get_lock():
        done.value = True
    for worker in workers:
        worker.join()
    msg("python is done")
    assert queue.empty(), "did not validate everything"
    dt = time.clock() - t0
    msg("took", round(dt, 3), "seconds")
Example no. 18
def main():
    u"メインメソッド"
    
    starttime = time.clock()
    run()
    time1 = time.clock()
    print "time: %f" % (time1 - starttime)
Example no. 19
    def fit(self, X, y, valid_X=None, valid_y=None):
        input_size = X.shape[1]
        output_size = len(np.unique(y))
        X_sym = T.matrix('x')
        y_sym = T.ivector('y')
        self.layers_ = []
        self.layer_sizes_ = [input_size]
        self.layer_sizes_.extend(self.hidden_layer_sizes)
        self.layer_sizes_.append(output_size)
        self.dropout_layers_ = []
        self.training_scores_ = []
        self.validation_scores_ = []
        self.training_loss_ = []
        self.validation_loss_ = []

        if not hasattr(self, 'fit_function'):
            self._setup_functions(X_sym, y_sym,
                                  self.layer_sizes_)

        batch_indices = list(range(0, X.shape[0], self.batch_size))
        if X.shape[0] != batch_indices[-1]:
            batch_indices.append(X.shape[0])

        start_time = time.clock()
        itr = 0
        best_validation_score = np.inf
        while (itr < self.max_iter):
            print("Starting pass %d through the dataset" % itr)
            itr += 1
            batch_bounds = list(zip(batch_indices[:-1], batch_indices[1:]))
            # Random minibatches
            self.random_state.shuffle(batch_bounds)
            for start, end in batch_bounds:
                self.partial_fit(X[start:end], y[start:end])
            current_training_score = (self.predict(X) != y).mean()
            self.training_scores_.append(current_training_score)
            current_training_loss = self.loss_function(X, y)
            self.training_loss_.append(current_training_loss)
            # Serialize each save_frequency iteration
            if (itr % self.save_frequency) == 0 or (itr == self.max_iter):
                f = open(self.model_save_name + "_snapshot.pkl", 'wb')
                cPickle.dump(self, f, protocol=2)
                f.close()
            if valid_X is not None:
                current_validation_score = (
                    self.predict(valid_X) != valid_y).mean()
                self.validation_scores_.append(current_validation_score)
                current_training_loss = self.loss_function(valid_X, valid_y)
                self.validation_loss_.append(current_training_loss)
                print("Validation score %f" % current_validation_score)
                # if we got the best validation score until now, save
                if current_validation_score < best_validation_score:
                    best_validation_score = current_validation_score
                    f = open(self.model_save_name + "_best.pkl", 'wb')
                    cPickle.dump(self, f, protocol=2)
                    f.close()
        end_time = time.clock()
        print("Total training time ran for %.2fm" %
              ((end_time - start_time) / 60.))
        return self
Example no. 20
def Check():
    start = I
    end = perm_apply(F, start)
    shortest_path(start, end)  # path length 1

    start = I
    middle1 = perm_apply(F, start)
    middle2 = perm_apply(L, middle1)
    middle3 = perm_apply(F, middle2)
    end = perm_apply(L, middle3)
    shortest_path(start, end)  # path length 4

    start = I
    middle1 = perm_apply(F, start)
    middle2 = perm_apply(F, middle1)
    end = perm_apply(Li, middle2)
    shortest_path(start, end)  # path length 3

    from time import clock
    m = clock()
    start = (6, 7, 8, 20, 18, 19, 3, 4, 5, 16, 17, 15, 0, 1, 2, 14, 12, 13, 10, 11, 9, 21, 22, 23)
    end = I
    shortest_path(start, end)  # path length 14
    print 'time taken:', clock() - m

    start = I
    middle1 = perm_apply(F, start)
    middle2 = perm_apply(Li, middle1)
    middle4 = perm_apply(Fi, middle2)
    middle3 = perm_apply(U, middle4)
    end = perm_apply(U, middle3)
    shortest_path(start, end)  # path length 5

    start = I
    middle1 = perm_apply(F, start)
    middle2 = perm_apply(F, middle1)
    shortest_path(start, middle2)  # path length 2

#Check()
#input_configuration()
Example no. 21
    def testTime(self):

        nUIDs     = 100000
        startTime = time.clock()
        for i in range(0,nUIDs):
            makeUUID()
        print("We can make %i UUIDs in %f seconds" %(nUIDs, time.clock() - startTime))
Example no. 22
def testRun():
	# For testing
	# count = [500, 1000, 1500, 2000, 2500, 3000, 3500, 4000, 4500, 5000, 5500, 6000, 6500, 7000, 7500, 8000, 8500, 9000, 9500, 10000, 15000, 20000, 25000, 30000, 35000, 40000, 45000, 50000, 55000, 60000, 65000, 70000, 75000, 80000, 85000, 90000, 95000, 100000]

	count = 5000

	while count < 5000000000:
	# for num in testList:
		randList = [None] * count
		for n in range(0, count):
			randNum = random.randrange(0, 101)
			if (random.randrange(0, 2) == 0):
				randNum = randNum * -1
			randList[n] = randNum

		startTime = time.clock()
		result = linearSearch(randList)
		stopTime = time.clock()

		resultTime = stopTime - startTime

		print("n: " + str(len(randList)))
		print("Largest Result: " + str(result[2]))
		print("Running Time: " + str(resultTime))

		count = count * 10
 def run(self):
     global q
     while(time.clock() < 10):
         if(self.nextTime < time.clock() and not q.empty()):
             f = q.get()
             print("Removing " + f)
             self.nextTime += random.random()*2
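The loop above uses `time.clock()` as a ten-second wall-clock deadline, which only behaved that way on Windows; on Unix the clock advanced with CPU time, so an idle polling loop could spin far past ten real seconds. `time.monotonic()` is the portable choice for deadlines; a minimal sketch of the same loop (queue contents and pacing are illustrative):

import time
import queue
import random

def drain(q, seconds=10):
    # poll until a wall-clock deadline, pacing removals at random intervals
    deadline = time.monotonic() + seconds
    next_time = time.monotonic()
    while time.monotonic() < deadline:
        if next_time < time.monotonic() and not q.empty():
            print("Removing " + q.get())
            next_time += random.random() * 2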
Example no. 24
	def SaveData (self, fname, verbose = True):

		if (verbose):
			print ("  Saving measurement to %s ... " % fname)

		start = time.clock()

		f = h5py.File(fname, 'w')

		f['data_r'] = np.squeeze(np.real(self.data).transpose())
		f['data_i'] = np.squeeze(np.imag(self.data).transpose())

		if (self.noise != 0):
			f['noise_r'] = np.squeeze(np.real(self.noise).transpose())
			f['noise_i'] = np.squeeze(np.imag(self.noise).transpose())
		
		if (self.acs != 0):
			f['acs_r'] = np.squeeze(np.real(self.acs).transpose())
			f['acs_i'] = np.squeeze(np.imag(self.acs).transpose())
		
		if (self.sync.any() != 0):
			f['sync'] = self.sync.transpose()

		f.close()

		if (verbose):
			print '    ... saved in %(time).1f s.\n' % {"time": time.clock()-start}

		return
def demo(print_times=True, print_grammar=True,
         print_trees=True, print_sentence=True,
         trace=1,
         parser=FeatureChartParser,
         sent='I saw John with a dog with my cookie'):
    import sys, time
    print()
    grammar = demo_grammar()
    if print_grammar:
        print(grammar)
        print()
    print("*", parser.__name__)
    if print_sentence:
        print("Sentence:", sent)
    tokens = sent.split()
    t = time.clock()
    cp = parser(grammar, trace=trace)
    chart = cp.chart_parse(tokens)
    trees = list(chart.parses(grammar.start()))
    if print_times:
        print("Time: %s" % (time.clock() - t))
    if print_trees:
        for tree in trees: print(tree)
    else:
        print("Nr trees:", len(trees))
Example no. 26
    def do_effect(self, can_msg, args):  # read full packet from serial port
        if args.get('action') == 'read':
            can_msg = self.do_read(can_msg)
        elif args.get('action') == 'write':
            # KOSTYL: workaround for BMW e90 bus
            if self._restart and self._run and (time.clock() - self.last) >= self.act_time:
                self.dev_write(0, "O")
                self.last = time.clock()
            self.do_write(can_msg)
        else:
            self.dprint(1, 'Command ' + args['action'] + ' not implemented 8(')


        """
        if self._restart:
                if self.wait_for and can_msg.debugData and can_msg.debugText['text'][0] == "F" and len(can_msg.debugText['text']) > 2:
                    error = int(can_msg.debugText['text'][1:3],16)
                    self.dprint(1,"BUS ERROR:" + hex(error))
                    if error & 8: # Fix for BMW CAN where it could be overloaded
                       self.dev_write(0, "C")
                       time.sleep(0.4)
                       self.dev_write(0, "O")
                       can_msg.debugText['do_not_send'] = True
                    else:
                       can_msg.debugText['please_send'] = True
                    self.wait_for = False

                elif time.clock() - self.last >= self.act_time and not self.wait_for:
                    self.dev_write(0, "F")
                    self.wait_for = True
                    self.last = time.clock()
        """

        return can_msg
Example no. 27
def printOutput(config, outputDirName, varDF):
    '''Output run statistics and variant details to the specified output directory.'''

    startTime = time.clock()
    print("\n=== Writing output files to {0}/ ===".format(outputDirName))
    ow = output.Writer()
    # identify formats ending in 'xlsx' as the "excel formats," requiring XlsxWriter
    excel_formats = [ plugin_name for plugin_name,file_name in ow.file_names.items() if os.path.splitext(file_name)[1]==".xlsx" ]
    if "all" in config.outputFormats:
        # replace output type 'all' with a list of all supported output types
        #    and remove 'all' and 'default' to prevent recursive execution of the modules
        config.outputFormats = list(ow.supported_formats.keys())  # list() so the removes below work on Python 3
        config.outputFormats.remove('all')
        config.outputFormats.remove('default')
        if 'xlsxwriter' not in sys.modules and excel_formats:
            # if xlsxwriter is not present and the user selected excel output formats, remove excel formats from output formats
            config.outputFormats = [ x for x in config.outputFormats if x not in excel_formats ]
            throwWarning("xlsxwriter module not found; Excel outputs disabled")

    for format in config.outputFormats:
        ow.write(varDF,format,outputDirName,config)

    totalTime = time.clock() - startTime
    print("\tTime to write: {0:02d}:{1:02d}".format(int(totalTime/60), int(totalTime % 60)))
    return 0
Example no. 28
def executeOneSetting(tensor, density, roundId, para):
    logger.info('density=%.2f, %2d-round starts.'%(density, roundId + 1))
    (numUser, numService, numTime) = tensor.shape

    # remove the entries of data to generate trainTensor and testTensor
    (trainTensor, testTensor) = evallib.removeTensor(tensor, density, roundId, para) 

    # invocation to the prediction function
    startTime = time.clock() # to record the running time for one round             
    predictedTensor = Average.predict(trainTensor, para) 
    runningTime = float(time.clock() - startTime) / numTime

    # evaluate the prediction error 
    for sliceId in xrange(numTime):
        testMatrix = testTensor[:, :, sliceId]
        predictedMatrix = predictedTensor[:, :, sliceId]
        (testVecX, testVecY) = np.where(testMatrix)
        testVec = testMatrix[testVecX, testVecY]
        predVec = predictedMatrix[testVecX, testVecY]
        evalResult = evallib.errMetric(testVec, predVec, para['metrics'])        
        result = (evalResult, runningTime)

        # dump the result at each density
        outFile = '%s%s_%s_result_%02d_%.2f_round%02d.tmp'%(para['outPath'], 
            para['dataName'], para['dataType'], sliceId + 1, density, roundId + 1)
        evallib.dumpresult(outFile, result)
        
    logger.info('density=%.2f, %2d-round done.'%(density, roundId + 1))
    logger.info('----------------------------------------------')
def update(params):
	# Download the ZIP
	xbmc.output("[updater.py] update")
	xbmc.output("[updater.py] cwd="+os.getcwd())
	remotefilename = REMOTE_FILE+params.get("version")+".zip"
	localfilename = LOCAL_FILE+params.get("version")+".zip"
	xbmc.output("[updater.py] remotefilename=%s" % remotefilename)
	xbmc.output("[updater.py] localfilename=%s" % localfilename)
	xbmc.output("[updater.py] descarga fichero...")
	inicio = time.clock()
	urllib.urlretrieve(remotefilename,localfilename)
	fin = time.clock()
	xbmc.output("[updater.py] Descargado en %d segundos " % (fin-inicio+1))
	
	# Unzip it
	xbmc.output("[updater.py] descomprime fichero...")
	import ziptools
	unzipper = ziptools.ziptools()
	destpathname = DESTINATION_FOLDER
	xbmc.output("[updater.py] destpathname=%s" % destpathname)
	unzipper.extract(localfilename,destpathname)
	
	# Delete the downloaded zip
	xbmc.output("[updater.py] borra fichero...")
	os.remove(localfilename)
def solveUnconstrainedOptimizationAndOutputStatus(optimizationSolver):
    opt = optimizationSolver(p1)
    start = time.clock() 
    output = opt.solve()
    elapsed = time.clock() - start
    outputStatus(output, elapsed)
    plotRosenbrockFunctionAndOptimizationPath(output)
Example no. 31

def ray_ds(ds, ray, name):
    ds[name + ".origin"] = ray.origin.to_ds()
    ds[name + ".dir"] = ray.dir.to_ds()


def random_ray(ds):
    origin = Vector3(0.0, 0.0, 0.0)
    direction = Vector3(random(), random(), random())
    direction.normalize()
    ray = Ray(origin, direction)
    ray_ds(ds, ray, 'ray1')
    return ray


for i in range(5):
    ray = random_ray(ds)
    start = time.clock()
    hit = linear.isect(ray)
    end = time.clock()
    dur1 = end - start
    start = time.clock()
    runtime.run('test')
    end = time.clock()
    dur2 = end - start
    if hit:
        print(hit.t, ds['hp1.t'], ds['ret'], dur1, dur2)
    else:
        print("False ", ds['ret'], dur1, dur2)
Example no. 32
    if arrayLen == 2:
        if array[1] < array[0]:
            array[0], array[1] = array[1], array[0]
        return array
    lo, hi, pivot = partition(array)
    lo = quickSort(lo)
    hi = quickSort(hi)
    sortedArray = lo
    sortedArray.extend(pivot)
    sortedArray.extend(hi)
    return sortedArray

# the input can be converted directly into a list
originArray = eval(input('input an array: '))

start1 = time.clock()
sortedArray1 = mergeSort(originArray)
end1 = time.clock()

start2 = time.clock()
sortedArray2 = selectionSort(originArray)
end2 = time.clock()

start3 = time.clock()
sortedArray3 = selectionSort(originArray)
end3 = time.clock()

start4 = time.clock()
sortedArray4 = quickSort(originArray)
end4 = time.clock()
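A benchmarking caveat in this excerpt: all four measurements receive the same `originArray`, so if any of the sorts mutates its argument in place, later contenders are handed pre-sorted input and the comparison is skewed. Passing each sort its own copy avoids that; a sketch:

import time

def time_sort(sort_fn, data):
    # give every contender a fresh copy so earlier runs can't pre-sort the input
    arr = list(data)
    start = time.perf_counter()
    result = sort_fn(arr)
    return result, time.perf_counter() - start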
Example no. 33
	# print 'Doing period',period

	# x, y = amplitude*np.sin(2*np.pi*t/period), amplitude*np.cos(2*np.pi*t/period) # smooth
	# folded = t % period
	x, y = amplitude*np.random.randn(t.shape[0]),amplitude*np.random.randn(t.shape[0])

	f = 20*np.ones(ncad) + np.sin(t/6.) # make this whatever function you like! 
	f[400:500] *= 0.990 # toy transit


	'''------------------------
	Define a PSF and aperture
	------------------------'''

	width = 3.
	start = clock()

	nx, ny = 10, 10
	npix = nx*ny
	pixels = np.zeros((nx,ny))

	'''------------------------
	Simulate data
	------------------------'''

	tpf = np.zeros((nx,ny,ncad))
	sensitivity = 1-0.1*np.random.rand(nx,ny)
	white = 0

	for j in range(ncad):
	    tpf[:,:,j] = f[j]*gaussian_psf(pixels,x[j],y[j],width)*sensitivity + np.random.randn(nx,ny)*white
Example no. 34
# This script copies injury and fatality data from Transbase database.
#Last modified: 11/30/2017 by Jonathan Engelbert
#
### No Known Issues
################################################################################################

import arcpy
from arcpy import env
import sys, string, os, time, datetime

# SET TO OVERWRITE
arcpy.env.overwriteOutput = True

# Logging script
myStartDate = str(datetime.date.today())
myStartTime = time.clock()
theStartTime = time.ctime()
print theStartTime

try:
    myStartDate = str(datetime.date.today())
    myStartTime = time.clock()
    theStartTime = time.ctime()
    # thisfile = os.path.realpath(__file__)
    file = open("C:/ETLs/TIM/TIMUpdates/Logs/" + myStartDate + "Transbase2" + ".txt", "w")
    file.write(theStartTime + "\n")
    when =datetime.date.today()
    theDate = when.strftime("%d")
    theDay=when.strftime("%A")
    print theDay
Example no. 35
File: wordnet.py Project: oier/Yaki
        if self.searcher:
            return self.searcher.document(word=word)["syns"]
        else:
            return synonyms(self.w2n, self.n2w, word)


if __name__ == "__main__":
    from time import clock
    from whoosh.filedb.filestore import FileStorage
    st = FileStorage("c:/testindex")

    #    t = clock()
    #    th = Thesaurus.from_filename("c:/wordnet/wn_s.pl")
    #    print clock() - t
    #
    #    t = clock()
    #    th.to_storage(st)
    #    print clock() - t
    #
    #    t = clock()
    #    print th.synonyms("light")
    #    print clock() - t

    t = clock()
    th = Thesaurus.from_storage(st)
    print clock() - t

    t = clock()
    print th.synonyms("hail")
    print clock() - t
Example no. 36
    def draw(self, seconds, beats, uniforms, additionalTextureUniforms=None):
        if not self.shaders:
            # compiler errors
            return

        isProfiling = (Profiler.instance and Profiler.instance.isVisible()
                       and Profiler.instance.isProfiling() and self._debugPassId is None)
        if isProfiling:
            self.profileLog = []
            glFinish()
            startT = time.clock()
        else:
            startT = time.clock()

        maxActiveInputs = 0
        for i, passData in enumerate(self.passes):
            if not self.__passDirtyState[i]:
                continue

            if self.passes[i].is3d:
                bail = False
                for buffer in self.colorBuffers[passData.targetBufferId]:
                    if isinstance(buffer, Texture3D):
                        # can't rebake
                        bail = True
                        break
                if bail:
                    continue

            # dirty again only if realtime
            self.__passDirtyState[i] = self.passes[i].realtime

            uniforms['uSeconds'] = seconds
            uniforms['uBeats'] = beats
            uniforms['uResolution'] = self.frameBuffers[passData.targetBufferId].width(), \
                                      self.frameBuffers[passData.targetBufferId].height()

            if i >= len(self.shaders) or self.shaders[i] == 0:
                self._rebuild(None, index=i)

            # make sure we don't take into account previous GL calls when measuring time
            if isProfiling:
                glFinish()
                beforeT = time.clock()

            self.frameBuffers[passData.targetBufferId].use()

            glUseProgram(self.shaders[i])

            activeInputs = self._bindInputs(i, additionalTextureUniforms)

            fn = (glUniform1f, glUniform2f, glUniform3f, glUniform4f)
            for name in uniforms:
                if isinstance(uniforms[name], (int, long)):
                    glActiveTexture(GL_TEXTURE0 + activeInputs)
                    glBindTexture(GL_TEXTURE_2D, uniforms[name])
                    glUniform1i(glGetUniformLocation(self.shaders[i], name),
                                activeInputs)
                    activeInputs += 1
                elif isinstance(uniforms[name], float):
                    fn[0](glGetUniformLocation(self.shaders[i], name),
                          uniforms[name])
                elif len(uniforms[name]) == 9:
                    glUniformMatrix3fv(
                        glGetUniformLocation(self.shaders[i], name), 1, False,
                        (ctypes.c_float * 9)(*uniforms[name]))
                elif len(uniforms[name]) == 16:
                    glUniformMatrix4fv(
                        glGetUniformLocation(self.shaders[i], name), 1, False,
                        (ctypes.c_float * 16)(*uniforms[name]))
                else:
                    fn[len(uniforms[name]) - 1](glGetUniformLocation(
                        self.shaders[i], name), *uniforms[name])

            for name in passData.uniforms:
                if isinstance(passData.uniforms[name], float):
                    fn[0](glGetUniformLocation(self.shaders[i], name),
                          passData.uniforms[name])
                else:
                    fn[len(passData.uniforms[name]) - 1](glGetUniformLocation(
                        self.shaders[i], name), *passData.uniforms[name])

            maxActiveInputs = max(maxActiveInputs, activeInputs)

            if self.passes[i].drawCommand is not None:
                exec(self.passes[i].drawCommand)
            else:
                glRecti(-1, -1, 1, 1)

            # duct tape the 2D color buffer(s) into 3D color buffer(s)
            if self.passes[i].is3d:
                buffers = self.colorBuffers[passData.targetBufferId]
                for j, buffer in enumerate(buffers):
                    buffer.use()
                    data = glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA, GL_FLOAT)
                    FrameBuffer.clear()
                    buffers[j] = Texture3D(Texture.RGBA32F, buffer.height(),
                                           True, data)
                    buffers[j].original = buffer

            # enable mip mapping on static textures
            if not self.passes[i].realtime:
                # after rendering grab all render targets & enable mip maps, then generate them
                for buffer in self.colorBuffers[passData.targetBufferId]:
                    buffer.use()
                    mode = GL_TEXTURE_3D if isinstance(
                        buffer, Texture3D) else GL_TEXTURE_2D
                    glTexParameteri(mode, GL_TEXTURE_MIN_FILTER,
                                    GL_LINEAR_MIPMAP_LINEAR)
                    glTexParameteri(mode, GL_TEXTURE_MAG_FILTER, GL_LINEAR)

                    # requires openGL 4.6?
                    glTexParameterf(
                        mode, texture_filter_anisotropic.
                        GL_TEXTURE_MAX_ANISOTROPY_EXT, 16.0)

                    glGenerateMipmap(mode)

            # make sure all graphics calls are finished processing in GL land before we measure time
            if isProfiling:
                glFinish()
                afterT = time.clock()
                self.profileLog.append((passData.name
                                        or str(i), afterT - beforeT))

            if self._debugPassId is not None and i == self._debugPassId[0]:
                # debug mode: we want to view this pass on the screen, so avoid overwriting its buffers with future passes
                break

        if isProfiling:
            glFinish()
        # inform the profiler a new result is ready
        endT = time.clock()
        self.profileInfoChanged.emit(endT - startT)

        return maxActiveInputs
Example no. 37
    from pprint import pprint

    pprint(ToLatLon.from_cc2(pd.Series(['RU', 'SI'])))

    coords = [
        (46.0555, 14.5083),
        (40.7127, -74.0059),
        (49.761667, -77.802778),
        (31.814700, 79.886838),
        (52.818925, 92.567674),
        (61.760153, -121.236525),
        (64.295556, -15.227222),
        (0, 0),
    ]

    start = time.clock()
    pprint(latlon2region(coords, 0))
    elpassed = time.clock() - start
    print(elpassed)
    print()

    start = time.clock()
    pprint(latlon2region(coords, 1))
    elpassed = time.clock() - start
    print(elpassed)
    print()

    start = time.clock()
    pprint(latlon2region(coords, 2))
    elpassed = time.clock() - start
    print(elpassed)
Example no. 38
 def stop(self):
     elapsed = time.clock() - self._timings[self._name][0]
     self._timings[self._name][1] += elapsed
     self._timings[self._name][2] += 1
Example no. 39
 def pause(self):
     self._paused = time.clock()
Example no. 40
def timedsum1(n, zero):
    start=time.clock()
    tot=sum(range(n), zero)
    stend=time.clock()
    return type(zero), stend-start, tot
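`timedsum1` exists to compare how the type of `zero` changes the cost of `sum`. A usage sketch over a few numeric start values (this assumes an interpreter where `time.clock()` still exists, i.e. Python < 3.8, or that `time.perf_counter()` has been swapped in):

from decimal import Decimal
from fractions import Fraction

for zero in (0, 0.0, Decimal(0), Fraction(0)):
    kind, elapsed, total = timedsum1(10 ** 6, zero)
    print(kind.__name__, elapsed, total)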
Example no. 41
    def solve(self):
        """
        Runs the simulation.
        """
        result_handler = self.result_handler
        h = (self.final_time - self.start_time) / self.ncp
        grid = N.linspace(self.start_time, self.final_time, self.ncp + 1)[:-1]

        status = 0
        final_time = 0.0

        #For result writing
        result_handler.integration_point()

        #Start of simulation, start the clock
        time_start = time.clock()

        for t in grid:
            status = self.model.do_step(t, h)
            self.status = status

            if status != 0:

                if status == fmi.FMI_ERROR:
                    result_handler.simulation_end()
                    raise Exception(
                        "The simulation failed. See the log for more information. Return flag %d."
                        % status)

                elif status == fmi.FMI_DISCARD and isinstance(
                        self.model, fmi.FMUModelCS1):

                    try:
                        last_time = self.model.get_real_status(
                            fmi.FMI1_LAST_SUCCESSFUL_TIME)
                        if last_time > t:  # solver succeeded in taking a step a little further than the last time
                            self.model.time = last_time
                            final_time = last_time
                            result_handler.integration_point()
                    except fmi.FMUException:
                        pass
                break
                #result_handler.simulation_end()
                #raise Exception("The simulation failed. See the log for more information. Return flag %d"%status)

            final_time = t + h

            result_handler.integration_point()

            if self.input_traj != None:
                self.model.set(self.input_traj[0],
                               self.input_traj[1].eval(t + h)[0, :])

        #End of simulation, stop the clock
        time_stop = time.clock()

        result_handler.simulation_end()

        if self.status != 0:
            print(
                'Simulation terminated prematurely. See the log for possibly more information. Return flag %d.'
                % status)

        #Log elapsed time
        print('Simulation interval    : ' + str(self.start_time) + ' - ' +
              str(final_time) + ' seconds.')
        print('Elapsed simulation time: ' + str(time_stop - time_start) +
              ' seconds.')
Example no. 42
 def resume(self):
     elapsed = time.clock() - self._paused
     self._timings[self._name][0] += elapsed
 def wrapper(*args, **kwargs):
     t = time.clock()
     res = func(*args, **kwargs)
     print("\t%s" % func.__name__, time.clock()-t)
     return res
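`wrapper` above is the inner function of a timing decorator; the enclosing `def` and `functools.wraps` are implied by the excerpt. A self-contained sketch of the complete decorator with `time.perf_counter()`:

import time
import functools

def timed(func):
    @functools.wraps(func)   # keep func.__name__ for the report line
    def wrapper(*args, **kwargs):
        t = time.perf_counter()
        res = func(*args, **kwargs)
        print("\t%s" % func.__name__, time.perf_counter() - t)
        return res
    return wrapper

@timed
def work():
    return sum(range(10 ** 6))

work()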
Example no. 44
    def start(self, name):
        self._name = name
        if name not in self._timings:
            self._timings[name] = [0.] * 3

        self._timings[name][0] = time.clock()
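Examples no. 38, 39, 42, and 44 are four methods of one stopwatch-style class: `start` records a start time under a name, `pause`/`resume` shift the start forward so paused time is excluded, and `stop` accumulates elapsed time and a call count. A consolidated sketch of that class with `time.perf_counter()` (the class name is an assumption):

import time

class NamedTimer:
    def __init__(self):
        self._timings = {}   # name -> [start_time, total_elapsed, call_count]
        self._name = None
        self._paused = 0.0

    def start(self, name):
        self._name = name
        if name not in self._timings:
            self._timings[name] = [0.] * 3
        self._timings[name][0] = time.perf_counter()

    def pause(self):
        self._paused = time.perf_counter()

    def resume(self):
        # push the recorded start forward so the paused interval is not counted
        self._timings[self._name][0] += time.perf_counter() - self._paused

    def stop(self):
        elapsed = time.perf_counter() - self._timings[self._name][0]
        self._timings[self._name][1] += elapsed
        self._timings[self._name][2] += 1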
Example no. 45
    B = mx.sym.Variable('B')
    C = mx.symbol.dot(A, B)

    executor = C.simple_bind(mx.gpu(1),
                             'write',
                             A=(4096, 4096),
                             B=(4096, 4096))

    a = mx.random.uniform(-1.0, 1.0, shape=(4096, 4096))
    b = mx.random.uniform(-1.0, 1.0, shape=(4096, 4096))

    a.copyto(executor.arg_dict['A'])
    b.copyto(executor.arg_dict['B'])

    flag = False
    print "execution begin"
    for i in range(args.iter_num):
        if i == args.begin_profiling_iter:
            t0 = time.clock()
            mx.profiler.profiler_set_state('run')
        if i == args.end_profiling_iter:
            t1 = time.clock()
            mx.profiler.profiler_set_state('stop')
        executor.forward()
        c = executor.outputs[0]
        c.wait_to_read()
    print "execution end"
    duration = t1 - t0
    print('duration: {0}s'.format(duration))
    print('          {0}ms/operator'.format(duration * 1000 / args.iter_num))
Example no. 46
l1_b = []
l2_b = []

train_loss = []
test_loss = []
test_mean_loss = []

prev_loss = -1
loss_std = 0

loss_rate = []

# Learning loop
for epoch in xrange(1, n_epoch + 1):
    print('epoch', epoch)
    start_time = time.clock()

    # training
    perm = np.random.permutation(N)
    sum_loss = 0
    for i in xrange(0, N, batchsize):
        x_batch = x_train[perm[i:i + batchsize]]
        y_batch = y_train[perm[i:i + batchsize]]

        optimizer.zero_grads()
        loss = forward(x_batch, y_batch)
        loss.backward()
        optimizer.update()

        train_loss.append(loss.data)
        sum_loss += float(cuda.to_cpu(loss.data)) * batchsize
Example no. 47
def read(humfile, sonpath, cs2cs_args, c, draft, doplot, t, bedpick, flip_lr,
         model, calc_bearing, filt_bearing, chunk):  #cog = 1,
    '''
    Read a .DAT and associated set of .SON files recorded by a Humminbird(R)
    instrument.

    Parse the data into a set of memory mapped files that will
    subsequently be used by the other functions of the PyHum module.

    Export time-series data and metadata in other formats.

    Create a kml file for visualising boat track

    Syntax
    ----------
    [] = PyHum.read(humfile, sonpath, cs2cs_args, c, draft, doplot, t, bedpick, flip_lr, chunksize, model, calc_bearing, filt_bearing, chunk)

    Parameters
    ------------
    humfile : str
       path to the .DAT file
    sonpath : str
       path where the *.SON files are
    cs2cs_args : int, *optional* [Default="epsg:26949"]
       arguments to create coordinates in a projected coordinate system
       this argument gets given to pyproj to turn wgs84 (lat/lon) coordinates
       into any projection supported by the proj.4 libraries
    c : float, *optional* [Default=1450.0]
       speed of sound in water (m/s). Defaults to a value of freshwater
    draft : float, *optional* [Default=0.3]
       draft from water surface to transducer face (m)
    doplot : float, *optional* [Default=1]
       if 1, plots will be made
    t : float, *optional* [Default=0.108]
       length of transducer array (m).
       Default value is that of the 998 series Humminbird(R)
    bedpick : int, *optional* [Default=1]
       if 1, bedpicking with be carried out automatically
       if 0, user will be prompted to pick the bed location on screen
    flip_lr : int, *optional* [Default=0]
       if 1, port and starboard scans will be flipped
       (for situations where the transducer is flipped 180 degrees)
    model: int, *optional* [Default=998]
       A 3 or 4 number code indicating the model number
       Examples: 998, 997, 1198, 1199
    calc_bearing : float, *optional* [Default=0]
       if 1, bearing will be calculated from coordinates
    filt_bearing : float, *optional* [Default=0]
       if 1, bearing will be filtered
    chunk : str, *optional* [Default='d100' (distance, 100 m)]
       letter, followed by a number.
       There are the following letter options:
       'd' - parse chunks based on distance, then number which is distance in m
       'p' - parse chunks based on number of pings, then number which is number of pings
       'h' - parse chunks based on change in heading, then number which is the change in heading in degrees
       '1' - process just 1 chunk

    Returns
    ---------
    sonpath+base+'_data_port.dat': memory-mapped file
        contains the raw echogram from the port side
        sidescan sonar (where present)

    sonpath+base+'_data_port.dat': memory-mapped file
        contains the raw echogram from the starboard side
        sidescan sonar (where present)

    sonpath+base+'_data_dwnhi.dat': memory-mapped file
        contains the raw echogram from the high-frequency
        echosounder (where present)

    sonpath+base+'_data_dwnlow.dat': memory-mapped file
        contains the raw echogram from the low-frequency
        echosounder (where present)

    sonpath+base+"trackline.kml": google-earth kml file
        contains the trackline of the vessel during data
        acquisition

    sonpath+base+'rawdat.csv': comma separated value file
        contains time-series data. columns corresponding to
        longitude
        latitude
        easting (m)
        northing (m)
        depth to bed (m)
        alongtrack cumulative distance (m)
        vessel heading (deg.)

    sonpath+base+'meta.mat': .mat file
        matlab format file containing a dictionary object
        holding metadata information. Fields are:
        e : ndarray, easting (m)
        n : ndarray, northing (m)
        es : ndarray, low-pass filtered easting (m)
        ns : ndarray, low-pass filtered northing (m)
        lat : ndarray, latitude
        lon : ndarray, longitude
        shape_port : tuple, shape of port scans in memory mapped file
        shape_star : tuple, shape of starboard scans in memory mapped file
        shape_hi : tuple, shape of high-freq. scans in memory mapped file
        shape_low : tuple, shape of low-freq. scans in memory mapped file
        dep_m : ndarray, depth to bed (m)
        dist_m : ndarray, distance along track (m)
        heading : ndarray, heading of vessel (deg. N)
        pix_m: float, size of 1 pixel in across-track dimension (m)
        bed : ndarray, depth to bed (m)
        c : float, speed of sound in water (m/s)
        t : length of sidescan transducer array (m)
        spd : ndarray, vessel speed (m/s)
        time_s : ndarray, time elapsed (s)
        caltime : ndarray, unix epoch time (s)
    '''

    # prompt user to supply file if no input file given
    if not humfile:
        print('An input file is required!!!!!!')
        Tk().withdraw()  # we don't want a full GUI, so keep the root window from appearing
        humfile = askopenfilename(filetypes=[("DAT files", "*.DAT")])

    # prompt user to supply directory if no input sonpath is given
    if not sonpath:
        print('A *.SON directory is required!!!!!!')
        Tk().withdraw()  # we don't want a full GUI, so keep the root window from appearing
        sonpath = askdirectory()

    # print given arguments to screen and convert data type where necessary
    if humfile:
        print('Input file is %s' % (humfile))

    if sonpath:
        print('Son files are in %s' % (sonpath))

    if cs2cs_args:
        print('cs2cs arguments are %s' % (cs2cs_args))

    if draft:
        draft = float(draft)
        print('Draft: %s' % (str(draft)))

    if c:
        c = float(c)
        print('Celerity of sound: %s m/s' % (str(c)))

    if doplot:
        doplot = int(doplot)
        if doplot == 0:
            print("Plots will not be made")

    if flip_lr:
        flip_lr = int(flip_lr)
        if flip_lr == 1:
            print("Port and starboard will be flipped")

    if t:
        t = np.asarray(t, float)
        print('Transducer length is %s m' % (str(t)))

    if bedpick:
        bedpick = np.asarray(bedpick, int)
        if bedpick == 1:
            print('Bed picking is auto')
        elif bedpick == 0:
            print('Bed picking is manual')
        else:
            print('User will be prompted per chunk about bed picking method')

    if chunk:
        chunk = str(chunk)
        if chunk[0] == 'd':
            chunkmode = 1
            chunkval = int(chunk[1:])
            print('Chunks based on distance of %s m' % (str(chunkval)))
        elif chunk[0] == 'p':
            chunkmode = 2
            chunkval = int(chunk[1:])
            print('Chunks based on %s pings' % (str(chunkval)))
        elif chunk[0] == 'h':
            chunkmode = 3
            chunkval = int(chunk[1:])
            print('Chunks based on heading deviation of %s degrees' %
                  (str(chunkval)))
        elif chunk[0] == '1':
            chunkmode = 4
            chunkval = 1
            print('Only 1 chunk will be produced')
        else:
            print(
                "Chunk mode not understood - should be 'd', 'p', or 'h' - using defaults"
            )
            chunkmode = 1
            chunkval = 100
            print('Chunks based on distance of %s m' % (str(chunkval)))

    if model:
        try:
            model = int(model)
            print("Data is from the %s series" % (str(model)))
        except:
            if model == 'onix':
                model = 0
                print("Data is from the ONIX series")
            elif model == 'helix':
                model = 1
                print("Data is from the HELIX series")
            elif model == 'mega':
                model = 2
                print("Data is from the MEGA series")
#    if cog:
#       cog = int(cog)
#       if cog==1:
#          print "Heading based on course-over-ground"

    if calc_bearing:
        calc_bearing = int(calc_bearing)
        if calc_bearing == 1:
            print("Bearing will be calculated from coordinates")

    if filt_bearing:
        filt_bearing = int(filt_bearing)
        if filt_bearing == 1:
            print("Bearing will be filtered")

    ## for debugging
    #humfile = r"test.DAT"; sonpath = "test_data"
    #cs2cs_args = "epsg:26949"; doplot = 1; draft = 0
    #c=1450; bedpick=1; fliplr=1; chunk = 'd100'
    #model=998; cog=1; calc_bearing=0; filt_bearing=0

    #if model==2:
    #   f = 1000
    #else:
    f = 455

    try:
        print(
            "Checking the epsg code you have chosen for compatibility with Basemap ... "
        )
        from mpl_toolkits.basemap import Basemap
        m = Basemap(projection='merc',
                    epsg=cs2cs_args.split(':')[1],
                    resolution='i',
                    llcrnrlon=10,
                    llcrnrlat=10,
                    urcrnrlon=30,
                    urcrnrlat=30)
        del m
        print("... epsg code compatible")
    except (ValueError):
        print(
            "Error: the epsg code you have chosen is not compatible with Basemap"
        )
        print(
            "please choose a different epsg code (http://spatialreference.org/)"
        )
        print("program will now close")
        sys.exit()

    # start timer
    if os.name == 'posix':  # true if linux/mac or cygwin on windows
        start = time.time()
    else:  # windows
        start = time.clock()

    # if son path name supplied has no separator at end, put one on
    if sonpath[-1] != os.sep:
        sonpath = sonpath + os.sep

    # get the SON files from this directory
    sonfiles = glob.glob(sonpath + '*.SON')
    if not sonfiles:
        sonfiles = glob.glob(os.getcwd() + os.sep + sonpath + '*.SON')

    base = humfile.split('.DAT')  # get base of file name for output
    base = base[0].split(os.sep)[-1]

    # remove underscores, negatives and spaces from basename
    base = humutils.strip_base(base)

    print("WARNING: Because files have to be read in byte by byte,")
    print("this could take a very long time ...")

    #reading each sonfile in parallel should be faster ...
    try:
        o = Parallel(n_jobs=np.min([len(sonfiles), cpu_count()]), verbose=0)(
            delayed(getscans)(sonfiles[k], humfile, c, model, cs2cs_args)
            for k in range(len(sonfiles)))
        X, Y, A, B = zip(*o)

        for k in range(len(Y)):
            if Y[k] == 'sidescan_port':
                dat = A[k]  #data.gethumdat()
                metadat = B[k]  #data.getmetadata()
                if flip_lr == 0:
                    data_port = X[k].astype('int16')
                else:
                    data_star = X[k].astype('int16')

            elif Y[k] == 'sidescan_starboard':
                if flip_lr == 0:
                    data_star = X[k].astype('int16')
                else:
                    data_port = X[k].astype('int16')

            elif Y[k] == 'down_lowfreq':
                data_dwnlow = X[k].astype('int16')

            elif Y[k] == 'down_highfreq':
                data_dwnhi = X[k].astype('int16')

            elif Y[k] == 'down_vhighfreq':  #hopefully this only applies to mega systems
                data_dwnhi = X[k].astype('int16')

        del X, Y, A, B, o
        old_pyread = 0

        if 'data_port' not in locals():
            data_port = ''
            print("portside scan not available")

        if 'data_star' not in locals():
            data_star = ''
            print("starboardside scan not available")

        if 'data_dwnhi' not in locals():
            data_dwnhi = ''
            print("high-frq. downward scan not available")

        if 'data_dwnlow' not in locals():
            data_dwnlow = ''
            print("low-frq. downward scan not available")

    except:  # revert back to older version if paralleleised version fails

        print(
            "something went wrong with the parallelised version of pyread ...")

        try:
            import pyread
        except:
            from . import pyread

        data = pyread.pyread(sonfiles, humfile, c, model, cs2cs_args)

        dat = data.gethumdat()

        metadat = data.getmetadata()

        old_pyread = 1

    nrec = len(metadat['n'])

    metadat['instr_heading'] = metadat['heading'][:nrec]

    #metadat['heading'] = humutils.get_bearing(calc_bearing, filt_bearing, cog, metadat['lat'], metadat['lon'], metadat['instr_heading'])

    try:
        es = humutils.runningMeanFast(metadat['e'][:nrec],
                                      len(metadat['e'][:nrec]) / 100)
        ns = humutils.runningMeanFast(metadat['n'][:nrec],
                                      len(metadat['n'][:nrec]) / 100)
    except:
        es = metadat['e'][:nrec]
        ns = metadat['n'][:nrec]

    metadat['es'] = es
    metadat['ns'] = ns

    try:
        trans = pyproj.Proj(init=cs2cs_args)
    except:
        trans = pyproj.Proj(cs2cs_args.lstrip(), inverse=True)

    lon, lat = trans(es, ns, inverse=True)
    metadat['lon'] = lon
    metadat['lat'] = lat

    metadat['heading'] = humutils.get_bearing(calc_bearing, filt_bearing,
                                              metadat['lat'], metadat['lon'],
                                              metadat['instr_heading'])  #cog

    dist_m = humutils.get_dist(lat, lon)
    metadat['dist_m'] = dist_m

    if calc_bearing == 1:  # recalculate speed, m/s
        ds = np.gradient(np.squeeze(metadat['time_s']))
        dx = np.gradient(np.squeeze(metadat['dist_m']))
        metadat['spd'] = dx[:nrec] / ds[:nrec]

    # theta at 3dB in the horizontal
    theta3dB = np.arcsin(c / (t * (f * 1000)))
    #resolution of 1 sidescan pixel to nadir
    ft = (np.pi / 2) * (1 / theta3dB)  #/ (f/455)

    dep_m = humutils.get_depth(metadat['dep_m'][:nrec])

    if old_pyread == 1:  #older pyread version

        # port scan
        try:
            if flip_lr == 0:
                data_port = data.getportscans().astype('int16')
            else:
                data_port = data.getstarscans().astype('int16')
        except:
            data_port = ''
            print("portside scan not available")

    if data_port != '':

        Zt, ind_port = makechunks_scan(chunkmode, chunkval, metadat, data_port,
                                       0)
        del data_port

        ## create memory mapped file for Z
        shape_port = io.set_mmap_data(sonpath, base, '_data_port.dat', 'int16',
                                      Zt)

        ##we are only going to access the portion of memory required
        port_fp = io.get_mmap_data(sonpath, base, '_data_port.dat', 'int16',
                                   shape_port)

    if old_pyread == 1:  #older pyread version
        # starboard scan
        try:
            if flip_lr == 0:
                data_star = data.getstarscans().astype('int16')
            else:
                data_star = data.getportscans().astype('int16')
        except:
            data_star = ''
            print("starboardside scan not available")

    if data_star != '':

        Zt, ind_star = makechunks_scan(chunkmode, chunkval, metadat, data_star,
                                       1)
        del data_star

        # create memory mapped file for Z
        shape_star = io.set_mmap_data(sonpath, base, '_data_star.dat', 'int16',
                                      Zt)

        star_fp = io.get_mmap_data(sonpath, base, '_data_star.dat', 'int16',
                                   shape_star)

    if 'star_fp' in locals() and 'port_fp' in locals():
        # check that port and starboard are same size
        # and trim if not
        if np.shape(star_fp) != np.shape(port_fp):
            print(
                "port and starboard scans are different sizes ... rectifying")
            if np.shape(port_fp[0])[1] > np.shape(star_fp[0])[1]:
                tmp = port_fp.copy()
                tmp2 = np.empty_like(star_fp)
                for k in range(len(tmp)):
                    tmp2[k] = tmp[k][:, :np.shape(star_fp[k])[1]]
                del tmp

                # create memory mapped file for Z
                shape_port = io.set_mmap_data(sonpath, base, '_data_port2.dat',
                                              'int16', tmp2)
                #shape_star = shape_port.copy()
                shape_star = tuple(np.asarray(shape_port).copy())

                ##we are only going to access the portion of memory required
                port_fp = io.get_mmap_data(sonpath, base, '_data_port2.dat',
                                           'int16', shape_port)

                ind_port = list(ind_port)
                ind_port[-1] = np.shape(star_fp[0])[1]
                ind_port = tuple(ind_port)

            elif np.shape(port_fp[0])[1] < np.shape(star_fp[0])[1]:
                tmp = star_fp.copy()
                tmp2 = np.empty_like(port_fp)
                for k in range(len(tmp)):
                    tmp2[k] = tmp[k][:, :np.shape(port_fp[k])[1]]
                del tmp

                # create memory mapped file for Z
                shape_port = io.set_mmap_data(sonpath, base, '_data_star2.dat',
                                              'int16', tmp2)
                #shape_star = shape_port.copy()
                shape_star = tuple(np.asarray(shape_port).copy())

                #we are only going to access the portion of memory required
                star_fp = io.get_mmap_data(sonpath, base, '_data_star2.dat',
                                           'int16', shape_star)

                ind_star = list(ind_star)
                ind_star[-1] = np.shape(port_fp[0])[1]
                ind_star = tuple(ind_star)

    if old_pyread == 1:  #older pyread version
        # low-freq. sonar
        try:
            data_dwnlow = data.getlowscans().astype('int16')
        except:
            data_dwnlow = ''
            print("low-freq. scan not available")

    if data_dwnlow != '':

        Zt, ind_low = makechunks_scan(chunkmode, chunkval, metadat,
                                      data_dwnlow, 2)
        del data_dwnlow

        # create memory mapped file for Z
        shape_low = io.set_mmap_data(sonpath, base, '_data_dwnlow.dat',
                                     'int16', Zt)

        ##we are only going to access the portion of memory required
        dwnlow_fp = io.get_mmap_data(sonpath, base, '_data_dwnlow.dat',
                                     'int16', shape_low)

    if old_pyread == 1:  #older pyread version
        # hi-freq. sonar
        try:
            data_dwnhi = data.gethiscans().astype('int16')
        except:
            data_dwnhi = ''
            print("high-freq. scan not available")

    if data_dwnhi != '':

        Zt, ind_hi = makechunks_scan(chunkmode, chunkval, metadat, data_dwnhi,
                                     3)
        del data_dwnhi

        # create memory mapped file for Z
        shape_hi = io.set_mmap_data(sonpath, base, '_data_dwnhi.dat', 'int16',
                                    Zt)

        dwnhi_fp = io.get_mmap_data(sonpath, base, '_data_dwnhi.dat', 'int16',
                                    shape_hi)

    if 'dwnhi_fp' in locals() and 'dwnlow_fp' in locals():
        # check that low and high are same size
        # and trim if not
        if (np.shape(dwnhi_fp) != np.shape(dwnlow_fp)) and (chunkmode != 4):
            print("dwnhi and dwnlow are different sizes ... rectifying")
            if np.shape(dwnhi_fp[0])[1] > np.shape(dwnlow_fp[0])[1]:
                tmp = dwnhi_fp.copy()
                tmp2 = np.empty_like(dwnlow_fp)
                for k in range(len(tmp)):
                    tmp2[k] = tmp[k][:, :np.shape(dwnlow_fp[k])[1]]
                del tmp

                # create memory mapped file for Z
                shape_low = io.set_mmap_data(sonpath, base, '_data_dwnhi2.dat',
                                             'int16', tmp2)
                #shape_hi = shape_low.copy()
                shape_hi = tuple(np.asarray(shape_low).copy())

                ##we are only going to access the portion of memory required
                dwnhi_fp = io.get_mmap_data(sonpath, base, '_data_dwnhi2.dat',
                                            'int16', shape_hi)

                ind_hi = list(ind_hi)
                ind_hi[-1] = np.shape(dwnlow_fp[0])[1]
                ind_hi = tuple(ind_hi)

            elif np.shape(dwnhi_fp[0])[1] < np.shape(dwnlow_fp[0])[1]:
                tmp = dwnlow_fp.copy()
                tmp2 = np.empty_like(dwnhi_fp)
                for k in range(len(tmp)):
                    tmp2[k] = tmp[k][:, :np.shape(dwnhi_fp[k])[1]]
                del tmp

                # create memory mapped file for Z
                shape_low = io.set_mmap_data(sonpath, base,
                                             '_data_dwnlow2.dat', 'int16',
                                             tmp2)
                #shape_hi = shape_low.copy()
                shape_hi = tuple(np.asarray(shape_low).copy())

                ##we are only going to access the portion of memory required
                dwnlow_fp = io.get_mmap_data(sonpath, base,
                                             '_data_dwnlow2.dat', 'int16',
                                             shape_low)

                ind_low = list(ind_low)
                ind_low[-1] = np.shape(dwnhi_fp[0])[1]
                ind_low = tuple(ind_low)

    if old_pyread == 1:  #older pyread version
        del data

    if ('shape_port' in locals()) and (chunkmode != 4):
        metadat['shape_port'] = shape_port
        nrec = metadat['shape_port'][0] * metadat['shape_port'][2]
    elif ('shape_port' in locals()) and (chunkmode == 4):
        metadat['shape_port'] = shape_port
        nrec = metadat['shape_port'][1]
    else:
        metadat['shape_port'] = ''

    if ('shape_star' in locals()) and (chunkmode != 4):
        metadat['shape_star'] = shape_star
        nrec = metadat['shape_star'][0] * metadat['shape_star'][2]
    elif ('shape_star' in locals()) and (chunkmode == 4):
        metadat['shape_star'] = shape_star
        nrec = metadat['shape_star'][1]
    else:
        metadat['shape_star'] = ''

    if ('shape_hi' in locals()) and (chunkmode != 4):
        metadat['shape_hi'] = shape_hi
        #nrec = metadat['shape_hi'][0] * metadat['shape_hi'][2] * 2
    elif ('shape_hi' in locals()) and (chunkmode == 4):
        metadat['shape_hi'] = shape_hi
    else:
        metadat['shape_hi'] = ''

    if ('shape_low' in locals()) and (chunkmode != 4):
        metadat['shape_low'] = shape_low
        #nrec = metadat['shape_low'][0] * metadat['shape_low'][2] * 2
    elif ('shape_low' in locals()) and (chunkmode == 4):
        metadat['shape_low'] = shape_low
    else:
        metadat['shape_low'] = ''

    #make kml boat trackline
    humutils.make_trackline(lon, lat, sonpath, base)

    if 'port_fp' in locals() and 'star_fp' in locals():

        #if not os.path.isfile(os.path.normpath(os.path.join(sonpath,base+'meta.mat'))):
        if 2 > 1:  # always true: the file-existence check above is disabled
            if bedpick == 1:  # auto

                x, bed = humutils.auto_bedpick(ft, dep_m, chunkmode, port_fp,
                                               c)

                if len(dist_m) < len(bed):
                    dist_m = np.append(
                        dist_m, dist_m[-1] * np.ones(len(bed) - len(dist_m)))

                if doplot == 1:
                    if chunkmode != 4:
                        for k in range(len(star_fp)):
                            plot_2bedpicks(
                                port_fp[k], star_fp[k],
                                bed[ind_port[-1] * k:ind_port[-1] * (k + 1)],
                                dist_m[ind_port[-1] * k:ind_port[-1] *
                                       (k + 1)],
                                x[ind_port[-1] * k:ind_port[-1] * (k + 1)], ft,
                                shape_port, sonpath, k, chunkmode)
                    else:
                        plot_2bedpicks(port_fp, star_fp, bed, dist_m, x, ft,
                                       shape_port, sonpath, 0, chunkmode)

                # 'real' bed is estimated to be the minimum of the two
                bed = np.min(np.vstack((bed[:nrec], np.squeeze(x[:nrec]))),
                             axis=0)
                bed = humutils.runningMeanFast(bed, 3)

            elif bedpick > 1:  # user prompt

                x, bed = humutils.auto_bedpick(ft, dep_m, chunkmode, port_fp,
                                               c)

                if len(dist_m) < len(bed):
                    dist_m = np.append(
                        dist_m, dist_m[-1] * np.ones(len(bed) - len(dist_m)))

                # 'real' bed is estimated to be the minimum of the two
                bed = np.min(np.vstack((bed[:nrec], np.squeeze(x[:nrec]))),
                             axis=0)
                bed = humutils.runningMeanFast(bed, 3)

                # manually intervene
                fig = plt.figure()
                ax = plt.gca()
                if chunkmode != 4:
                    im = ax.imshow(np.hstack(port_fp),
                                   cmap='gray',
                                   origin='upper')
                else:
                    im = ax.imshow(port_fp, cmap='gray', origin='upper')
                plt.plot(bed, 'r')
                plt.axis('normal')
                plt.axis('tight')

                pts1 = plt.ginput(
                    n=300,
                    timeout=30)  # wait for up to 300 clicks or 30 seconds
                x1 = [p[0] for p in pts1]  # x coordinates of the clicks
                y1 = [p[1] for p in pts1]  # y coordinates of the clicks
                plt.close()
                del fig

                if x1 != []:  # if x1 is not empty
                    tree = KDTree(list(zip(np.arange(1, len(bed)), bed)))
                    try:
                        dist, inds = tree.query(list(zip(x1, y1)),
                                                k=100,
                                                eps=5,
                                                n_jobs=-1)
                    except:
                        dist, inds = tree.query(list(zip(x1, y1)), k=100, eps=5)

                    b = np.interp(inds, x1, y1)
                    bed2 = bed.copy()
                    bed2[inds] = b
                    bed = bed2

                if doplot == 1:
                    if chunkmode != 4:
                        for k in range(len(star_fp)):
                            plot_2bedpicks(
                                port_fp[k], star_fp[k],
                                bed[ind_port[-1] * k:ind_port[-1] * (k + 1)],
                                dist_m[ind_port[-1] * k:ind_port[-1] *
                                       (k + 1)],
                                x[ind_port[-1] * k:ind_port[-1] * (k + 1)], ft,
                                shape_port, sonpath, k, chunkmode)
                    else:
                        plot_2bedpicks(port_fp, star_fp, bed, dist_m, x, ft,
                                       shape_port, sonpath, 0, chunkmode)

            else:  #manual

                beds = []

                if chunkmode != 4:
                    for k in range(len(port_fp)):
                        raw_input(
                            "Bed picking " + str(k + 1) + " of " +
                            str(len(port_fp)) +
                            ", are you ready? 30 seconds. Press Enter to continue..."
                        )
                        bed = {}
                        fig = plt.figure()
                        ax = plt.gca()
                        im = ax.imshow(port_fp[k], cmap='gray', origin='upper')
                        pts1 = plt.ginput(
                            n=300, timeout=30
                        )  # wait for up to 300 clicks or 30 seconds
                        x1 = [p[0] for p in pts1]  # x coordinates of the clicks
                        y1 = [p[1] for p in pts1]  # y coordinates of the clicks
                        bed = np.interp(np.r_[:ind_port[-1]], x1, y1)
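                        # np.interp above assumes the click x-coordinates are
                        # ascending, i.e. picks are made left to right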
                        plt.close()
                        del fig
                        beds.append(bed)
                        extent = np.shape(port_fp[k])[0]
                    bed = np.asarray(np.hstack(beds), 'float')
                else:
                    raw_input(
                        "Bed picking - are you ready? 30 seconds. Press Enter to continue..."
                    )
                    bed = {}
                    fig = plt.figure()
                    ax = plt.gca()
                    im = ax.imshow(port_fp, cmap='gray', origin='upper')
                    pts1 = plt.ginput(
                        n=300, timeout=30
                    )  # wait for up to 300 clicks or 30 seconds
                    x1 = [p[0] for p in pts1]  # x coordinates of the clicks
                    y1 = [p[1] for p in pts1]  # y coordinates of the clicks
                    bed = np.interp(np.r_[:ind_port[-1]], x1, y1)
                    plt.close()
                    del fig
                    beds.append(bed)
                    extent = np.shape(port_fp)[1]
                    bed = np.asarray(np.hstack(beds), 'float')

            # now revise the depth in metres
            dep_m = (1 / ft) * bed

            if doplot == 1:
                if chunkmode != 4:
                    for k in range(len(star_fp)):
                        plot_bedpick(
                            port_fp[k], star_fp[k], (1 / ft) *
                            bed[ind_port[-1] * k:ind_port[-1] * (k + 1)],
                            dist_m[ind_port[-1] * k:ind_port[-1] * (k + 1)],
                            ft, shape_port, sonpath, k, chunkmode)
                else:
                    plot_bedpick(port_fp, star_fp, (1 / ft) * bed, dist_m, ft,
                                 shape_port, sonpath, 0, chunkmode)

            metadat['bed'] = bed[:nrec]

    else:
        metadat['bed'] = dep_m[:nrec] * ft

    metadat['heading'] = metadat['heading'][:nrec]
    metadat['lon'] = lon[:nrec]
    metadat['lat'] = lat[:nrec]
    metadat['dist_m'] = dist_m[:nrec]
    metadat['dep_m'] = dep_m[:nrec]
    metadat['pix_m'] = 1 / ft
    metadat['bed'] = metadat['bed'][:nrec]
    metadat['c'] = c
    metadat['t'] = t
    if model == 2:
        metadat['f'] = f * 2
    else:
        metadat['f'] = f

    metadat['spd'] = metadat['spd'][:nrec]
    metadat['time_s'] = metadat['time_s'][:nrec]
    metadat['e'] = metadat['e'][:nrec]
    metadat['n'] = metadat['n'][:nrec]
    metadat['es'] = metadat['es'][:nrec]
    metadat['ns'] = metadat['ns'][:nrec]
    try:
        metadat['caltime'] = metadat['caltime'][:nrec]
    except:
        pass  # leave caltime as-is if it cannot be trimmed

    savemat(os.path.normpath(os.path.join(sonpath, base + 'meta.mat')),
            metadat,
            oned_as='row')

    f = open(os.path.normpath(os.path.join(sonpath, base + 'rawdat.csv')),
             'wt')
    writer = csv.writer(f)
    writer.writerow(
        ('longitude', 'latitude', 'easting', 'northing', 'depth (m)',
         'distance (m)', 'instr. heading (deg)', 'heading (deg.)'))
    for i in range(0, nrec):
        writer.writerow(
            (float(lon[i]), float(lat[i]), float(es[i]), float(ns[i]),
             float(dep_m[i]), float(dist_m[i]),
             float(metadat['instr_heading'][i]), float(metadat['heading'][i])))
    f.close()

    del lat, lon, dep_m  #, dist_m

    if doplot == 1:

        plot_pos(sonpath, metadat, es, ns)

        if 'dwnlow_fp' in locals():

            plot_dwnlow(dwnlow_fp, chunkmode, sonpath)

        if 'dwnhi_fp' in locals():

            plot_dwnhi(dwnhi_fp, chunkmode, sonpath)

    if os.name == 'posix':  # true if linux/mac
        elapsed = (time.time() - start)
    else:  # windows
        elapsed = (time.clock() - start)
    print("Processing took " + str(elapsed) + "seconds to analyse")

    print("Done!")
    print("===================================================")
Esempio n. 48
0
            r_file.close()
            print ("\ndone")
            break


def main():
    global a, sgkdir, pr_s, result_queue, wr_s
    pr_p = threading.Thread(target=pr)
    wr_p = threading.Thread(target=wr)
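    # pr and wr are presumably a progress-printer thread and a writer thread
    # that drain result_queue while the pr_s/wr_s flags stay True; clearing
    # the flags below is what signals them to exit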
    pr_s = True
    wr_s = True
    pr_p.start()
    wr_p.start()
    for target_file in get_file(sgkdir):
        b = config_handler()
        a = 0
        print "Start handle %s" % target_file
        for line in open(target_file, "rb"):
            a += 1
            line_temp = line.strip()
            if line_temp != "":
                line_temp = b.handle(line_temp)
                result_queue.append(line_temp)
        print "\nCompleted: %s" % target_file
    wr_s = False
    pr_s = False
    exit(0)

if __name__ == "__main__":
    start = time.clock()
    main()
Esempio n. 49
0
            all_layouts.append([onto_basis2, onto_basis22, pur_p])

            print('End   purpose:', pur_p, '--------------------------------------------------------------')
        #
        print('Start process page:---')
        process_termo(termo, username, pur_p, start_c, '', all_layouts)
        #=========================
        if len(entry_doc) > 0: return


start_c = 0

import time

startTT = time.clock()


def remp(s):
    r = ''
    i = len(s) - 1
    pos = 0
    while i >= 0:
        if s[i] == '\\' or s[i] == '/':
            pos = i
            break
        r = s[i] + r
        i -= 1
    return s[:pos + 1]

Esempio n. 50
0
        'b0': b0,
        'N': N,
        'nw': 0.2,  # net worth of the central bank
        'pibar': 1.6,  # constraint on central bank losses
        'epeg': epeg,
        'ebar': ebar,
        'gamma': gamma,
        'lam': lam,
        'g_val': g_val,
        'c_states': (0, 1),  # (low perm, low transitory, high transitory)
        'c_transition': interest_transition,
        'c_istar_values': (ilow, ihigh)
    }

    sns.set_style("whitegrid")  # optional seaborn settings
    t1 = time.clock()
    model = gs.Model(**par)
    print("time : {} seconds".format(time.clock() - t1))
    gs.do_plots(model, save_to_file=False, file_name='benchmark_figure.pdf')

    robustness = {}
    labels = {}
    markers = ['^--', 'o-', 's--']

    robustness['ebar'] = []
    labels['ebar'] = []
    for ebar_val, m in zip([0.65, 0.7], markers):
        par2 = par.copy()
        par2['ebar'] = ebar_val
        robustness['ebar'].append(gs.Model(**par2))
        lab = {}
Esempio n. 51
0
    def kk(self, energy=None, mu=None, z=None, edge='K', how='scalar', mback_kws=None):
        """
        Convert mu(E) data into f'(E) and f"(E).  f"(E) is made by
        matching mu(E) to the tabulated values of the imaginary part
        of the scattering factor (Cromer-Liberman), f'(E) is then
        obtained by performing a differential Kramers-Kronig transform
        on the matched f"(E).

          Attributes
            energy:     energy array
            mu:         array with mu(E) data
            z:          Z number of absorber
            edge:       absorption edge, usually 'K' or 'L3'
            mback_kws:  arguments for the mback algorithm

          Returns
            self.f1, self.f2:  CL values over on the input energy grid
            self.fp, self.fpp: matched and KK transformed data on the input energy grid

        References:
          * Cromer-Liberman: http://dx.doi.org/10.1063/1.1674266
          * KK computation: Ohta and Ishida, Applied Spectroscopy 42:6 (1988) 952-957
          * diffKK implementation: http://dx.doi.org/10.1103/PhysRevB.58.11215
          * MBACK (Weng, Waldo, Penner-Hahn): http://dx.doi.org/10.1086/303711
          * Lee and Xiang: http://dx.doi.org/10.1088/0004-637X/702/2/970

        """

        if type(energy).__name__ == 'ndarray': self.energy = energy
        if type(mu).__name__     == 'ndarray': self.mu     = mu
        if z    is not None: self.z    = z
        if edge is not None: self.edge = edge
        if mback_kws is not None: self.mback_kws = mback_kws

        if self.z is None:
            raise Exception("Z for absorber not provided for diffKK")
        if self.edge is None:
            raise Exception("absorption edge not provided for diffKK")

        mb_kws = dict(order=3, z=self.z, edge=self.edge, e0=None, emin=None, emax=None,
                      whiteline=False, leexiang=False, tables='chantler',
                      fit_erfc=False, return_f1=True)
        if self.mback_kws is not None:
            mb_kws.update(self.mback_kws)

        start = time.clock()

        mback(self.energy, self.mu, group=self, _larch=self._larch, **mb_kws)

        ## interpolate matched data onto an even grid with an even number of elements (about 1 eV)
        npts = int(self.energy[-1] - self.energy[0]) + (int(self.energy[-1] - self.energy[0])%2)
        self.grid = np.linspace(self.energy[0], self.energy[-1], npts)
        fpp = interp(self.energy, self.f2-self.fpp, self.grid, fill_value=0.0)

        ## do difference KK
        if how.startswith('sca'):
            fp = kkmclr_sca(self.grid, fpp)
        else:
            fp = kkmclr(self.grid, fpp)

        ## interpolate back to original grid and add diffKK result to f1 to make fp array
        self.fp = self.f1 + interp(self.grid, fp, self.energy, fill_value=0.0)

        ## clean up group
        #for att in ('normalization_function', 'weight', 'grid'):
        #    if hasattr(self, att): delattr(self, att)
        finish = time.clock()
        self.time_elapsed = float(finish-start)
Esempio n. 52
0
        ac = karatsuba(int1_split[0], int2_split[0])
        bd = karatsuba(int1_split[1], int2_split[1])
        ab = np.sum(int1_split[0:2])
        cd = np.sum(int2_split[0:2])
        apb_dpc = karatsuba(ab, cd) - ac - bd
        return np.power(power, 2) * ac + power * apb_dpc + bd
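
# A worked instance of the recursion above for karatsuba(1234, 5678),
# assuming the split uses power = 10**2, so a, b = 12, 34 and c, d = 56, 78:
#   ac = 12 * 56 = 672
#   bd = 34 * 78 = 2652
#   (a+b)*(c+d) = 46 * 134 = 6164, hence apb_dpc = 6164 - 672 - 2652 = 2840
#   result = 672*10**4 + 2840*10**2 + 2652 = 7006652 = 1234 * 5678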


### Examples
karatsuba(1234, 5678)
karatsuba(12341234, 234)
######################################################################################
## Stress tests
######################################################################################
## Easy
time_start = time.clock()
karatsuba(1234, 5678)
time_elapsed = (time.clock() - time_start)

time_start = time.clock()
1234 * 5678
time_elapsed = (time.clock() - time_start)

## Medium
time_start = time.clock()
karatsuba(12341234123412341234, 1241241234123412341234)
time_elapsed = (time.clock() - time_start)

time_start = time.clock()
12341234123412341234 * 1241241234123412341234
time_elapsed = (time.clock() - time_start)
Esempio n. 53
0
    def parse_series(self, data, **kwargs):
        log.debug('Parsing series: `%s` [options: %s]', data, kwargs)
        guessit_options = self._guessit_options(kwargs)
        valid = True
        if kwargs.get('name'):
            expected_titles = [kwargs['name']]
            if kwargs.get('alternate_names'):
                expected_titles.extend(kwargs['alternate_names'])
            # apostrophe support
            expected_titles = [title.replace('\'', '(?:\'|\\\'|\\\\\'|-|)?') for title in expected_titles]
            guessit_options['expected_title'] = ['re:' + title for title in expected_titles]
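            # the 're:' prefix tells guessit to treat each expected title as a
            # regular expression rather than a literal string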
        if kwargs.get('id_regexps'):
            guessit_options['id_regexps'] = kwargs.get('id_regexps')
        start = time.clock()
        # If no series name is provided, we don't tell guessit what kind of match we are looking for
        # This prevents guessit from determining that too general of matches are series
        parse_type = 'episode' if kwargs.get('name') else None
        if parse_type:
            guessit_options['type'] = parse_type

        # NOTE: Guessit expects str on PY3 and unicode on PY2 hence the use of future.utils.native
        try:
            guess_result = guessit_api.guessit(native(data), options=guessit_options)
        except GuessitException:
            log.warning('Parsing %s with guessit failed. Most likely a unicode error.', data)
            guess_result = {}

        if guess_result.get('type') != 'episode':
            valid = False

        name = kwargs.get('name')
        country = guess_result.get('country')
        if not name:
            name = guess_result.get('title')
            if country and hasattr(country, 'alpha2'):
                name += ' (%s)' % country.alpha2
        elif guess_result.matches['title']:
            # Make sure the name match is up to FlexGet standards
            # Check there is no unmatched cruft before the matched name
            title_start = guess_result.matches['title'][0].start
            title_end = guess_result.matches['title'][0].end
            if title_start != 0:
                try:
                    pre_title = max((match[0].end for match in guess_result.matches.values() if
                                     match[0].end <= title_start))
                except ValueError:
                    pre_title = 0
                for char in reversed(data[pre_title:title_start]):
                    if char.isalnum() or char.isdigit():
                        return SeriesParseResult(data=data, valid=False)
                    if char.isspace() or char in '._':
                        continue
                    else:
                        break
            # Check the name doesn't end mid-word (guessit might put the border before or after the space after title)
            if data[title_end - 1].isalnum() and len(data) <= title_end or \
                    not self._is_valid_name(data, guessit_options=guessit_options):
                valid = False
            # If we are in exact mode, make sure there is nothing after the title
            if kwargs.get('strict_name'):
                post_title = sys.maxsize
                for match_type, matches in guess_result.matches.items():
                    if match_type in ['season', 'episode', 'date', 'regexpId']:
                        if matches[0].start < title_end:
                            continue
                        post_title = min(post_title, matches[0].start)
                        if matches[0].parent:
                            post_title = min(post_title, matches[0].parent.start)
                for char in data[title_end:post_title]:
                    if char.isalnum() or char.isdigit():
                        valid = False
        else:
            valid = False
        season = guess_result.get('season')
        episode = guess_result.get('episode')
        if episode is None and 'part' in guess_result:
            episode = guess_result['part']
        if isinstance(episode, list):
            # guessit >=2.1.4 returns a list for multi-packs, but we just want the first one and the number of eps
            episode = episode[0]
        date = guess_result.get('date')
        quality = self._quality(guess_result)
        proper_count = self._proper_count(guess_result)
        group = guess_result.get('release_group')
        # Validate group with from_group
        if not self._is_valid_groups(group, guessit_options.get('allow_groups', [])):
            valid = False
        # Validate country, TODO: LEGACY
        if country and name.endswith(')'):
            p_start = name.rfind('(')
            if p_start != -1:
                parenthetical = re.escape(name[p_start + 1:-1])
                if parenthetical and parenthetical.lower() != str(country).lower():
                    valid = False
        special = guess_result.get('episode_details', '').lower() == 'special'
        if 'episode' not in guess_result.values_list:
            episodes = len(guess_result.values_list.get('part', []))
        else:
            episodes = len(guess_result.values_list['episode'])
        if episodes > 3:
            valid = False
        identified_by = kwargs.get('identified_by', 'auto')
        identifier_type, identifier = None, None
        if identified_by in ['date', 'auto']:
            if date:
                identifier_type = 'date'
                identifier = date
        if not identifier_type and identified_by in ['ep', 'auto']:
            if episode is not None:
                if season is None and kwargs.get('allow_seasonless', True):
                    if 'part' in guess_result:
                        season = 1
                    else:
                        episode_raw = guess_result.matches['episode'][0].initiator.raw
                        if episode_raw and any(c.isalpha() and c.lower() != 'v' for c in episode_raw):
                            season = 1
                if season is not None:
                    identifier_type = 'ep'
                    identifier = (season, episode)

        if not identifier_type and identified_by in ['id', 'auto']:
            if guess_result.matches['regexpId']:
                identifier_type = 'id'
                identifier = '-'.join(match.value for match in guess_result.matches['regexpId'])
        if not identifier_type and identified_by in ['sequence', 'auto']:
            if episode is not None:
                identifier_type = 'sequence'
                identifier = episode
        if (not identifier_type or guessit_options.get('prefer_specials')) and (special or
                                                                        guessit_options.get('assume_special')):
            identifier_type = 'special'
            identifier = guess_result.get('episode_title', 'special')
        if not identifier_type:
            valid = False
        # TODO: Legacy - Complete == invalid
        if 'complete' in normalize_component(guess_result.get('other')):
            valid = False

        parsed = SeriesParseResult(
            data=data,
            name=name,
            episodes=episodes,
            identified_by=identified_by,
            id=identifier,
            id_type=identifier_type,
            quality=quality,
            proper_count=proper_count,
            special=special,
            group=group,
            valid=valid
        )

        end = time.clock()
        log.debug('Parsing result: %s (in %s ms)', parsed, (end - start) * 1000)
        return parsed
Esempio n. 54
0
    quant_engine = create_engine(
        'mysql+pymysql://{user}:{password}@{host}/{db}?charset={charset}'.format(**ConfigQuant))

    # create source engine
    spider_engine = create_engine(
        'mysql+pymysql://{user}:{password}@{host}/{db}?charset={charset}'.format(**ConfigSpider2))

    chunk_size = 10

    # updateFull(quant_engine, spider_engine, chunk_size)
    # supplementForAdjQuote(quant_engine)
    updateIncrm(quant_engine, spider_engine)


if __name__ == '__main__':
    # create target engine
    quant_engine = create_engine(
        'mysql+pymysql://{user}:{password}@{host}/{db}?charset={charset}'.format(**ConfigQuant))

    # create source engine
    spider_engine = create_engine(
        'mysql+pymysql://{user}:{password}@{host}/{db}?charset={charset}'.format(**ConfigSpider2))

    chunk_size = 10
    t_start = time.clock()

    # updateFull(quant_engine, spider_engine, chunk_size)

    # updateIncrm(quant_engine, spider_engine)

    print(time.clock() - t_start)
Esempio n. 55
0
            num_rep * 1.0 / size * 1.0
        ) / 3600.0, 'time grown:', par_vals['dt'] * par_vals['nstep'] * N

    dx = num_rep / size
    rem = np.mod(num_rep, size)
    if rank >= size - rem:  # this makes sure that it distributes the remainder as equally as possible.
        start = dx * (size - rem) + (dx + 1) * (rank + rem - size)
        stop = start + dx + 1
    else:
        start = dx * rank
        stop = start + dx
    if rank == size - 1:
        stop = num_rep
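    # Worked example of the split above: num_rep=10, size=4 gives dx=2, rem=2,
    # so ranks 0 and 1 take [0,2) and [2,4) while ranks 2 and 3 (>= size-rem)
    # take the larger slices [4,7) and [7,10) -- the remainder is spread evenly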
    print('I am {0}, my start is {1}, stop {2}'.format(str(rank), str(start),
                                                       str(stop)))
    tic = time.clock()
    for i0 in xrange(start, stop):  # variable number of repeats for each core
        c = g.discr_gen(par_vals)
        temp = [obj for obj in c if obj.exists]
        for i1 in range(X[1]):
            # tic = time.clock()
            temp0, temp1 = g.starting_popn_seeded_1(temp,
                                                    par_vals,
                                                    discr_time=True)
            if np.mod(i1, save_freq) == 0:
                np.save(
                    '../../Documents/data_storage/March17_dilution_symmetric_3_savedpop_model_{0}_rep_{1}_it_{2}_asymm'
                    .format(str(par_vals['modeltype']), str(i0),
                            str(i1)), temp1)
            new_c = g.discr_time_1(par_vals, temp0)
            mothers = [obj.mother for obj in new_c[0] if obj.exists]
Esempio n. 56
0
    B = np.eye( n, n )
    Y = np.eye( n, 3 )


#    X = sp.rand( n, 3 )
    xfile = {100 : 'X.txt', 1000 : 'X2.txt', 10000 : 'X3.txt'}
    X = np.fromfile( xfile[n], dtype = np.float64, sep = ' ' )
    X.shape = (n, 3)

    ivals = [1./vals[0]]
    def precond( x ):
        invA = spdiags( ivals, 0, n, n )
        y = invA  * x
        if issparse( y ):
            y = y.toarray()

        return as2d( y )

    precond = spdiags( ivals, 0, n, n )
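    # note: this assignment shadows the precond() function defined just above,
    # so lobpcg receives the sparse diagonal matrix as M -- a Jacobi-style
    # preconditioner built from ivals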
#    precond = None
    tt = time.clock()
#    B = None
    eigs, vecs = lobpcg( X, A, B, blockVectorY = Y,
                         M = precond,
                         residualTolerance = 1e-4, maxIterations = 40,
                         largest = False, verbosityLevel = 1 )
    print('solution time:', time.clock() - tt)

    print(vecs)
    print(eigs)
Esempio n. 57
0
def equil_solver(w):

    '''
    This is a function that calculates the optimal policy
    for k' and the stationary distribution for a given wage
    and is used to get the equilibrium solution
    '''

    # operating profits, op
    sizez = len(z_grid)
    op = np.zeros((sizez, sizek))
    for i in range(sizez):
        for j in range(sizek):
            op[i,j] = ((1 - a_l) * ((a_l / w) ** (a_l / (1 - a_l))) *
          ((kgrid[j] ** a_k) ** (1 / (1 - a_l))) * (z_grid[i] ** (1/(1 - a_l))))

    # firm cash flow, e    
    e = np.zeros((sizez, sizek, sizek))
    for i in range(sizez):
        for j in range(sizek):
            for k in range(sizek):
                e[i, j, k] = (op[i,j] - kgrid[k] + ((1 - delta) * kgrid[j]) -
                           ((psi / 2) * ((kgrid[k] - ((1 - delta) * kgrid[j])) ** 2)
                            / kgrid[j]))

    # value function iteration
    VFtol = 1e-6
    VFdist = 7.0
    VFmaxiter = 3000
    V = np.zeros((sizez, sizek))
    Vmat = np.zeros((sizez, sizek, sizek))  
    Vstore = np.zeros((sizez, sizek, VFmaxiter)) 
    VFiter = 1
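    # Each pass applies the Bellman operator: VFI_loop presumably fills
    # Vmat[z, k, k'] = e[z, k, k'] + betafirm * E[V(z', k') | z] using the
    # transition matrix pi; the max over axis 2 then yields the updated V,
    # and iteration stops once successive V's differ by less than VFtol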

    start_time = time.clock()
    while VFdist > VFtol and VFiter < VFmaxiter:
        TV = V    
        Vmat = VFI_loop(V, e, betafirm, sizez, sizek, Vmat, pi)
        Vstore[:, :, VFiter] = V.reshape(sizez, sizek,) 
        V = Vmat.max(axis=2) 
        PF = np.argmax(Vmat, axis=2) 
        Vstore[:, :, VFiter] = V  # store the updated value function for this iteration
        VFdist = (np.absolute(V - TV)).max()
        VFiter += 1
    
    VFI_time = time.clock() - start_time
    if VFiter < VFmaxiter:
        print('Value function converged after this many iterations:', VFiter)
    else:
        print('Value function did not converge')
    print('VFI took ', VFI_time, ' seconds to solve')  
    VF = V

    # optimal capital stock k'
    optK = kgrid[PF]     

    # stationary distribution
    Gamma = np.ones((sizez, sizek)) * (1 / (sizek * sizez))
    SDtol = 1e-12
    SDdist = 7
    SDiter = 0
    SDmaxiter = 1000
    while SDdist > SDtol and SDmaxiter > SDiter:
        HGamma = SD_loop(PF, pi, Gamma, sizez, sizek)
        SDdist = (np.absolute(HGamma - Gamma)).max()
        Gamma = HGamma
        SDiter += 1
    if SDiter < SDmaxiter:
        print('Stationary distribution converged after this many iterations: ',
              SDiter)
    else:
        print('Stationary distribution did not converge')
        
    return optK, Gamma
Esempio n. 58
0
"""
该脚本演示了在MXNet中使用ART的简单示例。
该示例在MNIST数据集上训练一个小模型,并使用快速梯度符号方法创建对抗性示例。
这里我们使用ART分类器来训练模型,也可以为ART分类器提供一个预先训练好的模型。
选择这些参数是为了减少脚本的计算需求,而不是为了提高准确性。"""
import mxnet
from mxnet.gluon.nn import Conv2D, MaxPool2D, Flatten, Dense
import numpy as np

from art.attacks import FastGradientMethod
from art.classifiers import MXClassifier
from art.utils import load_mnist
import time

start = time.clock()

# Step 1: Load the MNIST dataset: 70,000 28x28 grayscale images of handwritten digits (6:1 train/test split)

(x_train, y_train), (x_test,
                     y_test), min_pixel_value, max_pixel_value = load_mnist()

# Step 1a: Swap the axes into MXNet's NCHW format

x_train = np.swapaxes(x_train, 1, 3)
x_test = np.swapaxes(x_test, 1, 3)

# Step 2: Create the model

model = mxnet.gluon.nn.Sequential()
with model.name_scope():
    model.add(Conv2D(channels=4, kernel_size=5, activation='relu'))
Esempio n. 59
0
def main():
    # Parsing options
    parser = argparse.ArgumentParser(description='Hgt candidate evaluation.')
    parser.add_argument('bamfile', help="BAM alignments file for both acceptor and donor, sorted via qname")
    parser.add_argument('candfile', help="File with inter-chr translocation breakpoints")
    parser.add_argument('acceptor', help="Name of acceptor reference (gi as in fasta)")
    parser.add_argument('donor', help="Name of donor reference (gi as in fasta)")

    # optional arguments (has default values)
    parser.add_argument('--phagefile', default=None, help="SAM alignments file for phage database")
    parser.add_argument('-o','--outfile', default="hgt_eval.vcf", help="Output VCF file with filtered, evaluated candidates")
    parser.add_argument('-min', dest='min_size', default=100, type=int, help="minimal HGT size, default 100")
    parser.add_argument('-max', dest='max_size', default=50000, type=int, help="maximal HGT size, default 50000")
    parser.add_argument('--tolerance', default=20, type=int, help="Position range to remove duplicates, default 20")
    parser.add_argument('--pair-support', dest='paired_reads', default=True, action='store_false', help="Turn on/off paired reads support, default TRUE")
    parser.add_argument('--num-boot-regions', dest='boot_num', default=100, type=int, help="Number of sampled regions in acceptor for bootstrapping expected number of pairs and coverage for filtering, default 100")
    parser.add_argument('--boot-sens', dest='boot_sens', default=95, type=int, choices=range(0, 100), help="Percent of cases for the candidate region to exceed values of sampled regions, default 95 percent")

    options = parser.parse_args()

    # Extracting parameters
    hgt_minLength = options.min_size
    hgt_maxLength = options.max_size
    bf = pysam.Samfile(options.bamfile,'rb')
    if (options.phagefile is not None):
        psf = pysam.Samfile(options.phagefile,'r')
    acc_tid = bf.gettid(options.acceptor)
    don_tid = bf.gettid(options.donor)
    options.out_all = options.outfile[:-3] + 'tsv'  # swap the .vcf suffix for .tsv

    print ('acc_tid', acc_tid, options.acceptor)
    print ('don_tid', don_tid, options.donor)
    print ('hgt_minLength', hgt_minLength)
    print ('hgt_maxLength', hgt_maxLength)
    print ('paired_reads', options.paired_reads)
    print ('num_boot_regions', options.boot_num)
    print ('boot_sensitivity', options.boot_sens)

    # Getting acceptor genome length
    options.acc_length = bf.lengths[acc_tid]
    options.don_length = bf.lengths[don_tid]

    # Extracting coverages
    covs = [np.zeros((l,)) for l in bf.lengths]
    num_matches = 0 # number of read matches, unused by now
    for read in bf:
        if not read.is_unmapped:
            r_start = read.pos # start position
            r_end = read.pos + read.qlen # end
            covs[read.tid][r_start:r_end] += 1
            num_matches += 1
    bf.reset()

    acc_cov = covs[acc_tid]
    don_cov = covs[don_tid]

    # Extracting phage read IDs, if any
    phage_list = []
    if (options.phagefile != None):
        phage_list = [qname for qname in read_phage_pairs(psf)]

    # Extracting candidate positions from candidate file
    cand_list = [cand for cand in get_candidates(options.candfile, options.acceptor, options.donor)]
    
    #indices = range(len(acc_pos))
    #indices.sort(key=acc_pos.__getitem__)
    cand_list.sort(key=operator.attrgetter('acc_pos'))

    tstart = time.clock()

    # Pair up single boundary candidates to HGT candidates conforming to size constraints
    hgt_list = []
    for ps in range(0, len(cand_list)):
        cand_start = cand_list[ps]
        acc_start = cand_start.acc_pos
        don_start_temp = cand_start.don_pos
        for pe in range(ps, len(cand_list)):
            cand_end = cand_list[pe]
            acc_end = cand_end.acc_pos + 1
            don_end_temp = cand_end.don_pos + 1
            # Exchange don_start/end so that don_start < don_end (we don't care about the orientation for now)
            if (don_end_temp < don_start_temp):
                don_start = don_end_temp
                don_end = don_start_temp
            else:
                don_start = don_start_temp
                don_end = don_end_temp
            # Avoid similar hgt entries within tolerance
            if (duplicate_entry(hgt_list, options.tolerance, acc_start, acc_end, don_start, don_end, cand_end.split_support)):
                continue
            # Stop once the acceptor HGT region exceeds the maximum length
            if (abs(acc_end - acc_start) > hgt_maxLength):
                break
            if (abs(don_end - don_start) > hgt_minLength and abs(don_end - don_start) < hgt_maxLength):
                split_support = (cand_start.split_support + cand_end.split_support)
                hgt_list.append(HGT(acc_start, acc_end, cand_start.acc_base, cand_end.acc_base, don_start, don_end, split_support, options, acc_cov, don_cov))

    print (time.clock() - tstart)

    # Get (all/primary) read pairs which map to both donor and acceptor and add them to hgt attributes
    if (options.paired_reads): 
        #for aln1, aln2 in read_pairs(bf):
        for aln1, aln2 in read_primary_pairs(bf):
            if aln1.is_unmapped:
                continue
            if aln2.is_unmapped:
                continue
            if (aln1.tid == don_tid) and (aln2.tid == don_tid):
                for hgt in hgt_list:
                    hgt.add_don_pair_if_matching(aln1, aln2, phage_list)
            if (aln1.tid != acc_tid) or (aln2.tid != don_tid):
                aln1, aln2 = aln2, aln1 # Ensures that aln1 belongs to acceptor and aln2 to donor
            if (aln1.tid != acc_tid) or (aln2.tid != don_tid):
                continue
            for hgt in hgt_list:
                hgt.add_pair_if_matching(aln1, aln2, phage_list)
    print ("total sample time ", time.clock() - tstart)
    
    # Output results       
    print ("writing output")
    with open(options.outfile, 'w') as vcf_out, open(options.out_all, 'w') as output:
        # VCF header
        writevcf = csv.writer(vcf_out, delimiter = '\t')
        writevcf.writerow(['##fileformat=VCFv4.2'])
        writevcf.writerow(['##source=DAISY'])
        writevcf.writerow(['##INFO=<ID=EVENT,Number=1,Type=String,Description=\"Event identifier for breakends.\">'])
        writevcf.writerow(['##contig=<ID='+options.acceptor+'>'])
        writevcf.writerow(['##contig=<ID='+options.donor+'>'])
        writevcf.writerow(['#CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO', 'FORMAT'])

        # TSV header
        writetsv = csv.writer(output, delimiter = '\t')
        writetsv.writerow(['#AS: Acceptor start position'])
        writetsv.writerow(['#AE: Acceptor end position'])
        writetsv.writerow(['#DS: Donor start position'])
        writetsv.writerow(['#DE: Donor end position'])
        writetsv.writerow(['#MC: Mean coverage in region'])
        writetsv.writerow(['#Split: Total number split-reads per region (including duplicates!)'])
        writetsv.writerow(['#PS-S: Pairs spanning HGT boundaries'])
        writetsv.writerow(['#PS-W: Pairs within HGT boundaries'])
        writetsv.writerow(['#Phage: PS-S and PS-W reads mapping to phage database'])
        writetsv.writerow(['#BS:MC/PS-S/PS-W: Percent of bootstrapped random regions with MC/PS-S/PS-W smaller than candidate'])
        if (options.paired_reads):
            writetsv.writerow(['AS', 'AE', 'MC', 'BS:MC', 'DS', 'DE', 'MC', 'Split', 'PS-S', 'PS-W', 'Phage', 'BS:MC', 'BS:PS-S', 'BS:PS-W'])
        else:
            writetsv.writerow(['AS', 'AE', 'MC', 'BS:MC', 'DS', 'DE', 'MC', 'Split', 'BS:MC'])

        # write candidates
        hgt_vcf_counter = 1
        # Define sensitivity values for candidates to be reported in VCF
        sens = float(options.boot_sens)/float(100)
        sens_acc = 1.0 - sens
        thresh = sens * float(options.boot_num)
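        # e.g. with the defaults boot_sens=95 and boot_num=100:
        # sens = 0.95, sens_acc = 0.05 and thresh = 95.0, so a candidate must
        # beat at least 95 of the 100 bootstrapped regions to pass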
        for hgt in hgt_list:
            # evaluate bootstrap
            acc_cov_test = sum(1 for i in range(len(list(hgt.boot_acc_coverage_list))) if hgt.acc_mean > hgt.boot_acc_coverage_list[i])
            don_cov_test = sum(1 for i in range(len(list(hgt.boot_don_coverage_list))) if hgt.don_mean > hgt.boot_don_coverage_list[i])
            if (options.paired_reads):
                # write all candidates to tsv file
                cross_pair_test = sum(1 for i in range(len(list(hgt.boot_crossing_pairs_list))) if hgt.pair_support > hgt.boot_crossing_pairs_list[i])
                don_pair_test = sum(1 for i in range(len(list(hgt.boot_don_pairs_list))) if hgt.don_pair_support > hgt.boot_don_pairs_list[i])
                phage_support = 0
                if ((hgt.pair_support + hgt.don_pair_support) > 0):
                    phage_support = float(hgt.phage_hits)/float((hgt.pair_support + hgt.don_pair_support))
                writetsv.writerow([hgt.acc_start, hgt.acc_end, "%.2f" % hgt.acc_mean, acc_cov_test, hgt.don_start, hgt.don_end, "%.2f" % hgt.don_mean, hgt.split_support,  hgt.pair_support, hgt.don_pair_support, "%.4f" % phage_support, don_cov_test, cross_pair_test, don_pair_test])
            else:
                writetsv.writerow([hgt.acc_start, hgt.acc_end, "%.2f" % hgt.acc_mean, acc_cov_test, hgt.don_start, hgt.don_end, "%.2f" % hgt.don_mean, hgt.split_support, don_cov_test])

            # Write only candidates that passed the filter (boot_sens) to the VCF file
            if (options.boot_sens > 0):
                if (acc_cov_test < thresh) and (acc_cov_test > sens_acc * options.boot_num): continue
                if (options.paired_reads):
                    if (cross_pair_test < thresh) or (don_pair_test < thresh): continue
                if (don_cov_test < thresh): continue

            # write filtered candidates to VCF file
            writevcf.writerow([options.acceptor, hgt.acc_start, 'BND_'+str(hgt_vcf_counter)+'_1', hgt.acc_start_base, hgt.acc_start_base+'['+options.donor+':'+str(hgt.don_start)+'[', 'PASS', 'SVTYPE=BND;EVENT=HGT'+str(hgt_vcf_counter), '.', '1'])
            writevcf.writerow([options.acceptor, hgt.acc_end, 'BND_'+str(hgt_vcf_counter)+'_2', hgt.acc_end_base, ']'+options.donor+':'+str(hgt.don_end)+']'+hgt.acc_end_base, 'PASS', 'SVTYPE=BND;EVENT=HGT'+str(hgt_vcf_counter), '.', '1'])
            hgt_vcf_counter += 1

        if (hgt_vcf_counter == 1):
            print ('No candidates written to VCF, try to rerun with lower sampling sensitivity (--boot_sens)')
Esempio n. 60
0
def grand(w):

    '''
    This is a grand loop that first calculates the firm's optimal decision rule, the HH's 
    consumption and labor choices, then aggregates individual choice to the overall economy, and finally 
    calculates the distance between aggregate labor demand and supply
    '''

    # operating profits, op
    sizez = len(z_grid)
    op = np.zeros((sizez, sizek))
    for i in range(sizez):
        for j in range(sizek):
            op[i,j] = ((1 - a_l) * ((a_l / w) ** (a_l / (1 - a_l))) *
          ((kgrid[j] ** a_k) ** (1 / (1 - a_l))) * (z_grid[i] ** (1/(1 - a_l))))

    # firm cash flow, e    
    e = np.zeros((sizez, sizek, sizek))
    for i in range(sizez):
        for j in range(sizek):
            for k in range(sizek):
                e[i, j, k] = (op[i,j] - kgrid[k] + ((1 - delta) * kgrid[j]) -
                           ((psi / 2) * ((kgrid[k] - ((1 - delta) * kgrid[j])) ** 2)
                            / kgrid[j]))

    # value function iteration
    VFtol = 1e-6
    VFdist = 7.0
    VFmaxiter = 3000
    V = np.zeros((sizez, sizek)) 
    Vmat = np.zeros((sizez, sizek, sizek))  
    Vstore = np.zeros((sizez, sizek, VFmaxiter)) 
    VFiter = 1
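    # the value function iteration below mirrors equil_solver above: the
    # Bellman operator is applied until successive V's differ by less than VFtol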

    start_time = time.clock()
    while VFdist > VFtol and VFiter < VFmaxiter:
        TV = V    
        Vmat = VFI_loop(V, e, betafirm, sizez, sizek, Vmat, pi)
        Vstore[:, :, VFiter] = V.reshape(sizez, sizek,) 
        V = Vmat.max(axis=2) 
        PF = np.argmax(Vmat, axis=2) 
        Vstore[:, :, VFiter] = V  # store the updated value function for this iteration
        VFdist = (np.absolute(V - TV)).max()  
        VFiter += 1
    
    VFI_time = time.clock() - start_time
    if VFiter < VFmaxiter:
        print('Value function converged after this many iterations:', VFiter)
    else:
        print('Value function did not converge')
    print('VFI took ', VFI_time, ' seconds to solve')  
    VF = V

    # optimal capital stock k'
    optK = kgrid[PF]     

    # stationary distribution
    Gamma = np.ones((sizez, sizek)) * (1 / (sizek * sizez))
    SDtol = 1e-12
    SDdist = 7
    SDiter = 0
    SDmaxiter = 1000
    while SDdist > SDtol and SDmaxiter > SDiter:
        HGamma = SD_loop(PF, pi, Gamma, sizez, sizek)
        SDdist = (np.absolute(HGamma - Gamma)).max()
        Gamma = HGamma
        SDiter += 1
    if SDiter < SDmaxiter:
        print('Stationary distribution converged after this many iterations: ',SDiter)
    else:
        print('Stationary distribution did not converge')

    # optimal investment for an individual firm
    opti = optK - (1 - delta) * kgrid 
    
    # labor demand for an individual firm
    ld = np.zeros((sizez, sizek))
    for i in range(sizez):
        for j in range(sizek):
            ld[i,j] = (((a_l / w) ** (1 / (1 - a_l))) *
          ((kgrid[j] ** a_k) ** (1 / (1 - a_l))) * (z_grid[i] ** (1/(1 - a_l))))

    # adjustment costs
    adj_cost = psi / 2 * np.multiply((opti)**2, 1 / kgrid) 

    # output per firm
    y = (np.multiply(np.multiply((ld) ** a_l, kgrid ** a_k), np.transpose([z_grid])))

    # aggregate labor demand
    LD = np.multiply(ld, Gamma).sum() 

    # aggregate investment
    I = np.multiply(opti, Gamma).sum()  

    # aggregate adjustment costs
    ADJ = np.multiply(adj_cost, Gamma).sum()  

    # aggregate output
    Y = np.multiply(y, Gamma).sum()  

    # aggregate consumption
    C = Y - I - ADJ  

    # aggregate labor supply
    LS = w / (h * C)  

    # distance between aggregate labor demand and aggregate labor supply
    dist = abs(LD - LS) 

    print('|Ld-Ls|:', dist)
    print('wage:', w)
       
    return dist