Example #1
def bench(func):
    """Times a single function."""
    sys.stdout.write('%44s   ' % format_func(func))
    sys.stdout.flush()

    # figure out how many times we have to run the function to
    # get reliable timings.
    for i in xrange(3, 10):
        rounds = 1 << i
        t = timer()
        for x in xrange(rounds):
            func()
        if timer() - t >= 0.2:
            break

    # now run the tests without gc TEST_RUNS times and use the median
    # value of these runs.
    def _run():
        gc.collect()
        gc.disable()
        try:
            t = timer()
            for x in xrange(rounds):
                func()
            return (timer() - t) / rounds * 1000
        finally:
            gc.enable()

    delta = median(_run() for x in xrange(TEST_RUNS))
    sys.stdout.write('%.4f\n' % delta)
    sys.stdout.flush()

    return delta
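
The bench() helper above leans on several names defined elsewhere in its script: timer, TEST_RUNS, median and format_func. A minimal sketch of that scaffolding, with assumed implementations, might look like this:

import gc
import sys
from timeit import default_timer as timer

TEST_RUNS = 5  # assumption: number of timed runs to take the median over

def median(values):
    # assumed helper: median of an iterable of floats
    values = sorted(values)
    n = len(values)
    if n % 2:
        return values[n // 2]
    return (values[n // 2 - 1] + values[n // 2]) / 2.0

def format_func(func):
    # assumed helper: human-readable label for the benchmarked function
    return getattr(func, '__name__', repr(func))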
Example #2
    def detect(self, det_iter, show_timer=False):
        """
        detect all images in iterator

        Parameters
        ----------
        det_iter : DetIter
            iterator for all testing images
        show_timer : bool
            whether to print out detection exec time

        Returns
        -------
        list of detection results
        """
        num_images = det_iter._size
        if not isinstance(det_iter, mx.io.PrefetchingIter):
            det_iter = mx.io.PrefetchingIter(det_iter)
        start = timer()
        detections = self.mod.predict(det_iter).asnumpy()
        time_elapsed = timer() - start
        if show_timer:
            print("Detection time for {} images: {:.4f} sec".format(
                num_images, time_elapsed))
        result = []
        for i in range(detections.shape[0]):
            det = detections[i, :, :]
            res = det[np.where(det[:, 0] >= 0)[0]]
            result.append(res)
        return result
Example #3
 def solve(self):
     """
     Shoot: compute initial momentum to satisfy BVP
     by solving nonlinear system
     """
     print("Shoot: Solving with ",self.no_steps, "steps...")
     sys.stdout.flush()
     self._init_path()
     Pin=np.zeros(self.s0)
     #
     opt={}
     opt['xtol']=self.xtol
     opt['factor']=10
     #
     start=timer()
     Pout=spo.root(self._objective,  np.ravel(Pin),
                   jac=self._Jacobian,  options=opt)
     #
     if not Pout['success']:
         print("Heavy artillery brought in...")
         sys.stdout.flush()
         P=Pout['x'].reshape(self.s0)
         opt['maxiter']=int(1e4)
         Pout=spo.root(self._objective,  np.ravel(P),
                       jac=self._Jacobian, method='lm',
                       options=opt)
     end=timer()
     print("Run time %3.1f secs" % (end-start))
     #
     print(Pout['message'], "Success=", Pout['success'])
     assert Pout['success']
     P0=Pout['x'].reshape(self.s0)
     self.set_path(P0, self.landmarks[0,:,:])
     return P0
Example #4
 def test_quick_sort(self):
     global a, sorted_a
     start = timer()
     a = quick_sort(a)
     time_taken = timer() - start
     print_array("Quick Sort", a, time_taken)
     self.assertListEqual(a.tolist(), sorted_a)
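
The test above depends on module-level fixtures and helpers (a, sorted_a, print_array, plus the quick_sort under test). A hedged sketch of that setup; the bodies here are assumptions, not the original project's code:

import numpy as np
from timeit import default_timer as timer

a = np.random.randint(0, 10**6, size=10**4)  # assumed fixture: unsorted input
sorted_a = sorted(a.tolist())                # assumed fixture: sorted reference

def print_array(name, arr, time_taken):
    # assumed helper: report the algorithm name and elapsed time
    print('%s: %.6f s' % (name, time_taken))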
Example #5
def exhaustive_eval(player_card, table_card):
	"""compute all possible games given the player/table cards (as a numbers from 0 to 51) and return equity win/tie for each player"""

	p = player_card.shape[0]
	equity_arr = np.zeros([p, 2], dtype=np.float32)

	print '\n---------------- Exhaustive eval start'
	print 'player_card=\n{}'.format(player_card)
	print 'table_card=\n{}'.format(table_card)
	print 'p={}'.format(p)

	t0 = timer()

	equity_arr = exhaustive_block_fast(player_card, table_card,
												keys.CARD_FLUSH_KEY,
												keys.CARD_FACE_KEY,
												keys.CARD_SUIT,
												keys.SUIT_MASK,
												keys.SUIT_BIT_SHIFT,
												EvalSeven.flush_rank,
												EvalSeven.face_rank,
												EvalSeven.flush_suit)

	t1 = timer()
	print 'run time = \t{:.6f} s'.format(t1-t0)
	print equity_arr
	print '\n---------------- Exhaustive eval end'

	return equity_arr
Example #6
async def async_million_events(hass):
    """Run a million events."""
    count = 0
    event_name = 'benchmark_event'
    event = asyncio.Event(loop=hass.loop)

    @core.callback
    def listener(_):
        """Handle event."""
        nonlocal count
        count += 1

        if count == 10**6:
            event.set()

    hass.bus.async_listen(event_name, listener)

    for _ in range(10**6):
        hass.bus.async_fire(event_name)

    start = timer()

    await event.wait()

    return timer() - start
Example #7
async def async_million_time_changed_helper(hass):
    """Run a million events through time changed helper."""
    count = 0
    event = asyncio.Event(loop=hass.loop)

    @core.callback
    def listener(_):
        """Handle event."""
        nonlocal count
        count += 1

        if count == 10**6:
            event.set()

    hass.helpers.event.async_track_time_change(listener, minute=0, second=0)
    event_data = {
        ATTR_NOW: datetime(2017, 10, 10, 15, 0, 0, tzinfo=dt_util.UTC)
    }

    for _ in range(10**6):
        hass.bus.async_fire(EVENT_TIME_CHANGED, event_data)

    start = timer()

    await event.wait()

    return timer() - start
Example #8
async def async_million_state_changed_helper(hass):
    """Run a million events through state changed helper."""
    count = 0
    entity_id = 'light.kitchen'
    event = asyncio.Event(loop=hass.loop)

    @core.callback
    def listener(*args):
        """Handle event."""
        nonlocal count
        count += 1

        if count == 10**6:
            event.set()

    hass.helpers.event.async_track_state_change(
        entity_id, listener, 'off', 'on')
    event_data = {
        'entity_id': entity_id,
        'old_state': core.State(entity_id, 'off'),
        'new_state': core.State(entity_id, 'on'),
    }

    for _ in range(10**6):
        hass.bus.async_fire(EVENT_STATE_CHANGED, event_data)

    start = timer()

    await event.wait()

    return timer() - start
Example #9
def _logbook_filtering(hass, last_changed, last_updated):
    from homeassistant.components import logbook

    entity_id = 'test.entity'

    old_state = {
        'entity_id': entity_id,
        'state': 'off'
    }

    new_state = {
        'entity_id': entity_id,
        'state': 'on',
        'last_updated': last_updated,
        'last_changed': last_changed
    }

    event = core.Event(EVENT_STATE_CHANGED, {
        'entity_id': entity_id,
        'old_state': old_state,
        'new_state': new_state
    })

    def yield_events(event):
        # pylint: disable=protected-access
        entities_filter = logbook._generate_filter_from_config({})
        for _ in range(10**5):
            if logbook._keep_event(event, entities_filter):
                yield event

    start = timer()

    list(logbook.humanify(None, yield_events(event)))

    return timer() - start
Example #10
def import_images(folder, par=True, ttime=True):
  """
  This function loads images from a folder as PIL Image files and
  thresholds them, creating a list of z-slices to be turned into a matrix.
  This version is not currently used.
  """
  fils = os.listdir(folder)
  def keep_tifs(rawlist):
    tiflist = []
    for f in rawlist:
      if len(f.split('.'))>1:
        if f.split('.')[1] == 'tif':
          tiflist.append(f)
    return tiflist
  tiflist = keep_tifs(fils)
  newtiflist = [folder+f for f in tiflist]
  newtiflist.sort() # alphabetize
  tifobjs = [load_img_array(f) for f in newtiflist]

  results_par, results_nopar = None, None
  # here start parallel stuff
  if par or ttime:
    start_time_par = timer()
    pool = Pool(8)
    results_par = pool.map(show_at_thresh, tifobjs)
    pool.close()
    pool.join()
    total_time_par = timer() - start_time_par
    print('Time for parallel: %.2f seconds' % total_time_par)
  # or non-parallel stuff
  elif par==False or ttime:
    start_time_nopar = timer()
    results_nopar = [show_at_thresh(f) for f in tifobjs]
    total_time_nopar = timer() - start_time_nopar
    print('Time for non-parallel: %.2f seconds' % total_time_nopar)

  return results_par, results_nopar
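
import_images() calls two helpers defined elsewhere in its module. Plausible stand-ins, hedged (the fixed threshold in show_at_thresh is an assumption):

import numpy as np
from PIL import Image
from multiprocessing import Pool
from timeit import default_timer as timer

def load_img_array(fname):
    # assumed helper: load one TIFF z-slice as a numpy array
    return np.array(Image.open(fname))

def show_at_thresh(arr, thresh=127):
    # hypothetical helper: binarize a slice at a fixed cutoff
    return (arr > thresh).astype(np.uint8)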
Example #11
def get_voxel_locations(folder, fname, voxel=[0.176,0.176,0.38], ssave=False):
  # uses raw threshold function
  # get images as list
  fils = os.listdir(folder)
  def keep_tifs(rawlist):
    tiflist = []
    for f in rawlist:
      if len(f.split('.'))>1:
        if f.split('.')[1] == 'tif':
          tiflist.append(f)
    return tiflist
  tiflist = keep_tifs(fils) # this indexing may be removed if needed
  newtiflist = [folder+f for f in tiflist]
  newtiflist.sort() # alphabetize
  # commandeer all cores
  stime = timer()
  # pool = Pool()
  darr = [make_binary_thresh(i) for i in newtiflist] # adjust threshold in function
  # pool.close()
  # pool.join()
  print('Time taken for retrieving coordinates: %.2f' %(timer()-stime))
  # send to matrix2coords to get tuples back
  coords = matrix2coords(darr, voxel)
  if ssave:
    # save this
    save_coords(coords, fname)
  
  return coords, darr
Example #12
def driver(pricer, do_plot=False):
    paths = np.zeros((NumPath, NumStep + 1), order='F')
    paths[:, 0] = StockPrice
    DT = Maturity / NumStep

    ts = timer()
    pricer(paths, DT, InterestRate, Volatility)
    te = timer()
    elapsed = te - ts

    ST = paths[:, -1]
    PaidOff = np.maximum(paths[:, -1] - StrikePrice, 0)
    print 'Result'
    fmt = '%20s: %s'
    print fmt % ('stock price', np.mean(ST))
    print fmt % ('standard error', np.std(ST) / np.sqrt(NumPath))
    print fmt % ('paid off', np.mean(PaidOff))
    optionprice = np.mean(PaidOff) * np.exp(-InterestRate * Maturity)
    print fmt % ('option price', optionprice)

    print 'Performance'
    NumCompute = NumPath * NumStep
    print fmt % ('Mstep/second', '%.2f' % (NumCompute / elapsed / 1e6))
    print fmt % ('time elapsed', '%.3fs' % (te - ts))

    if do_plot:
        pathct = min(NumPath, MAX_PATH_IN_PLOT)
        for i in xrange(pathct):
            pyplot.plot(paths[i])
        print 'Plotting %d/%d paths' % (pathct, NumPath)
        pyplot.show()
    return elapsed
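
driver() expects module-level market constants and a pricer callable. A hedged sketch of what those might be; the constants are placeholders and numpy_pricer is a hypothetical vectorized geometric-Brownian-motion step loop, not the original project's pricer:

import numpy as np

NumPath, NumStep = 300000, 100          # placeholder sizes
StockPrice, StrikePrice = 20.83, 21.50  # placeholder market data
Volatility, InterestRate = 0.30, 0.02   # placeholder market data
Maturity = 5.0 / 12.0
MAX_PATH_IN_PLOT = 50

def numpy_pricer(paths, dt, interest, volatility):
    # hypothetical pricer: advance all paths one time step at a time
    c0 = (interest - 0.5 * volatility ** 2) * dt
    c1 = volatility * np.sqrt(dt)
    for j in range(1, paths.shape[1]):
        noise = np.random.normal(0.0, 1.0, paths.shape[0])
        paths[:, j] = paths[:, j - 1] * np.exp(c0 + c1 * noise)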
Example #13
def bf(n):
    start=timer()
    epsilon=0.00001
    a=0
    b=0
    for x1 in range(0,n+1):
        for y1 in range(0,n+1):
            if x1==0 and y1==0:
                continue
            v1=np.array([x1,y1])
            for x2 in range(0,n+1):
                for y2 in range(0,n+1):
                    if x2==0 and y2==0:
                        continue
                    v2=np.array([x2,y2])
#                    if np.dot(v1,v2)/(np.linalg.norm(v1)*np.linalg.norm(v2))>1-epsilon:
#                        continue
                    if np.array_equal(v1,v2):
                        continue
                    if abs(np.dot((v1-v2),v2))<epsilon:
                        b+=1
                        continue
    print('Count: ',int(n**2) +b)
    print('b: ',b)
    print('Elapsed time: ',timer()-start,'s')
Example #14
    def breedChildren(self,innovations):

        self.breedtime = 0.0
        self.mutatetime = 0.0

        crossoverchance = float(self.settings["crossover"])

        # update root genome
        self.root_genome = random.choice(self.l_genomes)

        children = []

        # copy genome 0
        # children.append(self.l_genomes[0])

        # loop through breed
        for i in range(self.breed):
            bt0  = timer()
            if random.random() <= crossoverchance and len(self.l_genomes)>1:
                child= genome.crossover(random.sample(self.l_genomes, 2), self.settings)
            else:
                child = genome.copyGenome(random.choice(self.l_genomes), self.settings)
            bt1  = timer()
            self.breedtime +=  (bt1-bt0)
            child.mutate(innovations)
            bt2  = timer()
            self.mutatetime += (bt2-bt1)

            children.append(child)

        return children
Example #15
    def breedChildren(self,innovations):

        self.breedtime = 0.0
        self.mutatetime = 0.0

        crossoverchance = float(self.settings["crossover"])

        new_gen_children = []
        # breed all the children for each species
        for s in self.l_species:
            new_gen_children.extend(s.breedChildren(innovations))
            self.breedtime += s.breedtime
            self.mutatetime += s.mutatetime

        # fill the remainder with wild children
        remainder = self.population-len(new_gen_children)
        all_Gs = self.getAllGenomes()
        for i in range(remainder):
            bt0 = timer()
            if random.random() <= crossoverchance and len(all_Gs)>1:
                child= genome.crossover(random.sample(all_Gs, 2), self.settings)
            else:
                child = genome.copyGenome(random.choice(all_Gs), self.settings)
            bt1 = timer()
            self.breedtime += (bt1-bt0)
            child.mutate(innovations)
            bt2 = timer()
            self.mutatetime += (bt2-bt1)
            new_gen_children.append(child)

        self.newGeneration(new_gen_children)
Example #16
    def handle_result(self, solver, t, y):
        #
        #Post processing (stores the time points).
        #
        time_start = timer()
        
        #Moving data to the model
        if t != self._model.time or (not self._f_nbr == 0 and not (self._model.continuous_states == y).all()):
            #Moving data to the model
            self._model.time = t
            #Check if there are any states
            if self._f_nbr != 0:
                self._model.continuous_states = y

            #Sets the inputs, if any
            if self.input is not None:
                self._model.set(self.input[0], self.input[1].eval(t)[0,:])

            #Evaluating the rhs (Have to evaluate the values in the model)
            rhs = self._model.get_derivatives()

        #Sets the parameters, if any
        if self.parameters is not None:
            p_data = N.array(solver.interpolate_sensitivity(t, 0)).flatten()

        self.export.integration_point(solver)#parameter_data=p_data)
        
        self.timings["handle_result"] += timer() - time_start
Example #17
    def handle_result(self, solver, t, y):
        """
        Post processing (stores the time points).
        """
        time_start = timer()
        
        if self._extra_f_nbr > 0:
            y_extra = y[-self._extra_f_nbr:]
            y       = y[:-self._extra_f_nbr]
            
        #Moving data to the model
        if t != self._model.time or (not self._f_nbr == 0 and not (self._model.continuous_states == y).all()):
            #Moving data to the model
            self._model.time = t
            #Check if there are any states
            if self._f_nbr != 0:
                self._model.continuous_states = y

            #Sets the inputs, if any
            self._set_input_values(t)
        
            #Evaluating the rhs (Have to evaluate the values in the model)
            rhs = self._model.get_derivatives()

        self.export.integration_point(solver)
        if self._extra_f_nbr > 0:
            self._extra_equations.handle_result(self.export, y_extra)
            
        self.timings["handle_result"] += timer() - time_start
Example #18
def test_full_client(msg_mass):
    ## Run a single client on single CPU and test-stress it.
    
    (sometx, messages_q) = msg_mass
    (factory, instance, tr) = sometx

    responses = []
    t0 = timer()
    for (tx, data, core) in messages_q:
        tr.clear()
        instance.lineReceived(data)
        response = tr.value()
        responses += [(tx, data, core, response)]
    t1 = timer()
    print "\nQuery message rate: %2.2f / sec" % (1.0 / ((t1-t0)/(len(messages_q))))

    ## Now we test the Commit
    t0 = timer()
    for (tx, data, core, response) in responses:
        resp = response.split(" ")
        k, s = map(b64decode, resp[1:])
        assert resp[0] == "OK"
        tr.clear()
        data = package_commit(core, [(k, s)])
        instance.lineReceived(data)
        flag, pub, sig = tr.value().split(" ")
        assert flag == "OK"
    t1 = timer()
    print "\nCommit message rate: %2.2f / sec" % (1.0 / ((t1-t0)/(len(responses))))
Example #19
    def handle_result(self, solver, t, y):
        """
        Post processing (stores the time points).
        """
        time_start = timer()
        
        #Moving data to the model
        if t != self._model.time or (not self._f_nbr == 0 and not (self._model.continuous_states == y).all()):
            #Moving data to the model
            self._model.time = t
            #Check if there are any states
            if self._f_nbr != 0:
                self._model.continuous_states = y

            #Sets the inputs, if any
            if self.input is not None:
                self._model.set_real(self.input_value_refs, self.input[1].eval(t)[0,:]*self.input_alias_type)

            #Evaluating the rhs (Have to evaluate the values in the model)
            rhs = self._model.get_derivatives()
        
        if self.export is not None:
            self.export.integration_point()
            
        self.timings["handle_result"] += timer() - time_start
Example #20
def main():

	start = timer()

	count = 0

	table = load_metadata_table('metadata/fiction_metadata.csv')
	with open('results/fictiondedup.csv', 'w') as outfile:
		for grouping in generate_clumps(table, 0.7):
			while len(grouping) > 0:
				compare_element = grouping.pop(0)
				count += 1
				print count
				text_group = []
				for i in xrange(len(grouping)-1, -1, -1):
					if compare_element.test_similarity(grouping[i]):
						count += 1
						print count
						text_group.append(grouping[i])
						del grouping[i]
				if len(text_group) > 0:
					outfile.write("\'"+compare_element.htid+"\',\'"+compare_element.title+"\',\'"+compare_element.author+"\',\'"+str(compare_element.K)+"\',\'"+compare_element.imprint+"\',\'"+compare_element.enumcron+"\',\'"+str(compare_element.totalpages)+"\'\n")
					for found in text_group:
						outfile.write("\'"+found.htid+"\',\'"+found.title+"\',\'"+found.author+"\',\'"+str(found.K)+"\',\'"+found.imprint+"\',\'"+found.enumcron+"\',\'"+str(found.totalpages)+"\'\n")
					outfile.write("\n")


	end = timer()
	print end - start
Example #21
    def copy_sites(self):
        self.break_copy = False
        number_site = 1
        for site in self.list_sites:

            time_go = timer()
            path_site_origin = site.path_origin + '/*'
            path_data_mt = user + '/PampaMT/DADOS_MT/' + self.project.name + '/' + site.name + '/'
            path_mkdir = user + '/PampaMT/DADOS_MT/' + self.project.name + '/' + site.name

            if (not self.break_copy) and (not site.copy):
                os.mkdir(path_mkdir)
                os.system('cp -r ' + path_site_origin + ' ' + path_data_mt)
                site.copy = True

                self.project.sites = self.list_sites
                save(self.project)
                #self.project.save()


            else:
                self.popup_copy_site.title = lang['Canceling']
                os.system('rm -r ' + user + '/PampaMT/DADOS_MT/' + self.project.name + '/*')
                self.popup_copy_site.dismiss()
                break

            self.popup_copy_site.count_time = number_site
            time_end = timer()
            self.popup_copy_site.time_one_step = time_end - time_go
            number_site += 1
            self.popup_copy_site.title = self.popup_copy_site.title_pop + '      ' + site.name

        if not self.break_copy:
            self.popup_copy_site.dismiss()
            self.open_pop_convert_bin_asc()
Example #22
    def get_result(self):
        """
        Write result to file, load result data and create an FMICSResult
        object.

        Returns::

            The FMICSResult object.
        """
        time_start = timer()
        
        if self.options["return_result"]:
            # Get the result
            res = self.result_handler.get_result()
        else:
            res = None
            
        end_time = timer()
        self.timings["returning_result"] = end_time - time_start
        self.timings["other"] = end_time - self.time_start_total- sum(self.timings.values())
        self.timings["total"] = end_time - self.time_start_total

        # create and return result object
        return FMIResult(self.model, self.result_file_name, None,
            res, self.options, status=self.status, detailed_timings=self.timings)
Example #23
def main(command, args):
    print("Apk Mass Installer Utility \nVersion: 3.1\n")

    adb_kill()  # kill any instances of adb before starting if any

    # wait for adb to detect phone
    while True:
        if adb_state():
            break
        print("No phone connected waiting to connect phone")
        time.sleep(1)

    print("Starting adb server...")
    adb_start()  # start an instance of adb server

    t_start = timer()

    if "backup" in command:
        archive = args.pop("archive", False)
        encrypt = args.pop("encrypt", False)
        back_up(archive, encrypt)

    elif "restore" in command:
        path = args.pop("path")
        restore(path)

    human_time(t_start, timer())

    adb_kill()
Example #24
def create_all_preflop_two_hand_equity(verbose=False, save=False, distributed=False, nb_process=4):
	"""returns preflop_two_hand_equity for all two hand preflop combinations"""
	global all_preflop_two_hands

	print '\n--------------- start create_all_preflop_two_hand_equity'
	print 'all preflop two hands = \nstart = {}\nend = {}\nnb of elements = {}'.format(all_preflop_two_hands[:5], all_preflop_two_hands[-5:], len(all_preflop_two_hands))

	t0 = timer()

	if (distributed):
		pool = ThreadPool(nb_process)
		equity = pool.map(preflop_two_hand_equity, all_preflop_two_hands[:])
		pool.close()
		pool.join()
	else:
		equity = []
		for k, p in enumerate(all_preflop_two_hands[:]):
			if (verbose):
				# print k,' - ', p
				sys.stdout.write('\rk=%5d / %5d : %s' % (k+1, len(all_preflop_two_hands), p))
				sys.stdout.flush()
			equity.append(preflop_two_hand_equity(p))

	t1 = timer()
	print 'all_preflop_two_hand_equity time = {:9.4f} s'.format(t1-t0)
	print 'exact number of distinct (rankwise) pairs of preflop hands = {}'.format(np.array([len(e) for e in equity]).sum())
	if (save):
		cPickle.dump(equity, open(os.path.join('Tables', 'all_preflop_two_hand_equity.pk'), 'wb'))
		print '{} saved to disk as {}'.format('equity', os.path.join('Tables', 'all_preflop_two_hand_equity.pk'))
	return equity
Example #25
    def comm_test(self, show=True):

        logging.info("Starting communication test...")

        # TODO: Use part of nearest neighbour data for test data
        # data = (leds.nearestNeighbours[:, 0] % 256).astype('int8').tobytes()
        # mid_point = leds.numLeds[0]
        data = bytes(range(1, 101))

        responses = []
        timings = []
        for tys in self.tys:
            start = timer()
            self.send_bytes(tys, b'X' + data + b'\n')
            response = tys.serial.readline().rstrip()
            end = timer()
            responses.append(response)
            timings.append(end - start)

        if show:
            for i, x in enumerate(zip(responses, timings)):
                print("Teensy %d: %s, %.4fs" %
                      (i, x[0].decode('utf8'), x[1]))

        if all([r == b'OK' for r in responses]):
            logging.info("Communication test successful.")
            return True
        else:
            logging.info("Communication test failed.")
            return False
Example #26
def bench2():
    print "Bench 2"
    its = 30
    cons = Box(np.array([[0, 10], [0, 10]]))
    x_init = np.array([1, 1])
    goal = Box(np.array([[9, 9.5], [1, 1.5]]))
    obstacles = [Polytope(np.array([[1, 5,0], [1, 5,10], [1, 5.3,10], [1, 5.3,0]]), False)]

    drm_nodes = []
    drm_times = []
    for i in range(its):
        print "it {0}".format(i)
        start = timer()
        try:
            drm = DRMotion(cons, obstacles, 1, 0.5, 1)
            t, cur = drm.build_tree(x_init, goal)
        except DRMNotConnected as e:
            drm_nodes.append(len(e.tree_progress.nodes()))
            pass
        end = timer()
        drm_times.append(end - start)

    print "drm nodes: max {0} min {1} avg {2}".format(
        max(drm_nodes), min(drm_nodes), sum(drm_nodes) / float(its))
    print "drm times: max {0} min {1} avg {2}".format(
        max(drm_times), min(drm_times), sum(drm_times) / float(its))
Example #27
def bench1():
    print "Bench 1"
    its = 30
    cons = Box(np.array([[0, 10], [0, 10]]))
    x_init = np.array([1, 1])
    goal = Box(np.array([[9, 9.5], [1, 1.5]]))
    obstacles = [Polytope(np.array([[1, 5,0], [1, 5,9.4], [1, 6,9.4], [1, 6,0]]), False)]

    drm_nodes = []
    rrt_nodes = []
    drm_times = []
    rrt_times = []
    for i in range(its):
        print "it {0}".format(i)
        start = timer()
        drm = DRMotion(cons, obstacles, 1, 0.5, 1)
        t, cur = drm.build_tree(x_init, goal)
        end = timer()
        drm_nodes.append(len(t.nodes()))
        drm_times.append(end - start)
        start = timer()
        rrt = RRT(cons, obstacles, 1)
        t, cur = rrt.build_tree(x_init, goal)
        rrt_nodes.append(len(t.nodes()))
        end = timer()
        rrt_times.append(end - start)

    print "drm nodes: max {0} min {1} avg {2}".format(
        max(drm_nodes), min(drm_nodes), sum(drm_nodes) / float(its))
    print "drm times: max {0} min {1} avg {2}".format(
        max(drm_times), min(drm_times), sum(drm_times) / float(its))
    print "rrt nodes: max {0} min {1} avg {2}".format(
        max(rrt_nodes), min(rrt_nodes), sum(rrt_nodes) / float(its))
    print "rrt times: max {0} min {1} avg {2}".format(
        max(rrt_times), min(rrt_times), sum(rrt_times) / float(its))
Example #28
def test_ec_bin_translation():
    from timeit import default_timer as timer

    G = EcGroup()
    o = G.order()
    g = G.generator()
    pt1000 = [o.random() * g for _ in range(1000)]

    exp = []
    for pt in pt1000:
        exp += [pt.export()]

    t0 = timer()
    for ept in exp:
        EcPt.from_binary(ept, G)
    t1 = timer()
    print("\nParsed compressed Pt: %2.4f" % (t1 - t0))

    exp = []
    for pt in pt1000:
        exp += [pt.export(EcPt.POINT_CONVERSION_UNCOMPRESSED)]

    t0 = timer()
    for ept in exp:
        EcPt.from_binary(ept, G)
    t1 = timer()
    print("\nParsed uncompressed Pt: %2.4f" % (t1 - t0))
Example #29
def p88old(nmax):
    """returns minmal sum-product numbers up to n digits"""
    start=timer()
#    pset=set()
    psns={}
    primes=primesfrom2to(nmax)
    for n in range(2,nmax+1):
        psn,m,solution=psnmin(n,primes)
        psns[n]=(psn,m,solution)
#        print(n,psn,m,solution)
#    return
    count=0
    psums=[]
    for k,v in psns.items():
        a,b=v[2][0],v[2][1]
        newn=0
        m=2
        for c in [2,3]:
            while newn<nmax:                
                m+=1
                newn,newpsn,newsol=psnmin2(a,b,c,m)
    #            print(k,newn,newpsn,newsol)
                if newn<=nmax and newpsn<psns[newn][0]:
                    count+=1
                    psns[newn]=(newpsn,m,newsol)
    #                print(k,newn,psns[newn])
        psums.append(sum(set([v[0] for k,v in psns.items()])))
#    print (pset)
#    print(sum(pset))
    plt.plot(psums)
    print(psums[-1])
    print('replacements:',count)
    print('Elapsed time:',timer()-start)
    return psns    
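
p88old() relies on helpers defined elsewhere (primesfrom2to, psnmin, psnmin2). For the first, a widely used NumPy wheel sieve is a plausible stand-in; treat it as an assumption, and note that psnmin/psnmin2 are problem-specific and not reconstructed here:

import numpy as np

def primesfrom2to(n):
    # assumed helper: array of primes 2 <= p < n, for n >= 6
    sieve = np.ones(n // 3 + (n % 6 == 2), dtype=bool)
    for i in range(1, int(n ** 0.5) // 3 + 1):
        if sieve[i]:
            k = 3 * i + 1 | 1
            sieve[k * k // 3::2 * k] = False
            sieve[k * (k - 2 * (i & 1) + 4) // 3::2 * k] = False
    return np.r_[2, 3, ((3 * np.nonzero(sieve)[0][1:] + 1) | 1)]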
Example #30
    def run(self):
        start = timer()
        cxx_std = self.benchmark_definition['cxx_std']
        num_bindings = self.benchmark_definition['num_bindings']
        compiler_executable_name = self.benchmark_definition['compiler']
        benchmark_generation_flags = self.benchmark_definition['benchmark_generation_flags']

        other_compile_flags = []
        if 'use_old_style_fruit_component_install_syntax' in benchmark_generation_flags:
            other_compile_flags.append('-DUSE_OLD_STYLE_FRUIT_COMPONENT_INSTALL_SYNTAX')
            other_compile_flags.append('-Wno-deprecated-declarations')

        run_command(compiler_executable_name,
                    args = compile_flags + other_compile_flags + [
                        '-std=%s' % cxx_std,
                        '-DMULTIPLIER=%s' % (num_bindings // 5),
                        '-I', self.fruit_sources_dir + '/include',
                        '-I', self.fruit_build_tmpdir + '/include',
                        '-ftemplate-depth=1000',
                        '-c',
                        self.fruit_benchmark_sources_dir + '/extras/benchmark/compile_time_benchmark.cpp',
                        '-o',
                        '/dev/null',
                    ])
        end = timer()
        return {"compile_time": end - start}
Example #31
def runtime_analysis():
    from timeit import default_timer as timer
    start = timer()
    print('test runtime')
    end = timer()
    print(end - start)
Example #32
"""
Created on Tue Feb 18 09:38:49 2020

@author: sebas
"""
"""
Script generated with all algorithms used in dimensioning and reinforce
gathered in a single document and optimized (I hope!)
"""
# Importing needed modules
# ========================
import os
from timeit import default_timer as timer
import unitskN_m_C as units
import DesignProcedures as dsgnpcs

startime = timer()
directory = os.getcwd()

# IMPORTANT DATA FOR GENERATING THE MODEL.
# ========================================
# Model Definition Data
# ---------------------
ndm = 3
ndf = 6
# Geometric definitions
# ---------------------
NbaysX = [4, 6, 8, 10]  # [INTEGER] number of bays in X direction.
NbaysZ = 4  # [INTEGER] number of bays in Z direction.
XbayL = 5.0  # [m] length of bays in X direction.
ZbayL = 5.0  # [m] length of bays in Z direction.
StoryH = 3.0  # [m] story height - uniform for all levels.
Example #33
    def ssp_fit(self,
                input_z,
                input_sigma,
                input_Av,
                fit_data,
                fit_scheme='nnls'):

        #Quick naming
        obs_wave = fit_data['obs_wave_resam']
        obs_flux_masked = fit_data['obs_flux_norm_masked']
        rest_wave = fit_data['basesWave_resam']
        bases_flux = fit_data['bases_flux_norm']
        int_mask = fit_data['int_mask']
        obsFlux_mean = fit_data['normFlux_obs']

        #Apply physical data to stellar grid
        ssp_grid = self.physical_SED_model(rest_wave, obs_wave, bases_flux,
                                           input_Av, input_z, input_sigma, 3.1)
        ssp_grid_masked = (int_mask * ssp_grid.T).T

        #---Leasts square fit
        if fit_scheme == 'lsq':
            start = timer()
            optimize_result = lsq_linear(ssp_grid_masked,
                                         obs_flux_masked,
                                         bounds=(0, inf))
            end = timer()
            print 'lsq', ' time ', (end - start)

            coeffs_bases = optimize_result.x * obsFlux_mean

        elif fit_scheme == 'nnls':
            start = timer()
            optimize_result = nnls(ssp_grid_masked, obs_flux_masked)
            end = timer()
            print 'nnls', ' time ', (end - start), '\n'

            coeffs_bases = optimize_result[0] * obsFlux_mean

        #---Linear fitting without restrictions
        else:

            start = timer()

            #First guess
            coeffs_bases = self.linfit1d(obs_flux_masked, obsFlux_mean,
                                         ssp_grid_masked, inv_pdl_error_i)

            #Count positive and negative coefficients
            idx_plus_0 = coeffs_bases[:] > 0
            plus_coeff = idx_plus_0.sum()
            neg_coeff = (~idx_plus_0).sum()

            #Start loops
            counter = 0
            if plus_coeff > 0:

                while neg_coeff > 0:
                    counter += 1

                    bases_model_n = zeros([nObsPix, plus_coeff])

                    idx_plus_0 = (coeffs_bases[:] > 0)
                    bases_model_n[:, 0:idx_plus_0.sum()] = \
                        bases_grid_model_masked[:, idx_plus_0]  #These are replaced in order
                    coeffs_bases[~idx_plus_0] = 0

                    #Repeat fit
                    coeffs_n = self.linfit1d(obsFlux_normMasked, obsFlux_mean,
                                             bases_model_n, inv_pdl_error_i)

                    idx_plus_n = coeffs_n[:] > 0
                    idx_min_n = ~idx_plus_n
                    plus_coeff = idx_plus_n.sum()
                    neg_coeff = (idx_min_n).sum()

                    #Replacing negative by zero
                    coeffs_n[idx_min_n] = 0
                    coeffs_bases[idx_plus_0] = coeffs_n

                    if plus_coeff == 0:
                        neg_coeff = 0
            else:
                plus_coeff = nBases

            end = timer()
            print 'FIT3D', ' time ', (end - start)

        #Save data to export
        fit_products = {}
        flux_sspFit = np_sum(coeffs_bases.T * ssp_grid_masked, axis=1)
        fluxMasked_sspFit = flux_sspFit * int_mask
        fit_products['flux_components'] = coeffs_bases.T * ssp_grid_masked
        fit_products['weight_coeffs'] = coeffs_bases
        fit_products['flux_sspFit'] = flux_sspFit
        fit_products['fluxMasked_sspFit'] = fluxMasked_sspFit

        return fit_products
Example #34
    with open(location, "rb") as file:
        job = client.load_table_from_file(file, table, job_config=config)
else:
    job = client.load_table_from_uri(gcs_uri, table, job_config=config)

print ("Loading {} file {} into dataset {} as table {}...".format \
      (("local" if local else "GCS"),(location if local else gcs_uri), dataset_name, table_name))

# See if we have a timer
try:
    timer
    use_timer = True
except NameError:
    use_timer = False

if (use_timer):
    start = timer()

# Performs the load and waits for result
job.result()

if (use_timer):
    end = timer()
    result_time = " in {0:.4f}s".format(end-start)
else:
    result_time = ""


# Prints results
print("{} rows were loaded{}.".format(job.output_rows, result_time))
Example #35
def train(
    lang,
    output_path,
    train_path,
    dev_path,
    raw_text=None,
    base_model=None,
    pipeline="tagger,parser,ner",
    vectors=None,
    n_iter=30,
    n_early_stopping=None,
    n_examples=0,
    use_gpu=-1,
    version="0.0.0",
    meta_path=None,
    init_tok2vec=None,
    parser_multitasks="",
    entity_multitasks="",
    noise_level=0.0,
    orth_variant_level=0.0,
    eval_beam_widths="",
    gold_preproc=False,
    learn_tokens=False,
    textcat_multilabel=False,
    textcat_arch="bow",
    textcat_positive_label=None,
    verbose=False,
    debug=False,
):
    """
    Train or update a spaCy model. Requires data to be formatted in spaCy's
    JSON format. To convert data from other formats, use the `spacy convert`
    command.
    """
    util.fix_random_seed()
    util.set_env_log(verbose)

    # Make sure all files and paths exists if they are needed
    train_path = util.ensure_path(train_path)
    dev_path = util.ensure_path(dev_path)
    meta_path = util.ensure_path(meta_path)
    output_path = util.ensure_path(output_path)
    if raw_text is not None:
        raw_text = list(srsly.read_jsonl(raw_text))
    if not train_path or not train_path.exists():
        msg.fail("Training data not found", train_path, exits=1)
    if not dev_path or not dev_path.exists():
        msg.fail("Development data not found", dev_path, exits=1)
    if meta_path is not None and not meta_path.exists():
        msg.fail("Can't find model meta.json", meta_path, exits=1)
    meta = srsly.read_json(meta_path) if meta_path else {}
    if output_path.exists() and [
            p for p in output_path.iterdir() if p.is_dir()
    ]:
        msg.warn(
            "Output directory is not empty",
            "This can lead to unintended side effects when saving the model. "
            "Please use an empty directory or a different path instead. If "
            "the specified output path doesn't exist, the directory will be "
            "created for you.",
        )
    if not output_path.exists():
        output_path.mkdir()

    # Take dropout and batch size as generators of values -- dropout
    # starts high and decays sharply, to force the optimizer to explore.
    # Batch size starts at 1 and grows, so that we make updates quickly
    # at the beginning of training.
    dropout_rates = util.decaying(
        util.env_opt("dropout_from", 0.2),
        util.env_opt("dropout_to", 0.2),
        util.env_opt("dropout_decay", 0.0),
    )
    batch_sizes = util.compounding(
        util.env_opt("batch_from", 100.0),
        util.env_opt("batch_to", 1000.0),
        util.env_opt("batch_compound", 1.001),
    )

    if not eval_beam_widths:
        eval_beam_widths = [1]
    else:
        eval_beam_widths = [int(bw) for bw in eval_beam_widths.split(",")]
        if 1 not in eval_beam_widths:
            eval_beam_widths.append(1)
        eval_beam_widths.sort()
    has_beam_widths = eval_beam_widths != [1]

    # Set up the base model and pipeline. If a base model is specified, load
    # the model and make sure the pipeline matches the pipeline setting. If
    # training starts from a blank model, initialize the language class.
    pipeline = [p.strip() for p in pipeline.split(",")]
    msg.text("Training pipeline: {}".format(pipeline))
    if base_model:
        msg.text("Starting with base model '{}'".format(base_model))
        nlp = util.load_model(base_model)
        if nlp.lang != lang:
            msg.fail(
                "Model language ('{}') doesn't match language specified as "
                "`lang` argument ('{}') ".format(nlp.lang, lang),
                exits=1,
            )
        nlp.disable_pipes([p for p in nlp.pipe_names if p not in pipeline])
        for pipe in pipeline:
            if pipe not in nlp.pipe_names:
                if pipe == "parser":
                    pipe_cfg = {"learn_tokens": learn_tokens}
                elif pipe == "textcat":
                    pipe_cfg = {
                        "exclusive_classes": not textcat_multilabel,
                        "architecture": textcat_arch,
                        "positive_label": textcat_positive_label,
                    }
                else:
                    pipe_cfg = {}
                nlp.add_pipe(nlp.create_pipe(pipe, config=pipe_cfg))
            else:
                if pipe == "textcat":
                    textcat_cfg = nlp.get_pipe("textcat").cfg
                    base_cfg = {
                        "exclusive_classes": textcat_cfg["exclusive_classes"],
                        "architecture": textcat_cfg["architecture"],
                        "positive_label": textcat_cfg["positive_label"],
                    }
                    pipe_cfg = {
                        "exclusive_classes": not textcat_multilabel,
                        "architecture": textcat_arch,
                        "positive_label": textcat_positive_label,
                    }
                    if base_cfg != pipe_cfg:
                        msg.fail(
                            "The base textcat model configuration does"
                            "not match the provided training options. "
                            "Existing cfg: {}, provided cfg: {}".format(
                                base_cfg, pipe_cfg),
                            exits=1,
                        )
    else:
        msg.text("Starting with blank model '{}'".format(lang))
        lang_cls = util.get_lang_class(lang)
        nlp = lang_cls()
        for pipe in pipeline:
            if pipe == "parser":
                pipe_cfg = {"learn_tokens": learn_tokens}
            elif pipe == "textcat":
                pipe_cfg = {
                    "exclusive_classes": not textcat_multilabel,
                    "architecture": textcat_arch,
                    "positive_label": textcat_positive_label,
                }
            else:
                pipe_cfg = {}
            nlp.add_pipe(nlp.create_pipe(pipe, config=pipe_cfg))

    if vectors:
        msg.text("Loading vector from model '{}'".format(vectors))
        _load_vectors(nlp, vectors)

    # Multitask objectives
    multitask_options = [("parser", parser_multitasks),
                         ("ner", entity_multitasks)]
    for pipe_name, multitasks in multitask_options:
        if multitasks:
            if pipe_name not in pipeline:
                msg.fail("Can't use multitask objective without '{}' in the "
                         "pipeline".format(pipe_name))
            pipe = nlp.get_pipe(pipe_name)
            for objective in multitasks.split(","):
                pipe.add_multitask_objective(objective)

    # Prepare training corpus
    msg.text("Counting training words (limit={})".format(n_examples))
    corpus = GoldCorpus(train_path, dev_path, limit=n_examples)
    n_train_words = corpus.count_train()

    if base_model:
        # Start with an existing model, use default optimizer
        optimizer = create_default_optimizer(Model.ops)
    else:
        # Start with a blank model, call begin_training
        optimizer = nlp.begin_training(lambda: corpus.train_tuples,
                                       device=use_gpu)

    nlp._optimizer = None

    # Load in pretrained weights
    if init_tok2vec is not None:
        components = _load_pretrained_tok2vec(nlp, init_tok2vec)
        msg.text("Loaded pretrained tok2vec for: {}".format(components))

    # Verify textcat config
    if "textcat" in pipeline:
        textcat_labels = nlp.get_pipe("textcat").cfg["labels"]
        if textcat_positive_label and textcat_positive_label not in textcat_labels:
            msg.fail(
                "The textcat_positive_label (tpl) '{}' does not match any "
                "label in the training data.".format(textcat_positive_label),
                exits=1,
            )
        if textcat_positive_label and len(textcat_labels) != 2:
            msg.fail(
                "A textcat_positive_label (tpl) '{}' was provided for training "
                "data that does not appear to be a binary classification "
                "problem with two labels.".format(textcat_positive_label),
                exits=1,
            )
        train_docs = corpus.train_docs(
            nlp,
            noise_level=noise_level,
            gold_preproc=gold_preproc,
            max_length=0,
            ignore_misaligned=True,
        )
        train_labels = set()
        if textcat_multilabel:
            multilabel_found = False
            for text, gold in train_docs:
                train_labels.update(gold.cats.keys())
                if list(gold.cats.values()).count(1.0) != 1:
                    multilabel_found = True
            if not multilabel_found and not base_model:
                msg.warn("The textcat training instances look like they have "
                         "mutually-exclusive classes. Remove the flag "
                         "'--textcat-multilabel' to train a classifier with "
                         "mutually-exclusive classes.")
        if not textcat_multilabel:
            for text, gold in train_docs:
                train_labels.update(gold.cats.keys())
                if list(gold.cats.values()).count(1.0) != 1 and not base_model:
                    msg.warn(
                        "Some textcat training instances do not have exactly "
                        "one positive label. Modifying training options to "
                        "include the flag '--textcat-multilabel' for classes "
                        "that are not mutually exclusive.")
                    nlp.get_pipe("textcat").cfg["exclusive_classes"] = False
                    textcat_multilabel = True
                    break
        if base_model and set(textcat_labels) != train_labels:
            msg.fail(
                "Cannot extend textcat model using data with different "
                "labels. Base model labels: {}, training data labels: "
                "{}.".format(textcat_labels, list(train_labels)),
                exits=1,
            )
        if textcat_multilabel:
            msg.text(
                "Textcat evaluation score: ROC AUC score macro-averaged across "
                "the labels '{}'".format(", ".join(textcat_labels)))
        elif textcat_positive_label and len(textcat_labels) == 2:
            msg.text("Textcat evaluation score: F1-score for the "
                     "label '{}'".format(textcat_positive_label))
        elif len(textcat_labels) > 1:
            if len(textcat_labels) == 2:
                msg.warn(
                    "If the textcat component is a binary classifier with "
                    "exclusive classes, provide '--textcat_positive_label' for "
                    "an evaluation on the positive class.")
            msg.text(
                "Textcat evaluation score: F1-score macro-averaged across "
                "the labels '{}'".format(", ".join(textcat_labels)))
        else:
            msg.fail(
                "Unsupported textcat configuration. Use `spacy debug-data` "
                "for more information.")

    # fmt: off
    row_head, output_stats = _configure_training_output(
        pipeline, use_gpu, has_beam_widths)
    row_widths = [len(w) for w in row_head]
    row_settings = {
        "widths": row_widths,
        "aligns": tuple(["r" for i in row_head]),
        "spacing": 2
    }
    # fmt: on
    print("")
    msg.row(row_head, **row_settings)
    msg.row(["-" * width for width in row_settings["widths"]], **row_settings)
    try:
        iter_since_best = 0
        best_score = 0.0
        for i in range(n_iter):
            train_docs = corpus.train_docs(
                nlp,
                noise_level=noise_level,
                orth_variant_level=orth_variant_level,
                gold_preproc=gold_preproc,
                max_length=0,
                ignore_misaligned=True,
            )
            if raw_text:
                random.shuffle(raw_text)
                raw_batches = util.minibatch(
                    (nlp.make_doc(rt["text"]) for rt in raw_text), size=8)
            words_seen = 0
            with tqdm.tqdm(total=n_train_words, leave=False) as pbar:
                losses = {}
                for batch in util.minibatch_by_words(train_docs,
                                                     size=batch_sizes):
                    if not batch:
                        continue
                    docs, golds = zip(*batch)
                    nlp.update(
                        docs,
                        golds,
                        sgd=optimizer,
                        drop=next(dropout_rates),
                        losses=losses,
                    )
                    if raw_text:
                        # If raw text is available, perform 'rehearsal' updates,
                        # which use unlabelled data to reduce overfitting.
                        raw_batch = list(next(raw_batches))
                        nlp.rehearse(raw_batch, sgd=optimizer, losses=losses)
                    if not int(os.environ.get("LOG_FRIENDLY", 0)):
                        pbar.update(sum(len(doc) for doc in docs))
                    words_seen += sum(len(doc) for doc in docs)
            with nlp.use_params(optimizer.averages):
                util.set_env_log(False)
                epoch_model_path = output_path / ("model%d" % i)
                nlp.to_disk(epoch_model_path)
                nlp_loaded = util.load_model_from_path(epoch_model_path)
                for beam_width in eval_beam_widths:
                    for name, component in nlp_loaded.pipeline:
                        if hasattr(component, "cfg"):
                            component.cfg["beam_width"] = beam_width
                    dev_docs = list(
                        corpus.dev_docs(
                            nlp_loaded,
                            gold_preproc=gold_preproc,
                            ignore_misaligned=True,
                        ))
                    nwords = sum(len(doc_gold[0]) for doc_gold in dev_docs)
                    start_time = timer()
                    scorer = nlp_loaded.evaluate(dev_docs, verbose=verbose)
                    end_time = timer()
                    if use_gpu < 0:
                        gpu_wps = None
                        cpu_wps = nwords / (end_time - start_time)
                    else:
                        gpu_wps = nwords / (end_time - start_time)
                        with Model.use_device("cpu"):
                            nlp_loaded = util.load_model_from_path(
                                epoch_model_path)
                            for name, component in nlp_loaded.pipeline:
                                if hasattr(component, "cfg"):
                                    component.cfg["beam_width"] = beam_width
                            dev_docs = list(
                                corpus.dev_docs(
                                    nlp_loaded,
                                    gold_preproc=gold_preproc,
                                    ignore_misaligned=True,
                                ))
                            start_time = timer()
                            scorer = nlp_loaded.evaluate(dev_docs,
                                                         verbose=verbose)
                            end_time = timer()
                            cpu_wps = nwords / (end_time - start_time)
                    acc_loc = output_path / ("model%d" % i) / "accuracy.json"
                    srsly.write_json(acc_loc, scorer.scores)

                    # Update model meta.json
                    meta["lang"] = nlp.lang
                    meta["pipeline"] = nlp.pipe_names
                    meta["spacy_version"] = ">=%s" % about.__version__
                    if beam_width == 1:
                        meta["speed"] = {
                            "nwords": nwords,
                            "cpu": cpu_wps,
                            "gpu": gpu_wps,
                        }
                        meta["accuracy"] = scorer.scores
                    else:
                        meta.setdefault("beam_accuracy", {})
                        meta.setdefault("beam_speed", {})
                        meta["beam_accuracy"][beam_width] = scorer.scores
                        meta["beam_speed"][beam_width] = {
                            "nwords": nwords,
                            "cpu": cpu_wps,
                            "gpu": gpu_wps,
                        }
                    meta["vectors"] = {
                        "width": nlp.vocab.vectors_length,
                        "vectors": len(nlp.vocab.vectors),
                        "keys": nlp.vocab.vectors.n_keys,
                        "name": nlp.vocab.vectors.name,
                    }
                    meta.setdefault("name", "model%d" % i)
                    meta.setdefault("version", version)
                    meta["labels"] = nlp.meta["labels"]
                    meta_loc = output_path / ("model%d" % i) / "meta.json"
                    srsly.write_json(meta_loc, meta)
                    util.set_env_log(verbose)

                    progress = _get_progress(
                        i,
                        losses,
                        scorer.scores,
                        output_stats,
                        beam_width=beam_width if has_beam_widths else None,
                        cpu_wps=cpu_wps,
                        gpu_wps=gpu_wps,
                    )
                    if i == 0 and "textcat" in pipeline:
                        textcats_per_cat = scorer.scores.get(
                            "textcats_per_cat", {})
                        for cat, cat_score in textcats_per_cat.items():
                            if cat_score.get("roc_auc_score", 0) < 0:
                                msg.warn(
                                    "Textcat ROC AUC score is undefined due to "
                                    "only one value in label '{}'.".format(
                                        cat))
                    msg.row(progress, **row_settings)
                # Early stopping
                if n_early_stopping is not None:
                    current_score = _score_for_model(meta)
                    if current_score < best_score:
                        iter_since_best += 1
                    else:
                        iter_since_best = 0
                        best_score = current_score
                    if iter_since_best >= n_early_stopping:
                        msg.text("Early stopping, best iteration "
                                 "is: {}".format(i - iter_since_best))
                        msg.text("Best score = {}; Final iteration "
                                 "score = {}".format(best_score,
                                                     current_score))
                        break
    finally:
        with nlp.use_params(optimizer.averages):
            final_model_path = output_path / "model-final"
            nlp.to_disk(final_model_path)
        msg.good("Saved model to output directory", final_model_path)
        with msg.loading("Creating best model..."):
            best_model_path = _collate_best_model(meta, output_path,
                                                  nlp.pipe_names)
        msg.good("Created best model", best_model_path)
Example #36
 def stop(self):
     """
     Stop the timer and return the number of seconds since the timer was started.
     """
     self.stopped = timer()
     return self.time
Example #37
 def start(self):
     """
     Start or restart the timer
     """
     self.started = timer()
     self.stopped = self.started
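
Examples #36 and #37 are two methods of a small stopwatch class. A hedged sketch of how the enclosing class might fit together; the time property is inferred from stop() returning self.time and is an assumption:

from timeit import default_timer as timer

class Stopwatch(object):
    # assumed enclosing class for the start()/stop() methods above

    def __init__(self):
        self.started = self.stopped = timer()

    def start(self):
        self.started = timer()
        self.stopped = self.started

    def stop(self):
        self.stopped = timer()
        return self.time

    @property
    def time(self):
        # assumed property: elapsed seconds between start and stop
        return self.stopped - self.started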
Example #38
    def detect_image(self, image):
        start = timer()

        if self.model_image_size != (None, None):
            assert self.model_image_size[0] % 32 == 0, 'Multiples of 32 required'
            assert self.model_image_size[1] % 32 == 0, 'Multiples of 32 required'
            boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))
        else:
            new_image_size = (image.width - (image.width % 32),
                              image.height - (image.height % 32))
            boxed_image = letterbox_image(image, new_image_size)
        image_data = np.array(boxed_image, dtype='float32')

        print('Shape: {}, max: {}'.format(image_data.shape, image_data.max()))
        image_data /= 255.
        image_data = np.expand_dims(image_data, 0)  # Add batch dimension.

        out_boxes, out_scores, out_classes = self.sess.run(
            [self.boxes, self.scores, self.classes],
            feed_dict={
                self.yolo_model.input: image_data,
                self.input_image_shape: [image.size[1], image.size[0]],
                K.learning_phase(): 0
            })

        print('Found {} boxes for {}'.format(len(out_boxes), 'img'))

        font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
                                  size=max(np.floor(3e-2 * image.size[1] + 0.5).astype('int32'), 8))
        thickness = max((image.size[0] + image.size[1]) // 300, 1)

        for i, c in reversed(list(enumerate(out_classes))):
            predicted_class = self.class_names[c]
            box = out_boxes[i]
            score = out_scores[i]

            label = '{} {:.2f}'.format(predicted_class, score)
            draw = ImageDraw.Draw(image)
            label_size = draw.textsize(label, font)

            top, left, bottom, right = box
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
            right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
            print(label, (left, top), (right, bottom))

            if top - label_size[1] >= 0:
                text_origin = np.array([left, top - label_size[1]])
            else:
                text_origin = np.array([left, top + 1])

            # My kingdom for a good redistributable image drawing library.
            for i in range(thickness):
                draw.rectangle(
                    [left + i, top + i, right - i, bottom - i],
                    outline=self.colors[c])
            draw.rectangle(
                [tuple(text_origin), tuple(text_origin + label_size)],
                fill=self.colors[c])
            draw.text(text_origin, label, fill=(0, 0, 0), font=font)
            del draw

        end = timer()
        print(end - start)
        return image
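letterbox_image() is not defined in this snippet; in the keras-yolo3 codebase it scales the image to fit the model input while preserving the aspect ratio and pads the remainder with gray. A sketch along those lines (assuming PIL images, as above):

from PIL import Image

def letterbox_image(image, size):
    """Resize a PIL image to size, keeping the aspect ratio and padding with gray."""
    iw, ih = image.size
    w, h = size
    scale = min(w / float(iw), h / float(ih))
    nw, nh = int(iw * scale), int(ih * scale)
    resized = image.resize((nw, nh), Image.BICUBIC)
    canvas = Image.new('RGB', size, (128, 128, 128))
    canvas.paste(resized, ((w - nw) // 2, (h - nh) // 2))
    return canvas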
Example #39
0
from robot import *
from timeit import default_timer as timer
R = Robot()

while True:
    t1 = timer()
    m = R.see((1024, 768), preview=False, preview_time=1)
    t = timer()
    t = t - t1
    print t
    if len(m) > 0:
        print m[0].distance
        print m[0].marker_type
    else:
        print "nothing"
Example #40
0
def encode_decode_lightfield(data, LF_LR, LF_HR, inputs, outputs, ColorSpace,
                             decoder_path):
    # light field size
    H = LF_LR.shape[2]
    W = LF_LR.shape[3]
    H_HR = LF_HR.shape[2]
    W_HR = LF_HR.shape[3]

    # patch step sizes
    bs_y = hp.sy
    bs_x = hp.sx
    bs_y_HR = hp.sy_HR
    bs_x_HR = hp.sx_HR
    # patch height/width
    ps_y = hp.H
    ps_x = hp.W
    ps_y_HR = hp.H_HR
    ps_x_HR = hp.W_HR
    ps_v = hp.D

    # patches per row/column
    by = np.int16((H - ps_y) / bs_y) + 1
    bx = np.int16((W - ps_x) / bs_x) + 1

    ids = []
    for i in range(0, len(hp.layer_config)):
        ids.append(hp.layer_config[i]['id'])
    pos = ids.index(decoder_path)
    num_channels = hp.layer_config[pos]['end'] - hp.layer_config[pos]['start']

    # one complete row per batch
    cv_interp = np.zeros([H_HR, W_HR, num_channels], np.float32)
    cv_raw = np.zeros([H_HR, W_HR, num_channels], np.float32)
    mask_sum = np.zeros([H_HR, W_HR], dtype=np.float32)

    print('starting LF encoding/decoding [', end='', flush=True)
    start = timer()

    results_received = 0
    for py in range(by):
        print('.', end='', flush=True)

        stacks_h = np.zeros([bx, ps_v, ps_y, ps_x, hp.C], np.float32)
        stacks_v = np.zeros([bx, ps_v, ps_y, ps_x, hp.C], np.float32)
        cv_in = np.zeros([bx, ps_y_HR, ps_x_HR, int(hp.C / 2)], np.float32)

        for px in range(bx):
            # get single patch
            patch = cdf.get_patch(LF_LR, py, px)
            stacks_v[px, :, :, :, :] = patch['stack_v']
            stacks_h[px, :, :, :, :] = patch['stack_h']
            cv_in[px, :, :, :] = patch['cv']
            if ColorSpace == 'YUV':
                stacks_v[px, :, :, :, :] = rgb2YUV(stacks_v[px, :, :, :, :])
                stacks_h[px, :, :, :, :] = rgb2YUV(stacks_h[px, :, :, :, :])
                cv_in[px, :, :, :] = rgb2YUV(cv_in[px, :, :, :])
            elif ColorSpace == 'YCBCR':
                stacks_v[px, :, :, :, :] = rgb2YCbCr(stacks_v[px, :, :, :, :])
                stacks_h[px, :, :, :, :] = rgb2YCbCr(stacks_h[px, :, :, :, :])
                cv_in[px, :, :, :] = rgb2YCbCr(cv_in[px, :, :, :])
            elif ColorSpace == 'LAB':
                stacks_v[px, :, :, :, :] = rgb2lab(stacks_v[px, :, :, :, :])
                stacks_h[px, :, :, :, :] = rgb2lab(stacks_h[px, :, :, :, :])
                cv_in[px, :, :, :] = rgb2lab(cv_in[px, :, :, :])

        # push complete batch to encoder/decoder pipeline
        batch = dict()
        batch['stacks_h'] = stacks_h
        batch['stacks_v'] = stacks_v
        batch['cv'] = cv_in
        batch['py'] = py
        batch['decoder_path'] = decoder_path

        inputs.put(batch)

        # drain any results that are already available while we keep producing
        if not outputs.empty():
            result = outputs.get()
            add_result_to_cv(data, result, cv_interp, cv_raw, mask_sum,
                             bs_x_HR, bs_y_HR, bx)
            results_received += 1
            outputs.task_done()

    # catch remaining results
    while results_received < by:
        result = outputs.get()
        add_result_to_cv(data, result, cv_interp, cv_raw, mask_sum, bs_x_HR,
                         bs_y_HR, bx)
        results_received += 1
        outputs.task_done()

    # elapsed time since start of dmap computation
    end = timer()
    total_time = end - start
    print('] done, total time %g seconds.' % total_time)

    # evaluate result
    mse = 0.0

    # compute stats and return result
    print('total time ', end - start)
    # print('MSE          : ', mse)

    # code.interact( local=locals() )
    return (cv_interp, total_time, mse, mask_sum, cv_raw)
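add_result_to_cv() is defined elsewhere; the pattern is that overlapping decoded patches are summed into cv_interp/cv_raw while mask_sum counts how many patches covered each pixel, so the sums can be normalized afterwards. A minimal sketch of that accumulation with a hypothetical, simplified interface (one output plane, unit weights):

import numpy as np

def accumulate_patch(cv, mask_sum, patch, y0, x0):
    """Add one decoded patch into the running sum and bump the coverage mask."""
    h, w = patch.shape[:2]
    cv[y0:y0 + h, x0:x0 + w, :] += patch
    mask_sum[y0:y0 + h, x0:x0 + w] += 1.0

# after all patches have been accumulated, normalize by the coverage:
# averaged = cv / np.maximum(mask_sum, 1.0)[:, :, np.newaxis]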
Example #41
0
    [STR] ...                            VCF column name(s) with the same order as in the VCF (e.g., POS REF)
    --vcf <FILE>                         Input VCF file
    -o, --outcsv <FILE>                  Output csv file
    -h, --help                           Help

"""

from timeit import default_timer as timer
from docopt import docopt
import csv
import numpy as np

if __name__ == "__main__":
    __version__ = 0.1
    start_time = timer()
    args = docopt(__doc__)
    cstr = list(args['STR'])
    with open(args['--outcsv'], 'w', newline='') as outf:
        wr = csv.writer(outf, quoting=csv.QUOTE_ALL)
        # write output csv header
        wr.writerow(['CHROM'] + cstr)
        for line in open(args['--vcf']):
            if line.startswith('#CHROM'):
                sline = list(line.split("\t"))
                # find indexes of the VCF column names
                col_int = [i for i in range(len(sline)) if sline[i] in cstr]
                # add CHROM column
                col_int.insert(0, 0)
            elif line.startswith('Contig'):
                cont_line = np.array(line.split("\t"))
Example #42
0
# part 2
def count_all_velocities(grid: Grid) -> int:
    """ Generate valid trajectories hitting target area and return their count. """
    grid.find_trajectories()
    return len(grid.trajectories)


def test_count_all_velocities():
    test_coords = parse_file(os.path.join(os.path.dirname(__file__), INPUT_TEST))
    test_count = count_all_velocities(Grid(test_coords))
    assert test_count == 112


if __name__ == "__main__":
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    start_time = timer()
    _, y_coords = parse_file(INPUT_FILE)
    max_height = find_max_height(y_coords)
    end_time = timer()
    logging.info(f'({end_time - start_time:.4f}s elapsed) '
                 f'Highest y position achieved is {max_height}.')

    start_time = timer()
    coords = parse_file(INPUT_FILE)
    velocity_count = count_all_velocities(Grid(coords))
    end_time = timer()
    logging.info(f'({end_time - start_time:.4f}s elapsed) '
                 f'Number of valid starting velocities is {velocity_count}.')
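find_max_height() is not shown in this fragment. For this puzzle (Advent of Code 2021, day 17) the usual closed form is that the best upward velocity is v = |y_min| - 1, giving a peak of v * (v + 1) / 2. A sketch under that assumption (y_coords is taken to be the target's y range, below zero):

def find_max_height(y_coords):
    """Closed-form peak height: v = |y_min| - 1, peak = v * (v + 1) // 2 (assumed)."""
    y_min = min(y_coords)
    v = abs(y_min) - 1
    return v * (v + 1) // 2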
Example #43
0
def clusterRTSP(dataset='target_points_pipes.csv', visualise=0, weights=None, visualise_speed=1.0, qhome=None, kmin=3, kmax=40, vel_limits=0.5, acc=0.5):
    """
    :param dataset: filename of input target points stored in /data/ package subdirectory
    :param visualise: 1 to display simulation otherwise 0
    :param weights: weights used for configuration selection
    :param visualise_speed: speed of visualisation (float)
    :param qhome: specify robot home position (array)
    :param kmin: min number of clusters if xmeans is selected (int)
    :param kmax: max number of clusters if xmeans is selected (int)
    :param vel_limits: desired maximum allowed velocity (as % of max.)
    :param acc: desired maximum acceleration (as % of max.)
    """

    starttime = timer()

    # ________________________________________________________
    ### DEFINE ENVIRONMENT
    # ________________________________________________________
    # initialise environment
    if qhome is None:
        qhome = [0, 0, 0, 0, 0, 0]

    env, robot, manipulator, iktype = environments.load_KUKA_kr6(display=0,  qhome=qhome, vel_limits=vel_limits, acc=acc)

    # ________________________________________________________
    ### INITIALISE
    # ________________________________________________________
    targets = []
    rospack = rospkg.RosPack()
    points = []
    path_to_files = rospack.get_path('cluster_rtsp')
    filename = str(path_to_files + '/data/'+dataset)
    with open(filename, 'rb') as csvfile:
        csv_points=csv.reader(csvfile, delimiter=',')
        for row in csv_points:
            points.append([float(i) for i in row])

    for i in xrange(0, len(points)):
        targets.append(orpy.Ray(np.array(points[i][0:3]), -1*np.array(points[i][3:])))

    n_points = len(targets)
    print('Number of points: %d' % n_points)

    initialise_end = timer()

    # set solver parameters
    params = classes.SolverParameters()
    params.standoff = 0.001
    params.step_size = np.pi / 3
    params.qhome = robot.GetActiveDOFValues()
    params.max_iters = 5000
    params.max_ppiters = 100
    params.try_swap = False
    configurations, ik_cpu_time = kinematics.compute_robot_configurations(env, robot, targets, params)

    # ________________________________________________________
    ### FORMAT CONFIG LIST
    # ________________________________________________________

    # append unique ID to each configuration
    # all_configs format: [j1 j2 j3 j4 j5 j6 cluster_n x y z task_point_ID config_ID]
    row = range(0, len(configurations))
    all_configs = np.column_stack((configurations[:, 0:6], row, configurations[:, 6:10], row))
    home_config = np.hstack((qhome, np.array([0, 0, 0, 0, all_configs[-1, 10] + 1, all_configs[-1, 11] + 1])))
    all_configs = np.vstack((all_configs, home_config))

    # ________________________________________________________
    ### CONFIGURATION SELECTIONS
    # ________________________________________________________

    print('Starting configuration selection...')
    if weights is None:
        weights = [0.2676, 0.3232, 0.2576, 0.0303, 0.0917, 0.0296]
    selected_configurations, select_time = cselect.clusterConfigSelection(all_configs, qhome, weights)

    # ________________________________________________________
    ### APPLY CLUSTERING METHOD TO CONFIG LIST
    # ________________________________________________________
    cluster_start = timer()
    print ('Clustering configurations...')
    xmeans = XMeans.fit(selected_configurations[:, 0:6], kmax=kmax, kmin=kmin, weights=np.array(weights)*6)
    N = xmeans.k
    labels = xmeans.labels_

    print('Number of clusters assigned: %d.' % N)

    # append cluster number to end of configuration points
    for i in xrange(0, len(selected_configurations)):
        selected_configurations[i, 6] = int(labels[i])

    # sort rows in ascending order based on 7th element
    ind = np.argsort(selected_configurations[:, 6])
    selected_configurations = selected_configurations[ind]

    cluster_end = timer()

    # ________________________________________________________
    ### GLOBAL TSP COMPUTATIONS
    # ________________________________________________________

    # Generate new variable for local clusters of points
    clusters = [None] * N
    for i in xrange(0, N):
        cluster = np.where(selected_configurations[:, 6] == i)[0]
        clusters[i] = selected_configurations[cluster, :]

    globsequence_start = timer()
    print('Computing global sequence...')
    gtour, pairs, closest_points, entry_points = tsp.globalTSP(clusters, qhome)
    global_path = gtour[1:-1]
    entry_points = entry_points[1:-1]
    globsequence_end = timer()

    # ________________________________________________________
    ### LOCAL TSP COMPUTATIONS
    # ________________________________________________________

    path = [None] * N
    print('Solving TSP for each cluster...')
    localtsp_start = timer()

    # plan intra-cluster paths
    for i in xrange(0, N):
        if np.shape(clusters[global_path[i]])[0] > 1:
            # Run Two-Opt
            tgraph = tsp.construct_tgraph(clusters[global_path[i]][:, 0:6], distfn=tsp.euclidean_fn)
            path[i] = tsp.two_opt(tgraph, start=entry_points[i][0], end=entry_points[i][1])
        else:
            path[i] = [0]

    localtsp_end = timer()

    # ________________________________________________________
    ### PLAN PATHS BETWEEN ALL POINTS IN COMPUTED PATH
    # ________________________________________________________

    # need to correct color indexing - somehow identify cluster index
    plan_start = timer()
    robot.SetActiveDOFValues(qhome)
    if N == 1:
        c = np.array([1, 0, 0])
    elif N < 10:
        c = np.array([[0.00457608, 0.58586408, 0.09916249],
                      [0.26603989, 0.36651324, 0.64662435],
                      [0.88546289, 0.63658585, 0.75394724],
                      [0.29854082, 0.26499636, 0.20025494],
                      [0.86513743, 0.98080264, 0.18520593],
                      [0.39864878, 0.33938585, 0.27366609],
                      [0.90286517, 0.51585244, 0.09724035],
                      [0.55158651, 0.56320824, 0.44465467],
                      [0.57776588, 0.38423542, 0.59291004],
                      [0.21227011, 0.9159966, 0.59002942]])
    else:
        c = np.random.rand(N, 3)

    h = []
    count = 0
    traj = [None] * (len(selected_configurations) + 1)
    skipped = 0

    points = [None] * len(selected_configurations)

    for i in xrange(0, N):
        idx = global_path[i]
        cluster = i + 1
        print ('Planning paths for configurations in cluster %i ...' % cluster)
        config = 0
        clock_start = timer()
        while config <= len(path[i]) - 1:
            q = clusters[idx][path[i][config], 0:6]

            traj[count] = ru.planning.plan_to_joint_configuration(robot, q, params.planner, params.max_iters, params.max_ppiters)
            if traj[count] is None:
                print ("Could not find a feasible path, skipping current point in cluster %i ... " % cluster)
                skipped += 1
                config += 1
                continue
            points[count] = np.hstack((clusters[idx][path[i][config], 7:10], clusters[idx][path[i][config], 6]))
            robot.SetActiveDOFValues(q)
            config += 1
            count += 1
            if config == len(path[i]):
                end_time = timer() - clock_start
                print('Planning time for cluster %d: %f' % (cluster, end_time))

    traj[-1] = ru.planning.plan_to_joint_configuration(robot, qhome, params.planner, params.max_iters, params.max_ppiters)
    robot.SetActiveDOFValues(qhome)

    # get time at end of planning execution
    end = timer()
    info = classes.InfoObj()

    info.initialise = initialise_end - starttime
    info.getconfig = ik_cpu_time
    info.configselection = select_time
    info.clustering = cluster_end - cluster_start
    info.globaltsp = globsequence_end - globsequence_start
    info.localtsp = localtsp_end - localtsp_start
    info.total_tsp = info.localtsp + info.globaltsp
    info.pathplanning = end - plan_start
    info.totalplanning = end - starttime
    info.execution = kinematics.compute_execution_time(traj)
    info.N_clusters = N
    info.n_points = len(selected_configurations) - skipped

    print('Initialisation time (s): %f' % info.initialise)
    print ('Get configurations time (s): %f' % info.getconfig)
    print ('Select configurations time (s): %f' % info.configselection)
    print ('Clustering time (s): %f' % info.clustering)
    print ('Compute global sequence time (s): %f' % info.globaltsp)
    print ('Local TSP time (s): %f' % info.localtsp)
    print ('Total TSP time (s): %f' % info.total_tsp)
    print ('Path planning time (s): %f' % info.pathplanning)
    print ('Total planning time (s): %f' % info.totalplanning)
    print ('Execution time (s): %f' % info.execution)
    print ('Number of visited points: %d' % info.n_points)
    # ________________________________________________________
    ### ACTUATE ROBOT
    # ________________________________________________________
    if visualise == 1:
        # Open display
        env.SetViewer('qtcoin')
        Tcamera = [[-0.75036157, 0.12281536, -0.6495182, 2.42751741],
                   [0.66099327, 0.12938928, -0.73915243, 2.34414649],
                   [-0.00673858, -0.98395874, -0.17826888, 1.44325936],
                   [0., 0., 0., 1.]]

        time.sleep(1)
        env.GetViewer().SetCamera(Tcamera)
        print('Starting simulation in 2 seconds...')
        time.sleep(2)

        start_exec = timer()
        cluster = 0
        count = 0
        for i in xrange(0, len(traj)):
            if traj[i] is None:
                continue
            if i <= len(points) - 1:
                cluster_next = points[i][3] + 1
            if cluster_next != cluster:
                cluster = cluster_next
                count += 1
                print ('Moving to configurations in cluster {:d} ...'.format(count))

            play_trajectory(env, robot, traj[i], visualise_speed)
            if i <= len(points) - 1:
                h.append(env.plot3(points=(points[i][0:3]), pointsize=4, colors=c[int(points[i][3])]))

        end_exec = timer() - start_exec
        print ('Simulation time: %f' % end_exec)

    raw_input('Press Enter to terminate...')

    return info
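tsp.two_opt() above orders the configurations inside one cluster between fixed entry and exit points. A minimal 2-opt sketch over a symmetric distance matrix with pinned endpoints (hypothetical interface, not the cluster_rtsp implementation; assumes start != end):

import numpy as np

def two_opt(dist, start, end):
    """2-opt local search on a symmetric distance matrix, keeping start/end fixed."""
    n = dist.shape[0]
    middle = [v for v in range(n) if v not in (start, end)]
    tour = [start] + middle + [end]

    def length(t):
        return sum(dist[t[k], t[k + 1]] for k in range(len(t) - 1))

    improved = True
    while improved:  # repeat until no segment reversal shortens the tour
        improved = False
        for i in range(1, len(tour) - 2):
            for j in range(i + 1, len(tour) - 1):
                candidate = tour[:i] + tour[i:j + 1][::-1] + tour[j + 1:]
                if length(candidate) < length(tour):
                    tour = candidate
                    improved = True
    return tour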
Example #44
0
    def run(self, video_path=0, start_frame=0, conf_thresh=0.6):
        """ Runs the test on a video (or webcam)
        
        # Arguments
        video_path: A file path to a video to be tested on. Can also be a number, 
                    in which case the webcam with the same number (i.e. 0) is 
                    used instead
                    
        start_frame: The number of the first frame of the video to be processed
                     by the network. 
                     
        conf_thresh: Threshold of confidence. Any boxes with lower confidence 
                     are not visualized.
                    
        """

        vid = cv2.VideoCapture(video_path)
        if not vid.isOpened():
            raise IOError(("Couldn't open video file or webcam. If you're "
                           "trying to open a webcam, make sure your video_path "
                           "is an integer!"))

        # Compute aspect ratio of video
        vidw = vid.get(cv2.CAP_PROP_FRAME_WIDTH)
        vidh = vid.get(cv2.CAP_PROP_FRAME_HEIGHT)
        vidar = vidw / vidh

        # Skip frames until reaching start_frame
        if start_frame > 0:
            vid.set(cv2.CAP_PROP_POS_FRAMES, start_frame)  # seek by frame index, not milliseconds

        accum_time = 0
        curr_fps = 0
        fps = "FPS: ??"
        prev_time = timer()

        while True:
            retval, orig_image = vid.read()
            if not retval:
                print("Done!")
                return

            im_size = (self.input_shape[0], self.input_shape[1])
            resized = cv2.resize(orig_image, im_size)
            rgb = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)

            # Resize back to the original aspect ratio for visualization; the
            # resized version is used so that what is drawn reflects the
            # resolution the network actually works with.
            to_draw = cv2.resize(
                resized,
                (int(self.input_shape[0] * vidar), self.input_shape[1]))

            # Use model to predict
            inputs = [image.img_to_array(rgb)]
            tmp_inp = np.array(inputs)
            x = preprocess_input(tmp_inp)

            y = self.model.predict(x)

            # This line creates a new TensorFlow device every time. Is there a
            # way to avoid that?
            results = self.bbox_util.detection_out(y)

            if len(results) > 0 and len(results[0]) > 0:
                # Interpret output, only one frame is used
                det_label = results[0][:, 0]
                det_conf = results[0][:, 1]
                det_xmin = results[0][:, 2]
                det_ymin = results[0][:, 3]
                det_xmax = results[0][:, 4]
                det_ymax = results[0][:, 5]

                top_indices = [
                    i for i, conf in enumerate(det_conf) if conf >= conf_thresh
                ]

                top_conf = det_conf[top_indices]
                top_label_indices = det_label[top_indices].tolist()
                top_xmin = det_xmin[top_indices]
                top_ymin = det_ymin[top_indices]
                top_xmax = det_xmax[top_indices]
                top_ymax = det_ymax[top_indices]

                for i in range(top_conf.shape[0]):
                    xmin = int(round(top_xmin[i] * to_draw.shape[1]))
                    ymin = int(round(top_ymin[i] * to_draw.shape[0]))
                    xmax = int(round(top_xmax[i] * to_draw.shape[1]))
                    ymax = int(round(top_ymax[i] * to_draw.shape[0]))

                    # Draw the box on top of the to_draw image
                    class_num = int(top_label_indices[i])
                    cv2.rectangle(to_draw, (xmin, ymin), (xmax, ymax),
                                  self.class_colors[class_num], 2)
                    text = self.class_names[class_num] + " " + ('%.2f' %
                                                                top_conf[i])

                    text_top = (xmin, ymin - 10)
                    text_bot = (xmin + 80, ymin + 5)
                    text_pos = (xmin + 5, ymin)
                    cv2.rectangle(to_draw, text_top, text_bot,
                                  self.class_colors[class_num], -1)
                    cv2.putText(to_draw, text, text_pos,
                                cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 0), 1)

            # Calculate FPS
            # This computes FPS for everything, not just the model's execution,
            # which may or may not be what you want.
            curr_time = timer()
            exec_time = curr_time - prev_time
            prev_time = curr_time
            accum_time = accum_time + exec_time
            curr_fps = curr_fps + 1
            if accum_time > 1:
                accum_time = accum_time - 1
                fps = "FPS: " + str(curr_fps)
                curr_fps = 0

            # Draw FPS in top left corner
            cv2.rectangle(to_draw, (0, 0), (50, 17), (255, 255, 255), -1)
            cv2.putText(to_draw, fps, (3, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35,
                        (0, 0, 0), 1)

            cv2.imshow("SSD result", to_draw)
            cv2.waitKey(10)
Example #45
0
    def process_text(self, extract_np=True, write_processed_files=True):

        content_dict = dict()
        no_method_match = []  # -- pubs with no regex match for methodology
        no_method_found = []  # -- pubs with no 'methodology' section in content_dict
        no_abstract = []  # -- pubs with no abstract
        no_keywords = []  # -- pubs with no keywords

        directory = "project/additional_files/pdf-info/"
        self.create_directory(directory)

        for _, row in self.pub_df.iterrows():
            try:
                np_file = Path('project/additional_files/nounPhrases/',
                               row['text_file_name'])

                if np_file.exists():
                    continue
                else:
                    print(row['pdf_file_name'])
                    start1 = timer()

                    # -- extract pdf metadata
                    pdf_info = self.extract_pdf_metadata(row)

                    # -- extract text from pdf
                    row['text'] = self.extract_text_from_pdf(
                        row['pdf_file_name'], row['text_file_name'])
                    reduced_content = row['text']

                    # remove references or bibliography
                    reduced_content = self.remove_references(reduced_content)

                    # remove acknowledgement
                    reduced_content = self.remove_acknowledgment(
                        reduced_content)

                    # -- clean up remaining text
                    reduced_content = reduced_content.replace(row['title'], '')

                    reduced_content, jel_field, jel_method = self.remove_noise_handle_hyphenation_extract_jel_codes(
                        reduced_content, dehyphenation=True)

                    # --- store jel fields and methods in content dict
                    self.store_jel_fields_methods(content_dict, jel_field,
                                                  jel_method, row)

                    reduced_content = self.remove_url(reduced_content)

                    # --- extract main sections from remaining content
                    abstract_found = False
                    abstract_beg = self.find_abstract(reduced_content)
                    abstract_end = -1

                    # --- find_keywords
                    abstract_beg, abstract_end, keywords_beg = self.find_keywords(
                        abstract_beg, abstract_end, content_dict, no_keywords,
                        pdf_info, reduced_content, row)
                    # --- find introduction
                    introduction_beg = self.find_introduction(reduced_content)

                    # --- find abstract
                    if keywords_beg < abstract_beg < introduction_beg:
                        abstract_end = introduction_beg

                    elif abstract_beg < introduction_beg and keywords_beg < 0:
                        abstract_end = introduction_beg
                        if abstract_beg < 0:
                            abstract_beg = 0

                    elif introduction_beg < 0 and keywords_beg < 0 and abstract_beg < 0:
                        abstract_beg = 0

                    if abstract_beg <= 0:  # extract the first para as abstract
                        paras = self.extract_paragraphs(reduced_content)
                        for p in paras:
                            try:
                                if (
                                        len(p.split(' ')) < 10
                                        or len(p.split('.')) < 2
                                        or detect(p) != 'en'
                                ):  # less than 10 words in a para or just 1 line
                                    reduced_content = reduced_content.replace(
                                        p, '').strip()
                                    continue
                                if p in list(pdf_info.values()):
                                    reduced_content = reduced_content.replace(
                                        p, '').strip()
                                    continue
                                else:
                                    count = Counter(
                                        ([token.pos_
                                          for token in self.nlp(p)]))
                                    if (count['PROPN'] >
                                            0.68 * sum(count.values())):
                                        reduced_content = reduced_content.replace(
                                            p, '').strip()
                                        continue
                                    else:
                                        abstract = p
                                        content_dict['abstract'] = abstract
                                        abstract_found = True
                                        reduced_content = reduced_content.replace(
                                            abstract, '').strip()
                                        break
                            except Exception:  # e.g. LangDetectException: can't detect language; skip this paragraph
                                continue

                    else:  #if abstract_end == keywords_beg or intro_beg
                        if abstract_end > 0:
                            abstract = reduced_content[
                                abstract_beg:abstract_end]
                            content_dict['abstract'] = abstract
                            abstract_found = True
                            reduced_content = reduced_content[abstract_end:]

                    # -- find the subject of article
                    self.find_subject_of_article(content_dict, pdf_info)

                    # -- remove metadata from article
                    reduced_content = self.remove_all_metadata_content_from_article(
                        pdf_info, reduced_content)

                    # -- find all other sections
                    methods_match = [(m.start(), m.end()) for m in re.finditer(
                        r"^.*(Data|DATA|Methodology|\b[m|M]ethods?\b|METHODS?|METHODOLODY|APPROACH|Approach).*$",
                        reduced_content,
                        flags=re.MULTILINE)]
                    summary_index = [
                        m.start() for m in re.finditer(
                            r"^\d?\.?\s?Summary|^\d?\.?\s?SUMMARY|^\d?\.?\s?Conclusions?|^\d?\.?\s?CONCLUSIONS?|^\d?\.?\s?Concluding\sRemarks|^\d?\.?\s?CONCLUDING\sREMARKS",
                            reduced_content,
                            flags=re.MULTILINE)
                    ]
                    results_index = [
                        m.start() for m in re.finditer(
                            r"^\d?\.?\s?Results?|^\d?\.?\s?RESULTS?",
                            reduced_content,
                            flags=re.MULTILINE)
                    ]
                    discussion_index = [
                        m.start() for m in re.finditer(
                            r"^\d?\.?\s?Discussions?|^\d?\.?\s?DISCUSSIONS?",
                            reduced_content,
                            flags=re.MULTILINE)
                    ]
                    methods_beg = -1
                    results_beg = -1
                    summary_beg = -1
                    discussion_beg = -1
                    intro_found = False

                    if len(methods_match) == 0:
                        no_method_match.append(row['pdf_file_name'])

                    # -- find introduction
                    introduction_beg = self.find_introduction(reduced_content)
                    if len(results_index) > 0:
                        # -- the heading can also appear in the table of contents, so take the last match found
                        results_beg = results_index[-1]
                    if len(summary_index) > 0:
                        summary_beg = summary_index[-1]
                    if len(discussion_index) > 0:
                        discussion_beg = discussion_index[-1]
                        # Discussion papers
                        if discussion_beg < results_beg or discussion_beg < abstract_beg or \
                                discussion_beg < keywords_beg or discussion_beg < introduction_beg:
                            discussion_beg = -1  # discussion section not found yet

                    # -- find methodology
                    methods_beg = self.find_introduction_section(
                        content_dict, intro_found, introduction_beg,
                        methods_beg, methods_match, reduced_content)

                    self.find_methodology_section(content_dict, discussion_beg,
                                                  methods_beg, no_method_found,
                                                  reduced_content, results_beg,
                                                  row['pdf_file_name'],
                                                  summary_beg)

                    # -- find abstract
                    self.find_abstract_section(abstract_beg, abstract_end,
                                               abstract_found, content_dict,
                                               methods_beg, no_abstract,
                                               reduced_content, row)

                    # -- find summary and discussion sections
                    reduced_content = self.find_summary_discussion_sections(
                        content_dict, discussion_beg, reduced_content,
                        summary_beg)

                    # -- remove the sections already found so that we search through the remaining content
                    reduced_content = reduced_content.replace(
                        content_dict['introduction'], '').strip()
                    reduced_content = reduced_content.replace(
                        content_dict['methodology'], '').strip()

                    if reduced_content.find(content_dict['abstract']) > 0:
                        reduced_content = reduced_content[
                            reduced_content.find(content_dict['abstract']) +
                            len(content_dict['abstract']):]

                    # -- reduced content stores all the sections not found
                    content_dict['reduced_content'] = reduced_content

                    # -- write the content_dict to file
                    if (write_processed_files):
                        self.write_processed_content(content_dict,
                                                     row['text_file_name'])

                    # -- extract noun-phrases from the content_dict that contains section-wise data
                    if (extract_np):
                        self.get_section_wise_NPs(content_dict, row)

            except Exception as e:
                logging.exception(e)
                continue

        print(f'no methods match: {len(no_method_match)} , {no_method_match}'
              )  # -- pubs with no regex match for methodology
        print(f'no methods found: {len(no_method_found)} , {no_method_found}'
              )  # -- pubs with no 'methodology' section in content_dict
        print(f'no abstract found: {len(no_abstract)} , {no_abstract}'
              )  # -- pubs with no abstract
        print(f'no keywords found: {len(no_keywords)} , {no_keywords}'
              )  # -- pubs with no keywords
Example #46
0
            vpsnr += misc_functions.PSNR(y_cap, batch.squeeze().float()/255)

        sw.add_scalar('epoch_validation_loss', vloss/len(valid_data), epoch)
        sw.add_scalar('epoch_validation_ssim', vssim/len(valid_data), epoch)
        sw.add_scalar('epoch_validation_psnr', vpsnr/len(valid_data), epoch) 



def main():
    for ep in range(1, EPOCHS + 1):
        train(ep)
        validate(ep)

    torch.save(model.state_dict(), 'model_states/model_state_Model_VAE_2.pth')




# running main()
t0 = timer()

main()

t1 = timer()

print(f"Took {t1 - t0} seconds, done.")




Example #47
0
def move():
    """
    Called when the Battlesnake Engine needs to know your next my_move.
    The data parameter will contain information about the board.
    Your response must include your my_move of up, down, left, or right.
    """
    start = timer()

    # my_moves
    delta = [
        [-1, 0],  # go up
        [0, -1],  # go left
        [1, 0],  # go down
        [0, 1]
    ]  # go right

    delta_name = ['up', 'left', 'down', 'right']

    # call for data
    data = bottle.request.json
    turn = data['turn']
    # pretty #print
    ##print(f"turn: {turn}\n{json.dumps(data, indent=2)}")
    # board size
    width = data['board']['width']
    height = data['board']['height']

    # my head and body locations
    snakes = data['board']['snakes']
    me = data['you']
    # my health
    my_health = me['health']
    ##print(f'me\n{me}')
    my_head_y = me['body'][0]['y']
    my_head_x = me['body'][0]['x']

    my_tail_y = me['body'][-1]['y']
    my_tail_x = me['body'][-1]['x']

    # find next tail
    my_next_tail_y = me['body'][-2]['y']
    my_next_tail_x = me['body'][-2]['x']

    next_tails = []
    for i in range(len(snakes)):
        next_tail_y = snakes[i]['body'][-2]['y']
        next_tail_x = snakes[i]['body'][-2]['x']

        next_tails.append([next_tail_y, next_tail_x])

    ##print(f'tail yx = {my_tail_y},{my_tail_x}\n'
    #     f'nexttail_yx: {next_tail_y},{next_tail_x}')
    my_id = me['id']

    # for comparison with opponent's snakes
    my_body_len = len(me['body'])

    # moves info
    which_move = ''
    my_move = ''
    move_num = 0

    # flags
    path_found = False
    ready = False
    risky = False
    riskier = False

    # make state info
    # make snakes_grid
    snakes_grid, solo_grid, snake_heads, snake_tails = \
        fill_snakes_grid(snakes, width, height, my_body_len, my_id)

    # check_grid
    check_grid = np.copy(snakes_grid)
    for i in range(len(next_tails)):
        next_tail_y = next_tails[i][0]
        next_tail_x = next_tails[i][1]
        check_grid[next_tail_y, next_tail_x] = 0
    # todo: use this? get distances to snake heads
    # dists, snaketype, y, x
    snake_dists = check_dist_to_snakes(snake_heads, my_head_y, my_head_x)

    # find free spaces and dists
    # dist, freey, freex
    # check path to free only considers those beyond min_dist
    free_spaces_arr = find_free_spaces(snakes_grid, my_head_y, my_head_x)

    if risky:
        snakes_grid[snakes_grid == next_samehead_val] = \
            next_smhead_val
        # todo  snakeheads (snaketype, y,x), take out the equal snakes
        # but it's only for food

    elif riskier:
        new_snake_heads = []
        snakes_grid[snakes_grid == next_bighead_val] = \
            next_smhead_val
        for f in range(len(snake_heads)):
            curr_head = snake_heads[f]
            curr_type = curr_head[0]
            if curr_type == big_head_val:
                new_snake_heads.append(curr_head)
        snake_heads = new_snake_heads[:]

    attack = False
    # todo: if longest, start moving towards next_smhead_val on snakes grid

    num_to_attack = 2
    if risky:
        num_to_attack = len(snakes) - 1
    # todo: on risky, could attack with more snakes left
    # attack when only one snake left
    if len(snakes) == num_to_attack:
        for i in range(len(snakes)):
            if len(snakes[i]['body']) < my_body_len:
                attack = True
            else:
                attack = False
                break

    max_dist_for_food = (width + height) * 2

    # leave walls asap
    leave_walls = False

    if ((my_head_x == 0 or my_head_x == (snakes_grid.shape[1] - 1)) or \
        (my_head_y == 0 or my_head_y == (snakes_grid.shape[0] - 1))) and \
            my_health > 10:
        # print('walls')
        my_move, path_found = get_away_walls(my_head_y, my_head_x, snakes_grid,
                                             check_grid, snake_tails)
        if path_found and my_move != 'snakeshit':
            found_free = check_path_to_tail(my_head_y, my_head_x, move_num,
                                            snakes_grid, check_grid,
                                            snake_tails)
            if found_free:
                which_move = 'get away walls'
                leave_walls = True
            else:
                path_found = False

    # if me_longest, chase 8s
    if attack and not leave_walls:
        # print('attack')
        target_arr = []
        # calculate distances and sort
        for j in range(len(snake_heads)):
            snake_type = snake_heads[j][0]
            target_y = snake_heads[j][1]
            target_x = snake_heads[j][2]
            dist = heuristic([target_y, target_x], [my_head_y, my_head_x])
            target_arr.append([dist, target_y, target_x])
        targets = sorted(target_arr, key=lambda x: x[0])
        for i in range(len(targets)):
            victim = targets[i]
            move_num, my_move, path_found = \
                search(victim[1], victim[2], my_head_y, my_head_x,
                       snakes_grid)
            if path_found and my_move != 'snakeshit':

                found_free = check_path_to_tail(my_head_y, my_head_x, move_num,
                                                snakes_grid, check_grid,
                                                snake_tails)
                if found_free:
                    break
                else:
                    path_found = False
            elif my_move == 'snakeshit':
                path_found = False

    # list of dicts of food locations
    food = data['board']['food']
    # list in order of nearest to furthest food tuples (dist, y,x)
    food_arr = []
    # if there is food
    if len(food) > 0:
        food_arr = fill_food_arr(food, my_head_y, my_head_x)
    # if there is food, run A* for a route to it, using the snakes grid for the cost g
    food_count = 0

    found_path = False
    # get food
    eating = False
    count = 0
    get_it = False
    if not path_found and not leave_walls and not attack:
        # print('food')
        while not eating and count < len(food_arr):
            curr_food = food_arr[count]
            food_dist = curr_food[0]
            food_y = curr_food[1]
            food_x = curr_food[2]
            food_count += 1
            if len(snakes) > 1:
                for i in range(len(snake_heads)):
                    curr_head = snake_heads[i]
                    head_type = curr_head[0]
                    snakehead_y = curr_head[1]
                    snakehead_x = curr_head[2]

                    opp_dist = heuristic([snakehead_y, snakehead_x],
                                         [food_y, food_x])
                    if food_dist < opp_dist:
                        get_it = True
                    elif head_type == small_head_val and \
                            food_dist <= opp_dist:
                        get_it = True
                    else:
                        get_it = False
                        break
            else:
                get_it = True

            if get_it:
                move_num, my_move, path_found = \
                    search(food_y, food_x, my_head_y, my_head_x,
                           snakes_grid, check_grid)
                if path_found:

                    found_free = check_path_to_tail(my_head_y, my_head_x,
                                                    move_num, snakes_grid,
                                                    check_grid, snake_tails)

                    if found_free:
                        which_move = 'get food'
                        eating = True
                    else:
                        path_found = False
                else:
                    path_found = False

            count += 1

    # shorten food_arr
    # food_arr = food_arr[food_count:]
    count = 0
    # chase my tail
    if not path_found and not leave_walls and not attack:
        # print('my tail')
        # chase tail if nothing in food_arr
        move_num, my_move, path_found = search(my_tail_y, my_tail_x, my_head_y,
                                               my_head_x, snakes_grid,
                                               check_grid)
        if path_found:
            '''
            found_free = check_path_to_free(my_head_y, my_head_x,
                                move_num, snakes_grid, free_spaces_arr)
            '''
            found_free = check_path_to_tail(my_head_y, my_head_x, move_num,
                                            snakes_grid, check_grid,
                                            snake_tails)
            if found_free:
                which_move = 'my tail'
            else:
                path_found = False
        else:
            path_found = False

    count = 0
    # chase other snakes' tails
    if not path_found and not leave_walls and not attack:
        # print('other tails')
        for q in range(len(snake_tails)):
            curr_tail = snake_tails[q]
            move_num, my_move, path_found = search(curr_tail[0], curr_tail[1],
                                                   my_head_y, my_head_x,
                                                   snakes_grid, check_grid)
            if path_found:
                '''
                found_free = check_path_to_free(my_head_y, my_head_x,
                                                move_num, snakes_grid, free_spaces_arr)
                '''
                found_free = check_path_to_tail(my_head_y, my_head_x, move_num,
                                                snakes_grid, check_grid,
                                                snake_tails)
                if found_free:
                    which_move = 'opponent tail'
                    break
                else:
                    path_found = False

            else:
                path_found = False

    # sorta random
    # todo: change 9s to 8s
    if not path_found and not leave_walls and not attack:
        # print('random')
        next_heads = [next_smhead_val, next_samehead_val, next_bighead_val]
        for t in range(len(delta)):
            next_y = my_head_y + delta[t][0]
            next_x = my_head_x + delta[t][1]
            if 0 <= next_y < snakes_grid.shape[0] and \
                    0 <= next_x < snakes_grid.shape[1]:
                if snakes_grid[next_y, next_x] == 0 or \
                        snakes_grid[next_y, next_x] in next_heads:
                    my_move = delta_name[t]
                    which_move = 'last resort'
                    # #print(f'my_move: {my_move}')
                    path_found = True
                    break
                    '''
                    found_free = check_path_to_tail(my_head_y, my_head_x,
                                    move_num, snakes_grid,
                                        check_grid, snake_tails)
                    if found_free:
                        my_move = delta_name[t]
                        which_move = 'last resort'
                        ##print(f'my_move: {my_move}')
                        path_found=True
                        break
                    '''
                    '''
                    else:
                        found_free = check_path_to_free(my_head_y, my_head_x,
                                    move_num, snakes_grid, free_spaces_arr)
                        if found_free:
                            my_move = delta_name[t]
                            which_move = 'last resort'
                            # #print(f'my_move: {my_move}')
                            break
                        else:
                            snakes_grid[snakes_grid==next_bighead_val] \
                                    = next_smhead_val
                            snakes_grid[snakes_grid==next_samehead_val]\
                                    = next_smhead_val
                    '''

    shout = "get in my belly!"

    response = {"move": my_move, "shout": shout}
    end = timer()
    # print(f'\n\nturn: {turn}\ntime: {end-start}\nmy_move: {my_move}\n '
    # f'which_move: {which_move}\n\n')
    ##print(f'snakes_grid\n {snakes_grid}\nsolo_grid\n {solo_grid}\n')
    return HTTPResponse(
        status=200,
        headers={"Content-Type": "application/json"},
        body=json.dumps(response),
    )
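heuristic() and search() are helpers defined elsewhere in this snake; given the four grid moves in delta, the natural admissible heuristic for the A* search is Manhattan distance. A sketch under that assumption:

def heuristic(a, b):
    """Manhattan distance between two [y, x] cells (assumed A* heuristic)."""
    return abs(a[0] - b[0]) + abs(a[1] - b[1])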
Example #48
0
# NOTE: imports reconstructed for this fragment (PyCryptodome assumed)
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
from Crypto.Hash import SHA256
from datetime import datetime
from timeit import default_timer as timer
import json

keyPair = RSA.generate(4096)

pubKey = keyPair.publickey()

#print(f"Public key:  (n={hex(pubKey.n)}, e={hex(pubKey.e)})")
#print(f"Private key: (n={hex(pubKey.n)}, d={hex(keyPair.d)})")

t_end = str(datetime.now())
auth_token = b'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJleHAiOjE1OTAyODIwMjksIm5iZiI6MTU5MDI4MTEyOSwidXNlcl9pZCI6MTAxNzh9.8V0JHxu-eLpGau0HMV3Hz28M5yVhOo3n7Qp2qsizau8'
team_id = 1000001
patient_id = 1000000

enc_and_sign_time = {}

for i in range(100000):
    start = timer()

    # Example sign message
    message_to_sign = {
        'auth_token': auth_token.decode('utf-8'),
        'request': "JoinTeam",
        'patient_id': patient_id,
    }

    message = json.dumps(message_to_sign).encode('utf-8')
    digest = SHA256.new()
    digest.update(message)

    signer = PKCS1_v1_5.new(keyPair)
    sig = signer.sign(digest)
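The fragment is cut off after signing; for completeness, the matching verification step with PyCryptodome's legacy PKCS1_v1_5 interface would look like this (a sketch, not part of the original script):

# verify against the public key; the legacy verify() returns True/False
verifier = PKCS1_v1_5.new(pubKey)
if verifier.verify(digest, sig):
    print('signature OK')
else:
    print('signature invalid')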
Example #49
0
        x = Activation('relu')(x)
        x = Dropout(dropout)(x)
    bld_hl_output = x

    x = Dense(labels.building.shape[1])(x)
    x = BatchNormalization()(x)
    bld_output = Activation(
        'softmax', name='building_output')(x)  # no dropout for an output layer

    bld_model = Model(inputs=input, outputs=bld_output)
    bld_model.compile(
        optimizer=optimizer,
        loss='categorical_crossentropy',
        metrics=['accuracy'])

    startTime = timer()
    bld_history = bld_model.fit(
        x=rss,
        y=labels.building,
        batch_size=batch_size,
        epochs=epochs,
        verbose=verbose,
        validation_split=validation_split,
        # validation_data=({
        #     'input': testing_data.rss_scaled
        # }, {
        #     'building_output': testing_data.labels.building
        # }),
        # callbacks=[tensorboard],
        shuffle=True)
    elapsedTime = timer() - startTime
Example #50
0
import seaborn as sns
from sklearn import metrics
from sklearn.model_selection import GroupKFold
from sklearn.metrics import mean_absolute_error
import lightgbm as lgb
import xgboost
import warnings
from datetime import datetime
warnings.simplefilter(action='ignore', category=FutureWarning)
import logging
import os  # assumed: needed for os.path.basename below
logger = logging.getLogger(__name__)  # assumed: the full script configures this logger
import gc
from timeit import default_timer as timer
import time
from catboost import CatBoostRegressor, Pool
from sklearn.neighbors import KNeighborsClassifier
start = timer()

####################
# CONFIGURABLES
#####################

# MODEL NUMBER
MODEL_NUMBER = "M055"
script_name = os.path.basename(__file__).split('.')[0]
if script_name not in MODEL_NUMBER:
    logger.error('Model Number is not same as script! Update before running')
    raise SystemExit(
        'Model Number is not same as script! Update before running')

# Make a runid that is unique to the time this is run for easy tracking later
run_id = "{:%m%d_%H%M}".format(datetime.now())
Example #51
0
        a[i] += 1


def encode_mbd_to_pickle(output_dir, mbd):
    # output_directory.jpeg -> output_directory.pickle
    filename = output_dir[:-5] + '.pickle'
    if os.path.isfile(filename):
        print('File already exists')
    else:
        with open(filename, 'wb') as f_out:  # the with-block closes the file
            pickle.dump(mbd, f_out)
        print('Successfully serialized saliency map (mbd) to:', filename)


if __name__ == "__main__":
    n = 10000000
    a = np.ones(n, dtype=np.float64)
    b = np.ones(n, dtype=np.float32)

    start = timer()
    func(a)
    print("With GPU (@jit):", timer() - start)

    start = timer()
    func2(a)
    print("With GPU (@jit(nopython=True)):", timer() - start)

    mbd = psal.get_saliency_mbd(demo_img)
    encode_mbd_to_pickle(os.getcwd() + '\\' + demo_img, mbd)
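Only the tail of the timed kernels survives in this fragment (a[i] += 1); func and func2 are presumably the same loop compiled with @jit and @jit(nopython=True). A reconstruction under that assumption (note that plain @jit compiles for the CPU, despite the 'With GPU' print labels above):

from numba import jit

@jit
def func(a):
    for i in range(a.shape[0]):
        a[i] += 1

@jit(nopython=True)
def func2(a):
    for i in range(a.shape[0]):
        a[i] += 1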
Example #52
0
def demo():
    """ _test_kdtree_compare
    
    This demo compares creation and query speed for different kd tree 
    implementations. They are fed with instances from the covtype dataset. 
    
    Three kd tree implementations are compared: SciPy's KDTree, 
    scikit-multiflow's KDTree and scikit-learn's KDTree. For each of them the demo will 
    time the construction of the tree on 1000 instances, and then measure 
    the time to query 100 instances. The results are displayed in the 
    terminal.
    
    """
    warnings.filterwarnings("ignore", ".*Passing 1d.*")

    stream = FileStream('../datasets/covtype.csv', -1, 1)
    stream.prepare_for_use()
    filter = OneHotToCategorical([[10, 11, 12, 13],
                                  [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
                                   34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53]])

    X, y = stream.next_sample(1000)
    X = filter.transform(X)
    # print(X)

    X_find, y = stream.next_sample(100)
    X_find = filter.transform(X_find)
    print(X_find[4])
    # Normal kdtree
    start = timer()
    scipy = spatial.KDTree(X, leafsize=40)
    end = timer()
    print("\nScipy KDTree construction time: " + str(end-start))

    start = timer()
    for i in range(100):  # query 100 instances, matching the other trees
        ind = scipy.query(X_find[i], 8)
        # print(ind)
    end = timer()
    print("Scipy KDTree query time: " + str(end - start))

    del scipy

    # Fast kdtree
    start = timer()
    opt = KDTree(X, metric='euclidean', return_distance=True)
    end = timer()
    print("\nOptimal KDTree construction time: " + str(end-start))

    start = timer()
    for i in range(100):
        ind, dist = opt.query(X_find[i], 8)
        # print(ind)
        # print(dist)
    end = timer()
    print("Optimal KDTree query time: " + str(end - start))

    del opt

    # Sklearn kdtree
    start = timer()
    sk = ng.KDTree(X, metric='euclidean')
    end = timer()
    print("\nSklearn KDTree construction time: " + str(end-start))

    start = timer()
    for i in range(100):
        ind, dist = sk.query(np.asarray(X_find[i]).reshape(1, -1), 8, return_distance=True)
        # print(ind)
        # print(dist)
    end = timer()
    print("Sklearn KDTree query time: " + str(end - start) + "\n")

    del sk
Example #53
0
def time_rounded(timer_start, precision=3):
    return round(timer() - timer_start, precision)
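Typical usage pairs it with a start timestamp taken from the same timer:

t_start = timer()
# ... work ...
print(time_rounded(t_start))     # e.g. 1.234
print(time_rounded(t_start, 1))  # e.g. 1.2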
Example #54
0
# NOTE: setup reconstructed for this fragment (pyrealsense2 assumed)
import pyrealsense2 as rs

pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

colorizer = rs.colorizer()
colorizer.set_option(rs.option.color_scheme, 0)

# Start streaming
pipeline.start(config)

align_to = rs.stream.depth
align = rs.align(align_to)

try:
    while True:
        start = timer()
        # Wait for a coherent pair of frames: depth and color
        frames = pipeline.wait_for_frames()
        #frameWait = timer ()
        #frameWaitTime = frameWait - start
        #print(str(frameWaitTime) + " frame wait time")
        # Align the depth frame to color frame
        aligned_frames = align.process(frames)

        depth_frame = aligned_frames.get_depth_frame()
        color_frame = aligned_frames.get_color_frame()
        if not depth_frame or not color_frame:
            continue

        #syncedFrame = timer()
        #syncedFrameTime = syncedFrame - start
Example #55
0
def set_time():

    current_time = timer()

    return current_time
Example #56
0
## PYTHON MODULES ##
from timeit import default_timer as timer

## HYBRID MODULES ##
import init_1D as init
import auxilliary_1D as aux
import particles_1D as particles
import fields_1D as fields
import sources_1D as sources
import save_routines as save
import pdb
from simulation_parameters_1D import save_particles, save_fields, te0_equil

if __name__ == '__main__':
    start_time = timer()

    # Initialize simulation: Allocate memory and set time parameters
    print('Initializing arrays...')
    pos, vel, Ie, W_elec, Ib, W_mag, idx, Ep, Bp, temp_N = init.initialize_particles(
    )
    B, E_int, E_half, Ve, Te, Te0 = init.initialize_fields()
    q_dens, q_dens_adv, Ji, ni, nu = init.initialize_source_arrays()
    old_particles, old_fields, temp3De, temp3Db, temp1D,\
                                v_prime, S, T, mp_flux  = init.initialize_tertiary_arrays()

    print('Collecting initial moments...')
    # Collect initial moments and save initial state
    sources.collect_moments(vel, Ie, W_elec, idx, q_dens, Ji, ni, nu)

    if te0_equil == 1:
        init.set_equilibrium_te0(q_dens, Te0)
Example #57
0
def preprocess_tsa_data():

    # OPTION 1: get a list of all subjects for which there are labels
    df = pd.read_csv(STAGE1_LABELS)
    df['Subject'], df['Zone'] = df['Id'].str.split('_',1).str
    SUBJECT_LIST = df['Subject'].unique()

    # OPTION 2: get a list of all subjects for whom there is data
    # SUBJECT_LIST = [os.path.splitext(subject)[0] for subject in os.listdir(INPUT_FOLDER)]
    #print(len(SUBJECT_LIST))
    #print(SUBJECT_LIST)

    # OPTION 3: get a list of subjects for small bore test purposes
    #SUBJECT_LIST = ['00360f79fd6e02781457eda48f85da90','0043db5e8c819bffc15261b1f1ac5e42',
    #                '0050492f92e22eed3474ae3a6fc907fa','006ec59fa59dd80a64c85347eef810c7',
    #                '0097503ee9fa0606559c56458b281a08','011516ab0eca7cad7f5257672ddde70e',
    #                '47e2a4a8e13ec7100f6af8cd839d1bb3','e087226320cc189142228b5fb93ed58f']

    # initialize tracking and saving items
    batch_num = 1
    count = 0
    threat_zone_examples = []
    start_time = timer()
    print(len(SUBJECT_LIST))
    for subject in SUBJECT_LIST:
        count += 1
        # read in the images
        print('--------------------------------------------------------------')
        print('t+> {:5.3f} |Reading images for subject #: {}'.format(timer()-start_time,
                                                                     subject))
        print('--------------------------------------------------------------')
        images = tsa.read_data(INPUT_FOLDER + '/' + subject + '.aps')

        # transpose so that the slice is the first dimension shape(16, 620, 512)
        images = images.transpose()

        # for each threat zone, loop through each image, mask off the zone and then crop it
        for tz_num, threat_zone_x_crop_dims in enumerate(zip(tsa.zone_slice_list,
                                                             tsa.zone_crop_list)):

            threat_zone = threat_zone_x_crop_dims[0]
            crop_dims = threat_zone_x_crop_dims[1]

            # get label
            label = np.array(tsa.get_subject_zone_label(tz_num,
                             tsa.get_subject_labels(STAGE1_LABELS, subject)))
            # print(STAGE1_LABELS, subject)
            for img_num, img in enumerate(images):

                print('Threat Zone:Image -> {}:{}'.format(tz_num, img_num))
                print('Threat Zone Label -> {}'.format(label))
                if label[0] == 0:
                    print('threat is present')
                    if threat_zone[img_num] is not None:

                        # correct the orientation of the image
                        print('-> reorienting base image')
                        base_img = np.flipud(img)
                        print('-> shape {}|mean={}'.format(base_img.shape,
                                                           base_img.mean()))

                        # convert to grayscale
                        print('-> converting to grayscale')
                        rescaled_img = tsa.convert_to_grayscale(base_img)
                        print('-> shape {}|mean={}'.format(rescaled_img.shape,
                                                           rescaled_img.mean()))

                        # spread the spectrum to improve contrast
                        print('-> spreading spectrum')
                        high_contrast_img = tsa.spread_spectrum(rescaled_img)
                        print('-> shape {}|mean={}'.format(high_contrast_img.shape,
                                                           high_contrast_img.mean()))

                        # get the masked image
                        print('-> masking image')
                        masked_img = tsa.roi(high_contrast_img, threat_zone[img_num])
                        print('-> shape {}|mean={}'.format(masked_img.shape,
                                                           masked_img.mean()))

                        # crop the image
                        print('-> cropping image')
                        cropped_img = tsa.crop(masked_img, crop_dims[img_num])
                        print('-> shape {}|mean={}'.format(cropped_img.shape,
                                                           cropped_img.mean()))

                        # normalize the image
                        print('-> normalizing image')
                        normalized_img = tsa.normalize(cropped_img)
                        print('-> shape {}|mean={}'.format(normalized_img.shape,
                                                           normalized_img.mean()))

                        # zero center the image
                        print('-> zero centering')
                        zero_centered_img = tsa.zero_center(normalized_img)
                        print('-> shape {}|mean={}'.format(zero_centered_img.shape,
                                                           zero_centered_img.mean()))

                        # append the features and labels to this threat zone's example array
                        print ('-> appending example to threat zone {}'.format(tz_num))
                        threat_zone_examples.append([[tz_num], zero_centered_img, label])
                        center = (125,125)
                        M = cv2.getRotationMatrix2D(center, 5, 1.0)
                        rotated = cv2.warpAffine(zero_centered_img, M, (250, 250))
                        # print('rotated image shape {} | mean= {}'.format(zero_centered_img.shape,
                        #                                                  zero_centered_img.mean()))
                        # cv2.imwrite("thumbnail.png", cropped)
                        # cv2.imwrite("rotated.jpg", rotated)
                        # cv2.imshow("original.jpg", zero_centered_img)
                        # cv2.waitKey(0)
                        # cv2.imshow("rotated.jpg", rotated)
                        # cv2.waitKey(0)
                        threat_zone_examples.append([[tz_num], rotated, label])
                        M = cv2.getRotationMatrix2D(center, 10, 1.0)
                        rotated1 = cv2.warpAffine(zero_centered_img, M, (250, 250))
                        threat_zone_examples.append([[tz_num], rotated1, label])
                        # cv2.imshow("rotated1.jpg", rotated1)
                        # cv2.waitKey(0)
                        M = cv2.getRotationMatrix2D(center, 15, 1.0)
                        rotated2 = cv2.warpAffine(zero_centered_img, M, (250, 250))
                        threat_zone_examples.append([[tz_num], rotated2, label])
                        # cv2.imshow("rotated2.jpg", rotated2)
                        # cv2.waitKey(0)
                        M = cv2.getRotationMatrix2D(center, 20, 1.0)
                        rotated3 = cv2.warpAffine(zero_centered_img, M, (250, 250))
                        threat_zone_examples.append([[tz_num], rotated3, label])
                        # cv2.imshow("rotated3.jpg", rotated3)
                        # cv2.waitKey(0)
                        print ('-> shape {:d}:{:d}:{:d}:{:d}:{:d}:{:d}'.format(
                                                             len(threat_zone_examples),
                                                             len(threat_zone_examples[0]),
                                                             len(threat_zone_examples[0][0]),
                                                             len(threat_zone_examples[0][1][0]),
                                                             len(threat_zone_examples[0][1][1]),
                                                             len(threat_zone_examples[0][2])))
                    else:
                        print('-> No view of tz:{} in img:{}. Skipping to next...'.format(
                                    tz_num, img_num))
                    print('------------------------------------------------')
                else:
                    print('threat not present and label is', label[0])
                    if count >= 0:  # always true; the count-reset subsampling below is disabled
                        # count = 0
                        print('IN LOOP')
                        if threat_zone[img_num] is not None:
                            # correct the orientation of the image
                            print('-> reorienting base image')
                            base_img = np.flipud(img)
                            print('-> shape {}|mean={}'.format(base_img.shape,
                                                               base_img.mean()))

                            # convert to grayscale
                            print('-> converting to grayscale')
                            rescaled_img = tsa.convert_to_grayscale(base_img)
                            print('-> shape {}|mean={}'.format(rescaled_img.shape,
                                                               rescaled_img.mean()))

                            # spread the spectrum to improve contrast
                            print('-> spreading spectrum')
                            high_contrast_img = tsa.spread_spectrum(rescaled_img)
                            print('-> shape {}|mean={}'.format(high_contrast_img.shape,
                                                               high_contrast_img.mean()))

                            # get the masked image
                            print('-> masking image')
                            masked_img = tsa.roi(high_contrast_img, threat_zone[img_num])
                            print('-> shape {}|mean={}'.format(masked_img.shape,
                                                               masked_img.mean()))

                            # crop the image
                            print('-> cropping image')
                            cropped_img = tsa.crop(masked_img, crop_dims[img_num])
                            print('-> shape {}|mean={}'.format(cropped_img.shape,
                                                               cropped_img.mean()))

                            # normalize the image
                            print('-> normalizing image')
                            normalized_img = tsa.normalize(cropped_img)
                            print('-> shape {}|mean={}'.format(normalized_img.shape,
                                                               normalized_img.mean()))

                            # zero center the image
                            print('-> zero centering')
                            zero_centered_img = tsa.zero_center(normalized_img)
                            print('-> shape {}|mean={}'.format(zero_centered_img.shape,
                                                               zero_centered_img.mean()))

                            # append the features and labels to this threat zone's example array
                            print ('-> appending example to threat zone {}'.format(tz_num))
                            threat_zone_examples.append([[tz_num], zero_centered_img, label])
                            print ('-> shape {:d}:{:d}:{:d}:{:d}:{:d}:{:d}'.format(
                                                                 len(threat_zone_examples),
                                                                 len(threat_zone_examples[0]),
                                                                 len(threat_zone_examples[0][0]),
                                                                 len(threat_zone_examples[0][1][0]),
                                                                 len(threat_zone_examples[0][1][1]),
                                                                 len(threat_zone_examples[0][2])))
                        # count = 0

        # each subject yields EXAMPLES_PER_SUBJECT examples (182, to be exact),
        # so this section just writes out the data once a full minibatch is
        # complete.
        if ((len(threat_zone_examples) % (BATCH_SIZE * EXAMPLES_PER_SUBJECT)) == 0):
            for tz_num, tz in enumerate(tsa.zone_slice_list):

                tz_examples_to_save = []

                # write out the batch and reset
                print(' -> writing: ' + PREPROCESSED_DATA_FOLDER +
                                        'preprocessed_TSA_scans-tz{}-{}-{}-b{}.npy'.format(
                                        tz_num+1,
                                        len(threat_zone_examples[0][1][0]),
                                        len(threat_zone_examples[0][1][1]),
                                        batch_num))

                # get this tz's examples
                tz_examples = [example for example in threat_zone_examples if example[0] ==
                               [tz_num]]

                # drop unused columns
                tz_examples_to_save.append([[features_label[1], features_label[2]]
                                            for features_label in tz_examples])

                # save batch.  Note that the trainer looks for tz{} where {} is a
                # tz_num 1 based in the minibatch file to select which batches to
                # use for training a given threat zone
                np.save(PREPROCESSED_DATA_FOLDER +
                        'preprocessed_TSA_scans-tz{}-{}-{}-b{}.npy'.format(tz_num+1,
                                                         len(threat_zone_examples[0][1][0]),
                                                         len(threat_zone_examples[0][1][1]),
                                                         batch_num),
                                                         tz_examples_to_save)
                del tz_examples_to_save

            #reset for next batch
            del threat_zone_examples
            threat_zone_examples = []
            batch_num += 1

    # we may run out of subjects before we finish a batch, so we write out
    # the last batch stub

    if (len(threat_zone_examples) > 0):
        for tz_num, tz in enumerate(tsa.zone_slice_list):

            tz_examples_to_save = []

            # write out the batch and reset
            print(' -> writing: ' + PREPROCESSED_DATA_FOLDER
                    + 'preprocessed_TSA_scans-tz{}-{}-{}-b{}.npy'.format(tz_num+1,
                      len(threat_zone_examples[0][1][0]),
                      len(threat_zone_examples[0][1][1]),
                      batch_num))

            # get this tz's examples
            tz_examples = [example for example in threat_zone_examples if example[0] ==
                           [tz_num]]

            # drop unused columns
            tz_examples_to_save.append([[features_label[1], features_label[2]]
                                        for features_label in tz_examples])

            #save batch
            np.save(PREPROCESSED_DATA_FOLDER +
                    'preprocessed_TSA_scans-tz{}-{}-{}-b{}.npy'.format(tz_num+1,
                                                     len(threat_zone_examples[0][1][0]),
                                                     len(threat_zone_examples[0][1][1]),
                                                     batch_num),
                                                     tz_examples_to_save)
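The three near-identical rotation blocks above (getRotationMatrix2D followed by warpAffine at 5, 10, 15 and 20 degrees) could be collapsed into a loop. A possible refactor sketch; augment_rotations is a hypothetical helper, with the center and output size taken from the snippet:

import cv2  # as used in the snippet above

def augment_rotations(img, angles=(5, 10, 15, 20), center=(125, 125), size=(250, 250)):
    """Return one rotated copy of img per angle (scale fixed at 1.0)."""
    return [cv2.warpAffine(img, cv2.getRotationMatrix2D(center, a, 1.0), size)
            for a in angles]

# usage sketch:
# for rotated in augment_rotations(zero_centered_img):
#     threat_zone_examples.append([[tz_num], rotated, label])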
Example #58
def main(block_name):

    for pickle_file in glob.glob(sys.argv[1] + block_name + "/*.pickle"):
        subject = pickle_file[len(pickle_file) - 12:len(pickle_file) - 7]
        batch_size = 25
        patch_size = 5  # filter size
        myInitializer = None

        if (block_name == "face"):
            image_size_h = 72
            image_size_w = 52
        elif (block_name == "mouth"):
            image_size_h = 24
            image_size_w = 40
        elif (block_name == "eye"):
            image_size_h = 24
            image_size_w = 32
        elif (block_name == "topnose"):
            image_size_h = 36
            image_size_w = 40
        elif (block_name == "nosetip"):
            image_size_h = 32
            image_size_w = 40
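        # (Note: the ladder above could equally be a lookup table -- a sketch
        #  with the same values as the branches:
        #  BLOCK_SIZES = {"face": (72, 52), "mouth": (24, 40), "eye": (24, 32),
        #                 "topnose": (36, 40), "nosetip": (32, 40)}
        #  image_size_h, image_size_w = BLOCK_SIZES[block_name])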

        num_labels = 7  # the output layer of the network (7 neurons)
        #num_channels = 3  # colour images have 3 channels
        num_channels = 1  # grayscale images have 1 channel

        # Load the pickle file containing the dataset
        with open(pickle_file, 'rb') as f:
            save = pickle.load(f)
            train_dataset = save['training_dataset']
            train_labels = save['training_emotion_label']
            valid_dataset = save['validation_dataset']
            valid_labels = save['validation_emotion_label']
            test_dataset = save['test_dataset']
            test_labels = save['test_emotion_label']
            del save  # hint to help gc free up memory
            # Here I print the dimension of the three datasets
            print('Training set', train_dataset.shape, train_labels.shape)
            print('Validation set', valid_dataset.shape, valid_labels.shape)
            print('Test set', test_dataset.shape, test_labels.shape)

        train_dataset = train_dataset.reshape(
            (-1, image_size_w, image_size_h, num_channels)).astype(np.float32)
        train_labels = train_labels.reshape((-1)).astype(np.ndarray)
        valid_dataset = valid_dataset.reshape(
            (-1, image_size_w, image_size_h, num_channels)).astype(np.float32)
        valid_labels = valid_labels.reshape((-1)).astype(np.ndarray)
        test_dataset = test_dataset.reshape(
            (-1, image_size_w, image_size_h, num_channels)).astype(np.float32)
        test_labels = test_labels.reshape((-1)).astype(np.ndarray)

        # create the arrays from string, removing brackets as well
        train_labels_new = extractArraysRemoveBrackets(train_labels)
        valid_labels_new = extractArraysRemoveBrackets(valid_labels)
        test_labels_new = extractArraysRemoveBrackets(test_labels)

        train_dataset = image_histogram_equalization(train_dataset)
        valid_dataset = image_histogram_equalization(valid_dataset)
        test_dataset = image_histogram_equalization(test_dataset)

        train_dataset = minmax_normalization(train_dataset)
        valid_dataset = minmax_normalization(valid_dataset)
        test_dataset = minmax_normalization(test_dataset)

        #Printing the new shape of the datasets
        print('Training set', train_dataset.shape, train_labels.shape)
        print('Validation set', valid_dataset.shape, valid_labels_new.shape)
        print('Test set', test_dataset.shape, test_labels_new.shape)

        #Declaring the graph object necessary to build the model
        graph = tf.Graph()
        with graph.as_default():

            print("Init Tensorflow variables...")
            tf_train_dataset = tf.placeholder(tf.float32,
                                              shape=(batch_size, image_size_w,
                                                     image_size_h,
                                                     num_channels))
            tf_train_labels = tf.placeholder(tf.float32,
                                             shape=(batch_size, num_labels))
            tf_valid_dataset = tf.constant(valid_dataset)
            tf_test_dataset = tf.constant(test_dataset)

            # Conv layer
            # [patch_size, patch_size, num_channels, depth]
            #conv1_weights = tf.Variable(tf.truncated_normal([patch_size, patch_size, num_channels, 6], stddev=0.1), name="conv1y_w")
            conv1_weights = tf.get_variable(
                name="conv1y_w",
                shape=[patch_size, patch_size, num_channels, 6],
                initializer=myInitializer)
            conv1_biases = tf.Variable(tf.zeros([6]), name="conv1y_b")
            # Conv layer
            # [patch_size, patch_size, depth, depth]
            conv2_weights = tf.get_variable(
                name="conv2y_w",
                shape=[patch_size, patch_size, 6, 12],
                initializer=myInitializer)
            conv2_biases = tf.Variable(tf.zeros([12]), name="conv2y_b")

            # Sizes after each valid conv (patch) + 2x2 pooling stage; use
            # integer division so the dense-layer shape stays integral
            conv1_size_w = (image_size_w - patch_size + 1) // 2
            conv2_size_w = (conv1_size_w - patch_size + 1) // 2
            conv1_size_h = (image_size_h - patch_size + 1) // 2
            conv2_size_h = (conv1_size_h - patch_size + 1) // 2

            dense1_weights = tf.get_variable(
                name="dense1y_w",
                shape=[conv2_size_w * conv2_size_h * 12, 256],
                initializer=myInitializer)
            dense1_biases = tf.Variable(tf.zeros([256]), name="dense1y_b")

            # Output layer
            layer_out_weights = tf.get_variable(name="outy_w",
                                                shape=[256, num_labels],
                                                initializer=myInitializer)
            layer_out_biases = tf.Variable(tf.zeros(shape=[num_labels]),
                                           name="outy_b")

            # dropout (keep probability) - not really used so far
            keep_prob = tf.placeholder(tf.float32)

            model_output = model(tf_train_dataset, image_size_w, image_size_h,
                                 num_channels, conv1_weights, conv1_biases,
                                 conv2_weights, conv2_biases, dense1_weights,
                                 dense1_biases, layer_out_weights,
                                 layer_out_biases, keep_prob)
            # sum of squared errors per example, averaged over the batch
            # (an MSE-style loss rather than cross-entropy)
            loss = tf.reduce_mean(
                tf.reduce_sum(tf.square(model_output - tf_train_labels)))

            loss_summ = tf.summary.scalar("loss", loss)

            global_step = tf.Variable(
                0, trainable=False)  # count the number of steps taken.
            learning_rate = tf.train.exponential_decay(0.00125,
                                                       global_step,
                                                       300,
                                                       0.5,
                                                       staircase=True)
            lrate_summ = tf.summary.scalar(
                "learning rate",
                learning_rate)  #save in a summary for Tensorboard
            optimizer = tf.train.GradientDescentOptimizer(
                learning_rate).minimize(loss, global_step=global_step)

            train_prediction = model_output
            valid_prediction = model(tf_valid_dataset, image_size_w,
                                     image_size_h, num_channels, conv1_weights,
                                     conv1_biases, conv2_weights, conv2_biases,
                                     dense1_weights, dense1_biases,
                                     layer_out_weights, layer_out_biases)
            test_prediction = model(tf_test_dataset, image_size_w,
                                    image_size_h, num_channels, conv1_weights,
                                    conv1_biases, conv2_weights, conv2_biases,
                                    dense1_weights, dense1_biases,
                                    layer_out_weights, layer_out_biases)

            saver = tf.train.Saver()

            total_epochs = 500

            with tf.Session(graph=graph) as session:
                merged_summaries = tf.summary.merge_all()
                now = datetime.datetime.now()
                log_path = "./sessions/summary_log/summaries_logs_p" + subject + str(
                    now.hour) + str(now.minute) + str(now.second)
                writer_summaries = tf.summary.FileWriter(
                    log_path, session.graph)
                tf.global_variables_initializer().run()

                epochs = np.ndarray(0, int)
                losses = np.ndarray(0, np.float32)
                accuracy_batch = np.ndarray(0, np.float32)
                accuracy_valid = np.ndarray(0, np.float32)

                start = timer()
                for epoch in range(total_epochs):
                    batch = create_batch(train_dataset, train_labels_new)
                    batch_data = batch[0]
                    batch_labels = batch[1]
                    feed_dict = {
                        tf_train_dataset: batch_data,
                        tf_train_labels: batch_labels,
                        keep_prob: 1.0
                    }
                    _, l, predictions, my_summary = session.run(
                        [optimizer, loss, model_output, merged_summaries],
                        feed_dict=feed_dict)
                    writer_summaries.add_summary(my_summary, epoch)

                    epochs = np.append(epochs, int(epoch + 1))
                    losses = np.append(losses, l)
                    accuracy_batch = np.append(
                        accuracy_batch,
                        accuracy(predictions, batch_labels, False))
                    accuracy_valid = np.append(
                        accuracy_valid,
                        accuracy(valid_prediction.eval(), valid_labels_new,
                                 False))
                    '''
                    if (epoch % 50 == 0):
                        print("")
                        print("Loss at epoch: ", epoch, " is " , l)
                        print("Global Step: " + str(global_step.eval()) + " of " + str(total_epochs))
                        print("Learning Rate: " + str(learning_rate.eval()))
                        print("Minibatch size: " + str(batch_labels.shape))
                        print("Validation size: " + str(valid_labels_new.shape))
                        accuracy(predictions, batch_labels, True)
                        print("")
                    '''

                end = timer()
                sessionTime = end - start
                saver.save(session,
                           "./sessions/tensorflow/cnn_arch1_pitch_p" + subject,
                           global_step=epoch)  # save the session
                accuracy_test = accuracy(test_prediction.eval(),
                                         test_labels_new, True)
                output = np.column_stack(
                    (epochs.flatten(), losses.flatten(),
                     accuracy_batch.flatten(), accuracy_valid.flatten()))
                np.savetxt(
                    "./sessions/epochs_log/subject_" + subject + ".txt",
                    output,
                    header="epoch    loss    accuracy_batch    accuracy_valid",
                    footer="accuracy_test:\n" + str(accuracy_test) +
                    "\ntime:\n" + str(sessionTime),
                    delimiter='   ')
                print("# Test size: " + str(test_labels_new.shape))
Example #59
def train_model(params):
    """
    Main function.
    """

    if params['RELOAD'] > 0:
        logging.info('Resuming training.')

    check_params(params)

    ########### Load data
    if params['BINARY_SELECTION']:
        params['POSITIVE_FILENAME'] = params['DATA_ROOT_PATH'] + '/' + params[
            'POSITIVE_FILENAME']
        params['NEGATIVE_FILENAME'] = params['DATA_ROOT_PATH'] + '/' + params[
            'NEGATIVE_FILENAME']
    params = process_files_binary_classification(params)
    dataset = build_dataset(params)
    params['INPUT_VOCABULARY_SIZE'] = dataset.vocabulary_len[
        params['INPUTS_IDS_DATASET'][0]]
    ###########

    ########### Build model
    if params['RELOAD'] == 0:  # build new model
        text_class_model = Text_Classification_Model(
            params,
            type=params['MODEL_TYPE'],
            verbose=params['VERBOSE'],
            model_name=params['MODEL_NAME'],
            vocabularies=dataset.vocabulary,
            store_path=params['STORE_PATH'])

        # Define the inputs and outputs mapping from our Dataset instance to our model
        inputMapping = dict()
        for i, id_in in enumerate(params['INPUTS_IDS_DATASET']):
            pos_source = dataset.ids_inputs.index(id_in)
            id_dest = text_class_model.ids_inputs[i]
            inputMapping[id_dest] = pos_source
        text_class_model.setInputsMapping(inputMapping)

        outputMapping = dict()
        for i, id_out in enumerate(params['OUTPUTS_IDS_DATASET']):
            pos_target = dataset.ids_outputs.index(id_out)
            id_dest = text_class_model.ids_outputs[i]
            outputMapping[id_dest] = pos_target
        text_class_model.setOutputsMapping(outputMapping)

    else:  # resume from previously trained model
        text_class_model = loadModel(params['STORE_PATH'], params['RELOAD'])
        text_class_model.setOptimizer()
    ###########

    ########### Callbacks
    callbacks = buildCallbacks(params, text_class_model, dataset)
    ###########

    ########### Training
    total_start_time = timer()

    logger.debug('Starting training!')
    training_params = {
        'n_epochs': params['MAX_EPOCH'],
        'batch_size': params['BATCH_SIZE'],
        'homogeneous_batches': params['HOMOGENEOUS_BATCHES'],
        'shuffle': True,
        'epochs_for_save': params['EPOCHS_FOR_SAVE'],
        'verbose': params['VERBOSE'],
        'eval_on_sets': params['EVAL_ON_SETS_KERAS'],
        'n_parallel_loaders': params['PARALLEL_LOADERS'],
        'extra_callbacks': callbacks,
        'reload_epoch': params['RELOAD'],
        'data_augmentation': params.get('DATA_AUGMENTATION', False)
    }
    text_class_model.trainNet(dataset, training_params)

    total_end_time = timer()
    time_difference = total_end_time - total_start_time
    logging.info('Total training time: {0:.2f}s = {1:.2f}m'.format(
        time_difference, time_difference / 60.0))
Example #60
def move(instruction, amt):
    if instruction == "N":
        pos.y += amt
    elif instruction == "S":
        pos.y -= amt
    elif instruction == "E":
        pos.x += amt
    elif instruction == "W":
        pos.x -= amt
    elif instruction == "L":
        pos.rotate(-amt)
    elif instruction == "R":
        pos.rotate(amt)
    elif instruction == "F":
        move(degrees[pos.facing], amt)
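

# The snippet assumes a global `pos` object and `degrees` mapping defined
# elsewhere; a minimal reconstruction (an assumption, inferred from usage):
from timeit import default_timer as timer  # assumed import

class Position:
    def __init__(self):
        self.x, self.y = 0, 0
        self.facing = 1  # index into `degrees`; the ship starts facing East

    def rotate(self, amt):
        # amt is a multiple of 90 degrees; positive = right, negative = left
        self.facing = (self.facing + amt // 90) % 4

degrees = ["N", "E", "S", "W"]
pos = Position()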


start = timer()
file = open('input.txt')

for line in file.readlines():
    line = line.strip()
    instruction = line[0]
    amt = int(line[1:])

    move(instruction, amt)

result = abs(pos.x) + abs(pos.y)

print("Completed in %fms" % ((timer() - start) * 1000))
print("%d is the result" % result)