def start(self, user_to_vote, session, vote_type):
    """
    Start the voting session.

    :param user_to_vote: The user up for voting.
    :type user_to_vote: User
    :param session: The session time in seconds.
    :type session: int
    :param vote_type: The type of vote.
    :type vote_type: str
    :return: True if the vote session was started.
    :rtype: bool
    """
    log.debug('%s, session=%s, vote_type=%s' % (user_to_vote, session, vote_type))
    if not self._has_active_session():
        self._user_to_vote = user_to_vote
        self._vote_session = session
        self._vote_type = vote_type
        # start the timer
        self._vote_timer = Timer()
        self._vote_timer.start(self._decide_vote, self._vote_session)
        return True
    return False
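# A minimal sketch (an assumption, not the project's actual class) of the
# delayed-callback Timer interface used above: start(callback, seconds)
# fires the callback once the session time elapses. Backed here by the
# standard library's threading.Timer.
import threading

class Timer(object):
    def __init__(self):
        self._timer = None

    def start(self, func, seconds):
        # Run `func` once after `seconds`.
        self._timer = threading.Timer(seconds, func)
        self._timer.start()

    def cancel(self):
        if self._timer is not None:
            self._timer.cancel()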
def run(self, image_name, get_label=False, do_detection=1):
    """Detection and extraction with the max-score box."""
    # for web demo
    # caffe.set_mode_gpu()
    # print "do_detection: ", do_detection
    if do_detection:
        t1 = Timer()
        t1.tic()
        image = self.detect(image_name)
        t1.toc('Detect time: ')
        # print "Detection has done"
    else:
        image = cv2.imread(image_name)
        # image = imresize(im, 300)

    t2 = Timer()
    t2.tic()
    image = pad(image, size=224)
    # image = pad(image)
    features = extraction.forward(self.net_e, image, self.transformer)
    r = np.squeeze(features['pool5/7x7_s1'].data[0])
    # features2 = extraction.forward(self.net_e2, image, self.transformer2)
    # r2 = np.squeeze(features2['pool5/7x7_s1'].data[0])
    # r = r2
    # r = np.hstack((r, r2)).copy()
    t2.toc('extract time: ')

    # start = time.time()
    if self.pca is not None:
        r = self.pca.transform(r)[0, :]
        # print 'pca time: ', time.time() - start
    r = r / norm(r)
    if get_label:
        label = np.squeeze(features['prob'].data[0].copy())
        return r, label
    return r
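# A hedged sketch of the tic/toc profiling Timer assumed by run() and by the
# search() snippets below (modelled on the Fast R-CNN style Timer; the real
# class may differ).
import time

class Timer(object):
    def __init__(self):
        self.start_time = 0.0

    def tic(self):
        # Remember when timing started.
        self.start_time = time.time()

    def toc(self, msg=''):
        # Print and return the time elapsed since the last tic().
        elapsed = time.time() - self.start_time
        print('%s%.3fs' % (msg, elapsed))
        return elapsed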
class OnwardState(object):
    def __init__(self):
        # After the path has not been seen for LOSTTIME secs, quit.
        self.pathLost = Timer(LOSTTIME)
        self.centers = []
        sw3.Forward(SPEED).start()

    def processFrame(self, frame):
        print "onward state"
        path = vision.ProcessFrame(frame)
        if path.found:
            self.pathLost.restart()
            sw3.Forward(SPEED).start()
            print "Speed %.2f" % SPEED
        elif not self.pathLost.timeLeft():
            # If the path has been lost for too long, go to the lost state.
            return LostState()
        print "ret found"
        return self

    def cont(self):
        # Path missions never stop while we see the path.
        return True
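# A minimal sketch (assumed, not the mission framework's real class) of the
# countdown Timer the vision state machines rely on: timeLeft() is truthy
# until `seconds` have elapsed, and restart() resets the countdown.
import time

class Timer(object):
    def __init__(self, seconds):
        self.seconds = seconds
        self.restart()

    def restart(self):
        # Reset the deadline to `seconds` from now.
        self.deadline = time.time() + self.seconds

    def timeLeft(self):
        # Seconds remaining; 0 (falsy) once the countdown has expired.
        return max(0.0, self.deadline - time.time())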
def _collect_metrics_atomic(self, instance, mor):
    """ Task that collects the metrics listed in the morlist for one MOR """
    ### <TEST-INSTRUMENTATION>
    t = Timer()
    ### </TEST-INSTRUMENTATION>
    i_key = self._instance_key(instance)
    server_instance = self._get_server_instance(instance)
    perfManager = server_instance.content.perfManager
    query = vim.PerformanceManager.QuerySpec(maxSample=1,
                                             entity=mor['mor'],
                                             metricId=mor['metrics'],
                                             intervalId=20,
                                             format='normal')
    results = perfManager.QueryPerf(querySpec=[query])
    if results:
        for result in results[0].value:
            if result.id.counterId not in self.metrics_metadata[i_key]:
                self.log.debug("Skipping this metric value, because there is no metadata about it")
                continue
            instance_name = result.id.instance or "none"
            value = self._transform_value(instance, result.id.counterId, result.value[0])
            self.gauge(
                "vsphere.%s" % self.metrics_metadata[i_key][result.id.counterId]['name'],
                value,
                hostname=mor['hostname'],
                tags=['instance:%s' % instance_name]
            )
    ### <TEST-INSTRUMENTATION>
    self.histogram('datadog.agent.vsphere.metric_colection.time', t.total())
    ### </TEST-INSTRUMENTATION>
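# A hedged sketch of the instrumentation Timer assumed by the
# <TEST-INSTRUMENTATION> blocks in the vSphere check snippets: total()
# returns the seconds elapsed since construction (the real agent utility
# may expose more).
import time

class Timer(object):
    def __init__(self):
        self._started = time.time()

    def total(self):
        # Wall-clock seconds since the timer was created.
        return time.time() - self._started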
def main():
    client = pymongo.MongoClient(IP)
    db = client[DB_NAME]
    col = db['entity']

    timer = Timer()
    timer.start()
    with bz2.open(DUMP_PATH) as f:
        for ln, line in enumerate(f):
            try:
                line = line.decode().strip()
                entity = json.loads(line[:-1])
                res = col.insert_one(entity)
            except Exception as e:
                print(e)
            if (ln + 1) % 1000 == 0:
                print(f"{ln + 1} entities inserted...[{timer.diff():.2f} sec]")

    new_col = db['lang']
    docs = col.find({"id": {"$regex": "^Q"}})
    for idx, doc in enumerate(docs):
        cleaned_doc = {'id': doc['id']}
        wiki_langs = set(doc['sitelinks'].keys())
        for lang in LANGS + ['en']:
            if f"{lang}wiki" in wiki_langs:
                cleaned_doc[f"{lang}wiki"] = 1
            else:
                cleaned_doc[f"{lang}wiki"] = 0
        new_col.insert_one(cleaned_doc)
        if (idx + 1) % 1000 == 0:
            time_spent = f"[{timer.diff():.2f} sec]"
            print(f"{idx + 1} entities' languages indexed..." + time_spent)
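# A minimal sketch (assumed interface) of the Timer used in main():
# start() records a reference point and diff() returns the seconds
# elapsed since then.
import time

class Timer:
    def start(self):
        self._t0 = time.time()

    def diff(self):
        return time.time() - self._t0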
def go_svm(proc, pca_enabled, central):
    print("SVM")
    print("0-1", proc)          # only 0 and 1
    print("Central", central)   # 32x32
    print("PCA", pca_enabled)   # PCA to 50 dims
    train_x, train_y = mnist.train()
    test_x, test_y = mnist.test()
    if central:
        train_x = mnist.train_32()
        test_x = mnist.test_32()
    if proc:
        with Timer("process"):
            train_x = process(train_x)
            test_x = process(test_x)
    train_x = train_x.reshape((train_x.shape[0], -1))
    test_x = test_x.reshape((test_x.shape[0], -1))
    if pca_enabled:
        with Timer("PCA"):
            pca = PCA(n_components=50, whiten=True)
            train_x = pca.fit_transform(train_x)
            test_x = pca.transform(test_x)
    with Timer("train"):
        clf = svm.SVC(cache_size=7000)
        clf.fit(train_x, train_y)
    print("Accuracy:", clf.score(test_x, test_y))
def go(proc, central):
    print("kNN")
    print("0-1", proc)
    print("central", central)
    train_x, train_y = mnist.train()
    test_x, test_y = mnist.test()
    if central:
        train_x = mnist.train_32()
        test_x = mnist.test_32()
    if proc:
        with Timer("process"):
            train_x = process(train_x)
            test_x = process(test_x)
    train_x = train_x.reshape((train_x.shape[0], -1))
    test_x = test_x.reshape((test_x.shape[0], -1))
    with Timer("kNN fit"):
        neigh = KNeighborsClassifier(n_neighbors=5, n_jobs=-1)
        neigh.fit(train_x, train_y)
    with Timer("kNN test"):
        print("Accuracy:", neigh.score(test_x, test_y))
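# A hedged sketch of the labelled context-manager Timer used by go_svm(),
# go(), test_2(), compute_Ws() and cum_counts(): it prints how long the
# `with` block took under the given label.
import time

class Timer:
    def __init__(self, label):
        self.label = label

    def __enter__(self):
        self._t0 = time.time()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        print(f"{self.label}: {time.time() - self._t0:.2f}s")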
def _cache_metrics_metadata(self, instance):
    """ Get, from the server instance, all the performance counter metadata
    (name/group/description...) keyed by the corresponding counter ID """
    ### <TEST-INSTRUMENTATION>
    t = Timer()
    ### </TEST-INSTRUMENTATION>
    i_key = self._instance_key(instance)
    self.log.info("Warming metrics metadata cache for instance {0}".format(i_key))
    server_instance = self._get_server_instance(instance)
    perfManager = server_instance.content.perfManager

    new_metadata = {}
    for counter in perfManager.perfCounter:
        d = dict(
            name="%s.%s" % (counter.groupInfo.key, counter.nameInfo.key),
            unit=counter.unitInfo.key,
            instance_tag='instance'  # FIXME: replace by what we want to tag!
        )
        new_metadata[counter.key] = d
    self.cache_times[i_key][METRICS_METADATA][LAST] = time.time()

    self.log.info("Finished metadata collection for instance {0}".format(i_key))
    # Reset metadata
    self.metrics_metadata[i_key] = new_metadata
    ### <TEST-INSTRUMENTATION>
    self.histogram('datadog.agent.vsphere.metric_metadata_collection.time', t.total())
    ### </TEST-INSTRUMENTATION>
def test_invalid_stop():
    t = Timer()
    t.stop()
    assert t.running == False
    assert t.remaining_time == 0
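# A hedged sketch of a Timer for which test_invalid_stop() passes: stop() on
# a never-started timer is a no-op that leaves it not running with no time
# remaining. The names here are assumptions drawn only from the test.
class Timer:
    def __init__(self):
        self.running = False
        self.remaining_time = 0

    def start(self, duration):
        self.running = True
        self.remaining_time = duration

    def stop(self):
        # Safe to call even if the timer was never started.
        self.running = False
        self.remaining_time = 0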
def _cache_morlist_process_atomic(self, instance, mor):
    """ Process one item of the self.morlist_raw list by querying the available
    metrics for this MOR and then putting it in self.morlist """
    ### <TEST-INSTRUMENTATION>
    t = Timer()
    ### </TEST-INSTRUMENTATION>
    i_key = self._instance_key(instance)
    server_instance = self._get_server_instance(instance)
    perfManager = server_instance.content.perfManager

    self.log.debug("job_atomic: Querying available metrics"
                   " for MOR {0} (type={1})".format(mor['mor'], mor['mor_type']))
    available_metrics = perfManager.QueryAvailablePerfMetric(
        mor['mor'], intervalId=REAL_TIME_INTERVAL)
    mor['metrics'] = self._compute_needed_metrics(instance, available_metrics)

    mor_name = str(mor['mor'])
    if mor_name in self.morlist[i_key]:
        # Was already here last iteration
        self.morlist[i_key][mor_name]['metrics'] = mor['metrics']
    else:
        self.morlist[i_key][mor_name] = mor
    self.morlist[i_key][mor_name]['last_seen'] = time.time()
    ### <TEST-INSTRUMENTATION>
    self.histogram('datadog.agent.vsphere.morlist_process_atomic.time', t.total())
    ### </TEST-INSTRUMENTATION>
def search(self, image_path, do_detection=1, k=10):
    # queryImage = cv2.imread(image_path)
    t1 = Timer()
    t1.tic()
    # queryFeatures = descriptor.get_descriptor(image_path, multi_box=False)
    queryFeatures = descriptor.get_descriptor(image_path)
    t1.toc('Feature Extraction time: ')

    t2 = Timer()
    t2.tic()
    # p = Profile()
    # results = p.runcall(self.searcher.search, queryFeatures)
    # p.print_stats()
    results, dists, ind = self.searcher.search(queryFeatures, k=5 * k)
    # self.reranking(queryFeatures, results, dists, ind, 0.6)
    # self.queryExpansion2(results, dists, ind)
    # self.queryExpansion(queryFeatures, results, dists, ind, top=3)
    t2.toc('Knn search time: ')

    result = []
    # origin image
    # result.append(image_path)
    dist = []
    for j, imageName in enumerate(results):
        if imageName not in result:
            result.append(imageName)
            dist.append(dists[j])
    # print result[:k]
    return result[:k], dist[:k]
def GetPredictions(file=None):
    results = []
    # So I don't have to re-do a lot of predictions if I start and stop
    records_to_skip = 0
    if file is not None:
        results = pd.read_csv(file).to_dict('records')
        records_to_skip = len(results)
    count = 0
    total_count = req_reviews.shape[0]
    t = Timer()
    t.Start()
    for row in req_reviews.iterrows():
        count += 1
        if count > records_to_skip:  # in the event we're continuing a file, jump to the last record
            predicted = PredictReview(row[1].reviewerID, row[1].asin)
            results.append({"datapointID": row[1].datapointID, "overall": predicted})
            if count % 1000 == 0:  # informative prints so we know it's still working
                t.Stop()
                super_print("({} of {}) ({:.4f}s/prediction)".format(count, total_count, t.elapsed / 1000))
                t.Start()
    DataFrame(results).to_csv("output.csv", index=False)
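# A hedged sketch of the Start/Stop-style Timer assumed by GetPredictions()
# and Load_MovieData(): Stop() freezes `elapsed` (in milliseconds, given the
# `/ 1000` above) and str() gives a readable duration. Interface inferred,
# not authoritative.
import time

class Timer:
    def __init__(self):
        self.elapsed = 0.0

    def Start(self):
        self._t0 = time.time()

    def Stop(self):
        # Elapsed time in milliseconds since the last Start().
        self.elapsed = (time.time() - self._t0) * 1000.0

    def __str__(self):
        return "%.2f ms" % self.elapsed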
def _cache_morlist_process_atomic(self, instance, mor):
    t = Timer()
    i_key = self._instance_key(instance)
    server_instance = self._get_server_instance(instance)
    perfManager = server_instance.content.perfManager

    self.log.debug(
        "job_atomic: Querying available metrics"
        " for MOR {0} (type={1})".format(mor['mor'], mor['mor_type'])
    )
    available_metrics = perfManager.QueryAvailablePerfMetric(
        mor['mor'], intervalId=REAL_TIME_INTERVAL)
    mor['metrics'] = self._compute_needed_metrics(instance, available_metrics)

    mor_name = str(mor['mor'])
    if mor_name in self.morlist[i_key]:
        self.morlist[i_key][mor_name]['metrics'] = mor['metrics']
    else:
        self.morlist[i_key][mor_name] = mor
    self.morlist[i_key][mor_name]['last_seen'] = time.time()

    self.histogram('datamonitor.agent.vsphere.morlist_process_atomic.time', t.total())
def tick(self, ms):
    self.player.move(ms)
    if self.player.collide_rect.left >= self.world_map.pixel_width - 16:
        debug('Level complete')
        return True

    # Handle collisions with walls/platforms
    try:
        self.collide_walls(ms)
    except OutOfBounds:
        debug('%s out of bounds', self.player.collide_rect)
        raise FellOffMap()

    # Check for jump every frame, in case the user is holding down the button
    if not self.jump_wait_timer and self.input_state and self.input_state['up'] and self.jump_timer.jump_allowed():
        debug('jump')
        self.jump_timer.unset()
        # Wait a bit between jumps
        self.jump_wait_timer = Timer(config.getint('Physics', 'jump_wait_time'))
        self.player.start_jump()
    elif self.jump_wait_timer:
        if self.jump_wait_timer.check(ms):
            self.jump_wait_timer = None

    # Center camera on player
    self.camera.center = self.player.rect.center

    # Constrain camera to the level
    self.camera.right = min(self.camera.right, self.world_map.pixel_width)
    self.camera.bottom = min(self.camera.bottom, self.world_map.pixel_height)
    self.camera.left = max(self.camera.left, 0)
    self.camera.top = max(self.camera.top, 0)
    self.renderer.set_camera_position(self.camera.centerx, self.camera.centery)
    # something is resetting the margin to 16px each frame... grrr
    self.renderer.set_camera_margin(0, 0, 0, 0)
def tick(blackboard):
    """
    This is the main entry point from C++ into our Python behaviours and back.

    More specifically, it is the bridge through which C++ calls Python inside
    the runswift executable and receives a BehaviourRequest back. Currently
    called in `robot/perception/behaviour/python/PythonSkill.cpp`, in the
    `PythonSkill::execute()` C++ function, specifically the line
    `behaviour_tick(bb)`.

    :param blackboard: The runswift Blackboard, a bunch of things stored in
        global memory.
    :return: A `robot.BehaviourRequest()` instance, defined in C++ inside
        `robot/types/BehaviourRequest.hpp`.
    """
    # Update all blackboard dependent helper modules.
    Global.update(blackboard)
    TeamStatus.update(blackboard)
    FieldGeometry.update(blackboard)
    Timer.update(blackboard)
    LedOverride.reset()
    Sonar.update(blackboard)

    # Set the HeadSkill
    HeadSkill.singleton.resetRequestState()

    global skill_instance
    if not skill_instance:
        skill_instance = skill_instance_factory(blackboard)

    if isinstance(skill_instance, BehaviourTask):
        # On every tick of the perception thread, we update the blackboard,
        # tick the skill, and then return the resulting behaviour request.
        skill_instance.world.update(blackboard)
        skill_instance.world.b_request = robot.BehaviourRequest()
        skill_instance.world.b_request.actions = robot.All()
        skill_instance.tick()
        request = skill_instance.world.b_request
    else:
        # Backwards compat for old-style skills if called directly via -s.
        request = skill_instance.tick(blackboard)

    headRequest = HeadSkill.singleton.tick(blackboard)
    request.actions.head = headRequest.actions.head

    # LED colouring.
    if len(blackboard.vision.uncertain_balls) > 0:
        request.actions.leds.rightEye = LEDColour.blue
    elif len(blackboard.vision.balls) > 0:
        request.actions.leds.rightEye = LEDColour.red
    else:
        request.actions.leds.rightEye = LEDColour.off

    if Global.amILost():
        request.actions.leds.leftEye = LEDColour.off
    else:
        request.actions.leds.leftEye = LEDColour.cyan

    return request
class FoundState(object):
    def __init__(self):
        # After the gate has not been seen for 2 secs, quit.
        self.pathLost = Timer(2)
        self.centers = []
        sw3.Forward(.3).start()

    def processFrame(self, frame):
        print "found state"
        path = vision.ProcessFrame(frame)
        if path.found:
            print "path found"
            self.pathLost.restart()
            # Finding out how many pixels from the center the gate is: the
            # gate's center value is the pixel column the gate is over.
            # Subtracting it from the middle pixel index gives a positive
            # value if the gate is to the left and a negative value if the
            # gate is to the right.
            print("got direction %d" % path.orientation)
            sw3.RelativeYaw(path.orientation).start()
        elif not self.pathLost.timeLeft():
            # If the gate has been lost for too long, go to the lost state.
            return PathLostState()
        print "ret found"
        return self

    def cont(self):
        # Gate missions never stop while we see the gate.
        return True
class ToCenterState(object):
    def __init__(self, startPt, endPt):
        # After the path has not been seen for LOSTTIME secs, quit.
        self.pathLost = Timer(LOSTTIME)
        self.centers = []
        self.startPt = startPt
        self.endPt = endPt
        sw3.Forward(0).start()

    def processFrame(self, frame):
        print "found state"
        path = vision.ProcessFrame(frame)
        if path.found:
            print "path found"
            self.pathLost.restart()
            # Moving to the center of the path.
            pts = [[path.p1x, path.p1y], [path.p2x, path.p2y]]
            self.startPt, self.endPt = sortPoints(self.startPt, pts)
            center = (path.cx, path.cy)
            if moveTo(frame, center) <= MAXDIST:
                return SecondTurnState(self.startPt, self.endPt)
        elif not self.pathLost.timeLeft():
            # If the path has been lost for too long, go to the path lost state.
            return PathLostState()
        print "ret found"
        return self

    def cont(self):
        # Path missions never stop while we see the path.
        return True
def test_2(batch_size, num_output, feature_dim):
    print('Current test for batch size %d, num_output %d, feature_dim %d' %
          (batch_size, num_output, feature_dim))
    batch = np.random.random((feature_dim, batch_size)).astype(np.float32)
    weight = np.random.random((feature_dim, num_output)).astype(np.float32)
    mult_res = np.zeros((batch_size, num_output), np.float32)

    with Timer('---test_2 numpy mult'):
        real_res = np.dot(batch.T, weight)
    # with Timer('---test_2 Gpu matmult'):
    #     mult_res = compute(batch, weight, mult_res)
    # with Timer('---test_2 mxnet nd GPU dot'):
    #     nd_gpu_result = mx_matrix_mult(batch, weight, mx.gpu(gpu_id))
    with Timer('---test_2 nd CPU mult'):
        nd_cpu_result = mx_matrix_mult(batch, weight)
    with Timer('---test_2 TF matmul'):
        tf_result = tf_matrix_mult(batch, weight, ctx='cpu')

    # assert is_equal(mult_res, real_res), "Gpu dist is not equal to numpy dist!!!"
    # assert is_equal(nd_gpu_result, real_res), "Mxnet GPU dist not equal to numpy dist!!!"
    assert is_equal(nd_cpu_result, real_res), "Mxnet CPU dist not equal to numpy dist!!!"
    assert is_equal(tf_result, real_res), "TF matmul result is not correct!!!"
    # assert is_equal(nd_gpu_result, mult_res), "Mxnet nd gpu dist is not equal to gpu mat mult dist!!!"
    print('test ok')
def compute_Ws(X, num_ccs):
    with Timer('Calculating pairwise distances...'):
        D = pairwise_distances(X, metric='sqeuclidean')
    np.save('mnist_D.npy', D)

    # k-nn
    with Timer('Calculating knn graph...'):
        for k in xrange(1, 10):
            Wknn = neighbor_graph(D, precomputed=True, k=k, symmetrize=True)
            n = connected_components(Wknn, directed=False, return_labels=False)
            if n <= num_ccs:
                break
        else:
            assert False, 'k too low'
    np.save('mnist_Wknn.npy', Wknn)
    print 'knn (k=%d)' % k

    # b-matching
    with Timer('Calculating b-matching graph...'):
        # using 8 decimal places kills the disk
        Wbma = hacky_b_matching(D, k, fmt='%.1f')
    np.save('mnist_Wbma.npy', Wbma)

    # msg
    with Timer('Calculating MSG graph...'):
        Wmsg = manifold_spanning_graph(X, 2, num_ccs=num_ccs)
    np.save('mnist_Wmsg.npy', Wmsg)

    return D, Wknn, Wbma, Wmsg
def cum_counts(feat, bins=None):
    """
    Given an N1 x N2 x ... Nn array of integer features F which take values
    0 to M - 1, return an M x (N1+1) x (N2+1) x ... (Nn+1) array C such that

        C[a, i1, ..., in] = #{ (j1, .., jn) : forall k, jk < ik, F[j1, .., jn] == a }

    (the bin axis comes first, matching the layout of `counts` below).
    """
    bins = bins or numpy.max(feat) + 1
    counts = numpy.zeros(((bins,) + feat.shape), dtype=numpy.int32)
    print feat.shape, bins, numpy.product(feat.shape) * bins
    with Timer("count"):
        for b in xrange(bins):
            counts[b] = feat == b
        gc.collect()
    with Timer("cumsum"):
        for axis in xrange(len(feat.shape)):
            numpy.cumsum(counts, axis=axis + 1, out=counts)
    with Timer("copy"):
        C = numpy.zeros([numpy.shape(counts)[0]] + [n + 1 for n in numpy.shape(counts)[1:]],
                        dtype=numpy.float32)
        C[:, 1:, 1:] = counts
    del counts
    gc.collect()
    return C
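# A small worked example of cum_counts (values checked by hand): for a 2x2
# feature array with bins=2, C[a, i1, i2] counts how many positions
# (j1 < i1, j2 < i2) hold the value a.
#
#   feat = numpy.array([[0, 1],
#                       [1, 1]])
#   C = cum_counts(feat, bins=2)
#   C[1, 2, 2] == 3   # three 1s in the whole array
#   C[0, 1, 1] == 1   # one 0 in the top-left 1x1 corner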
def go(pca_enabled=False, centralize=False):
    print("PCA:", pca_enabled)
    print("Centralize:", centralize)
    train_x, train_y = mnist.train()
    test_x, test_y = mnist.test()
    if centralize:
        train_x = mnist.train_32()
        test_x = mnist.test_32()
    train_x = train_x.reshape((train_x.shape[0], -1))
    test_x = test_x.reshape((test_x.shape[0], -1))
    if pca_enabled:
        with Timer("PCA"):
            pca = PCA(n_components=50, whiten=True)
            train_x = pca.fit_transform(train_x)
            test_x = pca.transform(test_x)
    with Timer("train"):
        max_iter = 1000 if centralize or pca_enabled else 200
        clf = MLPClassifier(max_iter=max_iter, verbose=True)
        clf.fit(train_x, train_y)
    print("Accuracy:", clf.score(test_x, test_y))
def __init__(self, iteration_num=1, use_test_params=True):
    # Number of training iterations for the model.
    self.iteration_num = iteration_num
    # Loggers for coreference resolution, the clusters it produces, and entity linking.
    self.coref_logger, self.export_clusters_logger, self.entity_linking_logger = self.init_system_logging()
    # Coreference model parameters and entity-linking parameters.
    self.coref_params, self.linking_params = self.init_params(use_test_params=use_test_params)
    # Path where coreference features are saved.
    self.coref_feat_map_save_path = Paths.CorefModels.get_feat_map_export_path(self._experiment_type(), self.iteration_num)
    # Path where the coreference model is saved.
    self.coref_model_save_path = Paths.CorefModels.get_model_export_path(self._experiment_type(), self.iteration_num)
    # Path where the entity-linking model is saved.
    self.linking_model_save_path = Paths.LinkingModels.get_model_export_path(self._experiment_type(), self.iteration_num)
    # Subclasses of this abstract class get a timer as soon as they are instantiated.
    self.timer = Timer()
    # Training set of coreference states.
    self.trn_coref_states = []
    # Development set of coreference states.
    self.dev_coref_states = []
    # Test set of coreference states.
    self.tst_coref_states = []
    # Defined role labels.
    self.other_label = "#other#"
    self.general_label = "#general#"
    self.linking_labels = ['monica geller', 'judy geller', 'jack geller', 'lily buffay',
                           'rachel green', 'joey tribbiani', 'phoebe buffay', 'carol willick',
                           'ross geller', 'chandler bing', 'gunther', 'ben geller',
                           'barry farber', 'richard burke', 'kate miller', 'peter becker',
                           'emily waltham'] + [self.other_label, self.general_label]
def Load_MovieData():
    print("Loading Movie data from CSV...")
    t = Timer()
    t.Start()

    # Create a temporary table, since the dataset has duplicate IDs that violate
    # the Primary Key constraint on FilmID. SQLite doesn't have ADD CONSTRAINT,
    # so we make an identical table without the constraint, fill it with data,
    # then copy that data row by row to the real table.
    statement = '''
        CREATE TABLE "Film_temp" (
            'FilmID' INTEGER,
            'Title' TEXT,
            'Release' TEXT,
            'Budget' INTEGER,
            'Revenue' INTEGER,
            'Runtime' INTEGER,
            'Rating' TEXT,
            'Poster' TEXT,
            'Rating_IMDB' INTEGER,
            'Rating_RT' INTEGER,
            'Rating_MC' INTEGER,
            'BestPicture' INTEGER,
            'AA_Wins' INTEGER,
            'AA_Nominations' INTEGER
        );
    '''
    cur.execute(statement)

    for f in pd.read_csv(MOVIEMETADATA_CSV, iterator=True):
        inserts = []
        for row in f.itertuples():
            inserts.append([
                row[6],
                row[9],
                row[15],
                row[3],
                row[16],
                row[17],
            ])
        statement = 'INSERT INTO Film_temp VALUES (?,?,?,?,?,?,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL)'
        cur.executemany(statement, inserts)

    # The CSV contains duplicate entries for 29 films - remove them here.
    statement = '''
        DELETE FROM Film_temp
        WHERE FilmID IN (SELECT MIN(FilmID)
                         FROM Film_temp
                         GROUP BY FilmID
                         HAVING COUNT(*) > 1)
    '''
    cur.execute(statement)

    # Copy the entirety of the temp table to the actual Film table, which has
    # the PK constraint.
    cur.execute("SELECT * FROM Film_temp")
    inserts = []
    statement = 'INSERT INTO Film VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)'
    for row in cur:
        inserts.append(row)
    cur.executemany(statement, inserts)

    t.Stop()
    print("Movie Data loaded in " + str(t))
def temporal_join(u, v, v_name, key, time_col):
    timer = Timer()

    window_size = CONSTANT.WINDOW_SIZE if len(u) * CONSTANT.WINDOW_RATIO < CONSTANT.WINDOW_SIZE \
        else int(len(u) * CONSTANT.WINDOW_RATIO)
    hash_max = CONSTANT.HASH_MAX if len(u) / CONSTANT.HASH_MAX > CONSTANT.HASH_BIN \
        else int(len(u) / CONSTANT.HASH_BIN)
    # window_size = CONSTANT.WINDOW_SIZE
    # hash_max = CONSTANT.HASH_MAX

    if isinstance(key, list):
        assert len(key) == 1
        key = key[0]

    tmp_u = u[[time_col, key]]
    timer.check("select")

    tmp_u = pd.concat([tmp_u, v], keys=['u', 'v'], sort=False)
    timer.check("concat")

    # rehash_key = f'rehash_{key}'
    # tmp_u[rehash_key] = tmp_u[key].apply(lambda x: hash(x) % CONSTANT.HASH_MAX)
    # timer.check("rehash_key")

    tmp_u.sort_values(time_col, inplace=True)
    timer.check("sort")

    agg_funcs = {
        col: Config.aggregate_op(col)
        for col in v
        if col != key
        and not col.startswith(CONSTANT.TIME_PREFIX)
        and not col.startswith(CONSTANT.MULTI_CAT_PREFIX)
    }

    # tmp_u = tmp_u.groupby(rehash_key).rolling(window=CONSTANT.WINDOW_SIZE).agg(agg_funcs)
    tmp_u = tmp_u.rolling(window=window_size).agg(agg_funcs)
    # timer.check("group & rolling & agg")

    # tmp_u.reset_index(0, drop=True, inplace=True)  # drop rehash index
    # timer.check("reset_index")

    tmp_u.columns = tmp_u.columns.map(
        lambda a: f"{CONSTANT.NUMERICAL_PREFIX}{a[1].upper()}_ROLLING5({v_name}.{a[0]})")

    if tmp_u.empty:
        log("empty tmp_u, return u")
        return u

    # ret = pd.concat([u, tmp_u.loc['u']], axis=1, sort=False)
    ret = u.merge(tmp_u.loc['u'], right_index=True, left_index=True, how="outer")
    timer.check("final concat")

    del tmp_u
    return ret
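# A hedged sketch of the checkpoint-style Timer assumed by temporal_join():
# each check(name) reports the time since the previous checkpoint.
import time

class Timer:
    def __init__(self):
        self._last = time.time()

    def check(self, name):
        # Print the time since the last checkpoint, then reset it.
        now = time.time()
        print(f"[{name}] {now - self._last:.3f}s")
        self._last = now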
def neural_net_cancer(solver):
    cancer_data = load_data_set('breastcancer')
    cancer_imp = impute.SimpleImputer(missing_values=np.nan, strategy='mean')
    cancer_imp.fit(np.array(cancer_data['train']['inputs'] + cancer_data['test']['inputs'],
                            dtype=np.float32))

    clf = neural_network.MLPClassifier(solver=solver, warm_start=True, max_iter=1000)
    with Timer() as t:
        clf.fit(cancer_imp.transform(cancer_data['train']['inputs']),
                cancer_data['train']['outputs'])
    time_to_fit = t.interval * 1000

    predicted = clf.predict(cancer_imp.transform(cancer_data['train']['inputs']))
    train_f1_score = metrics.f1_score(cancer_data['train']['outputs'], predicted, average='micro')

    with Timer() as t:
        predicted = clf.predict(cancer_imp.transform(cancer_data['test']['inputs']))
    test_f1_score = metrics.f1_score(cancer_data['test']['outputs'], predicted, average='micro')
    test_prediction_runtime = t.interval * 1000

    data_in = cancer_imp.transform(cancer_data['train']['inputs'] + cancer_data['test']['inputs'])
    data_out = cancer_data['train']['outputs'] + cancer_data['test']['outputs']

    t_out = cancer_data['test']['outputs']
    accuracy = accuracy_score(t_out, predicted) * 100
    precision = precision_score(t_out, predicted, average="weighted") * 100

    print("breastcancer.dataset (solver={})".format(solver))
    print("training f1 score:", train_f1_score)
    print("test f1 score:", test_f1_score)
    print("time to fit:", time_to_fit)
    print("test prediction runtime:", test_prediction_runtime)
    print("test accuracy", accuracy)
    print("test precision", precision)
    print()

    skplt.estimators.plot_learning_curve(
        clf, data_in, data_out,
        title="Learning Curve: Neural Net (breastcancer.dataset, solver={})".format(solver),
        cv=5)
    plt.savefig('out/neural_net/breastcancer-solver-{}.png'.format(solver))
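# A hedged sketch of the context-manager Timer assumed by the neural_net_*
# benchmarks: on exit, `interval` holds the elapsed wall-clock seconds
# (multiplied by 1000 above to get milliseconds).
import time

class Timer:
    def __enter__(self):
        self._t0 = time.time()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.interval = time.time() - self._t0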
def _send_message(self, message, success=None, error=None, *args, **kwargs):
    if message.startswith('msg'):
        try:
            how_long = int(message.split()[1])
            t = Timer(how_long, self.protocol.incomming_message,
                      self.self_buddy, u"Here's your message %ds later" % how_long)
            t.start()
        except Exception:
            pass
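# Here Timer(delay, func, *args) schedules func(*args) after `delay` seconds.
# The positional-argument style suggests a thin wrapper over the standard
# library's threading.Timer (which takes args as a sequence); a plausible
# sketch under that assumption, not the app's actual class:
import threading

def Timer(interval, function, *args, **kwargs):
    # Forward extra positional/keyword arguments to `function`.
    return threading.Timer(interval, function, args=args, kwargs=kwargs)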
def setup_keys(self, key_length=1024):
    with Timer(logstring="  Key generation") as t:
        self.p = paillier.Paillier(key_length=key_length)
    self.benchmarks['c_key_generate'] = (t.secs, 0, t.mem)

    with Timer(logstring="  Key upload") as t:
        snd_bytes = self.peer.send(self.p.pubkey)
    self.benchmarks['c_key_upload'] = (t.secs, snd_bytes, t.mem)
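# A hedged sketch of the benchmarking Timer assumed here and in
# enryptandsendDB(): it logs `logstring` with the elapsed time on exit and
# exposes `secs` (elapsed seconds) and `mem` (a memory reading; using the
# process's peak resident set size below is an assumption, Unix-only).
import time
import resource

class Timer(object):
    def __init__(self, logstring=""):
        self.logstring = logstring

    def __enter__(self):
        self._t0 = time.time()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.secs = time.time() - self._t0
        self.mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
        print("%s took %.3fs" % (self.logstring, self.secs))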
def __init__(self, startPt, endPt):
    # After the path has not been seen for 2 secs, move to the onward state.
    self.pathLost = Timer(2)
    self.centers = []
    self.startPt = startPt
    self.endPt = endPt
    sw3.Forward(0).start()
    sw3.Strafe(0).start()
def _collect_metrics_atomic(self, instance, mor):
    """ Task that collects the metrics listed in the morlist for one MOR """
    ### <TEST-INSTRUMENTATION>
    t = Timer()
    ### </TEST-INSTRUMENTATION>
    i_key = self._instance_key(instance)
    server_instance = self._get_server_instance(instance)
    perfManager = server_instance.content.perfManager
    query = vim.PerformanceManager.QuerySpec(maxSample=1,
                                             entity=mor['mor'],
                                             metricId=mor['metrics'],
                                             intervalId=20,
                                             format='normal')
    results = perfManager.QueryPerf(querySpec=[query])
    if results:
        for result in results[0].value:
            if result.id.counterId not in self.metrics_metadata[i_key]:
                self.log.debug("Skipping this metric value, because there is no metadata about it")
                continue
            instance_name = result.id.instance or "none"
            value = self._transform_value(instance, result.id.counterId, result.value[0])

            # Metric types are absolute, delta, and rate
            if ALL_METRICS[self.metrics_metadata[i_key][result.id.counterId]['name']]['s_type'] == 'rate':
                record_metric = self.rate
            else:
                record_metric = self.gauge

            ip = "unknown"
            content = server_instance.RetrieveContent()
            for child in content.rootFolder.childEntity:
                if hasattr(child, 'vmFolder'):
                    datacenter = child
                    vmFolder = datacenter.vmFolder
                    vmList = vmFolder.childEntity
                    for vm in vmList:
                        if isinstance(vm, vim.VirtualMachine):
                            ip = vm.summary.guest.ipAddress
                            self.log.info("Get VM ip {} by VMtools".format(ip))

            if ip != "unknown" and ip != "None":
                record_metric(
                    "vsphere.%s" % self.metrics_metadata[i_key][result.id.counterId]['name'],
                    value,
                    hostname=mor['hostname'],
                    tags=['instance:%s' % instance_name, 'ip:%s' % ip, 'type:VM']
                )
            else:
                record_metric(
                    "vsphere.%s" % self.metrics_metadata[i_key][result.id.counterId]['name'],
                    value,
                    hostname=mor['hostname'],
                    tags=['instance:%s' % instance_name, 'type:VM']
                )
    ### <TEST-INSTRUMENTATION>
    self.histogram('datamonitor.agent.vsphere.metric_colection.time', t.total())
    ### </TEST-INSTRUMENTATION>
def neural_net_car(solver):
    car_data = load_data_set('car')
    car_ohe = preprocessing.OneHotEncoder()
    # Encode features as one-hot.
    car_ohe.fit(car_data['train']['inputs'] + car_data['test']['inputs'])

    clf = neural_network.MLPClassifier(solver=solver, warm_start=True, max_iter=1000)
    with Timer() as t:
        clf.fit(car_ohe.transform(car_data['train']['inputs']),
                car_data['train']['outputs'])
    time_to_fit = t.interval * 1000

    predicted = clf.predict(car_ohe.transform(car_data['train']['inputs']))
    train_f1_score = metrics.f1_score(car_data['train']['outputs'], predicted, average='micro')

    with Timer() as t:
        predicted = clf.predict(car_ohe.transform(car_data['test']['inputs']))
    test_f1_score = metrics.f1_score(car_data['test']['outputs'], predicted, average='micro')
    test_prediction_runtime = t.interval * 1000

    data_in = car_ohe.transform(car_data['train']['inputs'] + car_data['test']['inputs'])
    data_out = car_data['train']['outputs'] + car_data['test']['outputs']

    t_out = car_data['test']['outputs']
    accuracy = accuracy_score(t_out, predicted) * 100
    precision = precision_score(t_out, predicted, average="weighted") * 100

    print("car.dataset (solver={})".format(solver))
    print("training f1 score:", train_f1_score)
    print("test f1 score:", test_f1_score)
    print("time to fit:", time_to_fit)
    print("test prediction runtime:", test_prediction_runtime)
    print("test accuracy", accuracy)
    print("test precision", precision)
    print()

    skplt.estimators.plot_learning_curve(
        clf, data_in, data_out,
        title="Learning Curve: Neural Net (car.dataset, solver={})".format(solver),
        cv=5)
    plt.savefig('out/neural_net/car-solver-{}.png'.format(solver))
def search(self, image_path, do_detection=1, k=50):
    # queryImage = cv2.imread(image_path)
    t1 = Timer()
    t1.tic()
    # queryFeatures = descriptor.get_descriptor(image_path, multi_box=False)
    queryFeatures, label = descriptor.get_descriptor(image_path, multi_box=False,
                                                     get_label=True,
                                                     do_detection=do_detection)
    flag = []  # if empty, we do not use the class to filter results
    t1.toc('Feature Extraction time: ')

    t2 = Timer()
    t2.tic()
    # p = Profile()
    # results = p.runcall(self.searcher.search, queryFeatures)
    # p.print_stats()
    results, dists = self.searcher.search(queryFeatures)
    print dists
    t2.toc('Knn search time: ')

    result = []
    # origin image
    # result.append(image_path)
    if len(flag) != 0:
        for j in xrange(0, k):
            imageName = results[j]
            if imageName not in result:
                # The class may be judged wrongly but the image similarity is high.
                if dists[j] < 0.05:
                    result.append(imageName)
                    continue
                # if dists[j] > 0.2:
                #     break
                # Judge whether the image belongs to the class.
                image_path = imageName.split('/')
                image_dir = image_path[0] + '/' + image_path[1] + '/' + image_path[2]
                # print image_dir
                if image_dir in flag:
                    result.append(imageName)
                # else:
                #     result.append(imageName)
    print 'total result', len(result)
    if len(result) < 3:
        # If the class-filtered result has fewer than 3 images, search over
        # the whole dataset instead.
        # print 'total result', len(result)
        k = 30
        result = []
        for j in xrange(0, k):
            imageName = results[j]
            if imageName not in result:
                # if dists[j] > 0.2:
                #     break
                result.append(imageName)
    return result
def setup(self):
    super().setup()
    self.img_size = self.options.hpe.img_size
    self.speed_diagnose = self.options.general.speed_diagnose
    self.model = self.make_model()
    self.heatmap_max = 1
    self.last_results = None
    self.timer = Timer()
def enryptandsendDB(self, bfs_DB):
    # This is the number of bits we need to reserve in each slot to avoid overflows.
    self.bl = int(ceil(log(self.Q_size * self.k, 2)))
    keylen = log(long(self.p.pubkey['n']), 2)
    s = int(keylen / self.bl)
    p = int(ceil(float(self.n) / s))
    print "  Packing {}x{} patient DB into {} packs of {} CTs (each CT packs max {} entries)".format(
        self.n, self.l, self.l, p, s)
    pool = multiprocessing.Pool(self.Cc)
    packit = partial(pack_and_enc, HE=self.p, l=self.bl)

    if self.chunksize == 0:
        self.chunksize = bfs_DB[0].length()
    nchunks = int(ceil(float(bfs_DB[0].length()) / self.chunksize))

    # Tell the server how many chunks to expect.
    self.peer.send(nchunks)

    self.benchmarks['c_db_encrypt'] = [0, 0, 0]
    self.benchmarks['c_db_upload'] = [0, 0, 0]
    for nchunk in range(nchunks):
        # Encrypt DB chunk
        with Timer(logstring="  Encrypted DB chunk {}/{}".format(nchunk + 1, nchunks)) as t:
            enc_bfs_DB = pool.map(
                packit,
                islice(izip(*bfs_DB), nchunk * self.chunksize, (nchunk + 1) * self.chunksize))
        self.benchmarks['c_db_encrypt'][0] += t.secs
        self.benchmarks['c_db_encrypt'][2] = t.mem

        # Measure total size in memory
        db_chunk_size = 0
        for col in enc_bfs_DB:
            for e in col:
                db_chunk_size += sys.getsizeof(e)
                db_chunk_size += 1024 / 8.
            db_chunk_size += sys.getsizeof(col)
        db_chunk_size += sys.getsizeof(enc_bfs_DB)
        self.benchmarks['c_db_encrypt'][1] += db_chunk_size

        # Send to server
        with Timer(logstring="  Sent DB chunk {}".format(nchunk)) as t:
            snd_bytes = self.peer.send(enc_bfs_DB)
        self.benchmarks['c_db_upload'][0] += t.secs
        self.benchmarks['c_db_upload'][1] += snd_bytes
        self.benchmarks['c_db_upload'][2] = t.mem

        # Delete DB
        for l in enc_bfs_DB:
            del l[:]
        del enc_bfs_DB[:]
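# A worked example of the packing arithmetic above, under assumed sizes
# (these numbers are illustrative, not from the source): with a 1024-bit
# Paillier key and Q_size * k = 1000, each slot must reserve
#   bl = ceil(log2(1000)) = 10 bits,
# so one ciphertext packs
#   s = int(1024 / 10) = 102 entries,
# and a column of n = 10000 entries needs
#   p = ceil(10000 / 102) = 99 packed ciphertexts.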
def __init__(self, img, scalar, time, onColor=(230, 0, 0), offColor=(30, 30, 30)):
    """'time' is in ms."""
    size = tuple(int(scalar * x) for x in img.get_size())
    Timer.__init__(self, time, ANSWER_TIMEOUT)
    JeopGameSurface.__init__(self, size)
    self._front = pygame.transform.smoothscale(img, size)
    self.offColor = offColor
    self.onColor = onColor
    self.dirty = 0
    self._draw_off()
class FoundState(object):
    def __init__(self):
        # After the dice have not been seen for LOSTTIME secs, quit.
        self.diceLost = Timer(LOSTTIME)
        self.centers = []
        self.pastDice = False
        sw3.Forward(.1).start()

    def processFrame(self, frame):
        print "found state"
        dice = vision.ProcessFrame(frame)
        if dice.found:
            print "found dice"
            self.diceLost.restart()
            (x, y, _) = dice.closestLoc(frame)
            h, w, _ = frame.shape
            heightError = h / 2 - y
            print('modifying depth by: %.3f' % (heightError / PIXTODEPTH))
            sw3.RelativeDepth(heightError / PIXTODEPTH).start()
            print "x is : ", x
            widthError = x - w / 2
            print "w is : ", widthError
            print('turning: %.3f' % (widthError / PIXTODEPTH))
            if widthError > 0:
                print "<<"
                sw3.RelativeYaw(.0001).start()
            else:
                print ">>"
                sw3.RelativeYaw(-.0001).start()
        # elif not self.diceLost.timeLeft():
        #     # If the dice have been lost for too long, go to the lost state.
        #     return LostState()
        if not self.diceLost.timeLeft():
            print "stopping seawolf"
            sw3.RelativeDepth(0).start()
            sw3.Strafe(0).start()
            self.pastDice = True
        print "ret found"
        return self

    def cont(self):
        # The mission continues until we have passed the dice.
        return not self.pastDice
def generate_sudoku(self, target=25):
    search = AStarSearch()
    base_sudoku = self.generate_full_sudoku()
    timer = Timer()

    if self.__kind == 'reverse':
        problem = ReverseSudokuGenerationProblem(Sudoku(), target, self.solver)
    else:
        problem = SudokuGenerationProblem(base_sudoku, target, self.solver)

    timer.start()
    node, cnt_explored = search.search(problem, h=lambda n: problem.value(n.state))
    time = timer.stop()
    return node.state, len(node.state), cnt_explored, time
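# A minimal sketch (assumed interface) of the Timer used by generate_sudoku():
# stop() returns the seconds elapsed since start().
import time

class Timer:
    def start(self):
        self._t0 = time.time()

    def stop(self):
        return time.time() - self._t0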
class SearchState(object):
    def __init__(self):
        self.timer = Timer(SEARCHTIME)
        self.foundCounter = 4

    def processFrame(self, frame):
        path = vision.ProcessFrame(frame)
        print path.found
        if path.found:
            frame = path.draw(frame)
            self.foundCounter -= 1
            if self.foundCounter <= 0:
                # The endpoint closest to the center of the frame is the start point.
                h, w, _ = frame.shape
                pt1, pt2 = [[path.p1x, path.p1y], [path.p2x, path.p2y]]
                dist1 = math.sqrt((w / 2 - pt1[0]) ** 2 + (h / 2 - pt1[1]) ** 2)
                dist2 = math.sqrt((w / 2 - pt2[0]) ** 2 + (h / 2 - pt2[1]) ** 2)
                if dist1 < dist2:
                    return FoundState(pt1, pt2)
                else:
                    return FoundState(pt2, pt1)
        return self

    def cont(self):
        """If true continue the mission; if false end the mission."""
        return self.timer.timeLeft()
class SearchState(object):
    def __init__(self):
        self.timer = Timer(SEARCHTIME)
        self.foundCounter = 4

    def processFrame(self, frame):
        path = vision.ProcessFrame(frame)
        print "search state"
        print path.found
        if path.found:
            frame = path.draw(frame)
            self.foundCounter -= 1
            if self.foundCounter <= 0:
                # The endpoint closest to the center is the start point.
                h, w, _ = frame.shape
                pt1, pt2 = [[path.p1x, path.p1y], [path.p2x, path.p2y]]
                center = (path.cx, path.cy)
                # The ideal angle is the angle of the end plank.
                angle1 = getAngleFromCenter(center, pt1)
                angle2 = getAngleFromCenter(center, pt2)
                if abs(angle1) < abs(angle2):
                    return TurnState(pt2, pt1)
                else:
                    return TurnState(pt1, pt2)
        return self

    def cont(self):
        """If true continue the mission; if false end the mission."""
        return self.timer.timeLeft()
def _collect_metrics_atomic(self, instance, mor):
    """ Task that collects the metrics listed in the morlist for one MOR """
    ### <TEST-INSTRUMENTATION>
    t = Timer()
    ### </TEST-INSTRUMENTATION>
    i_key = self._instance_key(instance)
    server_instance = self._get_server_instance(instance)
    perfManager = server_instance.content.perfManager
    query = vim.PerformanceManager.QuerySpec(maxSample=1,
                                             entity=mor['mor'],
                                             metricId=mor['metrics'],
                                             intervalId=mor['interval'],
                                             format='normal')
    results = perfManager.QueryPerf(querySpec=[query])
    if results:
        for result in results[0].value:
            if result.id.counterId not in self.metrics_metadata[i_key]:
                self.log.debug("Skipping this metric value, because there is no metadata about it")
                continue
            instance_name = result.id.instance or "none"
            value = self._transform_value(instance, result.id.counterId, result.value[0])

            # Metric types are absolute, delta, and rate
            metric_name = self.metrics_metadata[i_key][result.id.counterId]['name']
            if metric_name not in ALL_METRICS:
                self.log.debug(u"Skipping unknown `%s` metric.", metric_name)
                continue

            tags = ['instance:%s' % instance_name]
            if not mor['hostname']:  # no host tags available
                tags.extend(mor['tags'])

            # vSphere "rates" should be submitted as gauges (the rate is
            # precomputed).
            self.gauge(
                "vsphere.%s" % metric_name,
                value,
                hostname=mor['hostname'],
                tags=tags
            )
    ### <TEST-INSTRUMENTATION>
    self.histogram('datadog.agent.vsphere.metric_colection.time', t.total())
    ### </TEST-INSTRUMENTATION>
class FoundState(object):
    def __init__(self):
        # After the gate has not been seen for 2 secs, quit.
        self.gateLost = Timer(2)
        self.centers = []

    def processFrame(self, frame):
        print "found state"
        gate = vision.ProcessFrame(frame)
        if gate.found:
            print "gate found"
            self.gateLost.restart()
            # Finding out how many pixels from the center the gate is: gate.cp
            # is the pixel column the gate is over. Subtracting it from the
            # middle pixel index gives a positive value if the gate is to the
            # left and a negative value if the gate is to the right.
            _, w, _ = frame.shape
            center = w / 2.0 - gate.cp
            print("got center %d" % center)

            # Keep a running average of the last 10 centers to smooth noise.
            self.centers.insert(0, center)
            centers = 0
            for i in self.centers:
                centers += i
            center = float(centers) / len(self.centers)
            print(center)
            print self.centers
            if len(self.centers) > 10:
                self.centers.pop()
            print self.centers

            # If the difference is below the threshold, ignore it.
            center = center if center > SIGPIX else 0
            sw3.RelativeYaw(center / PIXTODEG).start()
        elif not self.gateLost.timeLeft():
            # If the gate has been lost for too long, go to the gate lost state.
            return GateLostState()
        print "ret found"
        return self

    def cont(self):
        # Gate missions never stop while we see the gate.
        return True
class PathLostState(object):
    def __init__(self):
        self.stopTime = Timer(RECKONTIME)

    def processFrame(self, frame):
        return self

    def cont(self):
        return self.stopTime.timeLeft()
def __init__(self, config, network):
    set_language(config.get('language'))
    self.network = network
    self.config = config
    self.windows = []
    self.efilter = OpenFileEventFilter(self.windows)
    self.app = QApplication(sys.argv)
    self.app.installEventFilter(self.efilter)
    self.timer = Timer()
    self.app.connect(self.app, QtCore.SIGNAL('new_window'), self.start_new_window)
def search(self, image_path, do_detection=0, k=20):
    t1 = Timer()
    t1.tic()
    # queryFeatures = descriptor.get_descriptor(image_path, multi_box=False)
    queryFeatures = descriptor.get_descriptor(image_path, do_detection=do_detection)
    t1.toc('Feature Extraction time: ')

    t2 = Timer()
    t2.tic()
    results, dists, ind = self.searcher.search(queryFeatures, k=k)
    # self.queryExpansion(results, dists, ind)
    t2.toc('Knn search time: ')

    result = []
    dist = []
    for j, imageName in enumerate(results):
        if imageName not in result:
            result.append(imageName)
            dist.append(dists[j])
    return result[:k], dist[:k]
def tick(blackboard):
    # Update all blackboard dependent helper modules.
    Global.update(blackboard)
    TeamStatus.update(blackboard)
    FieldGeometry.update(blackboard)
    Timer.update(blackboard)
    Sonar.update(blackboard)

    global skill_instance
    if not skill_instance:
        skill = blackboard.behaviour.skill

        # Load the module and the class we're going to use.
        found_skill = False
        SkillClass = None
        behaviour_packages = ["roles", "skills", "test"]
        for package in behaviour_packages:
            if skill not in [name for _, name, _ in pkgutil.iter_modules(["/home/nao/data/behaviours/%s" % package])]:
                Log.info("%s wasn't in %s, skipping import attempt.", skill, package)
                continue
            try:
                skill_module = __import__("%s.%s" % (package, skill), fromlist=[skill])
                # Access the class so we can do some reflection.
                SkillClass = getattr(skill_module, skill)
                found_skill = True
                Log.info("Successfully imported %s from %s.%s", skill, package, skill)
                break
            except ImportError, e:
                Log.error("%s %s", package, e)
                Log.error(traceback.format_exc())

        if not found_skill:
            raise ImportError("Could not find skill: %s in any of our behaviour folders." % skill)

        if issubclass(SkillClass, BehaviourTask):
            new_world = world.World(blackboard)  # It's a whole new world.
            skill_instance = SkillClass(new_world)
        else:
            parentSkill = DummySkill(blackboard)
            skill_instance = SkillClass(blackboard, parentSkill)
class FirstTurnState(object):
    def __init__(self, startPt, endPt):
        # After the path has not been seen for LOSTTIME secs, quit.
        self.pathLost = Timer(LOSTTIME)
        self.centers = []
        self.startPt = startPt
        self.endPt = endPt
        sw3.Forward(0).start()
        sw3.Strafe(0).start()

    def processFrame(self, frame):
        print "found state"
        path = vision.ProcessFrame(frame)
        if path.found:
            print "path found"
            self.pathLost.restart()
            # Finding out where the start of the path is: it is the path end
            # point closest to the center of the camera.
            # pt1, pt2 = path.endPoints()
            pts = [[path.p1x, path.p1y], [path.p2x, path.p2y]]
            self.startPt, self.endPt = sortPoints(self.startPt, pts)
            center = (path.cx, path.cy)
            angle = turnParallelTo(self.startPt, center)
            print "Angle: %d" % angle
            if abs(angle) <= MAXANGLEDIF:
                sw3.RelativeYaw(0).start()
                return ToCenterState(self.startPt, self.endPt)
        elif not self.pathLost.timeLeft():
            # If the path has been lost for too long, go to the path lost state.
            return PathLostState()
        print "ret found"
        return self

    def cont(self):
        # Path missions never stop while we see the path.
        return True
class FoundState(object):
    def __init__(self, startPt, endPt):
        # After the path has not been seen for LOSTTIME secs, quit.
        self.pathLost = Timer(LOSTTIME)
        self.centers = []
        self.startPt = startPt
        self.endPt = endPt
        sw3.Forward(0).start()

    def processFrame(self, frame):
        print "found state"
        path = vision.ProcessFrame(frame)
        if path.found:
            print "path found"
            self.pathLost.restart()
            # Finding out where the start of the path is: it is the path end
            # point closest to the center of the camera.
            # pt1, pt2 = path.endPoints()
            pts = [[path.p1x, path.p1y], [path.p2x, path.p2y]]
            self.startPt, self.endPt = sortPoints(self.startPt, pts)
            cx, cy = (path.cx, path.cy)
            if moveTo(frame, self.startPt) <= MAXDIST:
                return FirstTurnState(self.startPt, self.endPt)
        elif not self.pathLost.timeLeft():
            # If the path has been lost for too long, go to the path lost state.
            return PathLostState()
        print "ret found"
        return self

    def cont(self):
        # Path missions never stop while we see the path.
        return True
class FoundState(object):
    def __init__(self):
        # After the wheel has not been seen for 8 secs, quit.
        self.wheelLost = Timer(8)
        # Timer for being centered on the wheel.
        self.centeredTimer = Timer(2)

    def processFrame(self, frame):
        print "found state"
        wheel = vision.ProcessFrame(frame)
        if wheel.found:
            print "wheel found"
            self.wheelLost.restart()
            # Finding out how many pixels from the center of the down camera
            # the wheel is: take the difference between the wheel's center in
            # screen space and the center of the screen, then move the robot
            # to cover that distance.
            (x, y) = wheel.loc()
            h, w, _ = frame.shape
            heightError = h / 2 - y
            print('Height error: %.3f' % heightError)
            widthError = x - w / 2
            print('Width error: %.3f' % widthError)
            distance = math.sqrt(heightError ** 2 + widthError ** 2)  # excluding depth
            print("Distance from center of wheel: %.3f" % distance)

            # The robot moves to center itself on the wheel until it has been
            # centered within DISTANCE_ERROR's threshold for long enough.
            print('moving forward by: %.3f' % (heightError / PIXTODEPTH))
            sw3.Forward(heightError / PIXTODEPTH).start()
            print('setting strafe to: %.3f' % (widthError / PIXTODEPTH))
            sw3.Strafe(widthError / PIXTODEPTH).start()

            # Restart the timer for being centered on the wheel.
            if not distance <= DISTANCE_ERROR:
                self.centeredTimer.restart()
            if not self.centeredTimer.timeLeft():
                sw3.Forward(0).start()
                sw3.Strafe(0).start()
                return CenteredState()
        elif not self.wheelLost.timeLeft():
            # If the wheel has been lost for too long, go to the lost state.
            return WheelLostState()
        print "ret found"
        return self

    def cont(self):
        # The wheel mission never stops in this state, only in the lost or
        # centered states.
        return True
class ToEndState(object):
    def __init__(self, startPt, endPt):
        # After the path has not been seen for LOSTTIME secs, quit.
        self.pathLost = Timer(LOSTTIME)
        self.centers = []
        self.startPt = startPt
        self.endPt = endPt
        sw3.Forward(0).start()
        self.atEnd = False

    def processFrame(self, frame):
        print "found state"
        path = vision.ProcessFrame(frame)
        if path.found:
            print "path found"
            self.pathLost.restart()
            # Moving to the end of the path.
            pts = [[path.p1x, path.p1y], [path.p2x, path.p2y]]
            self.startPt, self.endPt = sortPoints(self.startPt, pts)
            center = (path.cx, path.cy)
            if moveTo(frame, self.endPt) <= MAXDIST:
                self.atEnd = True
        elif not self.pathLost.timeLeft():
            # If the path has been lost for too long, go to the path lost state.
            return PathLostState()
        print "ret found"
        return self

    def cont(self):
        # Continue until we reach the end of the path.
        return not self.atEnd
def try_connect(self):
    'Do the connection routine.'
    addr = self._ips[self.attempts]
    log.warning('tryconnect: %r', (addr,))
    self.attempts += 1

    self.timeout = Timer(self.timetowait, lambda s=self.socket: self.handle_timeout(s))
    self.make_socket()
    if self.timeout is not None:
        self.timeout.start()

    def succ(*a, **k):
        log.info("WIN")

    def fail(*a, **k):
        log.info("FAIL")

    self.connect(addr, success=succ, error=fail)
def tryaccept(self, addr, on_connect, on_fail, timeout=1.5):
    self._accepting = True
    info('tryaccept Y=%r, N=%r', on_connect, on_fail)
    self.on_connect = on_connect
    self.on_fail = on_fail

    info('listening for a connection at %r', (addr,))
    self.make_socket()
    common.socket.bind(self, addr)
    self.listen(1)

    if timeout:
        info('timeout in %r secs', timeout)
        self.timeout = Timer(timeout, lambda s=self.socket: self.handle_timeout(s))
        self.timeout.start()
class SearchState(object):
    def __init__(self):
        self.timer = Timer(SEARCHTIME)
        self.foundCounter = 4

    def processFrame(self, frame):
        path = vision.ProcessFrame(frame)
        print path.found
        if path.found:
            frame = path.draw(frame)
            self.foundCounter -= 1
            if self.foundCounter <= 0:
                return FoundState()
        return self

    def cont(self):
        """If true continue the mission; if false end the mission."""
        return self.timer.timeLeft()