def test_getBestResultsWithRomanNumerals(self):
    """getBestResults should treat Roman numerals and Arabic digits as equivalent."""
    results = [{'SearchKey': ['Tekken 2']},
               {'SearchKey': ['Tekken 3']},
               {'SearchKey': ['Tekken IV']}]
    gamename = 'Tekken II'
    m = Matcher()
    x = m.getBestResults(results, gamename)
    # assertEquals is a deprecated alias; use assertEqual.
    self.assertEqual(x.get('SearchKey')[0], 'Tekken 2')
def main():
    """Read the teams CSV named in argv[1], match mentors to teams, and
    write the resulting table to mentors.html."""
    read = ReadTeams()
    teams = read.read_csv(sys.argv[1])
    mentor_names = get_mentors()
    matched_teams = Matcher(teams, mentor_names)
    list_of_mentors = matched_teams.match_mentors()
    # Collect fragments and join once (avoids quadratic += string building).
    parts = [
        '<table border="1">\n',
        ' <tr>\n',
        ' <th>Mentors</th>\n',
        ' <th colspan="5">Teams</th>\n',
        ' <th colspan="100%">Teams on Queue</th>\n',
        ' </tr>\n',
    ]
    for mentor in list_of_mentors:
        parts.append(' <tr>\n')
        parts.append(' <th>{}</th>\n'.format(mentor))
        for team in mentor.teams:
            parts.append(' <td><p>{}</p><p>{}</p></td>\n'.format(team, team.room))
        parts.append(' </tr>\n')
    parts.append('</table>')
    # Context manager guarantees the handle is closed and avoids shadowing
    # the (Python 2) builtin name 'file'.
    with open('mentors.html', 'w') as out:
        out.write(''.join(parts))
def test_getBestResultsWithBrackets(self):
    """Year/publisher annotations in brackets should be ignored when matching."""
    results = [{'SearchKey': ['FIFA 98']},
               {'SearchKey': ['FIFA 97']},
               {'SearchKey': ['FIFA 2001']}]
    gamename = 'FIFA \'98 (1998) [Electronic Arts]'
    m = Matcher()
    x = m.getBestResults(results, gamename)
    # assertEquals is a deprecated alias; use assertEqual.
    self.assertEqual(x.get('SearchKey')[0], 'FIFA 98')
def test_getBestResultsWithApostropheAndYear(self):
    """An apostrophe-prefixed year ('98) should match the plain title."""
    results = [{'SearchKey': ['FIFA 98']},
               {'SearchKey': ['FIFA 97']},
               {'SearchKey': ['FIFA 2001']}]
    gamename = 'FIFA \'98'
    m = Matcher()
    x = m.getBestResults(results, gamename)
    # assertEqual gives a clearer failure diff than assertTrue on '=='.
    self.assertEqual(x.get('SearchKey')[0], 'FIFA 98',
                     "Expected to match title (was {0})".format(x.get('SearchKey')[0]))
class TestMatcher(unittest.TestCase):
    """Exercise Matcher.match against a fixed adapter sequence.

    Each test feeds one read and asserts the exact adapter fragment found.
    assertEquals (deprecated alias) is replaced by assertEqual throughout.
    """

    def setUp(self):
        self.m = Matcher('AAGCAGTGGTATCAACGCAGAGTACGCGGG')

    def test1(self):
        self.assertEqual('CAACT', self.m.match('CAACTCCCGCGTACTCTGCGTTGATACCACTGCTTACTCT'))

    def test2(self):
        # No adapter prefix present: empty match expected.
        self.assertEqual('', self.m.match('GTTGATACCGCTGCTTACTCTGCGTTGATACCACTGCTT'))

    def test3(self):
        self.assertEqual('GACGACTGATCGATC', self.m.match('GACGACTGATCGATCTACTCTGCGTTGATACCACG'))

    def test4(self):
        self.assertEqual('GCTACTGATCATATCGTTAGCTAGCTAGCTACGT',
                         self.m.match('GCTACTGATCATATCGTTAGCTAGCTAGCTACGTCGTACT'))

    def test5(self):
        self.assertEqual('GAGCCAGGCT', self.m.match('GCAGTGGTATCAACGCAGAGTACGCGGGGAGCCAGGCT'))

    def test6(self):
        self.assertEqual('AAGCAGTGGTATCAACGCAGA',
                         self.m.match('CAACGCAGAGTACGCGGGAAGCAGTGGTATCAACGCAGA'))

    def test7(self):
        self.assertEqual('TCT', self.m.match('AGCTAGCTAGCTAGCTAATCAACGCAGAGTACGCGGGTCT'))

    def test8(self):
        self.assertEqual('AGCCGTGGTATCAACGCA',
                         self.m.match('GCAGTGGTATCAACGCAGAGTAAGCCGTGGTATCAACGCA'))
class MatchTagViewActivatable(GObject.Object, Gedit.ViewActivatable):
    """Gedit view activatable that highlights matching tags via a Matcher."""

    __gtype_name__ = "MatchTagViewActivatable"

    view = GObject.property(type=Gedit.View)

    def __init__(self):
        GObject.Object.__init__(self)
        self.lang = None
        self.handler_cursor_moved = None
        self.handler_notify_lang = None
        self.matcher = None
        self.tag_match = None

    def do_activate(self):
        """Create the highlight tag and matcher, and track language changes."""
        buf = self.view.get_buffer()
        self.tag_match = buf.create_tag('tag-match', background=TAG_BACKGROUND)
        self.matcher = Matcher(self.view, self.tag_match)
        self.update_language()
        self.handler_notify_lang = buf.connect('notify::language', self.on_notify_language)

    def do_deactivate(self):
        """Disconnect signal handlers and drop the matcher."""
        buf = self.view.get_buffer()
        buf.disconnect(self.handler_notify_lang)
        if self.handler_cursor_moved:
            buf.disconnect(self.handler_cursor_moved)
        # BUG FIX: the original assigned to a local name 'matcher', leaving
        # the attribute alive; clear the instance attribute instead.
        self.matcher = None

    def do_update_state(self):
        pass

    def update_language(self):
        """(Re)connect the cursor-moved handler iff the buffer language is supported."""
        buf = self.view.get_buffer()
        lang = buf.get_language()
        self.lang = lang.get_id() if lang else None
        if not self.lang or self.lang not in SUPPORTED_LANGUAGES:
            if self.handler_cursor_moved:
                buf.disconnect(self.handler_cursor_moved)
                # Clear the stale id so a later disconnect is not attempted twice.
                self.handler_cursor_moved = None
            return
        self.handler_cursor_moved = buf.connect('cursor-moved', self.on_cursor_moved)

    def on_cursor_moved(self, buf):
        if self.matcher:
            self.matcher.cursor_moved()

    def on_notify_language(self, buf, spec):
        self.update_language()
def do_activate(self):
    """Plugin activation: create the highlight tag, build the Matcher and
    start watching the buffer's language."""
    buf = self.view.get_buffer()
    # Tag used to highlight matched tag occurrences.
    self.tag_match = buf.create_tag('tag-match', background=TAG_BACKGROUND)
    self.matcher = Matcher(self.view, self.tag_match)
    self.update_language()
    # Re-evaluate supported-language state whenever the language changes.
    self.handler_notify_lang = buf.connect('notify::language', self.on_notify_language)
def _prepare_device(self):
    """Prepare the physical input devices, the virtual injection device
    (UInput) and the event Matcher for the active profile."""
    #print("device: %s" % self._profile.device.name)
    self._event_status = {key: 0 for key in self._profile.device.get_events_supported()}
    #print("Events supported", self._event_status)
    # NOTE(review): i[-1] assumes the keys are sequences whose last element is
    # an "ETYPE:ECODE" string — confirm against get_events_supported().
    self._allowed_event_types = set(
        [getattr(ecodes, i[-1].split(':')[0]) for i in self._event_status])
    # BUG FIX: materialize the map(); in Python 3 the lazy iterator would be
    # exhausted by the enumerate() below, leaving _input_devices empty.
    self._input_devices = list(map(InputDevice, self._profile.device.devices))
    self._file_descriptors = {dev.fd: i for i, dev in enumerate(self._input_devices)}
    print("Mappings for '" + self._profile.name + "' " + '-' * 30)
    for k, v in sorted(self._profile.mapping.items()):
        print(str(k.replace("EV_KEY:KEY_", "").replace("ENTER", "<-'")
                  .replace("APOSTROPHE", "'").replace(",", "")).lower().rjust(20)
              + " as " + (', '.join(v)).replace("r_hand_tap_", "").replace("_vs_thumb", ""))
    # Prepare virtual device for event injection: collect every event code
    # reachable from the mapping chains ("a+b" / "a,b" compositions).
    capabilities = {}
    for ev_chain in self._profile.mapping:
        for k in re.split('[+,]', ev_chain):
            et, ec = k.split(':', 1)
            etype = ecodes.ecodes[et]
            if etype in capabilities:
                capabilities[etype].append(ecodes.ecodes[ec])
            else:
                capabilities[etype] = [ecodes.ecodes[ec]]
    #print("Capabilities", capabilities)
    self._virtual_input = UInput(events=capabilities)
    # Prepare matcher
    self._matcher = Matcher(self._profile)
def matchCommand(string, options):
    """Dispatch a command keyword to its player/system action; anything else
    is treated as a video name and resolved through the Matcher."""
    keyword_actions = {
        'play': player.play,
        'pause': player.pause,
        'stop': player.stop,
        'jumpto': player.jumpTo,
        'reboot': system.reboot,
    }
    action = keyword_actions.get(string)
    if action:
        action(options=options)
    else:
        # Not a keyword: default to finding files through the Matcher class.
        Matcher(string, options).matchVideo()
def __init__(self, checkRuns=None, parent=None):
    """Initialize scraping/matching/export machinery.

    checkRuns: optional mapping of runs to check. Changed from a mutable
    default argument ({}) — which is shared across calls — to None.
    """
    QObject.__init__(self, parent)
    if not checkRuns:
        # NOTE(review): when checkRuns IS provided, self.runs is never
        # assigned here — presumably set elsewhere; confirm.
        self.runs = {}
    self.regexes = GameDBRegex()
    self.dats = []
    self.scrapers = []
    self.exporter = Exporter()
    self.matcher = Matcher()
    self.patcher = Patcher('patches.xlsx')
    self.database = Database()
def main():
    """Run the preprocess -> detect -> match -> draw pipeline over every TIF
    under `path`, save annotated frames and masks, then serve frames on demand."""
    seq = []
    images = glob.glob(path + '*.tif')
    for i in images:
        image = cv2.imread(i, cv2.IMREAD_GRAYSCALE)
        seq.append(image)
    preprocessor = Preprocessor(seq)
    detector = Detector(preprocessor)
    matcher = Matcher(detector)
    drawer = Drawer(matcher, preprocessor)
    masks = preprocessor.get_masks()
    print('Generating all frames and cell states...')
    drawer.load()
    print('Successfully loaded all images')
    # Save all generated images and their masks to disk.
    # Frames are 1-based on disk; masks list is 0-based, hence counter - 1.
    counter = 1
    for g in drawer.get_gen_images():
        annotated = cv2.imwrite(path + f'gen/{counter}.tif', g)
        mask = cv2.imwrite(path + f'gen/{counter}_mask.tif', masks[counter - 1])
        if not annotated or not mask:
            print(f'Failed to save')
        counter += 1
    print('Saved all images')
    # Now stand by for user commands: "<frame> [cell_id]"; empty input exits.
    while True:
        string = input(
            'Input a frame and cell ID (optional) separated by a space...\n')
        if string:
            string = string.split(' ')
            frame = int(string[0])
            if len(string) > 1:
                try:
                    id = int(string[1])
                    display_image = drawer.serve(frame, id)
                except ValueError:
                    # Non-numeric cell id: fall back to serving the frame only.
                    print(f'Not an integer')
                    display_image = drawer.serve(frame)
            else:
                display_image = drawer.serve(frame)
            # plt.imshow(display_image)
            # plt.axis('off')
            # plt.show()
            # cv2.imshow('image',display_image)
            # cv2.waitKey(0)
            # cv2.destroyAllWindows()
        else:
            break
def run():
    """Pick 3 random images from imgs/, build the feature index, and print the
    top-3 matches for each sample (Python 2 script)."""
    images_path = 'imgs/'
    files = [os.path.join(images_path, p) for p in sorted(os.listdir(images_path))]
    # getting 3 random images
    sample = random.sample(files, 3)
    batch_extractor(images_path)
    ma = Matcher('features.pck')
    for s in sample:
        print 'Query image', s, ' =========================================='
        show_img(s)
        names, match = ma.match(s, topn=3)
        print "top_names:", names
        print 'Result images ========================================'
        for i in range(3):
            # we got cosine distance: less cosine distance between vectors
            # means more similar, thus we subtract it from 1 to get a match value
            print 'Match %s' % (1-match[i])
def get_matcher(net, opt):
    """Attach a Matcher as a forward hook on each layer named in opt['layers'].

    Returns the Matcher, which receives (module, output) for every forward pass.
    """
    matcher = Matcher(opt['what'])

    def hook(module, input, output):
        matcher(module, output)

    # NOTE: layer keys stay as strings — net._modules is keyed by name.
    for layer_name in opt['layers'].split(','):
        net._modules[layer_name].register_forward_hook(hook)
    return matcher
class Agent:
    """Thin facade: order dispatch goes to a Matcher, idle-driver repositioning
    to a Scheduler (which consults the matcher's grid values)."""

    def __init__(self):
        self.matcher = Matcher(alpha, gamma1)
        self.scheduler = Scheduler(gamma2)

    def dispatch(
            self,
            dispatch_observ: List[Dict[str, Any]]) -> List[Dict[str, str]]:
        """Delegate order-driver assignment to the matcher."""
        return self.matcher.dispatch(dispatch_observ)

    def reposition(self, repo_observ: Dict[str, Any]) -> List[Dict[str, str]]:
        """Delegate repositioning to the scheduler, informed by the matcher."""
        return self.scheduler.reposition(self.matcher, repo_observ)
def make_matcher(self, fname, dirname):
    """Validate the selected file and, if acceptable, build the Matcher.

    Shows an error dialog and aborts on a bad extension; a second check
    (verify_filename) gates Matcher construction and setup.
    """
    if self.verify_file_ext(fname):
        self.filename = os.path.join(dirname, fname)
    else:
        self.display_error_dlg("Invalid file selected.\nPlease select a valid excel 97-2003 file!")
        return
    if self.verify_filename():
        self.m = Matcher(self.filename)
        self.m.dosetup()
def reposition(self, matcher: Matcher,
               repo_observ: Dict[str, Any]) -> List[Dict[str, str]]:
    """Propose a destination grid for each idle driver.

    For every driver, pick the grid whose time-discounted value gain over the
    current grid is largest; a baseline gain of 0 means "stay put".
    """
    if REPO_NAIVE:
        return Scheduler.reposition_naive(repo_observ)
    timestamp, day_of_week, drivers = Scheduler.parse_repo(repo_observ)
    grid_ids = matcher.get_grid_ids()
    reposition = []  # type: List[Dict[str, str]]
    for driver_id, current_grid_id in drivers:
        current_value = matcher.get_grid_value(current_grid_id)
        # best_value starts at 0: only move if some grid strictly improves.
        best_grid_id, best_value = current_grid_id, 0
        for grid_id in grid_ids:
            # Travel time from Manhattan distance at constant SPEED
            # (units depend on SPEED — TODO confirm).
            time = Grid.mahattan_distance(current_grid_id, grid_id) / SPEED
            # Discount the destination's value by travel time.
            discount = math.pow(self.gamma, time)
            proposed_value = matcher.get_grid_value(grid_id)
            incremental_value = discount * proposed_value - current_value
            if incremental_value > best_value:
                best_grid_id, best_value = grid_id, incremental_value
        reposition.append(
            dict(driver_id=driver_id, destination=best_grid_id))
    return reposition
def test_matcher_minmax_fixed_input():
    """MinMax solver on a fixed 3x3 instance must reproduce the known assignment."""
    reviewers = ['reviewer1', 'reviewer2', 'reviewer3']
    papers = ['paper1', 'paper2', 'paper3']
    scores = [
        ('paper1', 'reviewer1', 1), ('paper1', 'reviewer2', 0), ('paper1', 'reviewer3', 0.25),
        ('paper2', 'reviewer1', 1), ('paper2', 'reviewer2', 0), ('paper2', 'reviewer3', 0.25),
        ('paper3', 'reviewer1', 1), ('paper3', 'reviewer2', 0.2), ('paper3', 'reviewer3', 0.5),
    ]
    config = {
        'reviewers': reviewers,
        'papers': papers,
        'scores_by_type': {'affinity': {'edges': scores}},
        'weight_by_type': {'affinity': 1},
        'minimums': [1, 1, 1],
        'maximums': [1, 1, 1],
        'demands': [1, 1, 1],
        'num_alternates': 1,
    }
    test_minmax_matcher = Matcher(config, solver_class='MinMax')
    test_minmax_matcher.run()
    solution = test_minmax_matcher.solution
    assert len(solution) == 3
    assert len(solution[0]) == 3
    # assert_array_equal raises on mismatch and returns None on success.
    assert None == nptest.assert_array_equal(solution,
                                             [[1.0, 0.0, 0.0],
                                              [0.0, 0.0, 1.0],
                                              [0.0, 1.0, 0.0]])
    assert test_minmax_matcher.assignments
    assert test_minmax_matcher.alternates
class SimpleDataset(Dataset):
    """
    Basic Dataset implementation.

    Still not ready for final use, but it can be used as a prototype
    to build real datasets.
    """
    def __init__(self, name, path, conf):
        Dataset.__init__(self, name, path, conf)
        if not "filter" in conf:
            raise RuntimeError("'filter' not found in the configuration of dataset '%s'" % self.name)
        self.filter = Matcher(conf["filter"])
        # BUG FIX: initialize the schema only when the index file does NOT
        # already exist — the original condition was inverted (it tried to
        # create the table when the database already existed).
        needsInit = not os.path.exists(self.path + "/index.sqlite")
        self.db = sqlite.connect(self.path + "/index.sqlite")
        if needsInit:
            cur = self.db.cursor()
            cur.execute("""
                create table items (
                    id integer primary key,
                    uid varchar(255) not null,
                    unique(uid)
                )
            """)

    def id(self, md):
        """
        Compute the unique ID of a metadata in this dataset
        """
        o = md.get_origins()
        t = md.get_reference_time_info()
        return ":".join(o + (str(t),))

    def accepts(self, md):
        """True if the metadata passes this dataset's filter."""
        return self.filter.match(md)

    def contains(self, id):
        """True if a metadata with this uid is already indexed."""
        cur = self.db.cursor()
        # BUG FIX: sqlite query parameters must be a sequence, not a string.
        cur.execute("select id from items where uid = ?", (id,))
        return cur.fetchone() != None

    def acquire(self, md):
        """Store metadata and index its uid; roll back the index row on failure."""
        # Daily filename so far
        filename = datetime.now().strftime("%Y%m%d")
        dest = self.path + "/" + filename
        id = self.id(md)
        cur = self.db.cursor()
        # This will throw an exception if it's a duplicate insert.
        # BUG FIX: parameters passed as a 1-tuple.
        cur.execute("insert into items (uid) values (?)", (id,))
        md.set_dataset(self.name, id)
        try:
            self._store(md, dest)
        except:
            cur.execute("delete from items where uid = ?", (id,))
            raise
def make_rpn_loss_evaluator(cfg, box_coder):
    """Assemble the RPN loss computation from config thresholds and ratios."""
    proposal_matcher = Matcher(
        cfg.FG_IOU_THRESHOLD,
        cfg.BG_IOU_THRESHOLD,
        allow_low_quality_matches=True,
    )
    sampler = BalancedPositiveNegativeSampler(cfg.BATCH_SIZE_PER_IMAGE,
                                              cfg.POSITIVE_FRACTION)
    return RPNLossComputation(proposal_matcher, sampler, box_coder,
                              generate_rpn_labels)
def match_route_dp(vehicle, routes, grids):
    """
    dynamic programming algorithm to match routes.

    Selects non-overlapping route segments along the vehicle trace, maximizing
    first the number of matched routes and, on ties, the total matched length.
    """
    route_cands = filter_by_grids(vehicle, routes, grids)
    split_indices = split_vehicle_by_routes(vehicle, route_cands)
    # aux[i] = (match_routes_count, match_sites_count) of the best matching
    # using the first i split points; match_sites_count is compared only when
    # match_routes_count are equal.
    aux = [(0, 0)] * (len(split_indices) + 1)
    # back_points[i] = (prev_state, segment_start, route_no) for path rebuild.
    back_points = [None] * (len(split_indices) + 1)
    valid = [False] * len(split_indices)
    for i, (j, route_no_set) in enumerate(split_indices, 1):
        # Default transition: inherit the solution that skips split point i.
        aux[i] = aux[i - 1]
        back_points[i] = back_points[i - 1]
        # Candidate segment starts: the origin plus every earlier valid split.
        cand_pts = [(0, 0)] + [
            (k + 1, s + 1) for k, (s, _) in enumerate(split_indices[0:i - 1])
            if valid[k]
        ]
        for route_no in route_no_set:
            for s, k in reversed(cand_pts):
                if subset_match_with_dist(vehicle, k, j,
                                          routes.get_route(route_no), grids):
                    # Prefer more routes; tie-break on accumulated route length.
                    if 1 + aux[s][0] > aux[i][0] \
                            or aux[s][0] + 1 == aux[i][0] \
                            and aux[s][1] + routes.get_route(route_no).length() > aux[i][1]:
                        valid[i - 1] = True
                        aux[i] = (aux[s][0] + 1,
                                  aux[s][1] + routes.get_route(route_no).length())
                        back_points[i] = (s, k, route_no)
    # Walk back pointers from the end to rebuild the chosen segments.
    matcher = Matcher(vehicle, grids)
    i = len(vehicle) - 1
    s = len(back_points) - 1
    while back_points[s] != None:
        s, k, route_no = back_points[s]
        matcher.insert(0, k, i, routes.get_route(route_no))
        i = k - 1
    return matcher
def main():
    """Load a dataset sequence, run preprocessing/detection/matching, then
    serve annotated frames interactively, saving each frame and its mask."""
    params = Params()
    if not os.path.isdir(params.dataset_root):
        raise Exception("Unable to load images from " + params.dataset_root +
                        ": not a directory")
    if not os.path.exists(params.output_dir):
        os.mkdir(params.output_dir)
    if not os.path.isdir(params.output_dir):
        raise Exception("Unable to save results to " + params.output_dir +
                        ": not a directory")
    # Some datasets keep images under a per-sequence subdirectory.
    if (params.dataset == "DIC-C2DH-HeLa"):
        path = params.dataset_root + "/" + str(
            list(params.images_idx.keys())[0])
    elif (params.dataset == "PhC-C2DL-PSC" and params.nn_method == "DeepWater"):
        path = params.dataset_root + "/" + str(
            list(params.images_idx.keys())[0])
    else:
        path = params.dataset_root
    # seq = []
    images = glob.glob(path + '/*.tif')
    # Sort by the numeric frame index embedded in the filename (chars -7:-4).
    images = [(int(x[-7:-4]), x) for x in images]
    images.sort(key=lambda x: x[0])
    images = [x[1] for x in images]
    preprocessor = Preprocessor(images, params)
    detector = Detector(preprocessor)
    matcher = Matcher(detector)
    drawer = Drawer(matcher)
    masks = preprocessor.get_masks()
    counter = 1
    while True:
        inp = input('Serving next frame... type a Cell ID to inspect details')
        drawer.next()
        try:
            inp = int(inp)
            display_image = drawer.serve(inp)
        except:
            # Non-numeric input: serve the frame without a highlighted cell.
            print(f'Not an integer')
            display_image = drawer.serve()
        plt.imsave(path + f'gen/{counter}.jpg', display_image)
        # NOTE(review): masks is indexed with `counter` here, while a sibling
        # script uses masks[counter - 1]; possible off-by-one — confirm.
        plt.imsave(path + f'gen/{counter}_mask.jpg', masks[counter])
        counter += 1
def run_trial(house_num: int, matcher_spec: MatcherSpec, aggregated, disaggregated,
              main_ind, data_vec, labels, always_on):
    """Run one disaggregation/matching trial over a house's data, frame by frame.

    Returns the StatStore accumulating per-frame accuracies and matcher stats.
    """
    disagg_settings, data_settings = load_settings(house_num)
    stat_log = stats.StatStore()
    matcher = Matcher(stat_log, len(labels) + 3, labels, always_on, matcher_spec)
    disaggregator: Disaggregator = PerfectDisaggregator(disagg_settings)
    (hist_delta_power, hist_events, current_time) = disaggregator.initialize(data_vec)
    event_offset = len(hist_events)
    # Traverse through the data one frame at a time.
    current_frame = 0
    total_frames = int((len(data_vec) - disagg_settings.init_size) / disagg_settings.frame_size)
    while current_time < len(data_vec):
        if verbose:
            print('\nProcessing frame {} of {} from times {} to {}'.format(
                current_frame, total_frames, current_time,
                current_time + disagg_settings.frame_size))
        (frame_delta_power, frame_events) = process_frame_data(data_vec, current_time,
                                                               disagg_settings)
        if verbose:
            print('\tDisaggregating appliances')
        hist_delta_power += frame_delta_power
        hist_events += frame_events
        # Use hist_delta_power length because it contains new data;
        # using current_time returns a 0-length array.
        gsp_truth = disaggregated[0:len(hist_delta_power)]
        gsp_results = disaggregator.process_frame(data_vec, main_ind, hist_delta_power,
                                                  frame_events, disaggregated, current_time)
        matcher.process_frame(current_frame, current_time, disagg_settings.frame_size,
                              gsp_results, gsp_truth)
        # Compute final matching
        gsp_results = matcher.final_matching(gsp_truth)
        # Compute statistics
        accuracy = compute_accuracy(gsp_results.columns, disaggregated.columns)
        stat_log.push(current_frame, 'accuracies', accuracy)
        if verbose:
            print('\tAccuracy of {:.2f}'.format(accuracy))
        # Advance frame
        event_offset += len(frame_events)
        current_frame += 1
        current_time += disagg_settings.frame_size
    # TODO: add an easy way to view the asked questions (histogram/table/...).
    # TODO: after nicer stats/reporting, integrate the new periodicity measure:
    # lowest auto-correlation std/avg being most periodic.
    # TODO: finalize synthetic data creation and piping into here.
    # (More notes in data_combiner.py.)
    if verbose:
        matcher.print_stats(gsp_results, disaggregated)
    gsp_v.graph_all(aggregated, disaggregated, gsp_results)
    return stat_log
def home():
    """Landing route (Python 2 / Flask): sync the Twitter user's interests,
    run global matching once (cached in db), then route to dashboard or home."""
    id = request.args.get('user_id')
    api = db[int(id)]['api']
    user = api.me()
    dynamo = DynamoTable('default', 'twitty-users')
    if dynamo.checkUserExists(user.id) is False:
        print "New User"
        interests = processFirstTimeUser(api, dynamo)
        db[user.id]['interests'] = interests
    else:
        print "Existing User"
        interests = processExistingUser(api, dynamo)
        db[user.id]['interests'] = interests
    # do matching here now!!
    # Matching runs only once per process; result cached under 'matcher'.
    if 'matcher' not in db.keys():
        print "Begin Matching"
        items = dynamo.scanTable()
        users = dict()
        for item in items:
            users[item['user_id']] = dynamo.formatContent(item)
        matcher = Matcher(items, users)
        matcher.doMatching()
        db['matcher'] = matcher
        db['users'] = users
        print "Matching Done"
    if session['device'] == "mobile":
        del session['device']
        return redirect(url_for('dashboard', user_id=id, device="mobile"))
    else:
        del session['device']
        return render_template('home.html', name=user.name, user_id=id)
def __init__(self, sess):
    """
    initialize SSD model as SSD300 whose input size is 300x300
    """
    self.sess = sess
    # define input placeholder and initialize ssd instance
    self.input = tf.placeholder(shape=[None, 300, 300, 3], dtype=tf.float32)
    self.ssd = SSD()
    # build ssd network => feature-maps and confs and locs tensor is returned
    fmaps, confs, locs = self.ssd.build(self.input, is_training=True)
    # zip running set of tensor
    self.pred_set = [fmaps, confs, locs]
    # required param from default-box and loss function
    # (loop variable renamed: 'map' shadowed the builtin)
    fmap_shapes = [fmap.get_shape().as_list() for fmap in fmaps]
    # print('fmap shapes is '+str(fmap_shapes))
    self.dboxes = generate_boxes(fmap_shapes)
    print(len(self.dboxes))
    # required placeholder for loss
    loss, loss_conf, loss_loc, self.pos, self.neg, self.gt_labels, self.gt_boxes = \
        self.ssd.loss(len(self.dboxes))
    self.train_set = [loss, loss_conf, loss_loc]
    # optimizer = tf.train.AdamOptimizer(0.05)
    optimizer = tf.train.AdamOptimizer(learning_rate=1e-3, beta1=0.9, beta2=0.999,
                                       epsilon=1e-08, use_locking=False, name='Adam')
    self.train_step = optimizer.minimize(loss)
    # provides matching method
    self.matcher = Matcher(fmap_shapes, self.dboxes)
def __init__(self, *args, **kwargs):
    """Wire up config, Flask app, matcher and SMS provider, then register
    the provider webhook route on the app."""
    self._config = kwargs.get('config') or Config()
    self._app = kwargs.get('app') or Flask(self.__class__.__name__)
    self._matcher = kwargs.get('matcher') or Matcher()
    # do nothing by default until a real handler is registered
    self._default_handler = lambda x: None
    provider = kwargs.get('provider')
    self._provider = provider or self._get_provider_from_config()
    self._app.add_url_rule(self._config.provider['url'],
                           view_func=self._sms_handler,
                           methods=self._config.provider['methods'])
def _scan_path(self):
    """Walk every environment path once, collecting subdirectories and the
    files whose extension matches one of the allowed suffixes."""
    self._scan_down_set(False)
    self._clear_paths()
    M = Matcher(asterisk_match_len_blacklist=tuple())
    for base in self._env_paths:
        try:
            entries = os.listdir(base)
        except Exception:
            # Unreadable or vanished directory: skip it.
            continue
        for entry in entries:
            abspath = os.path.join(base, entry)
            if os.path.isdir(abspath):
                self._all_dirs.append(abspath)
            elif os.path.isfile(abspath):
                for suffix in self._allow_suffixs:
                    M.set_substr(suffix)
                    if M.is_match(os.path.splitext(entry)[1]):
                        self._all_files.append(abspath)
    # Deduplicate results collected from overlapping paths.
    self._all_dirs = list(set(self._all_dirs))
    self._all_files = list(set(self._all_files))
    self._scan_down_set(True)
def __init__(self, f):
    """Wrap the instrument connection f and precompile response matchers."""
    self.f = f
    self.timing = False
    self.mode = None
    self.verbose = 0
    self.failures = 0
    # Pause before talking to the device — presumably to let it settle
    # after connect; TODO confirm necessity.
    time.sleep(2)
    # Parses "KEY:value" pairs; values may be quoted or run to the next comma.
    # (re.VERBOSE: whitespace in the pattern is insignificant.)
    self.resp_matcher = Matcher(r'''
        ((?P<key>[A-Z0-9/.]+):)?
        (?P<value>
            ("[^"]*") |
            ([^,]*)
        )
    ''', re.VERBOSE)
    # Extracts frequency/magnitude from SFREQUENCY/SMAGNITUDE responses.
    self.sfr_sma_matcher = Matcher('SFREQUENCY (?P<freq>[-+.0-9E]+),EQ;' +
                                   'SMAGNITUDE (?P<mag>[-+.0-9E]+),EQ')
    self.init()
def __init__(self, camera_params, odometry_poses):
    """Visual-odometry pipeline state: keypoint extractor, matcher, tracker,
    camera intrinsics and the ground-truth pose file."""
    self.prev_frame_extracts = None
    self.prev_frame = None
    self.E = None  # essential matrix (last estimate)
    self.F = None  # fundamental matrix (last estimate)
    self.t = np.zeros(3)  # accumulated translation
    self.R = np.eye(3)    # accumulated rotation
    self.kpe = KeyPointsExtractor(extractor='orb', detector='fast',
                                  num_points=5000, quality=0.001, min_dist=3)
    self.matcher = Matcher()
    self.tracker = Tracker()
    self.w = camera_params['frame_width']
    self.h = camera_params['frame_height']
    # self.f = camera_params['focal_length']
    # Hard-coded intrinsics (look like KITTI sequence values) —
    # TODO confirm they match the actual input data.
    self._init_camera_intrinsic(fx=718.8560, fy=718.8560, cx=607.1928, cy=185.2157)
    with open(odometry_poses) as f:
        self.odometry_poses = f.readlines()
def __init__(self, name, path, conf):
    """Open/create the dataset's sqlite index and build its metadata filter."""
    Dataset.__init__(self, name, path, conf)
    if not "filter" in conf:
        raise RuntimeError("'filter' not found in the configuration of dataset '%s'" % self.name)
    self.filter = Matcher(conf["filter"])
    # BUG FIX: create the schema only when the index file does NOT already
    # exist — the original condition was inverted (it tried to create the
    # table when the database already existed).
    needsInit = not os.path.exists(self.path + "/index.sqlite")
    self.db = sqlite.connect(self.path + "/index.sqlite")
    if needsInit:
        cur = self.db.cursor()
        cur.execute("""
            create table items (
                id integer primary key,
                uid varchar(255) not null,
                unique(uid)
            )
        """)
def to_fpga(rx):
    """Compile the regex matcher onto an IceStick: a ROM feeds characters to
    the matcher, whose match output drives LED D1."""
    icestick = IceStick()
    icestick.Clock.on()
    icestick.D1.on()
    main = icestick.DefineMain()
    rom = string_to_rom('x' * 16)
    pattern_matcher = Matcher(rx)
    m.wire(rom, pattern_matcher.char)
    m.wire(pattern_matcher.match, main.D1)
    m.EndDefine()
    m.compile('regulair', main)
def main():
    """Load pickled cases (if any), restore attribute ranges, and start the CLI."""
    from matcher import Matcher
    from interface import Interface
    import attribute_names

    if os.path.exists(case_filename):
        with open(case_filename, "rb") as fp:
            ranges, cases = pickle.load(fp)
        # Restore each persisted value range onto its attribute class.
        for k, v in ranges.items():
            atrcls = getattr(attribute_names, k)
            atrcls._range = v
    else:
        # BUG FIX: the original applied '%' to print()'s return value (None),
        # raising a TypeError; format the message before printing.
        print("Warning: No cases found (looking in '%s')." % case_filename)
        cases = []
    matcher = Matcher(cases)
    interface = Interface(matcher)
    interface.cmdloop()
def warui(lang, term, k=1):
    """Find bad words that matches input

    Args:
        lang: Language code such as en, es, fr, jp
        term: Input word
        k: Maximum edit distance

    Returns:
        Tuple of (number of matches, matched words, number of probes).
    """
    # Python 2: normalize byte strings to unicode before matching.
    if not isinstance(term, unicode):
        term = term.decode('utf-8')
    words = get_words(lang)
    m = Matcher(words)
    # Search within edit distance k via the automata helper.
    li = list(automata.find_all_matches(term, k, m))
    return (len(li), li, m.probes)
def _clear_paths(self):
    """Drop empty and nonexistent paths, dedupe, then apply the directory
    blacklist (wildcard patterns matched via Matcher)."""
    while '' in self._paths:
        self._paths.remove('')
    existing = [p for p in self._paths if os.path.exists(p)]
    self._paths = list(set(existing))
    # Match against the blacklist (translated from: "匹配黑名单").
    if len(self._dir_black_list) == 0:
        return
    matchers = [
        Matcher(pattern, asterisk_match_len_blacklist=tuple())
        for pattern in self._dir_black_list
    ]
    # Keep a path only when no blacklist matcher yields exactly True
    # ('True not in ...' preserved: it tests equality with True, not truthiness).
    survivors = [
        p for p in self._paths
        if True not in (m.is_match(p) for m in matchers)
    ]
    self._paths = list(set(survivors))
def make_roi_box_loss_evaluator(cfg):
    """Assemble FastRCNNLossComputation from the ROI-head config section."""
    from matcher import Matcher
    from sampler import BalancedPositiveNegativeSampler

    roi_cfg = cfg.ROI_HEADS
    proposal_matcher = Matcher(
        roi_cfg.FG_IOU_THRESHOLD,
        roi_cfg.BG_IOU_THRESHOLD,
        allow_low_quality_matches=False,
    )
    box_coder = BoxCoder(weights=roi_cfg.BBOX_REG_WEIGHTS, lib=torch)
    sampler = BalancedPositiveNegativeSampler(
        roi_cfg.BATCH_SIZE_PER_IMAGE, roi_cfg.POSITIVE_FRACTION)
    return FastRCNNLossComputation(proposal_matcher, sampler, box_coder,
                                   cfg.CLS_AGNOSTIC_BBOX_REG)
def main():
    """Capture frames from a camera, run VitaminE tracking, and display the
    track visualization until ESC/'q' is pressed; 'r' resets tracking."""
    #src = os.path.expanduser('~/Videos/VID_20190327_194904.mp4')
    src = 1  # camera index (was a video file path, see above)
    trk = None
    path = []
    iter = 0
    scale = 1.
    vita = VitaminE(dbg=True)
    cam = cv2.VideoCapture(src)
    # init gui
    cv2.namedWindow('win', cv2.WINDOW_NORMAL)
    #lset_fn = lambda x: vita.set_lmd(np.log(x-10.0))
    def lset_fn(x):
        # Trackbar position (0..20) maps exponentially onto lambda.
        vita.set_lmd(np.exp(x-20.0))
    cv2.createTrackbar('lambda', 'win', 10, 20, lset_fn)
    lset_fn(cv2.getTrackbarPos('lambda', 'win'))
    matcher = Matcher()
    img = None
    while True:
        ret, img = cam.read(img)
        if not ret:
            break
        img = cv2.resize(img, None, fx=scale, fy=scale)
        data = {}
        vita(img, data)
        # Visualization is only produced once tracking is initialized.
        if ('track-img' in data):
            viz = data['track-img']
            #cv2.imwrite('/tmp/frame{:04d}.png'.format(iter), (viz*255).astype(np.uint8) )
            cv2.imshow('win', viz)
        k = cv2.waitKey(1)
        if k in [27, ord('q')]:
            break
        if k in [ord('r')]:
            vita.reset()
        iter += 1
def run(self, ips, imgs, para=None):
    """SURF keypoint detection on two images, geometric match filtering, and
    installation of interactive pickers on both image panels."""
    ips1 = WindowsManager.get(para['img1']).ips
    ips2 = WindowsManager.get(para['img2']).ips
    # Legacy OpenCV 2.x API (cv2.SURF); parameters come from the dialog.
    detector = cv2.SURF(hessianThreshold=para['thr'], nOctaves=para['oct'],
                        nOctaveLayers=para['int'], upright=para['upright'],
                        extended=para['ext'])
    kps1, feats1 = detector.detectAndCompute(ips1.get_img(), None)
    kps2, feats2 = detector.detectAndCompute(ips2.get_img(), None)
    # dim = transform model DOF (0=none, 6=affine, 8 presumably homography —
    # the 'H**o' key looks obfuscated; confirm); std is a percentage.
    dim, std = {
        'None': 0,
        'Affine': 6,
        'H**o': 8
    }[para['trans']], para['std'] / 100.0
    style = para['style'] == 'Blue/Yellow'
    idx, msk, m = Matcher(dim, std).filter(kps1, feats1, kps2, feats2)
    # Mirrored pickers: one per panel, flagged by the direction boolean.
    picker1 = Pick(kps1, kps2, idx, msk, ips1, ips2, True, style)
    picker2 = Pick(kps1, kps2, idx, msk, ips1, ips2, False, style)
    ips1.tool, ips1.mark = picker1, picker1
    ips2.tool, ips2.mark = picker2, picker2
    if para['log']:
        self.log(kps1, kps2, msk, m, dim)
    ips1.update, ips2.update = True, True
def _detect_py_ver(self, filename):
    '''Heuristically decide whether a source file is Python 2 or Python 3.

    Returns 2 or 3 when a heuristic fires, or False when undecidable.
    Raises Exception when the file does not exist.
    (Docstring translated from Chinese; the runtime error message is kept.)
    '''
    if not os.path.exists(filename):
        raise Exception('文件不存在')
    with open(filename, 'r', encoding='utf-8') as f:
        data = f.read()
    # Python-2-only constructs.
    if 'print "' in data or "print '" in data or 'exec ' in data or 'xrange' in data or 'raw_input' in data:
        return 2
    # Python-3-style calls.
    elif 'print(' in data or 'print (' in data or 'exec(' in data:
        return 3
    else:
        # Fall back to except-clause syntax: "except E, e:" is py2-only;
        # "except E as e:" is taken as a py3 indicator here.
        M = Matcher('*except *,*:*', tuple())
        if True in (M.is_match(l) for l in data.split('\n')):
            return 2
        M.set_substr('*except * as *:*')
        if True in (M.is_match(l) for l in data.split('\n')):
            return 3
    return False
class VitaminE(object):
    """Curvature-extrema feature tracker (Vitamin-E style).

    Per frame: extract local curvature maxima, match against the previous
    frame, estimate the dominant motion, and advance the point tracks.
    Optionally renders a debug visualization into data['track-img'].
    """

    def __init__(self, lmd=0.001, dbg=True):
        self.matcher_ = Matcher(dbg=dbg)
        self.lmd_ = lmd  # lambda parameter, see set_lmd()
        self.db_ = []    # per-frame (image, extrema-indices) history
        # tracking data
        self.trk_ = None   # currently tracked points
        self.path_ = []    # per-frame history of tracked point positions
        self.cols_ = None  # per-track visualization colors
        # debugging flag
        self.dbg_ = dbg
        self.reset()

    # properties / parameter setting
    def set_lmd(self, lmd):
        print('lambda : {}'.format(lmd))
        self.lmd_ = lmd

    def reset(self):
        # reset all **data** properties
        self.db_ = []
        self.trk_ = None
        self.path_ = []
        self.cols_ = None

    def __call__(self, img, data={}):
        """
        Run Vitamin-E on RGB image
        """
        # NOTE(review): mutable default argument data={} is shared across
        # calls when omitted — callers here pass their own dict; confirm intent.
        kappa = curvature(img / 255.0)
        knorm = np.linalg.norm(kappa, axis=-1)
        max_msk, idx = local_maxima(knorm)
        self.db_.append( (img, idx) )
        if len(self.db_) <= 1:
            # Need at least two frames before matching can start.
            return True
        mdata = self.matcher_.match(self.db_[-2], self.db_[-1], scale=1.0, data=data)
        # Dominant (affine-style) motion between the last two frames.
        T_A, T_b = get_dominant_motion(mdata)
        if self.trk_ is None:
            # initialize track with initial extrema points
            self.trk_ = self.db_[-2][1]
            self.path_ = self.trk_[None, :]
            self.cols_ = np.random.uniform(0, 255, (len(self.trk_),3))
        self.trk_, good = vitatrack(self.trk_, knorm, T_A, T_b)
        #trk, good = lktrack(db[-2][0], db[-1][0], trk)
        # append + filter data by currently active points
        self.path_ = self.path_[:, good]
        self.cols_ = self.cols_[good]
        self.path_ = np.append(self.path_, self.trk_[None,:], axis=0)
        if self.dbg_:
            # add visualization: track polylines blended over the frame,
            # current points as circles, extrema mask overlaid.
            viz = img.copy()
            for p, c in zip(self.path_.swapaxes(0,1)[...,::-1], self.cols_):
                cv2.polylines(viz,
                        #path.swapaxes(0,1)[...,::-1],
                        p[None,...],
                        False,
                        c
                        )
            viz = cv2.addWeighted(img, 0.75, viz, 0.25, 0.0)
            for p, c in zip(self.trk_,self.cols_):
                cv2.circle(viz, (p[1], p[0]), 2, c)
            #viz = cv2.addWeighted(viz, 1.0, max_msk, 255.0, 0.0)
            viz = np.clip(viz + (max_msk * 255), 0, 255).astype(np.uint8)
            data['track-img'] = viz
def test_getBestResultsNonMatchingWithUnicode(self):
    """A title with no overlap — including unicode — must yield no match."""
    search_results = [{'SearchKey': [u'スーパー競輪']}]
    best = Matcher().getBestResults(search_results, 'Super Test Game')
    self.assertIsNone(
        best, "Expected non-matching strings to not match, including unicode")
class MatchOff_Frame ( wx.Frame ):
    """Main window of the Match Off application.

    Lets the user pick an Excel 97-2003 (.xls) file, enter the currency and
    balance column letters, validate them, and then run the match-off in a
    background MatcherThread that reports back via EVT_MATCHOFF events.
    """

    def __init__( self, main_window ):
        # Fixed-size frame; widget construction, menus and event wiring are
        # split into helper methods below.
        wx.Frame.__init__ ( self, main_window, id = wx.ID_ANY, title = u"Match Off Application", pos = wx.DefaultPosition, size = wx.Size( 509,498 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
        self.SetSizeHintsSz( wx.DefaultSize, wx.DefaultSize )
        self.SetFont( wx.Font( 11, 70, 90, 90, False, wx.EmptyString ) )
        self.init_ui()
        self.make_menu()
        self.connect_events()
        # Custom event posted by the worker thread with progress text.
        self.Bind(EVT_MATCHOFF, self.on_update)
        # Spreadsheet column letters A..Z; index == column number.
        self.colnames = list(uppercase)

    def init_ui(self):
        """Build the widget hierarchy: file picker, column inputs, buttons, log."""
        frame_sizer = wx.BoxSizer( wx.VERTICAL )
        self.main_panel = wx.Panel( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
        mp_sizer = wx.BoxSizer( wx.VERTICAL )
        # Two-column grid: labels on the left, inputs on the right.
        input_fg_sizer = wx.FlexGridSizer( 0, 2, 0, 0 )
        input_fg_sizer.AddGrowableCol( 1 )
        input_fg_sizer.SetFlexibleDirection( wx.BOTH )
        input_fg_sizer.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
        self.fchooser_text = wx.StaticText( self.main_panel, wx.ID_ANY, u"Select File:", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.fchooser_text.Wrap( -1 )
        self.fchooser_text.SetFont( wx.Font( 10, 70, 90, 90, False, wx.EmptyString ) )
        input_fg_sizer.Add( self.fchooser_text, 0, wx.ALL|wx.ALIGN_RIGHT, 5 )
        self.file_chooser = wx.FilePickerCtrl( self.main_panel, wx.ID_ANY, wx.EmptyString, u"Select a file", u"*.*", wx.DefaultPosition, wx.DefaultSize, wx.FLP_DEFAULT_STYLE )
        self.file_chooser.SetFont( wx.Font( 10, 70, 90, 90, False, wx.EmptyString ) )
        self.file_chooser.SetToolTipString( u"Click to select a file" )
        input_fg_sizer.Add( self.file_chooser, 0, wx.ALL|wx.EXPAND, 5 )
        self.ccy_label = wx.StaticText( self.main_panel, wx.ID_ANY, u"Currency Column:", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.ccy_label.Wrap( -1 )
        self.ccy_label.SetFont( wx.Font( 10, 70, 90, 90, False, wx.EmptyString ) )
        input_fg_sizer.Add( self.ccy_label, 0, wx.ALL|wx.ALIGN_RIGHT, 5 )
        # Column letters default to G (currency) and H (balance).
        self.ccy_ctrl = wx.TextCtrl( self.main_panel, wx.ID_ANY, u"G", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.ccy_ctrl.SetFont( wx.Font( 10, 70, 90, 90, False, wx.EmptyString ) )
        self.ccy_ctrl.SetToolTipString( u"Enter Column containing currency" )
        input_fg_sizer.Add( self.ccy_ctrl, 0, wx.ALL|wx.EXPAND, 5 )
        self.bal_label = wx.StaticText( self.main_panel, wx.ID_ANY, u"Balance Column:", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.bal_label.Wrap( -1 )
        self.bal_label.SetFont( wx.Font( 10, 70, 90, 90, False, wx.EmptyString ) )
        input_fg_sizer.Add( self.bal_label, 0, wx.ALL|wx.ALIGN_RIGHT, 5 )
        self.bal_ctrl = wx.TextCtrl( self.main_panel, wx.ID_ANY, u"H", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.bal_ctrl.SetToolTipString( u"Enter column containing balances" )
        input_fg_sizer.Add( self.bal_ctrl, 0, wx.ALL|wx.EXPAND, 5 )
        mp_sizer.Add( input_fg_sizer, 0, wx.EXPAND|wx.ALL, 5 )
        # Button row: Validate is visible initially; Match Off / Stop start
        # disabled+hidden and are toggled by the workflow handlers below.
        btn_sizer = wx.BoxSizer( wx.HORIZONTAL )
        self.validate_btn = wx.Button( self.main_panel, wx.ID_ANY, u"Validate Input", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.validate_btn.SetFont( wx.Font( 10, 70, 90, 90, False, wx.EmptyString ) )
        self.validate_btn.SetToolTipString( u"Click to confirm your inputs are correct" )
        btn_sizer.Add( self.validate_btn, 0, wx.ALL, 5 )
        self.match_btn = wx.Button( self.main_panel, wx.ID_ANY, u"Match Off", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.match_btn.SetFont( wx.Font( 10, 70, 90, 90, False, wx.EmptyString ) )
        self.match_btn.Enable( False )
        self.match_btn.Hide()
        self.match_btn.SetToolTipString( u"Click to start match off" )
        btn_sizer.Add( self.match_btn, 0, wx.ALL, 5 )
        self.stop_btn = wx.Button( self.main_panel, wx.ID_ANY, u"Stop Matching", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.stop_btn.Enable( False )
        self.stop_btn.Hide()
        btn_sizer.Add( self.stop_btn, 0, wx.ALL, 5 )
        mp_sizer.Add( btn_sizer, 0, wx.ALIGN_RIGHT, 5 )
        # Read-only multi-line log area fed by on_update().
        self.output_ctrl = wx.TextCtrl( self.main_panel, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 100,100 ), wx.TE_MULTILINE|wx.TE_READONLY|wx.TE_WORDWRAP )
        self.output_ctrl.SetFont( wx.Font( 12, 70, 90, 90, False, wx.EmptyString ) )
        mp_sizer.Add( self.output_ctrl, 1, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL|wx.EXPAND, 5 )
        self.main_panel.SetSizer( mp_sizer )
        self.main_panel.Layout()
        mp_sizer.Fit( self.main_panel )
        frame_sizer.Add( self.main_panel, 1, wx.EXPAND |wx.ALL, 5 )
        self.SetSizer( frame_sizer )
        self.Layout()

    def make_menu(self):
        """Create the File and Tutorial menus plus the status bar."""
        self.menu_bar = wx.MenuBar( 0 )
        self.file_menu = wx.Menu()
        self.open_mitem = wx.MenuItem( self.file_menu, wx.ID_ANY, u"&Open", wx.EmptyString, wx.ITEM_NORMAL )
        self.file_menu.AppendItem( self.open_mitem )
        self.file_menu.AppendSeparator()
        self.about_mitem = wx.MenuItem( self.file_menu, wx.ID_ANY, u"&About", wx.EmptyString, wx.ITEM_NORMAL )
        self.file_menu.AppendItem( self.about_mitem )
        self.file_menu.AppendSeparator()
        self.exit_mitem = wx.MenuItem( self.file_menu, wx.ID_ANY, u"&Exit", wx.EmptyString, wx.ITEM_NORMAL )
        self.file_menu.AppendItem( self.exit_mitem )
        self.menu_bar.Append( self.file_menu, u"&File" )
        self.tutorial_menu = wx.Menu()
        self.tutorial_mitem = wx.MenuItem( self.tutorial_menu, wx.ID_ANY, u"&tutorial", wx.EmptyString, wx.ITEM_NORMAL )
        self.tutorial_menu.AppendItem( self.tutorial_mitem )
        self.menu_bar.Append( self.tutorial_menu, u"&Tutorial" )
        self.SetMenuBar( self.menu_bar )
        self.statusbar = self.CreateStatusBar( 1, wx.ST_SIZEGRIP, wx.ID_ANY )
        self.Centre( wx.BOTH )

    def connect_events(self):
        """Wire buttons, menu items and the file picker to their handlers."""
        self.validate_btn.Bind( wx.EVT_BUTTON, self.on_validate )
        self.match_btn.Bind( wx.EVT_BUTTON, self.on_matchoff )
        self.Bind( wx.EVT_MENU, self.on_open, id = self.open_mitem.GetId() )
        self.Bind( wx.EVT_MENU, self.on_about, id = self.about_mitem.GetId() )
        self.Bind( wx.EVT_MENU, self.on_exit, id = self.exit_mitem.GetId() )
        self.Bind( wx.EVT_MENU, self.on_tutorial, id = self.tutorial_mitem.GetId() )
        self.file_chooser.Bind( wx.EVT_FILEPICKER_CHANGED, self.on_file_chooser )
        self.stop_btn.Bind( wx.EVT_BUTTON, self.on_stop )

    def on_file_chooser( self, event ):
        """A file was picked via the picker control: build the Matcher."""
        f = self.file_chooser.GetTextCtrlValue()
        fname, dirname = os.path.basename(f), os.path.dirname(f)
        self.make_matcher(fname, dirname)

    def on_validate( self, event ):
        """Validate the column inputs; on success enable the match step.

        NOTE(review): almost identical to validate_inputs() below — the two
        should probably share one implementation.
        """
        if not self.confirm_matcher(): return
        if not self.check_col_val():
            self.display_error_dlg("Either currency or balance column entered is invalid.")
            return
        else :
            # Column letter -> 0-based index; note that column 'A' (index 0)
            # is rejected by the truthiness test below.
            bal_col_num = self.colnames.index( self.col_text )
            ccy_col_num = self.colnames.index( self.ccy_text )
            if not (bal_col_num and ccy_col_num and bal_col_num < self.m.ncols and ccy_col_num < self.m.ncols ):
                self.display_error_dlg( "Either currency or balance column entered is invalid." )
                return
            if self.make_matcher_input_cols( ccy_col_num, bal_col_num ):
                self.validate_btn.Disable()

    def on_matchoff(self, evt):
        """Start the match-off in a worker thread and flip button states."""
        if not self.confirm_matcher(): return
        matcher_thread = MatcherThread(self)
        matcher_thread.start()
        self.file_chooser.SetPath("")
        self.match_btn.Disable()
        self.match_btn.Hide()
        self.stop_btn.Enable()
        self.stop_btn.Show()
        self.Layout()

    def on_update(self,evt):
        """Append a progress line posted by the worker thread to the log."""
        self.output_ctrl.AppendText( evt.get_value() )

    def on_open( self, event ):
        """File > Open: classic file dialog as an alternative to the picker."""
        self.dirname = ''
        dlg = wx.FileDialog(self, "Choose a file", self.dirname, "", "*.*", wx.OPEN)
        if dlg.ShowModal() == wx.ID_OK:
            fname = dlg.GetFilename()
            self.dirname = dlg.GetDirectory()
            self.make_matcher(fname, self.dirname)
        dlg.Destroy()

    def on_stop( self, event ):
        # Stop is not implemented; event propagates unhandled.
        event.Skip()

    def make_matcher(self, fname, dirname):
        """Validate the chosen file and construct self.m (the Matcher).

        NOTE(review): if verify_file_ext() fails on the very first call,
        self.filename is never assigned, yet verify_filename() reads it —
        an AttributeError is possible on that path. Confirm intent.
        """
        if self.verify_file_ext(fname):
            self.filename = os.path.join(dirname, fname)
        else:
            self.display_error_dlg("Invalid file selected.\nPlease select a valid excel 97-2003 file!")
            return
        if self.verify_filename():
            self.m = Matcher(self.filename)
            self.m.dosetup()

    def on_about( self, event ):
        # Reuses the error dialog helper with an informational caption.
        self.display_error_dlg(msg="An application to match off credits and debits", caption="Information")

    def on_exit( self, event ):
        self.Destroy()

    def on_tutorial( self, event ):
        # Tutorial is not implemented; event propagates unhandled.
        event.Skip()

    def verify_file_ext(self, name):
        """Return True if *name* has a .xls extension (case-insensitive)."""
        extension = os.path.splitext(name)[1]
        return extension.lower() == ".xls"

    def display_error_dlg(self, msg, caption="Error!!"):
        """Show a modal message box with *msg* (also used for info messages)."""
        dlg = wx.MessageDialog(self, message=msg, caption=caption, style=wx.OK)
        dlg.ShowModal()
        dlg.Destroy()

    def verify_filename(self):
        """True if a .xls filename has been recorded, else show an error."""
        if self.filename and self.verify_file_ext(self.filename):
            return True
        else:
            self.display_error_dlg("No file selected!")
            return False

    def validate_inputs(self):
        """Duplicate of on_validate() without the button toggling.

        NOTE(review): dead/duplicated code path — nothing in this class
        binds it; consider consolidating with on_validate().
        """
        if not self.confirm_matcher(): return
        if not self.check_col_val():
            self.display_error_dlg("Either currency or balance column entered is invalid.")
            return
        else :
            bal_col_num = self.colnames.index( self.col_text )
            ccy_col_num = self.colnames.index( self.ccy_text )
            if not (bal_col_num and ccy_col_num and bal_col_num < self.m.ncols and ccy_col_num < self.m.ncols ):
                self.display_error_dlg( "Either currency or balance column entered is invalid." )
                return
            if not self.make_matcher_input_cols( ccy_col_num, bal_col_num ):
                return

    def confirm_matcher(self):
        """True if a Matcher with a filename exists; otherwise prompt and fail."""
        self.m = getattr(self, "m", None)
        if not (self.m and self.m.filename):
            self.display_error_dlg('No Valid File Chosen. Please click "File" in the menubar and select "Open".')
            return False
        return True

    def check_col_val(self):
        """Normalize the two column letters and check they are valid A-Z names.

        Side effect: stores the upper-cased letters in self.col_text /
        self.ccy_text for on_validate()/validate_inputs() to index with.
        NOTE(review): when either field is empty this falls through and
        returns None (falsy), which callers treat as failure — confirm that
        is the intended contract.
        """
        col,ccy = self.bal_ctrl.GetValue(), self.ccy_ctrl.GetValue()
        if col and ccy:
            self.col_text = col.strip().upper()
            self.ccy_text = ccy.strip().upper()
            if self.col_text and self.ccy_text and self.col_text.isalpha() and self.ccy_text.isalpha()\
                and self.col_text.upper() in self.colnames and self.ccy_text.upper() in self.colnames:
                return True
            else:
                return False

    def make_matcher_input_cols(self, ccy_col_num, bal_col_num):
        """Check the currency column against the Matcher's FX table and,
        if valid, prime the Matcher and reveal the Match Off button."""
        # Row 4 is probed as a sample currency cell — presumably the first
        # data row of the sheet; verify against Matcher.get_cell_val.
        ccy = self.m.get_cell_val(4,ccy_col_num)
        if not ccy in self.m.FX:
            self.display_error_dlg("Selected Currency column is invalid.\nPlease input a valid currency column.")
            return False
        self.m.make_start_cols(ccy_col_num, bal_col_num)
        self.display_error_dlg("All inputs valid. Click 'Match Off' button to start")
        self.match_btn.Enable()
        self.match_btn.Show(True)
        self.validate_btn.Hide()
        self.Layout()
        return True
# Live-camera logo matching demo (Python 2: note the print statement below).
import time
import cv2
from matcher import Matcher

# Matcher is configured with (label, template-path) pairs; a frame matches a
# template when at least 15% of its keypoints match.
matcher = Matcher([("fau-logo", "./templates/fau-logo.png"),
                   ("first-logo", "./templates/first-logo.jpg"),
                   ("nextera-logo", "./templates/nextera-energy-logo.jpg"),
                   ("techgarage-logo", "./templates/techgarage-logo.png")
                   ], min_keypoints_pct_match=15)
# Camera index 1 — an external webcam is assumed; TODO confirm.
cam = cv2.VideoCapture(1)
cnt = 0  # NOTE(review): never used
while True:
    # NOTE(review): `grabbed` is ignored — a failed read passes None to match().
    (grabbed, img) = cam.read()
    print matcher.match(img)
    cv2.imshow("Pic", img)
    key = cv2.waitKey(10)
    # Quit on 'q'. NOTE(review): cam.release()/destroyAllWindows() are never
    # called after the loop exits.
    if key == ord('q'):
        break
# NOTE(review): this orphaned call reads like the tail of a video-frame
# callback whose `def` was lost above (a `video_frame` name is referenced
# below but never defined here) — confirm against the original file.
cv2.waitKey(10)

def video_start():
    """Callback invoked when the drone video stream starts."""
    print("Starting video...")
    cv2.namedWindow("Drone")

def video_end():
    """Callback invoked when the drone video stream ends."""
    print("Ending video...")
    cv2.destroyWindow("Drone")
    # Have to send waitKey several times on Unix to make window disappear
    for i in range(1, 5):
        cv2.waitKey(1)

# Template matcher configured with (label, template-path) pairs; a match
# requires at least 10% of keypoints to agree.
matcher = Matcher([("fau-logo", "../opencv/templates/fau-logo.png"),
                   ("first-logo", "../opencv/templates/first-logo.jpg"),
                   ("nextera-logo", "../opencv/templates/nextera-energy-logo.jpg"),
                   ("techgarage-logo", "../opencv/templates/techgarage-logo.png")
                   ], min_keypoints_pct_match=10)
print("Connecting to drone..")
drone = Bebop()
drone.video_callbacks(video_start, video_end, video_frame)
drone.videoEnable()
print("Connected.")
# Main command-polling loop (Python 2: xrange). `command` is defined
# elsewhere — presumably set asynchronously by a controller; verify.
for i in xrange(10000):
    if command is None:
        drone.update( );
    elif command == "TAKEOFF":
        # NOTE(review): loop body appears truncated here in this chunk.
        print("Taking offf.........................")
from mongo import Mongo
from matcher import Matcher

# Select which lookup/matching operation this driver performs.
METHOD = "ML_direct_fb"  # edit this depending on your need

mon = Mongo()
mon.connect()

###############
if METHOD == "getuser_fb":
    (f, fd) = mon.getFacebookUser('MayorMurielBowser', returnDoc=True)
elif METHOD == "getuser_tw":
    (t, td) = mon.getTwitterUser('PerlmanOfficial', returnDoc=True)
else:
    # note that this takes a bit of time to load
    matcher = Matcher(mon)
    # METHOD -> (Matcher method name, account handle). Any other value
    # (including the default "ML_direct_fb") runs nothing further, exactly
    # as in the original elif chain.
    lookup = {
        "direct_fb": ("findMatchForFacebookUser", 'Itzhakperlmanofficial'),
        "direct_tw": ("findMatchForTwitterUser", 'PerlmanOfficial'),
        "indirect_fb": ("findIndirectMatchForFacebookUser", 'Itzhakperlmanofficial'),
        "indirect_tw": ("findIndirectMatchForTwitterUser", 'PerlmanOfficial'),
    }
    if METHOD in lookup:
        finder_name, handle = lookup[METHOD]
        match = getattr(matcher, finder_name)(handle, useML=False)
class HIDMapperController (object):
    """Captures events from the profile's input device(s), turns pressed-key
    combinations into gestures, matches gesture sequences against the
    profile's mapping, and injects the mapped events through a virtual
    uinput device.
    """

    def __init__ (self):
        self._profile = None              # active profile object
        self._running = False             # capture/process loops run while True
        self._input_devices = None        # evdev InputDevice list for the profile
        self._file_descriptors = None     # fd -> device index
        self._allowed_event_types = None  # event types the profile supports
        self._event_status = None         # event code -> 0/1 pressed state
        self._gesture_codes = deque()     # queue of (timestamp, gesture-set)
        self._last_queued = None          # last gesture set enqueued (dedup)
        self._virtual_input = None        # UInput used for injection
        self._matcher = None              # gesture-sequence matcher

    @property
    def profile (self):
        """The active profile object.

        BUG FIX: the original getter was declared as
        ``def profile(self, profile_obj)`` and assigned instead of
        returning, so every read of ``.profile`` raised TypeError.
        """
        return self._profile

    @profile.setter
    def profile (self, profile_obj):
        self._profile=profile_obj

    def _prepare_device (self):
        """Open the profile's devices, print the mapping, and build the
        virtual uinput device plus the gesture matcher."""
        self._event_status = {key:0 for key in self._profile.device.get_events_supported()}
        self._allowed_event_types = set([getattr(ecodes,i[-1].split(':')[0]) for i in self._event_status])
        # NOTE(review): under Python 3 `map` is a one-shot iterator — the
        # enumerate below would exhaust it before start() grabs the devices.
        # Looks written for Python 2 (list); confirm target interpreter.
        self._input_devices = map(InputDevice, self._profile.device.devices)
        self._file_descriptors = { dev.fd: i for i, dev in enumerate(self._input_devices) }
        print("Mappings for '" + self._profile.name+"' " + '-'*30)
        for k,v in sorted(self._profile.mapping.items()):
            print(str(k.replace("EV_KEY:KEY_","").replace("ENTER","<-'").replace("APOSTROPHE","'").replace(",","")).lower().rjust(20)+" as "+(', '.join(v)).replace("r_hand_tap_", "").replace("_vs_thumb", ""))
        # Prepare virtual device for event injection: collect every event
        # code reachable from the mapping, grouped by event type.
        capabilities = {}
        for ev_chain in self._profile.mapping:
            for k in re.split('[+,]', ev_chain):
                et, ec = k.split(':', 1)
                etype = ecodes.ecodes[et]
                if etype in capabilities:
                    capabilities[etype].append(ecodes.ecodes[ec])
                else:
                    capabilities[etype] = [ ecodes.ecodes[ec] ]
        self._virtual_input = UInput(events = capabilities)
        # Prepare matcher
        self._matcher = Matcher(self._profile)

    def start (self):
        """ Start capturing from the device(s) of the current profile """
        self._prepare_device()
        try:
            for dev in self._input_devices:
                dev.grab()  # exclusive access so events don't leak to the OS
        except Exception as e:
            print("Unable to grab device", e)
        self._running = True
        spawn(self._capture_loop)
        spawn(self._process_loop)
        sleep(0)  # yield so the greenlets start

    def stop (self):
        """ Stop capturing and release the device(s) """
        self._running = False
        try:
            for dev in self._input_devices:
                dev.ungrab()
        except:
            # best-effort release; device may already be gone
            pass
        if self._virtual_input:
            self._virtual_input.close()

    def _capture_loop (self):
        """Poll the devices and record press/release transitions; any
        unexpected error stops the controller."""
        try:
            devices = {dev.fd : dev for dev in self._input_devices}
            while self._running:
                r,_,_ = select(devices, [], [], timeout=self._profile.double_click_timeout/1000.0)
                for fd in r:
                    for event in devices[fd].read():
                        if self.is_allowed_event(event, fd):
                            event_code = self.get_event_code(event, fd)
                            if event.value == 1:
                                # key pressed: record and snapshot gestures
                                self.set_event_status(event_code, 1)
                                self._store_gestures()
                            elif event.value == 0:
                                # key released: record only
                                self.set_event_status(event_code, 0)
                        else:
                            print("What is this?",event)
        except:
            self._running = False
            traceback.print_exc()

    def _store_gestures (self):
        """ Fetch current gestures, select those allowed in the profile and store them in the queue of gesture codes """
        filtered_gestures = set([ gesture for gesture in self.get_current_gestures() if gesture in self._profile.gestures ])
        if not filtered_gestures:
            return
        if filtered_gestures != self._last_queued:
            distance = 1.0
            if self._last_queued:
                distance = self._profile.get_gestures_distance(filtered_gestures, self._last_queued)
            self._last_queued = filtered_gestures
            if distance <= 0.5:
                # Near-duplicate gesture: drop the previous queue entry
                # unless it is old enough to be a deliberate repeat.
                try:
                    timestamp, last_queued = self._gesture_codes.pop()
                    if time.time() - timestamp > self._profile.double_click_timeout/6000.0:
                        self._gesture_codes.append((timestamp, last_queued))
                except IndexError:
                    pass  # empty queue
            self._gesture_codes.append((time.time(), filtered_gestures))

    def _process_loop (self):
        """ TODO: Process the queue of gestures and inject remapped events """
        while self._running:
            try:
                timestamp, prefixes = self._gesture_codes.pop()
                reduced = self._profile.reduce_gestures(prefixes)
                self._matcher.add_prefix(timestamp, reduced)
                # With every possible option if any...
                for candidate, output_event in self._matcher.get_matching_codes():
                    self._inject_event(output_event)
            except IndexError:
                pass  # queue empty — just sleep and retry
            except:
                print("Processing error")
                traceback.print_exc()
            sleep(self._profile.double_click_timeout/5000.0)

    def _inject_event (self, event_codes):
        """ Insert one or several events in the virtual input
        event_code is a string, normally, that contains an event, like 'EV_KEY:KEY_A'
        event_code can have a sequence of events, like 'EV_KEY:KEY_A,EV_KEY:KEY_B,EV_KEY:KEY_C'
        event_code can contain chained events, to make combination of keys, like: 'EV_KEY:KEY_LEFTCTRL+EV_KEY:KEY_C'
        """
        if isinstance(event_codes, str):
            event_codes = event_codes.split(",")
        for event_code_combo in event_codes:
            # operation 1 = press, 0 = release — each combo is pressed then released
            for operation in [1, 0]:
                for event_code in event_code_combo.split("+"):
                    if isinstance(event_code, str):
                        event_code = event_code.split(':')
                    if len(event_code) == 1 and ':' in event_code[0]:
                        event_code = event_code[0].split(':')
                    etype = ecodes.ecodes[event_code[0]]
                    ecode = ecodes.ecodes[event_code[1]]
                    self._virtual_input.write(etype, ecode, operation) # pressed
                    self._virtual_input.syn()

    def find_event_keys (self, event_code, where):
        """ param event_code: Event code to search (as a tuple)
        param where: A dictionary whose keys are event codes
        Search and returns a list of matching keys for an event code, in the dictionary or list 'where' """
        matches = []
        event_keys = [event_code]
        if len(event_code) < 2:
            # Bare code: also try every device-prefixed variant.
            for dev_num, _ in enumerate(self._file_descriptors):
                event_keys.append(('DEV_%d' % dev_num, event_code[0]))
        else:
            event_keys.append((event_code[1],))
        for ev in event_keys:
            if ev in where:
                matches.append(ev)
        return matches

    def is_allowed_event (self, event, fd = None):
        """ Decide if the controller must capture this event or not """
        if event.type in self._allowed_event_types:
            event_code = self.get_event_code(event)
            if fd is not None:
                key1 = ('DEV_%d' % self._file_descriptors[fd], event_code[0])
                key2 = event_code
                return key1 in self._event_status or key2 in self._event_status
            else:
                if not event_code in self._event_status:
                    for dev_num, _ in enumerate(self._file_descriptors):
                        key = ('DEV_%d' % dev_num, event_code[0])
                        if key in self._event_status:
                            return True
                else:
                    return True
        return False

    def get_event_code (self, event, fd = None):
        """ Returns the event code (type+code), prefixed by the device if 'fd' is present """
        name = ecodes.bytype[event.type][event.code]
        if isinstance(name, list):
            name = name[0]
        event_code = "%s:%s" % (ecodes.EV[event.type], name)
        if fd is not None:
            return ('DEV_%d' % self._file_descriptors[fd], event_code)
        return (event_code, )

    def get_current_gestures (self, where_to_find = None, current_gestures = None):
        """ return: A set of gestures activated at this moment """
        if where_to_find is None:
            where_to_find = self._profile.device.gestures_lut
        if current_gestures is None:
            current_gestures = set([])
        for ev in self._event_status:
            if self._event_status[ev]: # Is pressed?
                for found in self.find_event_keys(ev, where_to_find):
                    result = where_to_find[found]
                    if isinstance(result, dict):
                        # nested lookup table: recurse, accumulating in place
                        self.get_current_gestures(result, current_gestures)
                    else:
                        current_gestures.add(result)
        return current_gestures

    def set_event_status (self, event_code, status):
        """ Sets the status for every event that matches the event_code """
        for ev_key in self.find_event_keys(event_code, self._event_status):
            self._event_status[ev_key] = status

    def __del__ (self):
        print("Stopping capturer")
        if self._running:
            self.stop()
class MatcherTest(unittest.TestCase):
    """Tests for the puzzle-piece Matcher (64-bit board masks)."""

    def setUp(self):
        self.matcher = Matcher()

    def test_it_should_be_able_to_create_instance(self):
        self.assertEquals(Matcher, self.matcher.__class__)

    def test_it_should_be_able_to_say_that_this_is_the_solution(self):
        p = Possibility([
            0b1110000000000000111110000000000011100000000000001110000010000000,
            0b0000000000110011111001100000000011100000000000001110000101000000,
            0b0000001110000000111000011000000011100000000000001110001000100000,
            0b0000110000000000111000000110000011100000000000001110010000010000,
            0b0001000001000000111000000001000011100000000000001110100000001000,
            0b0000000000001100111000000000110011100000000000001111000000000100,
            0b0000000000000000111000000000001111100000000000001110000000000010,
            0b0000000000000000111000000000000011110000000000011110000000000000,
            0b0000000000000000111000000000000011101100000000101110000000000001,
            0b0000000000000000111000000000000011100010000001001110000000000000,
            0b0000000000000000111000000000000011100001000010001110000000000000,
            0b0000000000000000111000000000000011100000100100001110000000000000,
            0b0000000000000000111000000000000011100000011000001110000000000000
        ])
        self.assertEquals(True, self.matcher.match(p))

    def test_it_should_be_able_to_say_that_this_pieces_are_fitting(self):
        self.assertEquals(True, self.matcher.areTheyFitting(
            0b1110000000000000111110000000000000000000000000000000000000000000,
            0b0001100000000000000001100000000000000000000000000000000000000000
        ))

    def test_it_should_be_able_to_say_which_pieces_are_fitting(self):
        p = Possibility([
            0b1111100000000000000000000000000000000000000000000000000000000000,
            0b0000011111000000000000000000000000000000000000000000000000000000,
            0b0000000000111110000000000000000000000000000000000000000000000000,
            0b0000000000000001111100000000000000000000000000000000000000000000,
            0b0000000000000000000011111000000000000000000000000000000000000000,
            0b0000000000000000000000000111110000000000000000000000000000000000,
            0b0000000000000000000000000000001111100000000000000000000000000000,
            0b0000000000000000000000000000000000011111100000000000110000000000,
            0b0000000000000000000000000000000000001110000000000000001101000000,
            0b0000000000000000000000000000000000000000001111110000000011100000,
            0b0000000000000000000000000000000000000000000001100000000000011100,
            0b0000000000000000000000000000000000000000000000000000000000000111,
            0b0000000000000000000000000000000000000000011100000000000001110000
        ])
        self.matcher.evaluate(p)
        # BUG FIX: the original asserted `[...].sort() == p.pieces_fitted.sort()`;
        # list.sort() sorts in place and returns None, so the assertion
        # compared None with None and could never fail. Compare the sorted
        # copies returned by sorted() instead.
        self.assertEquals(sorted([
            0b1111100000000000000000000000000000000000000000000000000000000000,
            0b0000011111000000000000000000000000000000000000000000000000000000,
            0b0000000000111110000000000000000000000000000000000000000000000000,
            0b0000000000000001111100000000000000000000000000000000000000000000,
            0b0000000000000000000011111000000000000000000000000000000000000000,
            0b0000000000000000000000000111110000000000000000000000000000000000,
            0b0000000000000000000000000000001111100000000000000000000000000000,
            0b0000000000000000000000000000000000011111100000000000110000000000,
            0b0000000000000000000000000000000000000000001111110000000011100000,
            0b0000000000000000000000000000000000000000000000000000000000000111
        ]), sorted(p.pieces_fitted))
def setUp(self):
    """Create a fresh default Matcher before each test."""
    self.matcher = Matcher()
class SSD300:
    """SSD object detector with a fixed 300x300x3 input.

    Wraps a project SSD network: builds the graph, generates default boxes
    from the feature-map shapes, sets up the loss/optimizer, and provides
    infer()/train() entry points driven through a tf.Session.
    """

    def __init__(self, sess):
        """ initialize SSD model as SSD300 whose input size is 300x300 """
        self.sess = sess
        # define input placeholder and initialize ssd instance
        self.input = tf.placeholder(shape=[None, 300, 300, 3], dtype=tf.float32)
        self.ssd = SSD()
        # build ssd network => feature-maps and confs and locs tensor is returned
        fmaps, confs, locs = self.ssd.build(self.input, is_training=True)
        # zip running set of tensor
        self.pred_set = [fmaps, confs, locs]
        # required param from default-box and loss function
        # (renamed loop variable: the original used `map`, shadowing the builtin)
        fmap_shapes = [fmap.get_shape().as_list() for fmap in fmaps]
        self.dboxes = generate_boxes(fmap_shapes)
        print(len(self.dboxes))
        # required placeholder for loss
        loss, loss_conf, loss_loc, self.pos, self.neg, self.gt_labels, self.gt_boxes = self.ssd.loss(
            len(self.dboxes))
        self.train_set = [loss, loss_conf, loss_loc]
        optimizer = tf.train.AdamOptimizer(learning_rate=1e-3, beta1=0.9, beta2=0.999, epsilon=1e-08, use_locking=False, name='Adam')
        self.train_step = optimizer.minimize(loss)
        # provides matching method
        self.matcher = Matcher(fmap_shapes, self.dboxes)

    # inference process
    def infer(self, images):
        """Run the forward pass; returns (pred_confs, pred_locs)."""
        feature_maps, pred_confs, pred_locs = self.sess.run(
            self.pred_set, feed_dict={self.input: images})
        return pred_confs, pred_locs

    # training process
    def train(self, images, actual_data):
        """One training step on a batch.

        actual_data[i] holds per-object rows: first 4 entries are a corner
        box, the rest a one-hot class vector. Returns
        (pred_confs, pred_locs, batch_loc, batch_conf, batch_loss).
        """
        # ================ RESET / EVAL ================ #
        positives = []
        negatives = []
        ex_gt_labels = []
        ex_gt_boxes = []
        # ===================== END ===================== #
        # call prepare_loss per image
        # because matching method works with only one image
        def prepare_loss(pred_confs, pred_locs, actual_labels, actual_locs):
            pos_list, neg_list, t_gtl, t_gtb = self.matcher.matching(
                pred_confs, pred_locs, actual_labels, actual_locs)
            positives.append(pos_list)
            negatives.append(neg_list)
            ex_gt_labels.append(t_gtl)
            ex_gt_boxes.append(t_gtb)
        feature_maps, pred_confs, pred_locs = self.sess.run(
            self.pred_set, feed_dict={self.input: images})
        for i in range(len(images)):
            actual_labels = []
            actual_locs = []
            # extract ground truth info
            for obj in actual_data[i]:
                loc = obj[:4]
                label = np.argmax(obj[4:])
                # transform location for voc2007
                loc = convert2wh(loc)
                loc = corner2center(loc)
                actual_locs.append(loc)
                actual_labels.append(label)
            prepare_loss(pred_confs[i], pred_locs[i], actual_labels, actual_locs)
        # evaluate the loss, then apply one optimizer step with the same feed
        batch_loss, batch_conf, batch_loc = \
            self.sess.run(self.train_set, \
            feed_dict={self.input: images, self.pos: positives, self.neg: negatives, self.gt_labels: ex_gt_labels, self.gt_boxes: ex_gt_boxes})
        self.sess.run(self.train_step, \
            feed_dict={self.input: images, self.pos: positives, self.neg: negatives, self.gt_labels: ex_gt_labels, self.gt_boxes: ex_gt_boxes})
        return pred_confs, pred_locs, batch_loc, batch_conf, batch_loss
class GameDB(QObject):
    """Game-database ETL pipeline (Python 2 code: print statements,
    iteritems/itervalues).

    Imports DAT files and scraper CSV data into a Database, fuzzy-matches
    software/releases against scraper entries via Matcher, and exports
    libretro-style RDB/DAT files via Exporter.
    """

    def __init__(self,checkRuns={},parent=None):
        QObject.__init__(self, parent)
        # NOTE(review): mutable default `checkRuns={}` is read-only here so
        # it is harmless, but `self.runs` is only defined when checkRuns is
        # falsy — a truthy checkRuns leaves `self.runs` unset. Confirm intent.
        if(not checkRuns):
            self.runs = {}
        self.regexes = GameDBRegex()
        self.dats = []
        self.scrapers = []
        self.exporter = Exporter()
        self.matcher = Matcher()
        self.patcher = Patcher('patches.xlsx')
        self.database = Database()

    # Qt signals (class attributes) for progress reporting.
    started = pyqtSignal(str)
    progress = pyqtSignal(int)
    newmax = pyqtSignal(int)
    finished = pyqtSignal(str)

    def import_dats(self):
        """Read every DAT file under ./DAT and import it, then load new ROMs."""
        self.dats = []
        for xmlfile in os.listdir("DAT"):
            dat = DAT()
            dat.read_dat(os.path.join("DAT",xmlfile))
            self.dats.append(dat)
        for dat in self.dats:
            print "parsing " + dat.filename
            self.import_dat(dat)
        self.dats = None  # release parsed DATs
        self.import_new_ROMS()
        # temp fix to load serial
        print "Importing PSP serials"
        dat = DAT()
        dat.read_dat('libretro-database/metadat/no-intro/Sony - PlayStation Portable.dat')
        self.import_softwareflags(dat)

    def import_dat(self,dat):
        """Insert one DAT file's header, games and ROMs into the database."""
        datFileId = None
        datGameId = None
        datRomId = None
        self.regexes.init_regexes(dat.releaseGroup)
        sysresult = self.regexes.get_regex_result("System",dat.header["name"])
        systemId = self.database.getSystem(sysresult.group('Manufacturer'),sysresult.group('Name'))
        datType = 'Standard' if sysresult.group('DatType') == None else sysresult.group('DatType')
        # Prefer the explicit version; fall back to the DAT's date.
        datVersion = dat.header['version'] if dat.header['version'] is not None else dat.header['date']
        #check if version is a date
        datDate = self.regexes.get_cleaned_date(datVersion)
        if datDate is not None:
            datVersion = datDate
        datFileId = self.database.getDATFile(systemId,dat.filename,datType,dat.releaseGroup,datVersion)
        for gamekey,gamevalue in dat.softwares.iteritems():
            datGameId = self.database.getDATGame(datFileId,gamevalue['Name'],gamevalue['CloneOf'],gamevalue['RomOf'])
            for rom in gamevalue['Roms']:
                datRomId = self.database.getDATROM(datFileId,datGameId,rom['name'],rom['merge'],rom['size'],rom['crc'],rom['md5'],rom['sha1'])
        self.database.save()

    def import_new_ROMS(self):
        """Promote newly imported DAT ROM rows into software/release/ROM rows.

        datRom row layout (by observed indexing): [0]=system id,
        [1]=release group, [2]=release name, [3]=parent/clone name,
        [5:]=ROM columns — presumably; verify against Database.getNewRoms.
        """
        systemId = None
        releaseGroup = None
        softwareId = None
        releaseId = None
        romId = None
        datRoms = self.database.getNewRoms()
        for datRom in datRoms:
            #get releaseGroup regexes
            if releaseGroup != datRom[1]:
                releaseGroup = datRom[1]
                self.regexes.init_regexes(releaseGroup)
            #get system
            if systemId != datRom[0]:
                systemId = datRom[0]
                print "exporting new roms for " + self.database.getSystemName(systemId)
            #get software
            gameName = datRom[2] if datRom[3] == '' else datRom[3]
            softwareId = self.import_software(gameName,systemId)
            #get release
            releaseName = datRom[2]
            releaseId = self.import_release(releaseName,softwareId)
            #release flags
            self.import_releaseflags(releaseName,releaseId)
            romId = self.database.getROM(releaseId,*datRom[5:])
        self.database.save()

    def import_software(self,gameName,systemId):
        """Parse a game name into (name, type) and return its software id."""
        softresult = self.regexes.get_regex_result("Software",gameName)
        softwarename = softresult.group('Name')
        # BIOS dumps outrank an explicit Type; default is a plain Game.
        softwaretype = 'BIOS' if softresult.group('BIOS') is not None else softresult.group('Type') if softresult.group('Type') is not None else 'Game'
        return self.database.getSoftware(systemId,softwarename,softwaretype)

    def import_release(self,releaseName,softwareId):
        """Classify a release name (compilation/dev status/demo/license,
        default Commercial) and return its release id."""
        compresult = self.regexes.get_regex_result("Compilation",releaseName)
        devstatusresult = self.regexes.get_regex_result("DevStatus",releaseName)
        demoresult = self.regexes.get_regex_result("Demo",releaseName)
        licenseresult = self.regexes.get_regex_result("License",releaseName)
        if compresult is not None:
            releaseType = compresult.group('Compilation')
        elif devstatusresult is not None:
            releaseType = devstatusresult.group('DevStatus')
        elif demoresult is not None:
            releaseType = demoresult.group('Demo')
        elif licenseresult is not None:
            releaseType = licenseresult.group('License')
        else:
            releaseType = 'Commercial'
        return self.database.getRelease(releaseName,releaseType,softwareId)

    def import_softwareflags(self,dat):
        """Import per-game flags (currently the Serial/ProductID) from a DAT."""
        for gamekey,gamevalue in dat.softwares.iteritems():
            # NOTE(review): system lookup is loop-invariant (depends only on
            # dat.header) and could be hoisted out of the loop.
            sysresult = self.regexes.get_regex_result("System",dat.header["name"])
            systemId = self.database.getSystem(sysresult.group('Manufacturer'),sysresult.group('Name'))
            softwareId = self.import_software(gamevalue['Name'],systemId)
            for rom in gamevalue['Roms']:
                releaseId = self.import_release(gamevalue['Name'],softwareId)
                if 'Serial' in gamevalue:
                    self.database.addReleaseFlagValue(releaseId,self.database.getReleaseFlag('ProductID'),gamevalue['Serial'])
        self.database.save()

    def import_releaseflags(self,releaseName,releaseId):
        """Extract region/language/version/revision/dump-status flags from the
        release name and store them."""
        for regionresult in self.regexes.get_regex_results("Region",releaseName):
            self.database.addReleaseFlagValue(releaseId,self.database.getReleaseFlag('Region'),regionresult.group('Region'))
        for languageresult in self.regexes.get_regex_results("Language",releaseName):
            self.database.addReleaseFlagValue(releaseId,self.database.getReleaseFlag('Language'),languageresult.group('Language'))
        versionresult = self.regexes.get_regex_result("Version",releaseName)
        if(versionresult):
            self.database.addReleaseFlagValue(releaseId,self.database.getReleaseFlag('Version'),versionresult.group('Version'))
        revisionresult = self.regexes.get_regex_result("Revision",releaseName)
        if(revisionresult):
            self.database.addReleaseFlagValue(releaseId,self.database.getReleaseFlag('Revision'),revisionresult.group('Revision'))
        baddumpresult = self.regexes.get_regex_result("DumpStatus",releaseName)
        if(baddumpresult):
            self.database.addReleaseFlagValue(releaseId,self.database.getReleaseFlag('BadDump'),baddumpresult.group('BadDump'))

    def import_scrapers(self):
        """Load scraper definitions from Scrapers/scrapers.csv (';'-separated)
        and import every scraped system/game/release/image.

        NOTE(review): scrapersfile is never closed — consider a with-block.
        """
        self.scrapers = []
        scrapersfile = io.open('Scrapers/scrapers.csv','r',encoding='utf-8')
        for scraperline in scrapersfile:
            scraperCols = scraperline.split(';')
            scraperId = self.database.getScraper(*scraperCols)
            scraper = Scraper(*scraperCols)
            for scraperSystemKey,scraperSystem in scraper.systems.items():
                print "exporting game data for " + scraper.name + " - " + scraperSystemKey
                scraperSystemId = self.database.getScraperSystem(scraperId,scraperSystem['systemName'],scraperSystem['systemAcronym'],scraperSystem['systemURL'])
                for game in scraperSystem['systemGames'].itervalues():
                    scraperGameId = self.database.getScraperGame(scraperSystemId,game['gameName'],game['gameUrl'])
                    if game['gameParsed']=='Yes':
                        for flag in game['softwareFlags']:
                            self.database.addScraperGameFlagValue(scraperGameId,flag['name'],flag['value'])
                        for release in game['releases']:
                            scraperReleaseId = self.database.getScraperRelease(scraperGameId,release['name'],release['region'],release['type'])
                            for flag in release['releaseFlags']:
                                self.database.addScraperReleaseFlagValue(scraperReleaseId,flag['name'],flag['value'])
                            for image in release['releaseImages']:
                                scraperReleaseImageId = self.database.getScraperReleaseImage(scraperReleaseId,image['name'],image['type'])
        self.database.save()

    def match_systems(self):
        """Store the matcher's synonym table and map systems to scraper systems."""
        for synonym in self.matcher.synonyms:
            self.database.addSynonym(synonym['key'],synonym['value'],synonym['type'])
        self.database.matchSystemScraperSystem()
        self.database.save()

    def match_softwares(self):
        """Fuzzy-match local software entries against scraper releases.

        First tries a 'Full' match at threshold 80, then 'Partial' at 86.
        """
        systems = self.database.getMappedSystems(1) ## scraperId 1 - GameFaqs
        for system in systems:
            print "Matching Softwares for System : " + system[1]
            releasegamelist = self.database.getScraperRelease2GameList(system[2])
            releaseDic = {r[0]:r[1] for r in releasegamelist}  # release id -> name
            gameDic = {r[0]:r[2] for r in releasegamelist}     # release id -> game id
            softwares = self.database.getSoftwareList(system[0])
            for software in softwares:
                scraperReleaseId = self.matcher.match_fuzzy(releaseDic,software[1],"Full",80)
                if scraperReleaseId == None:
                    scraperReleaseId = self.matcher.match_fuzzy(releaseDic,software[1],"Partial",86)
                if scraperReleaseId != None:
                    self.database.addSoftwareMatch(software[0],gameDic[scraperReleaseId])
        self.database.save()

    def match_releases(self):
        """Match local releases to scraper releases: unique candidates match
        directly, ambiguous ones go through fuzzy matching."""
        systems = self.database.getMappedSystems(1) ## scraperId 1 - GameFaqs
        for system in systems:
            print "Matching Releases for System : " + system[1]
            releaserows = self.database.getScraperReleaseList(system[0])
            for releaserow in releaserows:
                matches = self.database.getScraperGame2ReleaseList(releaserow[2],releaserow[3])
                if len(matches) == 1:
                    self.database.addReleaseMatch(releaserow[0],matches[0][0])
                elif len(matches) > 1:
                    releaseDic = {m[0]:m[1] for m in matches}
                    ## clean dat release Name to match it to scraper releaseName
                    releaseName = self.regexes.get_regex_result('Software',releaserow[1]).group("Name")
                    scraperReleaseId = self.matcher.match_fuzzy(releaseDic,releaseName,"Full",80)
                    self.database.addReleaseMatch(releaserow[0],scraperReleaseId)
        self.database.save()

    def match_software_flags(self):
        """Normalize scraped software flags (Developer/Genre/Franchise via the
        synonym table) and store non-empty values."""
        print "Importing software flags"
        softwareflags = self.database.getSoftwareFlagList()
        for flagrow in softwareflags:
            flagid = self.database.getSoftwareFlag(flagrow[1])
            flagvalue = ""
            if flagrow[1]=="Developer":
                flagvalue = self.database.getSynonym(flagrow[2],'Developer')
                flagvalue = self.regexes.get_cleaned_developer(flagvalue)
            elif flagrow[1]=="Genre":
                flagvalue = self.database.getSynonym(flagrow[2],'Genre')
            elif flagrow[1]=="Franchise":
                flagvalue = self.database.getSynonym(flagrow[2],'Franchise')
            if flagvalue != "" and flagvalue is not None:
                self.database.addSoftwareFlagValue(flagrow[0],flagid,flagvalue)
        self.database.save()

    def match_release_flags(self):
        """Normalize scraped release flags (Publisher/ReleaseDate/ProductID/
        BarCode) and store non-empty values."""
        print "Importing release flags"
        releaseflags = self.database.getReleaseFlagList()
        for flagrow in releaseflags:
            flagid = self.database.getReleaseFlag(flagrow[1])
            flagvalue = ""
            if flagrow[1]=="Publisher":
                # Publishers share the Developer synonym table.
                flagvalue = self.database.getSynonym(flagrow[2],'Developer')
                flagvalue = self.regexes.get_cleaned_developer(flagvalue)
            elif flagrow[1]=="ReleaseDate":
                flagvalue = self.regexes.get_cleaned_date(flagrow[2])
            elif flagrow[1]=="ProductID":
                flagvalue = flagrow[2]
            elif flagrow[1]=="BarCode":
                flagvalue = flagrow[2]
            if flagvalue != "" and flagvalue is not None:
                self.database.addReleaseFlagValue(flagrow[0],flagid,flagvalue)
        self.database.save()

    def export_gamedbflags(self):
        """Export selected flags per system as RDB DAT files.

        PSP/PS1 systems key their ROMs by serial (quoted); everything else
        keys by CRC. ReleaseDate is split into month/year variants.
        """
        lstFlags = [('Developer','developer','software'), \
            ('Franchise','franchise','software'), \
            ('Genre','genre','software'), \
            ('Publisher','publisher','release'), \
            ('ProductID','serial','release'), \
            ('ReleaseDate','releasemonth','release'), \
            ('ReleaseDate','releaseyear','release')]
        for flagtuple in lstFlags:
            flag = {}
            flag['srcName'] = flagtuple[0]
            flag['destName'] = flagtuple[1]
            flag['systems'] = []
            systemrows = self.database.getSystemDic()
            for systemId, systemName in systemrows.iteritems():
                print "Exporting {0} flag {1} for system {2}".format(flagtuple[2],flagtuple[1],systemName)
                system = {}
                system['name'] = systemName
                system['roms'] = []
                rows = self.database.getSystemFlagValues(systemId,flag['srcName'])
                if systemName in ['Sony - PlayStation Portable','Sony - PlayStation']:
                    # serial-keyed systems: drop rows without a serial
                    rows = [r for r in rows if r[1] is not None]
                for row in rows:
                    rom = {}
                    rom['name'] = row[2]
                    if systemName in ['Sony - PlayStation Portable','Sony - PlayStation']:
                        rom['key'] = 'serial'
                        rom['keyvalue'] = '"' + row[1] + '"'
                    else:
                        rom['key'] = 'crc'
                        rom['keyvalue'] = row[0]
                    if flag['destName']=="releasemonth":
                        rom['flagvalue'] = str(datetime.strptime(row[3],'%Y-%m-%d %H:%M:%S').month)
                    elif flag['destName']=="releaseyear":
                        rom['flagvalue'] = str(datetime.strptime(row[3],'%Y-%m-%d %H:%M:%S').year)
                    else:
                        rom['flagvalue'] = row[3]
                    system['roms'].append(rom)
                if len(system['roms']) > 0:
                    system['roms'].sort(key=lambda x: x["name"])
                    flag['systems'].append(system)
            self.exporter.export_rdb_dat(flag)

    def export_rdbs(self):
        """Create one RDB per system, keyed by serial for PSP/PS1, else CRC."""
        systemrows = self.database.getSystemDic()
        for systemId,systemname in systemrows.iteritems():
            print "Exporting rdb for " + systemname
            if systemname in ['Sony - PlayStation Portable','Sony - PlayStation']:
                key = 'serial'
            else:
                key = 'rom.crc'
            self.exporter.create_rdb(systemname,key)

    def apply_patches(self,stage):
        """Generate and run the SQL patch script for the given stage."""
        patchname = "patch_" + stage + ".sql"
        self.patcher.GenerateScript(patchname,stage)
        self.database.run_script(patchname)
        print "Patch " + stage + " applied"
def test_getBestResultsMatchingWithUnicode(self): results = [{'SearchKey': [u'スーパー競輪']}] gamename = u'スーパー競輪' m = Matcher() x = m.getBestResults(results, gamename) self.assertTrue(x.get('SearchKey')[0] == u'スーパー競輪', "Expected matching unicode strings to match")
class VisualOdometry(object):
    """Monocular visual odometry over an image sequence.

    Per frame: extract FAST keypoints, track them from the previous frame,
    RANSAC-filter the correspondences with a fundamental-matrix model,
    derive the essential matrix, recover the relative pose, and integrate
    it into the global rotation/translation. A KITTI-format ground-truth
    pose file supplies the absolute translation scale.
    """

    def __init__(self, camera_params, odometry_poses):
        # State from the previous frame; None until the first frame is seen.
        self.prev_frame_extracts = None
        self.prev_frame = None
        # Last estimated essential / fundamental matrices.
        self.E = None
        self.F = None
        # Accumulated global pose: translation vector and rotation matrix.
        self.t = np.zeros(3)
        self.R = np.eye(3)
        self.kpe = KeyPointsExtractor(extractor='orb', detector='fast', num_points=5000, quality=0.001, min_dist=3)
        self.matcher = Matcher()
        self.tracker = Tracker()
        self.w = camera_params['frame_width']
        self.h = camera_params['frame_height']
        # self.f = camera_params['focal_length']
        # NOTE(review): intrinsics are hard-coded (these look like KITTI
        # sequence calibration values) instead of read from camera_params —
        # confirm this is intentional.
        self._init_camera_intrinsic(fx=718.8560, fy=718.8560, cx=607.1928, cy=185.2157)
        # Ground-truth poses, one line per frame (KITTI odometry format).
        with open(odometry_poses) as f:
            self.odometry_poses = f.readlines()

    def _init_camera_intrinsic(self, fx, fy, cx, cy):
        """Store focal lengths / principal point and build the 3x3 K matrix."""
        self.cx = cx
        self.cy = cy
        self.fx = fx
        self.fy = fy
        self.K = np.array([[fx, 0, cx],
                           [0, fy, cy],
                           [0, 0, 1]])

    def _E_from_F(self, F):
        """Essential matrix from fundamental matrix: E = K^T F K."""
        return self.K.T.dot(F).dot(self.K)

    def draw_matches(self, img, matches):
        """Draw each tracked correspondence as a dot at the current point
        plus a line to its previous-frame position; returns a BGR image."""
        # Promote grayscale input to BGR so colored overlays are visible.
        if img.shape[-1] == 1 or len(img.shape) == 2:
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        for key_point, prev_key_point in matches:
            x, y = key_point.astype(np.int32)
            x_prev, y_prev = prev_key_point.astype(np.int32)
            cv2.circle(img, (x, y), radius=1, color=(0, 255, 0), thickness=-1)
            # cv2.circle(img, (x_prev, y_prev), radius=1, color=(0, 255, 0), thickness=-1)
            cv2.line(img, (x, y), (x_prev, y_prev), color=(255, 0, 0), thickness=1)
        return img

    @staticmethod
    def show_img(img, img_title):
        """Display an image in a named OpenCV window (non-blocking)."""
        cv2.imshow(img_title, img)
        # print(img.shape)

    def match(self, kps2, kps1, descs2, descs1):
        """Descriptor-based matching with Lowe's ratio test (0.75);
        returns a list of (prev_point, cur_point) pixel-coordinate pairs."""
        matches = self.matcher.match(descs1, descs2)
        good_matches = []
        for m, n in matches:
            # Keep a match only if the best candidate is clearly better
            # than the second best (ratio test).
            if m.distance < 0.75 * n.distance:
                kp1 = kps1[m.queryIdx].pt
                kp2 = kps2[m.trainIdx].pt
                good_matches.append((kp1, kp2))
        # img3 = cv2.drawMatches(img, key_points, self.prev_frame, self.prev_frame_extracts['key_points'],
        #                        good_matches, None, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
        # cv2.imshow('matches', img3)
        print("good matches: {}".format(len(good_matches)))
        return good_matches

    def track(self, prev_frame, cur_frame, fts_to_track):
        """Optical-flow tracking of features from prev_frame into cur_frame;
        returns (prev_point, cur_point) pairs."""
        fts1, fts2 = self.tracker.track(prev_frame, cur_frame, fts_to_track)
        good_matches = [(ft1, ft2) for ft1, ft2 in zip(fts1, fts2)]
        return good_matches

    def getAbsoluteScale(self, frame_id):
        """Translation magnitude between ground-truth poses of frames
        frame_id-1 and frame_id (KITTI pose rows: elements 3/7/11 are the
        x/y/z translation of the 3x4 pose matrix)."""
        # specialized for KITTI odometry dataset
        ss = self.odometry_poses[frame_id - 1].strip().split()
        x_prev = float(ss[3])
        y_prev = float(ss[7])
        z_prev = float(ss[11])
        ss = self.odometry_poses[frame_id].strip().split()
        x = float(ss[3])
        y = float(ss[7])
        z = float(ss[11])
        return np.sqrt((x - x_prev) * (x - x_prev) + (y - y_prev) * (y - y_prev) + (z - z_prev) * (z - z_prev))

    def process_frame(self, img, img_index):
        """Process one frame: track features, estimate pose, update the
        global trajectory. Returns (annotated image, current translation)."""
        # keypoints extraction and descriptors computation
        # key_points, descriptors = self.kpe.extract(img)
        key_points = self.kpe.extract_fast(img)
        print("keypoints: {}".format(len(key_points)))
        good_matches = None
        if self.prev_frame_extracts is not None and self.prev_frame is not None:
            # matching
            # good_matches = self.match(key_points, self.prev_frame_extracts['key_points'],
            #                           descriptors, self.prev_frame_extracts['descriptors'])
            good_matches = self.track(self.prev_frame, img, self.prev_frame_extracts['key_points'])
            # filtering: keep only correspondences consistent with a single
            # fundamental matrix (8-point model, RANSAC).
            good_matches = np.array(good_matches)
            model, inliers = ransac((good_matches[:, 0], good_matches[:, 1]),
                                    FundamentalMatrixTransform,
                                    min_samples=8, residual_threshold=1, max_trials=100)
            good_matches = good_matches[inliers]
            print("Number of keypoints after filtering: {}".format(len(good_matches)))
            self.F = model.params
            self.E = self._E_from_F(self.F)
            # self.E, mask = cv2.findEssentialMat(good_matches[:, :, 0], good_matches[:, :, 1],
            #                                     focal=self.fx, pp=(self.cx, self.cy), method=cv2.RANSAC,
            #                                     prob=0.999, threshold=1.0)
            # pose estimation
            # TODO wtf is going on if write [:, :, 0] instead of [:, 0]??????
            _, R, t, mask = cv2.recoverPose(self.E, good_matches[:, 0], good_matches[:, 1],
                                            focal=self.fx, pp=(self.cx, self.cy))
            absolute_scale = self.getAbsoluteScale(img_index)
            # Only integrate the motion when the ground truth shows real
            # movement; tiny scales amplify rotation/translation noise.
            if absolute_scale > 0.1:
                self.t = self.t + absolute_scale * self.R.dot(t.reshape(3))
                self.R = R.dot(self.R)
            # self.t = self.t + self.R.dot(t.reshape(3))
            # self.R = R.dot(self.R)
        # self.prev_frame_extracts = {'key_points': key_points, 'descriptors': descriptors}
        self.prev_frame_extracts = {'key_points': key_points}
        self.prev_frame = img
        # First frame: no matches to draw, but still return a BGR image so
        # the caller's display path is uniform.
        if img_index == 0:
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        if good_matches is not None:
            img = self.draw_matches(img, good_matches)
        return img, self.t
def __init__(self, i3) -> None: """ Init function Main part is in self.initialize, which performs initialization itself. Args: i3: i3ipc connection """ # Initialize superclasses. cfg.__init__(self, i3) Matcher.__init__(self) # i3ipc connection, bypassed by negi3wm runner. self.i3ipc = i3 # map of tag to the tagged windows. self.tagged = {} # current_position for the tag [tag] self.current_position = {} # list of windows which fullscreen state need to be restored. self.restore_fullscreen = [] # is the current action caused by user actions or not? It's needed for # corrent fullscreen on/off behaviour. self.interactive = True # how many attempts taken to find window with priority self.repeats = 0 # win cache for the fast matching self.win = None # used for subtag info caching self.subtag_info = {} # Should the special fullscreen-related actions to be performed or not. self.need_handle_fullscreen = True # Initialize i3tree = self.i3ipc.get_tree() # prepare for prefullscreen self.fullscreened = i3tree.find_fullscreen() # store the current window here to cache get_tree().find_focused value. self.current_win = i3tree.find_focused() # winlist is used to reduce calling i3.get_tree() too many times. self.winlist = i3tree.leaves() for tag in self.cfg: self.tagged[tag] = [] self.current_position[tag] = 0 # tag all windows after start self.tag_windows(invalidate_winlist=False) self.bindings = { "next": self.go_next, "subtag": self.go_subtag, "add_prop": self.add_prop, "del_prop": self.del_prop, "reload": self.reload_config, } self.i3ipc.on('window::new', self.add_wins) self.i3ipc.on('window::close', self.del_wins) self.i3ipc.on("window::focus", self.set_curr_win) self.i3ipc.on("window::fullscreen_mode", self.handle_fullscreen)
class Tek11801(object): screenres = 552, 704 touchres = 11,22 expected_screenshot_size = (screenres[0] * screenres[1]) / 2 + 800 def __init__(self, f): self.f = f self.timing = False self.mode = None self.verbose = 0 self.failures = 0 time.sleep(2) self.resp_matcher = Matcher(r''' ((?P<key>[A-Z0-9/.]+):)? (?P<value> ("[^"]*") | ([^,]*) ) ''', re.VERBOSE) self.sfr_sma_matcher = Matcher('SFREQUENCY (?P<freq>[-+.0-9E]+),EQ;' + 'SMAGNITUDE (?P<mag>[-+.0-9E]+),EQ') self.init() def close(self): if self.f: self.f.close() self.f = None def junk(self): self.f.settimeout(0.1) while 1: d = self.f.readline() if not d: break if self.verbose >= 2: print 'junk', repr(d) def cmd(self, cmd): self.junk() if self.verbose >= 1: print 'cmd', repr(cmd) if self.timing: self.t0 = time.time() self.f.write(cmd + '\r') self.f.flush() def query(self, query, timeout = 1.0): self.cmd(query) self.f.settimeout(timeout) while 1: resp = self.f.readline() if self.verbose >= 1: print 'resp', repr(resp) if not resp: break resp = resp.rstrip() if resp and resp != '\xff': break if self.timing: self.t1 = time.time() self.elapsed = self.t1 - self.t0 print 'elapsed', self.elapsed return resp def init(self): self.f.settimeout(1.0) while 1: d = self.f.read() if not d: break if self.verbose >= 2: print 'junk', repr(d) self.avg = 0 self.id = self.query('ID?') assert self.id.startswith('ID TEK/11801') # self.cmd('BITMAP DATAFORMAT:TIFF,DATACOMPRESS:OFF,DIR:HORIZ,FORMAT:SCREEN') self.cmd('BITMAP DATAFORMAT:TIFF,DATACOMPRESS:OFF,DIR:VERT,FORMAT:SCREEN') def set_avg(self, avg): self.avg = avg self.cmd('NAVG %u' % self.avg) def copy(self, cb = None): self.cmd('COPY') if self.timing: self.t0 = time.time() self.f.settimeout(3.0) data = '' while 1: d = self.f.read(1024) if cb: cb(d) if not d: break data += d if len(data) > 1024: self.f.settimeout(1.0) if self.timing: self.t1 = time.time() self.elapsed = self.t1 - self.t0 print 'elapsed', self.elapsed assert data[-5:] == '\r\r\n\r\n' data = data[:-5] return data def 
copy_pil(self, cb = None): data = self.copy(cb) f = StringIO(data) image = Image.open(f) image = image.rotate(90, expand = 1) return image def vpcurve(self): resp = self.query('WFMPRE?;VPCURVE?') curves = resp.split(';') for curve in curves[:3]: print curve def init_freq_mag(self, local = True, scope = True): self.scope = scope self.cmd('REMOVE ALLTRACE') if self.scope: self.cmd('DISPLAY GRATICULE:DUAL') else: self.cmd('DISPLAY GRATICULE:SINGLE') if 1: self.cmd('DISPLAY TYPE:NORMAL') else: self.cmd('DISPLAY TYPE:VARIABLE') self.cmd('DISPLAY PERSISTENCE:0.3') if self.avg: self.cmd('NAVG %u' % self.avg) self.cmd('TRACE1 DESCRIPTION:"AVG(M5)",WFMCALC:FAST') if self.scope: self.cmd('TRACE2 DESCRIPTION:"FFTMAG(AVG(M5))",WFMCALC:HIPREC') else: self.cmd('TRACE1 DESCRIPTION:"M5",WFMCALC:FAST') if self.scope: self.cmd('TRACE2 DESCRIPTION:"FFTMAG(M5)",WFMCALC:HIPREC') self.cmd('ADJTRACE1 PANZOOM:OFF,GRLOCATION:UPPER,COLOR:1') if self.scope: self.cmd('ADJTRACE2 PANZOOM:OFF,GRLOCATION:LOWER,COLOR:2,VPOSITION:-5.0E+1,VSIZE:1.0E+1') time.sleep(3) def set_tb(self, tb): self.cmd('TBMAIN TIME:%.6g' % tb) def set_span(self, freq, l = 512): span = 1.33 * freq t = 25.0 * 500.0 / span * l / 512 / 512 self.cmd('TBMAIN LENGTH:%u,TIME:%.6g' % (l, t)) def parse_resp(self, s): a = [] i = 0 while 1: # print i, s[i:] j = s.index(' ', i) name = s[i:j] i = j + 1 a2 = [] while 1: match = self.resp_matcher.match(s, i) if not match: break j = self.resp_matcher.end() # print i, s[i:j] i = j if s[i:i+1] != ',': break i += 1 key, value = self.resp_matcher.group('key', 'value') if value.startswith('"'): assert value.endswith('"') value = value[1:-1] if key: a2.append((key, value)) else: a2.append(value) a.append((name, a2)) if s[i:i+1] != ';': break i += 1 assert i == len(s) # if i != len(s): # print left, s[i:] return a def capture(self): if self.avg: self.cmd('CONDACQ TYPE:AVG') else: self.cmd('CONDACQ TYPE:RECORD') self.cmd('CONDACQ WAIT') def get_waveform(self, trace, *args, **kwargs): s = 
self.query('OUTPUT %s; WAV?' % trace, *args, **kwargs) a = self.parse_resp(s) assert a[0][0] == 'WFMPRE' wfmpre = dict(a[0][1]) assert wfmpre['WFID'] == trace assert a[1][0] == 'CURVE' assert a[1][1][0] == ('CRVID', trace) samples = numpy.array(a[1][1][1:]) samples = samples.astype(numpy.float) samples = samples * float(wfmpre['YMULT']) samples = samples + float(wfmpre['YZERO']) sample_start = float(wfmpre['XZERO']) sample_spacing = float(wfmpre['XINCR']) return samples, sample_start, sample_spacing def fft(self, samples, spacing): # samples = samples * scipy.signal.blackmanharris(len(samples), False) freqs = numpy.fft.rfftfreq(len(samples), spacing) fft = numpy.fft.rfft(samples) return freqs, fft def find_peak(self, freqs, values): if 1: # Noisy signals have a lot of energy in the lowest # and highest frequencies so skip those. i = numpy.argmax(abs(values[10:-10])) + 10 else: i = numpy.argmax(abs(values)) freq = freqs[i] # Bin spacing spacing = freqs[1] - freqs[0] # Two algorithms for improving the frequency estimate for a # peak in a FFT from "On Local Interpolation of DFT Outputs" # by Eric Jacobsen, EF Data Corporation, EDICS 3.1.1. Do not # apply any windowing on the FFT, it will not work well. 
if 1: # Jacobsen if self.verbose >= 2: print values[i-1:i+1+1] fadj = ((values[i-1] - values[i+1]) / (2*values[i] - values[i-1] - values[i+1])).real else: # Quinn if self.verbose >= 2: print values[i-1:i+1+1] a1 = (values[i-1] / values[i]).real A2 = (values[i+1] / values[i]).real d1 = a1 / (1 - a1) d2 = -a2 / (1 - a2) if d1 > 0 and d2 > 0: fadj = d2 else: fadj = d1 freq += fadj * spacing # Algorithm for improving magnitude estimate from a FFT # https://www.dsprelated.com/showarticle/155.php value = values[i] value -= 0.94247 * (values[i-1] + values[i+1]) value += 0.44247 * (values[i-2] + values[i+2]) return freq, value def get_freq_mag(self, trace = None): if self.scope: return self.get_freq_mag_scope(trace) else: return self.get_freq_mag_local(trace) def get_freq_mag_local(self, trace = None): if trace == None: trace = 'TRACE1' self.samples, self.samples_start, self.samples_spacing = self.get_waveform(trace, timeout = 5.0) self.fft_freqs, values = self.fft(self.samples, self.samples_spacing) ofs = 10 - u_db(len(values)) self.fft_mags = u_db(abs(values)) + ofs freq, value = self.find_peak(self.fft_freqs, values) mag, phase = cmath.polar(value) mag = u_db(mag) + ofs return freq, mag, phase def get_freq_mag_scope(self, trace = None): if trace == None: trace = 'TRACE2' for i in range(5): resp = self.query('OUTPUT %s; SFR?;SMA?' 
% trace, timeout = 3.0) if self.sfr_sma_matcher.match(resp): break else: return 0.0, 0.0 freq = float(self.sfr_sma_matcher.group('freq')) mag = float(self.sfr_sma_matcher.group('mag')) return freq, mag def init_freq_power(self, channel): self.channel = channel self.trace = 'TRACE1' self.cmd('REMOVE ALLTRACE') self.cmd('DISPLAY GRATICULE:SINGLE') self.cmd('DISPLAY TYPE:NORMAL') self.cmd('%s DESCRIPTION:"%s",WFMCALC:FAST' % ( self.trace, self.channel)) self.cmd('ADJ%s PANZOOM:OFF,GRLOCATION:UPPER,COLOR:1' % self.trace) # TODO should set the vertical scale to something suitable def measure_freq_power(self, freq): self.set_span(1.0 * freq) self.capture() samples, sstart, sspacing = self.get_waveform(self.trace, timeout = 5.0) fft_freqs, fft_powers = self.fft(samples, sspacing) fft_powers *= 10 ** 0.5 fft_powers /= len(fft_powers) mfreq, mpower = self.find_peak(fft_freqs, fft_powers) return mfreq, mpower
def the_sim(self, X, Y): assert (not isinstance(self.measure, str) or not isinstance(self.measure, list)), "not implemented: \ change distance \n distance must be a distance object with node_sim and edge_sim methods" return Matcher.the_sim(self, X, Y)
def setUp(self): self.m = Matcher('AAGCAGTGGTATCAACGCAGAGTACGCGGG')