Example No. 1
def match_test(src, tmp, is_pass, is_verbose=False):
    """
    Template test - normal template.
    """
    print('\n[TEST] AdMatch class test - template: ' + tmp)
    # Legacy AdMatch-based implementation, kept for reference:
    # ad_match = AdMatch(
    #     src,
    #     tmp,
    #     is_debug=is_verbose,
    #     count_time=2,
    # )
    # ad_match.process()
    # print("[TEST] Total detection time: " + str(ad_match.total_run_time) + "s.")
    # if ad_match.match_judge == is_pass:
    #     print("[TEST] Target: " + src + " passed!")
    #     if is_verbose:
    #         ad_match.show(need_match_raw_result=False)
    #     return True
    # else:
    #     print("[TEST] Target: " + src + " failed!")
    #     if is_verbose:
    #         ad_match.show(need_match_raw_result=False)
    #     return False
    match = Matcher(src, tmp).run_immediate()
    if match == is_pass:
        print("[TEST] Target: " + src + " passed!")
        return True
    else:
        print("[TEST] Target: " + src + " failed!")
        return False
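A minimal usage sketch; the file paths below are placeholders rather than assets from the original project, and Matcher must be importable in the calling module:

if __name__ == '__main__':
    # Hypothetical source image and template paths.
    ok = match_test('samples/ad_screenshot.png', 'templates/normal.png', is_pass=True)
    print('[TEST] Overall result: ' + ('pass' if ok else 'fail'))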
Example No. 2
    def test_tree(self):
        """
        Full test of tree construction and querying.
        """
        L = 4   # number of levels in the tree (depth)
        C = 10  # branching factor (children per node)
        dataset = "bottles"  # one of {"bottles", "books", "paintings"}
        (image_names, image_descriptors, image_keypoints) = \
            load_data('database', dataset)
        (q_ids, q_descriptors, q_kps) = load_data('query', dataset, 4)
        features = []
        for feats in image_descriptors:
            features += [np.array(fv, dtype='float32') for fv in feats]
        features = np.vstack(features)
        treeArray = constructTree(C, L, features)
        t = Tree(C, L, treeArray)
        t.build_tree(image_names, image_descriptors)
        t.set_lengths()
        matcher = Matcher(image_descriptors, image_keypoints, image_names)
        matcher.update_tree(t)
        matcher.add_queries(q_descriptors, q_kps)
        result = matcher.query(4)
        print(result)
        result_im = str(result[0][0][0])
        self.assertEqual(result_im, '004.jpg')
Example No. 3
    def _scan_path(self):
        self._scan_down_set(False)
        self._clear_paths()
        M = Matcher(asterisk_match_len_blacklist=tuple())
        for p in self._env_paths:
            try:
                ls = self._list_dir(p)
                assert next(ls) == 'init'
            except FileNotFoundError:
                continue
            except Exception as ex:
                print('[WARN] Failed to scan ' + p + ': ' + str(ex))
                continue
            for pp in ls:
                abspath = os.path.join(p, pp[1]).replace('\\', '/')
                if pp[0] == 1:
                    self._all_dirs.append(abspath)
                elif pp[0] == 0:
                    for suf in self._allow_suffixs:
                        M.set_substr(suf)
                        if '.' not in pp[1]:
                            s = ''
                        else:
                            s = pp[1].split('.')[-1]
                        if M.is_match(s):
                            self._all_files.append(abspath)
                    # if os.path.splitext(pp)[1] in self._allow_suffixs:
                    #     self._all_files.append(abspath)
        self._all_dirs = list(set(self._all_dirs))
        self._all_files = list(set(self._all_files))
        self._scan_down_set(True)
Example No. 4
    def generate_matcher(self):
        if not self.loaded_image_items:
            raise ValueError('Please load images first')

        matcher = Matcher(self.loaded_image_items, self.width, self.height)
        # Release the images; set to None (not del) so a second call hits the
        # ValueError above instead of an AttributeError.
        self.loaded_image_items = None
        return matcher
Example No. 5
    def test_getBestResultsWithBrackets(self):
        results = [{'SearchKey': ['FIFA 98']}, {'SearchKey': ['FIFA 97']}, {'SearchKey': ['FIFA 2001']}]
        gamename = 'FIFA \'98 (1998) [Electronic Arts]'

        m = Matcher()
        x = m.getBestResults(results, gamename)
        self.assertEqual(x.get('SearchKey')[0], 'FIFA 98')
Example No. 6
    def process_image(self, window, file, output_type):
        """
        Process input image.
        :param window: PyQt window
        :param file: the name of the input file
        :param output_type: the type of the output
        :return: the name of the output file
        """
        image = read_image_bgr(file.full_path)
        objects_detected = self.detect_objects(image)
        output_name = file.get_output_name(output_type)

        if output_type == OutputType.BORDERS:
            output_image = self.draw_bounding_boxes(image, objects_detected)

        elif output_type == OutputType.PANORAMA:
            objects_cropped = self.crop_objects(image, objects_detected)
            matcher = Matcher(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
            matcher.process_objects(0, objects_cropped)
            matcher.save_objects(output_name.split('.')[0] + '-dir/', 0, objects_cropped)
            output_image = matcher.get_panorama(InputType.IMAGE)
        else:
            raise ValueError('Unsupported output type: ' + str(output_type))

        # Save output image to tmp/
        image = Image.fromarray(output_image)
        image.save(output_name)

        # Visualize output image
        window.actualize_output_label(npimg_to_pixmap(output_image))

        return output_name
Example No. 7
    def test_getBestResultsWithRomanNumerals(self):
        results = [{'SearchKey': ['Tekken 2']}, {'SearchKey': ['Tekken 3']}, {'SearchKey': ['Tekken IV']}]
        gamename = 'Tekken II'

        m = Matcher()
        x = m.getBestResults(results, gamename)
        self.assertEqual(x.get('SearchKey')[0], 'Tekken 2')
Example No. 8
 def test_getBestResultsNonMatchingWithUnicode(self):
     results = [{'SearchKey': [u'スーパー競輪']}]
     gamename = 'Super Test Game'
     m = Matcher()
     x = m.getBestResults(results, gamename)
     self.assertIsNone(
         x, "Expected non-matching strings to not match, including unicode")
Example No. 9
def findValidWords(grid, rows, cols):
    valid_words = []

    for i in range(len(grid)):
        for j in range(len(grid[0])):
            compass = Compass()
            sequence_matcher = Matcher(grid[i][j])

            # For each index in the character grid, collect all valid words
            # starting at that index in all 8 directions: left, right, up,
            # down, and the four diagonals.
            for _ in range(8):
                x, y = i, j

                # Current change in rows (i) and columns (j) from the compass
                # (the compass automatically advances to the next direction).
                iChange, jChange = compass.get_curr_index_changes()

                # compute character sequence in current direction
                char_sequence = []
                while isWithinRange(x, y, rows, cols):
                    char_sequence.append(grid[x][y])
                    x += iChange
                    y += jChange
                # compute correct matches
                correct_matches = sequence_matcher.process_sequence(char_sequence)
                valid_words.extend(correct_matches)

    return valid_words
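A small usage sketch, assuming Compass, Matcher, and isWithinRange come from the surrounding module and that Matcher already knows the dictionary of valid words; the grid below is invented for illustration:

# Hypothetical 3x3 character grid.
grid = [
    ['c', 'a', 't'],
    ['o', 'r', 'a'],
    ['d', 'o', 'g'],
]
print(findValidWords(grid, rows=3, cols=3))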
Example No. 10
    def __init__(self, sess):
        """
        initialize SSD model as SSD300 whose input size is  300x300
        """
        self.sess = sess

        # define input placeholder and initialize ssd instance
        self.input = tf.placeholder(shape=[None, 300, 300, 3], dtype=tf.float32)
        ssd = SSD()

        # build ssd network => feature-maps and confs and locs tensor is returned
        fmaps, confs, locs = ssd.build(self.input, is_training=True)

        # zip running set of tensor
        self.pred_set = [fmaps, confs, locs]

        # required param from default-box and loss function
        fmap_shapes = [fmap.get_shape().as_list() for fmap in fmaps]
        # print('fmap shapes is '+str(fmap_shapes))
        self.dboxes = generate_boxes(fmap_shapes)
        print(len(self.dboxes))

        # required placeholder for loss
        loss, loss_conf, loss_loc, self.pos, self.neg, self.gt_labels, self.gt_boxes = ssd.loss(len(self.dboxes))
        self.train_set = [loss, loss_conf, loss_loc]
        optimizer = tf.train.AdamOptimizer(0.05)
        self.train_step = optimizer.minimize(loss)

        # provides matching method
        self.matcher = Matcher(fmap_shapes, self.dboxes)
Example No. 11
def api_get(event, context):
    """
    /api/{controller}/{method}/{c}/{d}
    /api/{controller}/{method}/{c}
    :param event:
    :param context:
    :return:
    """
    from matcher import Matcher
    res, msg = Matcher().match(
        ingest_job_id='123456789',
        file_path='https://hotel-api-downloads.s3.us-east-2.amazonaws.com/MatchingTest.xlsx',
        sheet_name='Transaction Template')

    return {
        'statusCode': 200,
        'headers': {
            'Access-Control-Allow-Origin': "*",
            'Access-Control-Allow-Credentials': True,
            'Access-Control-Allow-Methods': "*",
            'Access-Control-Allow-Headers': "*",
        },
        'body': json.dumps({
            'success': res,
            'message': msg
        })
    }
Example No. 12
def main():
    argument_parser = get_argument_parser()
    arguments = argument_parser.parse_args()

    lnd = Lnd(arguments.lnddir, arguments.grpc)

    if arguments.electrum_server:
        Electrum.set_server(arguments.electrum_server)

    if not os.path.exists(arguments.config):
        debug("Config file not found")
        return False

    config = configparser.ConfigParser(
        converters={'list': lambda x: [i.strip() for i in x.split(',')]})
    config.read(arguments.config)

    matcher = Matcher(lnd, config)

    channels = lnd.get_channels()
    for channel in channels:
        policy = matcher.get_policy(channel)

        (new_base_fee_msat, new_fee_ppm) = policy.execute(channel)

        # Default to None so unset fees never register as "changed".
        current_base_fee_msat = None
        current_fee_ppm = None
        if channel.chan_id in lnd.feereport:
            (current_base_fee_msat,
             current_fee_ppm) = lnd.feereport[channel.chan_id]

        fee_ppm_changed = new_fee_ppm and current_fee_ppm != new_fee_ppm
        base_fee_changed = new_base_fee_msat and current_base_fee_msat != new_base_fee_msat
        is_changed = fee_ppm_changed or base_fee_changed

        if is_changed or arguments.verbose:
            print(
                fmt.col_lo(fmt.print_chanid(channel.chan_id).ljust(14)) +
                fmt.print_node(lnd.get_node_info(channel.remote_pubkey)))

        if is_changed and not arguments.dry_run:
            lnd.update_chan_policy(channel.chan_id, new_base_fee_msat,
                                   new_fee_ppm)

        if is_changed or arguments.verbose:
            print("  policy:        %s" % fmt.col_hi(policy.name))
            print("  strategy:      %s" %
                  fmt.col_hi(policy.config.get('strategy')))
            if new_base_fee_msat is not None:
                s = ''
                if base_fee_changed:
                    s = ' ➜ ' + fmt.col_hi(new_base_fee_msat)
                print("  base_fee_msat: %s%s" %
                      (fmt.col_hi(current_base_fee_msat), s))
            if new_fee_ppm is not None:
                s = ''
                if fee_ppm_changed:
                    s = ' ➜ ' + fmt.col_hi(new_fee_ppm)
                print("  fee_ppm:       %s%s" %
                      (fmt.col_hi(current_fee_ppm), s))

    return True
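For reference, a minimal sketch of how the 'list' converter registered above behaves; the section and option names are invented for illustration, not taken from this project's config format:

import configparser

config = configparser.ConfigParser(
    converters={'list': lambda x: [i.strip() for i in x.split(',')]})
config.read_string('[example]\nnodes = alice, bob, carol\n')
print(config.getlist('example', 'nodes'))  # -> ['alice', 'bob', 'carol']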
Example No. 13
def test_matcher_basic_fairflow():
    reviewers = ['reviewer1', 'reviewer2', 'reviewer3']
    papers = ['paper1', 'paper2', 'paper3']

    scores = [
        (paper, reviewer, random.random())
        for paper, reviewer in itertools.product(papers, reviewers)
    ]

    minimums = [1, 1, 1]
    maximums = [1, 1, 1]
    demands = [1, 1, 1]

    test_matcher = Matcher(
        {
            'reviewers': reviewers,
            'papers': papers,
            'scores_by_type': {'affinity': {'edges': scores}},
            'weight_by_type': {'affinity': 1},
            'minimums': minimums,
            'maximums': maximums,
            'demands': demands,
            'num_alternates': 1
        },
        solver_class='FairFlow'
    )

    test_matcher.run()

    assert test_matcher.solution.any()
    assert test_matcher.assignments
    assert test_matcher.alternates
Example No. 14
    def _find_xcode_header(self, file_path):
        with open(file_path, 'r') as file:
            content = file.read()
        header = Matcher(content, trim_new_lines=self.args.trim).match()
        if header is None:
            return False, None

        return True, content.replace(header, '')
Example No. 15
    def test_getBestResultsWithApostropheAndYear(self):
        results = [{'SearchKey': ['FIFA 98']}, {'SearchKey': ['FIFA 97']}, {'SearchKey': ['FIFA 2001']}]
        gamename = 'FIFA \'98'

        m = Matcher()
        x = m.getBestResults(results, gamename)
        self.assertTrue(x.get('SearchKey')[0] == 'FIFA 98',
                        "Expected to match title (was {0})".format(x.get('SearchKey')[0]))
Example No. 16
 def test_getBestResultsMatchingWithUnicode(self):
     results = [{'SearchKey': [u'スーパー競輪']}]
     gamename = u'スーパー競輪'
     m = Matcher()
     x = m.getBestResults(results, gamename)
     self.assertTrue(
         x.get('SearchKey')[0] == u'スーパー競輪',
         "Expected matching unicode strings to match")
Example No. 17
    def describe(self, instance):
        camera = self.ids['camera']
        matcher = Matcher(camera)
        score, img = matcher.match()

        sound = SoundLoader.load('audio/camera_shutter_snap.mp3')
        sound.play()

        self.res_label = img + ': ' + str(score)
Example No. 18
    def match(self, object1, object2):
        """
        :type    object1 Data
        :type    object2 Data
        :rtype:  Matcher
        """

        matcher = Matcher(object1, object2)
        matcher.match()

        return matcher
Example No. 19
def main():
    seq = []
    images = glob.glob(path + '*.tif')
    for i in images:
        image = cv2.imread(i, cv2.IMREAD_GRAYSCALE)
        seq.append(image)

    preprocessor = Preprocessor(seq)
    detector = Detector(preprocessor)
    matcher = Matcher(detector)
    drawer = Drawer(matcher, preprocessor)

    masks = preprocessor.get_masks()

    print('Generating all frames and cell states...')
    drawer.load()
    print('Successfully loaded all images')

    # Save all generated images and their masks to disk
    counter = 1
    for g in drawer.get_gen_images():
        annotated = cv2.imwrite(path + f'gen/{counter}.tif', g)
        mask = cv2.imwrite(path + f'gen/{counter}_mask.tif',
                           masks[counter - 1])
        if not annotated or not mask:
            print(f'Failed to save frame {counter}')
        counter += 1
    print('Saved all images')

    # Now standby for user to issue commands for retrieval
    while True:
        string = input(
            'Input a frame and cell ID (optional) separated by a space...\n')
        if string:
            string = string.split(' ')
            frame = int(string[0])
            if len(string) > 1:
                try:
                    cell_id = int(string[1])
                    display_image = drawer.serve(frame, cell_id)
                except ValueError:
                    print('Cell ID is not an integer')
                    display_image = drawer.serve(frame)
            else:
                display_image = drawer.serve(frame)
            # plt.imshow(display_image)
            # plt.axis('off')
            # plt.show()
            # cv2.imshow('image',display_image)
            # cv2.waitKey(0)
            # cv2.destroyAllWindows()

        else:
            break
Example No. 20
def get_matcher(net, opt):
    idxs = [x for x in opt['layers'].split(',')]
    # idxs = [int(x) for x in opt['layers'].split(',')]
    matcher = Matcher(opt['what'])

    def hook(module, input, output):
        matcher(module, output)

    for i in idxs:
        net._modules[i].register_forward_hook(hook)

    return matcher
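A hedged usage sketch; the network, layer indices, and 'what' value below are made up for illustration, and the Matcher behaviour itself comes from the surrounding module:

import torch
import torch.nn as nn

# Stand-in network; the keys of net._modules are the strings '0', '1', '2', '3'.
net = nn.Sequential(
    nn.Conv2d(3, 8, 3, padding=1), nn.ReLU(),
    nn.Conv2d(8, 8, 3, padding=1), nn.ReLU(),
)
opt = {'layers': '1,3', 'what': 'activations'}  # hypothetical option values
matcher = get_matcher(net, opt)
with torch.no_grad():
    net(torch.randn(1, 3, 32, 32))  # the forward pass fires the registered hooks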
Example No. 21
def run_trial(house_num: int, matcher_spec: MatcherSpec, aggregated, disaggregated, main_ind, data_vec, labels, always_on):
    disagg_settings, data_settings = load_settings(house_num)

    stat_log = stats.StatStore()
    matcher = Matcher(stat_log, len(labels) + 3, labels, always_on, matcher_spec)
    disaggregator: Disaggregator = PerfectDisaggregator(disagg_settings)

    (hist_delta_power, hist_events, current_time) = disaggregator.initialize(data_vec)
    event_offset = len(hist_events)

    # Traverse the data one frame at a time
    current_frame = 0
    total_frames = int((len(data_vec) - disagg_settings.init_size) / disagg_settings.frame_size)

    while current_time < len(data_vec):
        if verbose:
            print('\nProcessing frame {} of {} from times {} to {}'.format(current_frame, total_frames, current_time, current_time + disagg_settings.frame_size))
        (frame_delta_power, frame_events) = process_frame_data(data_vec, current_time, disagg_settings)

        if verbose:
            print('\tDisaggregating appliances')

        hist_delta_power += frame_delta_power
        hist_events += frame_events

        # Use hist_delta_power length because it contains new data
        # Using current time returns a 0 length array
        gsp_truth = disaggregated[0:len(hist_delta_power)]
        gsp_results = disaggregator.process_frame(data_vec, main_ind, hist_delta_power, frame_events, disaggregated, current_time)
        matcher.process_frame(current_frame, current_time, disagg_settings.frame_size, gsp_results, gsp_truth)

        # Compute final matching
        gsp_results = matcher.final_matching(gsp_truth)

        # Compute statistics
        accuracy = compute_accuracy(gsp_results.columns, disaggregated.columns)
        stat_log.push(current_frame, 'accuracies', accuracy)
        if verbose:
            print('\tAccuracy of {:.2f}'.format(accuracy))
        
        # Advance frame
        event_offset += len(frame_events)
        current_frame += 1
        current_time += disagg_settings.frame_size
    # TODO: add an easy way to view the asked questions (histogram, table, or similar)
    # TODO: after nicer stats/reporting, integrate the new periodicity measurement
    #       (lowest autocorrelation-coefficient std/avg being the most periodic)
    # TODO: finalize the synthetic data creation and piping into here. *More notes in data_combiner.py*

    if verbose:
        matcher.print_stats(gsp_results, disaggregated)
        gsp_v.graph_all(aggregated, disaggregated, gsp_results)
    
    return stat_log
Example No. 22
    def _basic_match_scan(self, pattern, allow_space):
        '''Return the line numbers that match the pattern.'''
        R = []
        M = Matcher(substr=pattern, asterisk_match_len_blacklist=tuple())
        for i, l in enumerate(self._file_data_line):
            t = l
            if allow_space:
                t = l.strip()
            # match
            if M.is_match(t):
                # append the result (line number)
                R.append(i)
        return R
Example No. 23
def make_rpn_loss_evaluator(cfg, box_coder):
    matcher = Matcher(
        cfg.FG_IOU_THRESHOLD,
        cfg.BG_IOU_THRESHOLD,
        allow_low_quality_matches=True,
    )

    fg_bg_sampler = BalancedPositiveNegativeSampler(cfg.BATCH_SIZE_PER_IMAGE,
                                                    cfg.POSITIVE_FRACTION)

    loss_evaluator = RPNLossComputation(matcher, fg_bg_sampler, box_coder,
                                        generate_rpn_labels)
    return loss_evaluator
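A hedged usage sketch; the threshold and sampling values are common RPN defaults rather than values taken from this codebase, and box_coder is assumed to be built by the surrounding module:

from types import SimpleNamespace

rpn_cfg = SimpleNamespace(
    FG_IOU_THRESHOLD=0.7,      # anchors with IoU above this are foreground
    BG_IOU_THRESHOLD=0.3,      # anchors with IoU below this are background
    BATCH_SIZE_PER_IMAGE=256,  # anchors sampled per image for the loss
    POSITIVE_FRACTION=0.5,     # share of the sampled batch reserved for positives
)
loss_evaluator = make_rpn_loss_evaluator(rpn_cfg, box_coder)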
Example No. 24
def main():
    params = Params()

    if not os.path.isdir(params.dataset_root):
        raise Exception("Unable to load images from " + params.dataset_root +
                        ": not a directory")

    if not os.path.exists(params.output_dir):
        os.mkdir(params.output_dir)

    if not os.path.isdir(params.output_dir):
        raise Exception("Unable to save results to " + params.output_dir +
                        ": not a directory")

    if params.dataset == "DIC-C2DH-HeLa":
        path = params.dataset_root + "/" + str(
            list(params.images_idx.keys())[0])
    elif (params.dataset == "PhC-C2DL-PSC"
          and params.nn_method == "DeepWater"):
        path = params.dataset_root + "/" + str(
            list(params.images_idx.keys())[0])
    else:
        path = params.dataset_root
    # seq = []
    images = glob.glob(path + '/*.tif')
    # sort images by the frame number encoded in the filename
    images = [(int(x[-7:-4]), x) for x in images]
    images.sort(key=lambda x: x[0])
    images = [x[1] for x in images]

    preprocessor = Preprocessor(images, params)
    detector = Detector(preprocessor)
    matcher = Matcher(detector)
    drawer = Drawer(matcher)

    masks = preprocessor.get_masks()

    counter = 1
    while True:
        inp = input('Serving next frame... type a Cell ID to inspect details')
        drawer.next()
        try:
            inp = int(inp)
            display_image = drawer.serve(inp)
        except ValueError:
            print('Not an integer')
            display_image = drawer.serve()

        plt.imsave(path + f'gen/{counter}.jpg', display_image)
        plt.imsave(path + f'gen/{counter}_mask.jpg', masks[counter])
        counter += 1
Example No. 25
    def __init__(self, lmd=0.001, dbg=True):
        self.matcher_ = Matcher(dbg=dbg)
        self.lmd_  = lmd
        self.db_   = []

        # tracking data
        self.trk_  = None
        self.path_ = []
        self.cols_ = None

        # debugging flag
        self.dbg_ = dbg

        self.reset()
Example No. 26
    def __init__(self, *args, **kwargs):
        self._config = kwargs.get('config', None) or Config()
        self._app = kwargs.get('app', None) or Flask(self.__class__.__name__)
        self._matcher = kwargs.get('matcher', None) or Matcher()

        # do nothing by default
        self._default_handler = lambda x: None

        self._provider = (kwargs.get('provider', None)
                          or self._get_provider_from_config())

        self._app.add_url_rule(self._config.provider['url'],
                               view_func=self._sms_handler,
                               methods=self._config.provider['methods'])
Example No. 27
    def test_find_1(self):
        data = """0: 4 1 5
1: 2 3 | 3 2
2: 4 4 | 5 5
3: 4 5 | 5 4
4: "a"
5: "b"

ababbb
bababa
abbbab
aaabbb
aaaabbb"""
        matcher = Matcher(data)
        self.assertEqual(2, matcher.matches())
Example No. 28
 def __init__(self):
     # Extractor
     self.extractor = Extractor()
     self.extractor.sgnExtTotalImg.connect(self.extProgBar)
     self.extractor.sgnExtProgress.connect(self.setExtProgBarVal)
     self.extractor.sgnExtException.connect(self.extractDatabaseException)
     self.extractor.sgnExtStatus.connect(self.extractDatabaseStatus)
     self.extractor.sgnExtDone.connect(self.extractDatabaseDone)
     # Matcher
     self.matcher = Matcher()
     self.matcher.sgnSrcTotalImg.connect(self.srcProgBar)
     self.matcher.sgnSrcProgress.connect(self.setSrcProgBarVal)
     self.matcher.sgnSrcException.connect(self.searchImageException)
     self.matcher.sgnSrcResult.connect(self.searchImageResult)
     self.matcher.sgnSrcDone.connect(self.searchImageDone)
Example No. 29
def to_fpga(rx):
    icestick = IceStick()
    icestick.Clock.on()
    icestick.D1.on()

    main = icestick.DefineMain()

    rom = string_to_rom('x' * 16)
    matcher = Matcher(rx)

    m.wire(rom, matcher.char)
    m.wire(matcher.match, main.D1)

    m.EndDefine()

    m.compile('regulair', main)
Example No. 30
def main():
    from matcher import Matcher
    from interface import Interface
    import attribute_names

    if os.path.exists(case_filename):
        with open(case_filename, "rb") as fp:
            ranges, cases = pickle.load(fp)
            for k, v in ranges.items():
                atrcls = getattr(attribute_names, k)
                atrcls._range = v
    else:
        print("Warning: No cases found (looking in '%s').") % case_filename
        cases = []
    matcher = Matcher(cases)
    interface = Interface(matcher)
    interface.cmdloop()