Exemple #1
0
def seek_time_ok(FILENAME, ERRORS):
  """Seek through the test video VIDEOS+FILENAME by timestamp and verify
  every decoded frame, first forwards then backwards.

  Returns 0 on success (PASS) and 1 on any failure (FAIL).
  """
  # create a video reader using the tiny videofile VIDEOS+FILENAME
  video = cvCreateFileCapture(VIDEOS + FILENAME)

  if video is None:
    # couldn't open video (FAIL)
    return 1

  if show_frames:
    cvNamedWindow("test", CV_WINDOW_AUTOSIZE)

  # Check every 3rd frame, forwards then backwards.  The two passes were
  # previously duplicated loop bodies; a single loop over the combined
  # frame sequence does the same work.
  frames = [0, 3, 6, 9, 12, 15, 18, 21, 24, 27]
  for k in frames + frames[::-1]:

    # position by time; each frame lasts 40 ms in the test videos
    cvSetCaptureProperty(video, CV_CAP_PROP_POS_MSEC, k * 40)

    # try to query frame
    image = cvQueryFrame(video)

    if image is None:
      # returned image is NULL (FAIL)
      return 1

    # compare the decoded frame against the reference for index k
    if not match.match(image, k, ERRORS[k]):
      return 1

    if show_frames:
      cvShowImage("test", image)
      cvWaitKey(200)

  # ATTENTION: We do not release the video reader, window or any image.
  # This is bad manners, but Python and OpenCV don't care,
  # the whole memory segment will be freed on finish anyway...

  del video
  # everything is fine (PASS)
  return 0
    def test_no_match(self):
        """Patterns that must not match should all yield None."""
        for pattern in ("a", "AB", "A B"):
            result = match(pattern)
            self.assertIsNone(result, msg="Invalid pattern matched!")
 def test_find_match(self):
     """The exact key 'test' must win over the near-miss key 'tests'.

     NOTE(review): the case values are source strings ('body1()') that
     match() appears to evaluate in this scope, so the local names
     body1/body2 are load-bearing — confirm before renaming.
     """
     body1 = Mock()
     body2 = Mock()
     match_cases = {
         MatchKey('tests', [], []) : 'body1()',
         MatchKey('test', [], []) : 'body2()'}
     match('test', match_cases)
     self.assertFalse(body1.called)
     self.assertTrue(body2.called)
 def test_harder_match(self):
     """A pattern with the right number of %M slots must win; the bound
     variable ('case') is forwarded to the selected body.

     NOTE(review): bodies are eval'd source strings, so the local names
     body1/body2/var1 are referenced by name at runtime.
     """
     body1 = Mock()
     body2 = Mock()
     match_cases = {
         MatchKey('test %s %M %M', ['this'], ['var1', 'var2']) : \
           'body1()',
         MatchKey('test %s %M', ['this'], ['var1']) : 'body2(var1)'}
     match('test this case', match_cases)
     self.assertFalse(body1.called)
     body2.assert_called_once_with('case')
 def test_condition_match(self):
     """When two keys share a pattern, the per-variable condition decides:
     the always-False guard must lose to the guard satisfied by 'a'.

     NOTE(review): bodies are eval'd source strings referencing the local
     names body1/body2 — do not rename the locals.
     """
     body1 = Mock()
     body2 = Mock()
     match_cases = {
         MatchKey('test %M', [], [(lambda x : False, 'var1')]) \
         : 'body1()',
         MatchKey('test %M', [], [(lambda x : x == 'a', 'var1')]) \
         : 'body2()'}
     match('test a', match_cases)
     self.assertFalse(body1.called)
     self.assertTrue(body2.called)
Exemple #6
0
def test_should_not_match_candidates_when_equity_is_desired_but_not_offered():
    """A job offering no equity must not match equity-seeking candidates."""
    locations = ['NY', 'SF', 'LA', 'CO']
    candidates = [{'desires_equity': True,
                   'current_location': loc,
                   'desired_locations': []}
                  for loc in locations]
    job = {'equity_max': 0, 'locations': ['NY', 'SF', 'LA']}

    assert len(match(job, candidates)) == 0
Exemple #7
0
def test_should_match_candidates_currently_in_a_job_location():
    """Only candidates already located in a job city should match (3 of 4)."""
    candidates = [{'desires_equity': False,
                   'current_location': city,
                   'desired_locations': []}
                  for city in ['NY', 'SF', 'LA', 'CO']]
    job = {'equity_max': 0, 'locations': ['NY', 'SF', 'LA']}

    assert len(match(job, candidates)) == 3
def building_match(geoData, buildings, geoBuilidngs):
    with open(geoData, 'r') as f:
        tweet = json.load(f)
        geo_data = {"type": "FeatureCollection", "features": []}
        with open(buildings, 'r') as f1:
            tweet1 = json.load(f1)
            for data in tweet['features']:
                lat = data['geometry']['coordinates'][0]
                long = data['geometry']['coordinates'][1]
                geo_json_feature = {
                    "type": "Feature",
                    "geometry": data['geometry'],
                    "properties": data['properties'],
                    "buildings": []
                }
                for data1 in tweet1['features']:
                    coor = data1['geometry']['coordinates']
                    if match.match(lat, long, coor):
                        geo_json_feature['buildings'].append(
                            data1['properties'])
                geo_data["features"].append(geo_json_feature)
    print len(geo_data['features'])
    with open(geoBuilidngs, 'w') as fout:
        fout.write(json.dumps(geo_data, indent=4))
    fout.close()
Exemple #9
0
def checkstatus(repo, subset, pat, field):
    """Return the revisions in `subset` whose status list `field`
    (an index into repo.status() output, e.g. modified/added/removed)
    contains a file matching `pat`.
    """
    m = matchmod.match(repo.root, repo.getcwd(), [pat])
    s = []
    # fast path: `pat` is a literal file name, so cheap membership tests
    # replace calling the matcher on every file
    fast = (m.files() == [pat])
    for r in subset:
        c = repo[r]
        if fast:
            if pat not in c.files():
                continue
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                # no changed file matched: skip this revision
                continue
        files = repo.status(c.p1().node(), c.node())[field]
        if fast:
            if pat in files:
                s.append(r)
        else:
            for f in files:
                if m(f):
                    s.append(r)
                    break
    return s
Exemple #10
0
def checkstatus(repo, subset, pat, field):
    """Return the revisions in `subset` whose status list `field`
    (an index into repo.status() output) contains a file matching `pat`.
    """
    m = matchmod.match(repo.root, repo.getcwd(), [pat])
    s = []
    # fast path: `pat` is a literal file name, so plain membership tests
    # replace calling the matcher on every file
    fast = (m.files() == [pat])
    for r in subset:
        c = repo[r]
        if fast:
            if pat not in c.files():
                continue
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                # no changed file matched: skip this revision
                continue
        files = repo.status(c.p1().node(), c.node())[field]
        if fast:
            if pat in files:
                s.append(r)
        else:
            for f in files:
                if m(f):
                    s.append(r)
                    # bug fix: was `continue`, which kept scanning and
                    # appended r once per matching file (duplicates in s)
                    break
    return s
Exemple #11
0
 def validate(self, first, second):
     """
     Compares first to second to determine if they sufficiently agree.
     """
     pairing = match(first, second,
                     lambda a, b: self.overlapcost(a, b))
     # entry[2] is the pairing cost; non-zero means a disagreement
     mismatches = sum(1 for entry in pairing if entry[2] != 0)
     return mismatches <= self.mistakes
Exemple #12
0
def checkstatus(repo, subset, pat, field):
    """Return the revisions in `subset` whose status list `field`
    contains a file matching `pat`.
    """
    m = None
    s = []
    # filesets ('set:...') must be re-evaluated against each changectx
    hasset = matchmod.patkind(pat) == 'set'
    fname = None
    for r in subset:
        c = repo[r]
        # build the matcher lazily, and rebuild it per revision only when
        # a fileset is involved
        if not m or hasset:
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            # a single literal file name enables the fast membership path
            if not m.anypats() and len(m.files()) == 1:
                fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                continue
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                # no changed file matched: skip this revision
                continue
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                s.append(r)
        else:
            for f in files:
                if m(f):
                    s.append(r)
                    break
    return s
Exemple #13
0
def do_match(img1, img2, cang, crat, cdesc):
    M = None

    # Get features and distances between every pair of points from both images
    (kpts1, des1) = get_features(img1, M, 'target.jpg')
    (kpts2, des2) = get_features(img2, M, 'reference.jpg')

    Hgt = Hypergraph(kpts1, des1)
    Hgr = Hypergraph(kpts2, des2)

    # draw.triangulation(kpts1, Hgt.E, img1, 'Triangulation 1')
    # draw.triangulation(kpts2, Hgr.E, img2, 'Triangulation 2')

    print 'Hypergraph construction done'
    edge_matches, point_matches = match(
        Hgt.E, Hgr.E, kpts1, kpts2, des1, des2,
        cang, crat, cdesc,
        0.7, 0.75, True
    )
    print 'Hyperedges matching done'

    # draw.edges_match(edge_matches, kpts1, kpts2, Hgt.E, Hgr.E, img1, img2)

    point_matches = sorted(point_matches, key=lambda x: x.distance)
    draw.points_match(point_matches, kpts1, kpts2, img1, img2)

    cv2.waitKey()
    cv2.destroyAllWindows()
Exemple #14
0
def apply_promo_store(userid, store_name):
    itemlist = find_shelf(userid, store_name)
    num_items = len(itemlist)
    print "Apply_promo: We have " + str(num_items) + " items in " + store_name + " shelf."
    total_combinations = 1 << (num_items)
    total_combinations -= 1
    print "Total combination: " + str(total_combinations)
    date_ = datetime.date.today()
    promo_date = Promoinfo.objects.filter(d=date_)
    if store_name == "Express":
        i = 0
    if store_name == "J.Crew":
        i = 1
    promo = promo_date.filter(store__id=i)

    # for all possible combinations
    # find the price by calling match.py
    # upper bound is total_combinations+1 because we are starting with index 1
    # and that is because we don't want to calculate discount for an empty wishlist
    # which will happen when j = 0

    for j in range(1, total_combinations + 1):
        wishlist = find_combination(itemlist, j)
        cached_result, digest = check_if_combination_exists(wishlist)
        if cached_result == None:
            print "No, didn't find result for list " + str(j) + " in cache, so storing it"
            orig_cost, total_cost, savings, shipping = match.match(store_name, date_, copy.deepcopy(wishlist), promo)
            # store this result
            new_result = StoreItemCombinationResults(
                combination_id=digest, price=orig_cost, saleprice=total_cost, free_shipping=shipping
            )
            new_result.save()

    print "Done with apply_promo_store"
Exemple #15
0
def search_pa_list(src: List[str]) -> List[str]:
    """Takes source, finds matching pattern and calls corresponding action. If it finds
    a match but has no answers it returns ["No answers"]. If it finds no match it
    returns ["I don't understand"].

    Args:
        source - a phrase represented as a list of words (strings)

    Returns:
        a list of answers. Will be ["I don't understand"] if it finds no matches and
        ["No answers"] if it finds a match but no answers
    """
    num_matches = 0
    result: List[str] = []
    for entry in pa_list:
        pattern = entry[0]
        # idiom fix: compare against None with `is not`, not `!=`
        match_result = match(pattern, src)
        if match_result is not None:
            num_matches += 1
            action = entry[1]
            # later matches overwrite earlier results, as before
            result = action(match_result)
    if num_matches != 0 and result == []:
        result = ["No answers"]
    elif num_matches == 0 and result == []:
        result = ["I don't understand"]
    return result
Exemple #16
0
def main():
    src_image_path = "src.jpg"
    des_image_path = "dst.jpg"

    img1 = cv2.imread(src_image_path, 0) / 255.0  # greyscale
    img2 = cv2.imread(des_image_path, 0) / 255.0  # greyscale

    # Part A
    corner_points1, descriptors1 = HCD.harris_corner(img1)
    corner_points2, descriptors2 = HCD.harris_corner(img2)

    print "descriptors1 length ", len(descriptors1)
    print "descriptors2 length ", len(descriptors2)

    # Part B
    matches_arr = match.match(corner_points1,
                              corner_points2,
                              descriptors1,
                              descriptors2,
                              match.hamming_metric,
                              n=50)
    print matches_arr.shape
    print matches_arr

    # Part C
    H = findHomography.best_H(matches_arr)
    print H
Exemple #17
0
def checkstatus(repo, subset, pat, field):
    """Return the revisions in `subset` whose status list `field`
    contains a file matching `pat`.
    """
    m = None
    s = []
    # fast path: `pat` has no pattern kind, i.e. it is a literal file name
    fast = not matchmod.patkind(pat)
    for r in subset:
        c = repo[r]
        if fast:
            if pat not in c.files():
                continue
        else:
            # build the matcher lazily; rebuild per revision for filesets
            if not m or matchmod.patkind(pat) == 'set':
                m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.files():
                if m(f):
                    break
            else:
                # no changed file matched: skip this revision
                continue
        files = repo.status(c.p1().node(), c.node())[field]
        if fast:
            if pat in files:
                s.append(r)
        else:
            for f in files:
                if m(f):
                    s.append(r)
                    break
    return s
Exemple #18
0
def merge(segments, method=None, threshold=0.5, groundplane=False):
    """
    Takes a list of segments and attempts to find a correspondance between
    them by returning a list of merged paths.

    Uses 'method' to score two candidate paths. If the score returned by
    'method' is greater than the number of overlaping frames times the 
    threshold, then the correspondance is considered bunk and a new path
    is created instead.

    In general, if 'method' returns 0 for a perfect match and 1 for a
    horrible match, then 'threshold' = 0.5 is pretty good.
    """
    if method is None:
        method = getpercentoverlap(groundplane)

    logger.debug("Starting to merge!")
    # maps path id -> (merged box list, list of contributing paths)
    paths = {}
    segments.sort(key=lambda x: x.start)
    # seed with every path of the earliest segment
    for path in segments[0].paths:
        paths[path.id] = path.getboxes(groundplane=groundplane), [path]
    # walk consecutive segment pairs in start order
    for x, y in zip(segments, segments[1:]):
        logger.debug("Merging segments {0} and {1}".format(x.id, y.id))
        if x.stop < y.start:
            # disjoint in time: nothing to associate, start fresh paths
            logger.debug("Segments {0} and {1} do not overlap".format(
                x.id, y.id))
            for path in y.paths:
                paths[path.id] = path.getboxes(groundplane=groundplane), [path]
        else:
            # first is None => birth in y; second is None => death after x
            for first, second, score in match(x.paths, y.paths, method):
                logger.debug("{0} associated to {1} with score {2}".format(
                    first, second, score))
                if second is None:
                    continue

                isbirth = first is None
                if not isbirth:
                    # reject the association if the score exceeds the
                    # overlap-scaled threshold, treating it as a birth
                    scorerequirement = threshold * overlapsize(
                        first, second, groundplane)
                    if score > scorerequirement:
                        logger.debug(
                            "Score {0} exceeds merge threshold of {1}".format(
                                score, scorerequirement))
                        isbirth = True
                    else:
                        logger.debug("Score {0} satisfies merge threshold of "
                                     "{1}".format(score, scorerequirement))

                if isbirth:
                    paths[second.id] = second.getboxes(
                        groundplane=groundplane), [second]
                else:
                    # extend the accepted path and re-key it under second.id
                    path = mergepath(paths[first.id][0],
                                     second.getboxes(groundplane=groundplane))
                    paths[first.id][1].append(second)
                    paths[second.id] = (path, paths[first.id][1])
                    del paths[first.id]
    logger.debug("Done merging!")
    return paths.values()
Exemple #19
0
def getPosition():
    """Return the seat index (0-2) whose region shows the landlord icon,
    or -1 when no seat matches."""
    global sourceData, sourceSize, icon, landlordIcon_rect
    for seat in (0, 1, 2):
        hits = matchApp.match(sourceData, sourceSize, icon['landlordIcon'],
                              landlordIcon_rect[seat])
        if len(hits) >= 1:
            return seat
    return -1
Exemple #20
0
def test_match_without_supplying_cleaned_text_single_word():
    ''' Unit test for match.match() without user-supplied cleaned text, matching on a single word '''
    # data from the Twitter Political Corpus: https://www.usna.edu/Users/cs/nchamber/data/twitter/ 
    original_text = """LIVING MY LIFE ONE STEP AT A TIME~NO NEED TO RUSH WHEN YOU HAVE PLENTY OF TIME~DON'T WORRY OVER THOSE WHO NEVER MADE IT TO YA FUTURE THE  @SpaceAstro the whole state of Arizona doesn't do Daylight Savings Time  #News #Environment #Nature Turmoil from climate change poses security risks http://economictimes.indiatimes.com/articleshow/5175652.cms  celebrates Halloween and time-travel with good friends, a scary movie, clingy cats, and hazelnut spice rum. Adieu, October; hello, November!  Working on my first video for the new #youtube channel. It's definitely going to be an acoustic cover of Times Like These - Foo Fighters #ff  of Beastly Behavior Sometimes the PEN is mightier than the sword or my tongue is sharper than my gun (but NOT always) When your ready to"""
    # see issue #6 for a discussion about how to handle matches on portions of hyphenated words
    # expected (start, end, matched_text) spans, case-insensitive whole-word hits
    gold = [(29, 33, 'TIME'), (74, 78, 'TIME'), (205, 209, 'Time'), (373, 377, 'time')]
    current = match.match(original_text, "time")
    # eq_ asserts current == gold
    eq_(current, gold)
Exemple #21
0
def test_should_match_candidates_desire_to_move_to_a_job_location():
    """Candidates wanting to relocate into a job city should match (3 of 5)."""
    wishlists = [['NY', 'SF'], ['LA'], ['CO'], ['ME'], ['LA', 'WA']]
    candidates = [{'desires_equity': True,
                   'current_location': 'WY',
                   'desired_locations': wanted}
                  for wanted in wishlists]
    job = {'equity_max': 1.2, 'locations': ['NY', 'SF', 'CO', 'WA']}

    assert len(match(job, candidates)) == 3
Exemple #22
0
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    # i18n: "_matchfiles" is a keyword
    l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
    pats, inc, exc = [], [], []
    hasset = False
    rev, default = None, None
    for arg in l:
        s = getstring(arg, _("_matchfiles requires string arguments"))
        # each argument is a two-character prefix followed by its value
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'revision'))
            rev = value
        elif prefix == 'd:':
            if default is not None:
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'default mode'))
            default = value
        else:
            raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
        # filesets ('set:...') must be re-evaluated per changectx later
        if not hasset and matchmod.patkind(value) == 'set':
            hasset = True
    if not default:
        default = 'glob'
    m = None
    s = []
    for r in subset:
        c = repo[r]
        # rebuild the matcher per revision only when a fileset is involved
        # and no fixed 'r:' revision was supplied; otherwise reuse it
        if not m or (hasset and rev is None):
            ctx = c
            if rev is not None:
                ctx = repo[rev or None]
            m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                               exclude=exc, ctx=ctx, default=default)
        # keep revisions that touch at least one matching file
        for f in c.files():
            if m(f):
                s.append(r)
                break
    return s
Exemple #23
0
def apply_promo(request, d1, d2):
    if "u" in request.GET and request.GET["u"]:
        userid = urllib.unquote(request.GET["u"].decode("utf-8"))
        result_list = {}
        # for each store-shelf
        shelf_per_store = find_shelf_store_based_for_user(userid)
        for i in range(0, len(shelf_per_store)):
            # how many items in this shelf
            store_name = stores[i]
            num_items = len(shelf_per_store[store_name])
            print "Apply_promo: We have " + str(num_items) + " items in " + store_name + " shelf."
            total_combinations = 1 << (num_items)
            total_combinations -= 1
            print "Total combination: " + str(total_combinations)
            date_ = datetime.date.today()
            promo_date = Promoinfo.objects.filter(d=date_)
            promo = promo_date.filter(store__id=i)

            # for all possible combinations
            # find the price by calling match.py
            # upper bound is total_combinations+1 because we are starting with index 1
            # and that is because we don't want to calculate discount for an empty wishlist
            # which will happen when j = 0
            itemlist = []
            for j in range(1, total_combinations + 1):
                wishlist = find_combination(shelf_per_store[store_name], j)
                cached_result, digest = check_if_combination_exists(wishlist)
                if cached_result == None:
                    print "No, didn't find result for list " + str(j) + " in cache, so storing it"
                    orig_cost, total_cost, savings, shipping = match.match(
                        store_name, date_, copy.deepcopy(wishlist), promo
                    )
                    # store this result
                    new_result = StoreItemCombinationResults(
                        combination_id=digest, price=orig_cost, saleprice=total_cost, free_shipping=shipping
                    )
                    new_result.save()
                else:
                    print "Great, found the result! Using it here."
                    orig_cost = cached_result.price
                    total_cost = cached_result.saleprice
                    savings = cached_result.price - cached_result.saleprice
                    shipping = cached_result.free_shipping

                print "RESULT:: " + str(j) + " " + str(store_name) + " " + str(orig_cost) + " " + str(
                    total_cost
                ) + " " + str(savings)
                itemlist.append(
                    {"orig_cost": orig_cost, "total_cost": total_cost, "savings": savings, "shipping": shipping}
                )

            result_list[store_name] = itemlist
        return list_detail.object_list(
            request,
            queryset=WishlistI.objects.none(),
            template_name="apply_promo.html",
            extra_context={"uid": userid, "result_list": result_list},
        )
Exemple #24
0
def matchcapthca():
    """
    generate captcha.
    """
    crop_count = len(os.listdir(_CROPS))
    return "".join(str(match(_CROPS + "char" + str(idx) + ".png"))
                   for idx in range(crop_count))
Exemple #25
0
def test_match_supplying_cleaned_text_tokenized_phrase():
    ''' Unit test for match.match() with user-supplied cleaned text, matching on a tokenized phrase '''
    # data from the Twitter Political Corpus: https://www.usna.edu/Users/cs/nchamber/data/twitter/
    original_text = """I refuse to be a Socialist!! I had fun last night thanks Court! ... http://lnk.ms/3DZ1C  It’s called “communism,†folks. http://bit.ly/RedFL"""
    cleaned_text = """I refuse to be a Socialist!! I had fun last night thanks Court! ... http://lnk.ms/3DZ1C  It's called \"communism,\" folks. http://bit.ly/RedFL"""
    tokenized_phrase = ["It", "'s", "called", '"', "communism", ",", '"']
    gold = [(89, 113, 'It’s called “communi')]  # TODO: obviously not quite right...consider what can be done
    current = match.match(original_text, tokenized_phrase, clean_text=cleaned_text)
    # eq_ asserts current == gold
    eq_(current, gold)
def find_price_of_wishlist_for_store(wishlist, store_name, store_id, date_):
    '''
    Find the total price for the items in the wishlist from store_name and on date_
    '''
    # promos active on date_ for this store
    promo = Promoinfo.objects.filter(d = date_).filter(store__id = store_id)
    priced = match.match(store_name, date_, copy.deepcopy(wishlist), promo)
    orig_cost, total_cost, savings, shipping = priced
    return (orig_cost, total_cost, savings, shipping)
Exemple #27
0
def match(repo, pats=None, opts=None, globbed=False, default='relpath'):
    """Build a match object for `pats` against `repo`, warning via
    repo.ui on bad files.

    `pats`/`opts` default to empty; fixed from mutable default
    arguments ([]/{}), which are shared across calls in Python.
    """
    pats = [] if pats is None else pats
    opts = {} if opts is None else opts
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])
    m = _match.match(repo.root, repo.getcwd(), pats,
                    opts.get('include'), opts.get('exclude'), default)
    def badfn(f, msg):
        repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
    m.bad = badfn
    return m
Exemple #28
0
def matchcapthca():
    """
    generate captcha.
    """
    chars = []
    for idx in range(len(os.listdir(_CROPS))):
        chars.append(str(match(_CROPS + "char" + str(idx) + ".png")))
    return "".join(chars)
def match(repo, pats=None, opts=None, globbed=False, default='relpath'):
    """Build a match object for `pats` against `repo`, warning via
    repo.ui on bad files.

    `pats`/`opts` default to empty; fixed from mutable default
    arguments ([]/{}), which are shared across calls in Python.
    """
    pats = [] if pats is None else pats
    opts = {} if opts is None else opts
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])
    m = _match.match(repo.root, repo.getcwd(), pats,
                    opts.get('include'), opts.get('exclude'), default)
    def badfn(f, msg):
        repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
    m.bad = badfn
    return m
 def match(self, pats=None, include=None, exclude=None, default='glob'):
     """Return a matchmod.match object for this context.

     `pats` defaults to empty; fixed from a mutable default argument
     ([]), which is shared across calls in Python.
     """
     if pats is None:
         pats = []
     r = self._repo
     return matchmod.match(r.root,
                           r.getcwd(),
                           pats,
                           include,
                           exclude,
                           default,
                           auditor=r.auditor,
                           ctx=self)
def find_price_of_wishlist_for_store(wishlist, store_name, store_id, date_):
    '''
    Find the total price for the items in the wishlist from store_name and on date_
    '''
    # promos active on date_ for this store
    store_promos = Promoinfo.objects.filter(d=date_).filter(store__id=store_id)
    orig_cost, total_cost, savings, shipping = match.match(
        store_name, date_, copy.deepcopy(wishlist), store_promos)
    return (orig_cost, total_cost, savings, shipping)
Exemple #32
0
def test_match_without_supplying_cleaned_text_tokenized_phrase():
    ''' Unit test for match.match() with no user-supplied cleaned text, matching on a tokenized phrase '''
    # https://www.poetryfoundation.org/poems/47247/in-just
    original_text = """in Just-\nspring          when the world is mud-\nluscious the little\nlame balloonman\n\nwhistles          far          and wee\n\nand eddieandbill come\n
running from marbles and\npiracies and it's\nspring\n\nwhen the world is puddle-wonderful\n\nthe queer\nold balloonman whistles\nfar          and             wee\nand bettyandisbel come dancing\n\nfrom hop-scotch and jump-rope and\n\nit's\nspring\nand\n\n         the\n\n                  goat-footed\n\nballoonMan          whistles\nfar\nand\nwee"""
    # see issue #7
    tokenized_phrase = ["marbles", "and", "piracies"]
    # expected (start, end, matched_text): the phrase spans a line break
    gold = [(161, 181, 'marbles and\npiracies')]
    current = match.match(original_text, tokenized_phrase)
    # eq_ asserts current == gold
    eq_(current, gold)
def attempt(num, expected, e1, e2):
   """Run one match.match(e1, e2) test case numbered `num`.

   Bumps the shared counters and, on mismatch (or exception), records
   the failure and prints the failing case.
   """
   # bug fix: without this declaration, the assignment below created a
   # dead local instead of setting the module-level flag
   global global_fail_flag
   rs = False
   try:
      num_tests[0] += 1
      rs = (expected == match.match(e1, e2))
   finally:
      # also runs when match.match raises, so the failure is still counted
      if not rs:
         global_fail_flag = True
         fail_count[0] += 1
         print("{:<10d} : {},{}".format(num, e1, e2))
Exemple #34
0
def merge(segments, method = None, threshold = 0.5, groundplane = False):
    """
    Takes a list of segments and attempts to find a correspondance between
    them by returning a list of merged paths.

    Uses 'method' to score two candidate paths. If the score returned by
    'method' is greater than the number of overlaping frames times the 
    threshold, then the correspondance is considered bunk and a new path
    is created instead.

    In general, if 'method' returns 0 for a perfect match and 1 for a
    horrible match, then 'threshold' = 0.5 is pretty good.
    """
    if method is None:
        method = getpercentoverlap(groundplane)

    logger.debug("Starting to merge!")
    # path id -> (merged box list, contributing path objects)
    paths = {}
    segments.sort(key = lambda x: x.start)
    # seed with every path from the earliest segment
    for path in segments[0].paths:
        paths[path.id] = path.getboxes(groundplane=groundplane), [path]
    # walk consecutive segment pairs in start order
    for x, y in zip(segments, segments[1:]):
        logger.debug("Merging segments {0} and {1}".format(x.id, y.id))
        if x.stop < y.start:
            # disjoint in time: nothing to associate, start fresh paths
            logger.debug("Segments {0} and {1} do not overlap"
                         .format(x.id, y.id))
            for path in y.paths:
                paths[path.id] = path.getboxes(groundplane=groundplane), [path]
        else:
            # first is None => birth in y; second is None => death after x
            for first, second, score in match(x.paths, y.paths, method):
                logger.debug("{0} associated to {1} with score {2}"
                            .format(first, second, score))
                if second is None:
                    continue

                isbirth = first is None
                if not isbirth:
                    # reject the association (treat as birth) when the score
                    # exceeds the overlap-scaled threshold
                    scorerequirement = threshold * overlapsize(first, second, groundplane)
                    if score > scorerequirement:
                        logger.debug("Score {0} exceeds merge threshold of {1}"
                                    .format(score, scorerequirement))
                        isbirth = True
                    else:
                        logger.debug("Score {0} satisfies merge threshold of "
                                     "{1}" .format(score, scorerequirement))

                if isbirth:
                    paths[second.id] = second.getboxes(groundplane=groundplane), [second]
                else:
                    # extend the accepted path and re-key it under second.id
                    path = mergepath(paths[first.id][0], second.getboxes(groundplane=groundplane))
                    paths[first.id][1].append(second)
                    paths[second.id] = (path, paths[first.id][1])
                    del paths[first.id]
    logger.debug("Done merging!")
    return paths.values()
Exemple #35
0
  def GET(self, name):
    """Convert the requested song to WAV, isolate/speed it up, mix it with
    three EDM backgrounds, export the mixes, and return the final path.
    """

    user_data = web.input(input_song='../songs/loseyourself.mp3')
    print 'start'

    # convert the uploaded mp3 to a wav alongside it
    target_wav = '../songs/' + user_data.input_song[:-4] + '.wav'
    mp3towav.mp3towav('../songs/' + user_data.input_song, target_wav);
    print 'done conversion'

    print 'isolating', target_wav
    isolate.isolate(target_wav)
    print 'done isolation'


    # NOTE(review): bpm is assigned here but never used below — dead code?
    # Also these compare against '.wav' names while the default input_song
    # is an .mp3 path — confirm what input_song actually contains.
    bpm = None
    if user_data.input_song == 'animals.wav':
        bpm = 95
    elif user_data.input_song == 'payphone.wav':
        bpm = 110
    elif user_data.input_song == 'sugar.wav':
        bpm = 121
    elif user_data.input_song == 'loseyourself.wav':
        bpm = 116
    elif user_data.input_song == 'baby.wav':
        bpm = 128

    print 'speeding up', target_wav[:-4] + "_high.wav"
    speedup.ver3(target_wav[:-4] + "_high.wav")
    print 'done speedup'

    # mix the sped-up vocal track with each background and export
    hi_wav = target_wav[:-4] + '_high-128.wav'
    audseg = match.match('bg/bg128_pop.wav', hi_wav);
    audseg.export('../www/public/uploads/edm_' + target_wav[9:-4] + "_finalpop.wav", format="wav")

    audseg = match.match('bg/bg128_calvinharris.wav', hi_wav);
    audseg.export('../www/public/uploads/edm_' + target_wav[9:-4] + "_finalcalvin.wav", format="wav")

    audseg = match.match('bg/bg128_oncelydian.wav', hi_wav);
    audseg.export('../www/public/uploads/edm_' + target_wav[9:-4] + "_finaloncelydian.wav", format="wav")
    print 'done export'

    return target_wav[:-4] + "_final.wav"
Exemple #36
0
def casual_match_view(request, match_id):
    """Render the stats page for a casual match, or an error page when the
    match cannot be loaded."""
    mid = int(match_id)
    stats = match.match(mid)
    if stats is None:
        template = loader.get_template('error.html')
        context = Context({'id': match_id})
    else:
        template = loader.get_template('match.html')
        context = Context({'match_id': mid, 'stats': stats})
    return HttpResponse(template.render(context))
Exemple #37
0
def casual_match_view(request, match_id):
    """Show stats for the given casual match id, falling back to an
    error page when no stats are available."""
    mid = int(match_id)
    stats = match.match(mid)
    if stats is not None:
        return HttpResponse(
            loader.get_template('match.html').render(
                Context({'match_id': mid, 'stats': stats})))
    return HttpResponse(
        loader.get_template('error.html').render(
            Context({'id': match_id})))
def main():
    point.readcsvfile('rb1701.csv')
    tick = len(point.sim_data) - 1
    makedata_num = 100000
    #init point
    point_2 = []
    point_3 = []
    point_4 = []
    point_5 = []
    pointlist_1 = []
    print "begin init"
    point_1_1, point_1_2, point_1_3, point_2, point_3, point_4, point_5 = init_point(
        tick)

    #run point
    for i in range(0, makedata_num):
        print "in the loop ", i, " times"
        orderlist = []
        cancel_list = []
        trade_list = []
        run_point_1_close(pointlist_1, orderlist, tick)
        run_point_1_open(point_1_1, point_1_1.volume, orderlist, pointlist_1,
                         tick)
        run_point_1_open(point_1_2, point_1_2.volume, orderlist, pointlist_1,
                         tick)
        run_point_1_open(point_1_2, point_1_2.volume, orderlist, pointlist_1,
                         tick)
        print "type 1 done"
        run_point_2(point_2, orderlist, tick)
        run_point_3(point_3, orderlist, tick, cancel_list)
        run_point_4(point_4, orderlist, tick)
        print "type 4 done"
        run_point_5(point_5, orderlist, tick)
        trade_back = match.match(cancel_list, orderlist)
        trade_list = trade_back[0]
        new_data = trade_back[1]
        receive_tradeorder(tradelist, point_1_1, point_1_2, point_1_3,
                           pointlist_1, point_2, point_3, point_4, point_5)
        point.sim_data.append(new_data)
        tick += 1
        print new_data
        raw_input("pause")

    for i in range(5, 100000):
        print "the tick num is ", i
        num = point_1_1.tradenum()
        point_1_1.update_n(-1 * num)
        for j in range(0, num):
            tem = point.trader_class_1_2()
            tem.get_price()
            order1 = tem.send_order()
            print order1.output()
        if num > 0:
            raw_input('pause')
def main():
    """Match students to companies and write the results to CSV.

    Loads student and company data from the data/ CSVs, repeatedly runs
    match() until the assignment is valid (or 1000 rounds pass), then writes
    matches.csv; if the round limit was hit, also writes the still-unmatched
    students to unmatched.csv.
    """
    # Expected student record shape:
    # students = {
    #     'ConnollyRachel': {'first': 'Rachel', 'last': 'Connolly', 'gender': ['Female'],
    #                  'cuny': 'Queens College', 'year': 1,
    #                  'cs_experience': 'Beginner', 'interests': [],
    #                  'ranked_companies': ['Company A', 'Company B', 'Company C'],
    #                  'matched_company': None}
    # }

    app_file = 'data/student_applications.csv'
    enrollment_file = 'data/student_enrollment.csv'
    student_pref_file = 'data/student_pref.csv'
    students = load_students(app_file, enrollment_file, student_pref_file)

    # Expected company record shape:
    # companies = {
    #     'Company A': {'prefer': ['12345678'],'exclude': [],
    #                   'num_students': 5, 'team': [], 'sponsored': True, 'f1_j1': True},
    #     'Company B': {'prefer': [],'exclude': [],
    #                   'num_students': 5, 'team': [], 'sponsored': False, 'f1_j1': False},
    #     'Company C': {'prefer': [],'exclude': [],
    #                   'num_students': 5, 'team': [], 'sponsored': False, 'f1_j1': False}
    # }

    company_info_file = 'data/company_info.csv'
    company_pref_file = 'data/company_pref.csv'
    companies = load_companies(company_info_file, company_pref_file)

    # TODO: Validate files

    match_counter = 0
    while not valid_match(students, companies) and match_counter < 1000:
        students, companies = match(students, companies)
        match_counter += 1

    if match_counter == 1000:
        # Round limit hit: record whoever is still unmatched.
        unmatched_students = [
            name for name in students.keys()
            if students[name]['matched_company'] is None  # BUG FIX: was `== None`
        ]
        with open('unmatched.csv', 'w') as f:
            f.write('Last, First, Emplid\n')
            for name in unmatched_students:
                # BUG FIX: rows previously had no trailing newline, so every
                # unmatched student landed on a single CSV line.
                f.write(
                    f"{students[name]['last']}, {students[name]['first']}, {students[name]['EMPLID']}\n"
                )

    with open('matches.csv', 'w') as f:
        f.write('Last, First, Emplid, Team\n')
        for name in students:
            if students[name]['matched_company']:
                f.write(
                    f"{students[name]['last']}, {students[name]['first']},{students[name]['EMPLID']},{students[name]['matched_company']}\n"
                )
    return
Exemple #40
0
def approach():
    """Handle a search POST: score the query against the cited ESI records."""
    response.set_header('Access-Control-Allow-Origin', '*')
    search = request.POST.get('search')
    print(search)
    # NOTE(review): eval() on each line of esi.txt executes arbitrary code if
    # the file is ever attacker-controlled — consider ast.literal_eval.
    with open('./esi.txt', 'r') as handle:
        cited = [eval(record) for record in handle.readlines()]
    matcher = match.match(search, cited)
    match_result = matcher.similar()
    return json.dumps({'data': match_result, 'status': 1})
Exemple #41
0
def _picktool(repo, ui, path, binary, symlink):
    """Select a merge tool for *path*; returns (tool name, quoted tool path).

    Precedence: $HGMERGE, then [merge-patterns] entries matching the path,
    then [merge-tools] ordered by priority, then "hgmerge", and finally the
    internal merge (text files only).
    """

    def usable(tool, pat, symlink, binary):
        # Guard-clause form of the capability check: reject with a warning
        # (or a quiet note for merely-configured tools) and return False.
        tmsg = tool
        if pat:
            tmsg += " specified for " + pat
        if not _findtool(ui, tool):
            if pat:  # explicitly requested tool deserves a warning
                ui.warn(_("couldn't find merge tool %s\n") % tmsg)
            else:  # configured but non-existing tools are more silent
                ui.note(_("couldn't find merge tool %s\n") % tmsg)
            return False
        if symlink and not _toolbool(ui, tool, "symlink"):
            ui.warn(_("tool %s can't handle symlinks\n") % tmsg)
            return False
        if binary and not _toolbool(ui, tool, "binary"):
            ui.warn(_("tool %s can't handle binary\n") % tmsg)
            return False
        if not util.gui() and _toolbool(ui, tool, "gui"):
            ui.warn(_("tool %s requires a GUI\n") % tmsg)
            return False
        return True

    # HGMERGE takes precedence
    hgmerge = os.environ.get("HGMERGE")
    if hgmerge:
        return (hgmerge, hgmerge)

    # then per-pattern configuration
    for pat, tool in ui.configitems("merge-patterns"):
        matcher = match.match(repo.root, '', [pat])
        if matcher(path) and usable(tool, pat, symlink, False):
            return (tool, '"' + _findtool(ui, tool) + '"')

    # then merge tools, highest configured priority first
    prio = {}
    for key, value in ui.configitems("merge-tools"):
        base = key.split('.')[0]
        if base not in prio:
            prio[base] = int(_toolstr(ui, base, "priority", "0"))
    names = prio.keys()
    candidates = sorted([(-weight, name) for name, weight in prio.items()])
    uimerge = ui.config("ui", "merge")
    if uimerge:
        if uimerge not in names:
            return (uimerge, uimerge)
        candidates.insert(0, (None, uimerge))  # highest priority
    candidates.append((None, "hgmerge"))  # the old default, if found
    for _weight, name in candidates:
        if usable(name, None, symlink, binary):
            return (name, '"' + _findtool(ui, name) + '"')
    # internal merge as last resort
    return (not (symlink or binary) and "internal:merge" or None, None)
Exemple #42
0
def _picktool(repo, ui, path, binary, symlink):
    # Pick the merge tool for `path`; returns (tool name, quoted tool path).
    # Precedence: $HGMERGE, [merge-patterns], [merge-tools] by priority,
    # "hgmerge", and finally the internal merge (text files only).
    def check(tool, pat, symlink, binary):
        # True if `tool` exists and can handle this file; otherwise warn
        # (or quietly note, for merely-configured tools) and return False.
        tmsg = tool
        if pat:
            tmsg += " specified for " + pat
        if not _findtool(ui, tool):
            if pat: # explicitly requested tool deserves a warning
                ui.warn(_("couldn't find merge tool %s\n") % tmsg)
            else: # configured but non-existing tools are more silent
                ui.note(_("couldn't find merge tool %s\n") % tmsg)
        elif symlink and not _toolbool(ui, tool, "symlink"):
            ui.warn(_("tool %s can't handle symlinks\n") % tmsg)
        elif binary and not _toolbool(ui, tool, "binary"):
            ui.warn(_("tool %s can't handle binary\n") % tmsg)
        elif not util.gui() and _toolbool(ui, tool, "gui"):
            ui.warn(_("tool %s requires a GUI\n") % tmsg)
        else:
            return True
        return False

    # HGMERGE takes precedence
    hgmerge = os.environ.get("HGMERGE")
    if hgmerge:
        return (hgmerge, hgmerge)

    # then patterns (note: binary check is deliberately skipped here)
    for pat, tool in ui.configitems("merge-patterns"):
        mf = match.match(repo.root, '', [pat])
        if mf(path) and check(tool, pat, symlink, False):
                toolpath = _findtool(ui, tool)
                return (tool, '"' + toolpath + '"')

    # then merge tools: collect each tool's configured priority once
    tools = {}
    for k,v in ui.configitems("merge-tools"):
        t = k.split('.')[0]
        if t not in tools:
            tools[t] = int(_toolstr(ui, t, "priority", "0"))
    names = tools.keys()
    # negate priorities so a plain sort yields highest-priority first
    tools = sorted([(-p,t) for t,p in tools.items()])
    uimerge = ui.config("ui", "merge")
    if uimerge:
        if uimerge not in names:
            return (uimerge, uimerge)
        tools.insert(0, (None, uimerge)) # highest priority
    tools.append((None, "hgmerge")) # the old default, if found
    for p,t in tools:
        if check(t, None, symlink, binary):
            toolpath = _findtool(ui, t)
            return (t, '"' + toolpath + '"')
    # internal merge as last resort
    return (not (symlink or binary) and "internal:merge" or None, None)
Exemple #43
0
	def __init__(self, source, row):
		# Parse the raw `row` from `source` into a match record and mirror
		# its identifying fields onto this wrapper object.
		self.match = match.match(source, row)
		self.school_name = self.match.school_name
		self.name_mascot = self.match.name_mascot
		self.representation = self.match.representation
		self.show_in_potential_matches = True

		# lists of tuples - (match, score): candidate matches per data source
		self.matches_sportsref = []
		self.matches_ncaa = []
		self.matches_espn = []
		self.matches_247 = []
		self.matches_rivals = []
Exemple #44
0
def hasfile(repo, subset, x):
    """``file(pattern)``
    Changesets affecting files matched by pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    m = matchmod.match(repo.root, repo.getcwd(), [pat])
    s = []
    for r in subset:
        for f in repo[r].files():
            if m(f):
                s.append(r)
                # BUG FIX: was `continue`, which only skipped to the next
                # file, so a revision touching several matching files was
                # appended once per file; `break` records each revision once.
                break
    return s
class main():
    # NOTE(review): this class body runs at definition time — it behaves like
    # a script (interactive REPL), not a reusable class.
    print("Loading Resources...")
    nlp = load('en')  # language model — presumably spaCy-style; confirm
    m = match.match(nlp)  # matcher over known queries
    a = action.action(nlp)  # maps a matched query to an executable action
    while True:
        inp = input("Query: ")
        # find candidate matches at or above a 0.90 similarity threshold
        matches = m.find_match(inp.lower(), 0.90)
        best_match = m.get_dominant_res(matches)
        act = a.get_action(best_match, inp)
        if act is None:
            # no action for the best match: fall back to the matcher's
            # default answer for matches[2]
            print(matches[2])
            act = m.get_da(matches[2])
        print("ANS: ", act)
Exemple #46
0
   def match(self, other, valuestore=None):
       """Match this expression against a pattern.

       Use wilds() to generate wild placeholders; captured values are
       stored on *valuestore*.

       Example:
         a,b = wilds('a b')
         val = WildsResults()

         if exp.match(a(b + 4), val):
           print val.a
           print val.b
       """
       import match
       return match.match(self, other, valuestore)
Exemple #47
0
    def do_POST(self):
        # Handle POSTs: 'train.html' verifies a submitted pattern against the
        # user's trained model and folds accepted samples back into training;
        # 'auth.html' is an unimplemented account-creation stub.
        try:
            if self.path.endswith('train.html'):
                if self.headers['Content-type'] == "application/json":
                    length = int(self.headers['Content-Length'])
                    data  = json.loads(self.rfile.read(length))
                    username = data['username']

                    #need decryption
                    # NOTE(review): 'ciphertext' is used as-is — decryption
                    # is not actually implemented yet.
                    pattern = data['ciphertext']


                    #Check user credential
                    # NOTE(review): credential check not implemented.


                    #predict: compare submitted pattern to the stored model
                    model = self.DM.getUserTrainedPattern(username)
                    predict_rlt, similarity = match(pattern, model)


                    #train
                    if predict_rlt:
                        # accept the sample; retrain from the full history
                        # after every 10 accepted samples
                        self.DM.insertUserPattern(username, pattern)
                        self.count = self.count+1
                        if self.count >= 10:
                            self.count = 0
                            #retrain and update trainedPattern
                            allPatterns = self.DM.getAllPatterns(username)
                            new_model = train(allPatterns)
                            self.DM.updateUserTrainedPattern(username, new_model)
                    else:
                        pass


                    #send result
                    self.send_response(200)
                    self.send_header('Content-type','text-html')
                    self.end_headers()
                    self.wfile.write("result")



            elif self.path.endswith('auth.html'):
                #create the account
                pass


        except IOError as e:
            # NOTE(review): send_error expects a numeric HTTP status code;
            # passing str(e) looks wrong — confirm against
            # BaseHTTPRequestHandler.send_error.
            self.send_error(str(e))
        pass
Exemple #48
0
 def match(self, other, valuestore=None):
   """Match this expression against a pattern.

   Wild placeholders come from wilds(); any captured values are written
   to *valuestore*.

   Example:
     a,b = wilds('a b')
     val = WildsResults()

     if exp.match(a(b + 4), val):
       print val.a
       print val.b
   """
   import match
   return match.match(self, other, valuestore)
Exemple #49
0
def ignore(root, files, warn):
    '''return matcher covering patterns in 'files'.

    the files parsed for patterns include:
    .hgignore in the repository root
    any additional files specified in the [ui] section of ~/.hgrc

    trailing white space is dropped.
    the escape character is backslash.
    comments start with #.
    empty lines are skipped.

    lines can be of the following formats:

    syntax: regexp # defaults following lines to non-rooted regexps
    syntax: glob   # defaults following lines to non-rooted globs
    re:pattern     # non-rooted regular expression
    glob:pattern   # non-rooted glob
    pattern        # pattern of the current default type'''

    pats = readpats(root, files, warn)

    allpats = []
    for f, patlist in pats:
        allpats.extend(patlist)
    if not allpats:
        return util.never

    try:
        ignorefunc = match.match(root, '', [], allpats)
    except util.Abort:
        # Re-raise an exception where the src is the right file
        for f, patlist in pats:
            try:
                match.match(root, '', [], patlist)
            except util.Abort, inst:
                raise util.Abort('%s: %s' % (f, inst[0]))
Exemple #50
0
    def _ignore(self):
        """Return a matcher covering every configured ignore file."""
        sources = []
        hgignore = self._join('.hgignore')
        if os.path.exists(hgignore):
            sources.append(hgignore)
        for key, value in self._ui.configitems("ui"):
            if key == 'ignore' or key.startswith('ignore.'):
                # we need to use os.path.join here rather than self._join
                # because the configured path is arbitrary and user-specified
                sources.append(os.path.join(self._rootdir, util.expandpath(value)))

        if not sources:
            return util.never

        return matchmod.match(self._root, '', [],
                              ['include:%s' % f for f in sources],
                              warn=self._ui.warn)
Exemple #51
0
def parse(test_str):
    """Recursively build a Statement tree for the formula in *test_str*."""
    # match the string against its outermost connector
    val = match.match(test_str)
    if val is None:
        return None
    kind, matches = val
    if kind == "lit":
        # literal: no sub-statements to parse
        return statement.Statement(kind, matches)
    if kind == "~":
        # negation wraps exactly one sub-formula
        return statement.Statement(kind, parse(matches))
    # binary connector: parse both operands
    return statement.Statement(kind, parse(matches[0]), parse(matches[1]))
Exemple #52
0
def addBuildings(user_id):
    """Annotate each geolocated tweet of *user_id* with the buildings it hits.

    Reads the user's timeline JSON, intersects every geotagged tweet with
    the building footprints in buildings.json (via match.match), and writes
    a FeatureCollection to <user_id>_tweets_building.json.
    """
    with open(
            '/Users/Eric/Documents/EE695/specialProject/jsonFiles/user_buildings/%s_tweets_building.json'
            % user_id, 'w') as jsonout:
        with open(
                '/Users/Eric/Documents/EE695/specialProject/jsonFiles/user_timelines/%s_tweets.json'
                % user_id, 'r') as jsonin:
            content = json.load(jsonin)
            result = {
                "type": "FeatureCollection",
                "screen_name": content["screen_name"],
                "total_tweets": 0,
                "total_favorite_count": content["total_favorite_count"],
                "followers_count": content["followers_count"],
                "friends_count": content["friends_count"],
                "features": []
            }
            with open(
                    '/Users/Eric/Documents/EE695/specialProject/jsonFiles/buildings.json',
                    'r') as f:
                tweet = json.load(f)
                for data in content["tweets"]:
                    if data["coordinates"]:
                        # NOTE(review): GeoJSON order is [longitude, latitude];
                        # index 0 is named lat here — confirm match.match
                        # expects this ordering.
                        lat = data["coordinates"]["coordinates"][0]
                        lng = data["coordinates"]["coordinates"][1]  # renamed: `long` shadowed a builtin
                        features = {
                            "properties": {
                                "text": data["text"],
                                "retweet_count": data["retweet_count"],
                                "id": data["id"],
                                "created_at": data["created_at"],
                                "favorite_count": data["favorite_count"],
                            },
                            "geometry": {
                                "type": "Point",
                                "coordinates": [lat, lng]
                            },
                            "buildings": []
                        }

                        # attach every building footprint containing this point
                        for tw in tweet['features']:
                            coordinate = tw['geometry']['coordinates']
                            if match.match(lat, lng, coordinate):
                                features["buildings"].append(tw["properties"])
                        result["features"].append(features)
                        result["total_tweets"] = len(result["features"])
        jsonout.write(json.dumps(result, indent=4))
    # BUG FIX: dropped the trailing jsonout.close() — the `with` block has
    # already closed the file; the extra close was redundant.
Exemple #53
0
def parse(test_str):
    """Parse *test_str* into a Statement tree, or None if it doesn't match."""
    # identify the outermost (main) connector
    parsed = match.match(test_str)
    if parsed is None:
        return None
    connector, parts = parsed
    # build the statement recursively according to the connector type
    if connector == "lit":
        return statement.Statement(connector, parts)
    elif connector == "~":
        return statement.Statement(connector, parse(parts))
    else:
        return statement.Statement(connector, parse(parts[0]),
                                   parse(parts[1]))
Exemple #54
0
def test_match_supplying_cleaned_text_single_word():
    ''' Unit test for match.match() with user-supplied cleaned text, matching on a single word '''
    # text from https://www.nytimes.com/2019/09/24/science/cats-humans-bonding.html
    # original_text keeps the article's curly quotes/dashes; cleaned_text is
    # the same passage with (most) punctuation normalised to ASCII.
    original_text = """Dogs are man’s best friend. They’re sociable, faithful and obedient. Our relationship with cats, on the other hand, is often described as more transactional. Aloof, mysterious and independent, cats are with us only because we feed them.

Or maybe not. On Monday, researchers reported that cats are just as strongly bonded to us as dogs or infants, vindicating cat lovers across the land.

“I get that a lot — ‘Well, I knew that, I know that cats like to interact with me,’” said Kristyn Vitale, an animal behavior scientist at Oregon State University and lead author of the new study, published in Current Biology. “But in science, you don’t know that until you test it.”"""

    cleaned_text = """Dogs are man's best friend. They're sociable, faithful and obedient. Our relationship with cats, on the other hand, is often described as more transactional. Aloof, mysterious and independent, cats are with us only because we feed them.

Or maybe not. On Monday, researchers reported that cats are just as strongly bonded to us as dogs or infants, vindicating cat lovers across the land.

\"I get that a lot - 'Well, I knew that, I know that cats like to interact with me,'\" said Kristyn Vitale, an animal behavior scientist at Oregon State University and lead author of the new study, published in Current Biology. "But in science, you don’t know that until you test it.\""""
    # expected (start, end, text) character spans of "cats" in original_text
    gold = [(91, 95, 'cats'), (193, 197, 'cats'), (289, 293, 'cats'), (441, 445, 'cats')]
    current = match.match(original_text, "cats", clean_text=cleaned_text)
    eq_(current, gold)
Exemple #55
0
def hasfile(repo, subset, x):
    """``file(pattern)``
    Changesets affecting files matched by pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    m = None
    s = []
    for r in subset:
        c = repo[r]
        # 'set' patterns (filesets) depend on the changectx, so the matcher
        # must be rebuilt for every revision; other pattern kinds are built
        # once and reused.
        if not m or matchmod.patkind(pat) == 'set':
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        for f in c.files():
            if m(f):
                # one match is enough to include the revision; stop scanning
                s.append(r)
                break
    return s
    def test_negation(self):
        """Every negated formula should match as ("~", inner-formula)."""
        # formula -> expected inner sub-formula after stripping the negation
        expected_inner = {
          "~A": "A",
          "~(AvB)": "AvB",
          "~(A^B)": "A^B",
          "~(A->B)": "A->B",
          "~(A<->B)": "A<->B",
          "~(~A^~B)": "~A^~B",
          "~(Av(B^C))": "Av(B^C)"
        }

        for formula, inner in expected_inner.items():
            result = match(formula)
            self.assertIsNotNone(result, msg="match(%s) == None" % formula)
            connector, remainder = result
            self.assertEqual(connector, "~")
            self.assertEqual(remainder, inner)
Exemple #57
0
	def createMatch(self, __matchName, __matchPassword, __beatmapID, __beatmapName, __beatmapMD5, __gameMode, __hostUserID):
		"""
		Add a new match to matches list

		__matchName -- match name, string
		__matchPassword -- match md5 password. Leave empty for no password
		__beatmapID -- beatmap ID
		__beatmapName -- beatmap name, string
		__beatmapMD5 -- beatmap md5 hash, string
		__gameMode -- game mode ID. See gameModes.py
		__hostUserID -- user id of who created the match
		return -- match ID
		"""
		# Allocate the next sequential match ID, then register the match.
		# NOTE(review): lastID increment and dict insert are unsynchronized —
		# confirm callers serialize access if used from multiple threads.
		matchID = self.lastID
		self.lastID+=1
		self.matches[matchID] = match.match(matchID, __matchName, __matchPassword, __beatmapID, __beatmapName, __beatmapMD5, __gameMode, __hostUserID)
		return matchID