Esempio n. 1
0
 def get(self, user_id, max_return):
     """Return JSON-encoded match results for *user_id*.

     Connects to Postgres using the DB_NAME / DB_USER / DB_PASSWORD /
     DB_HOST environment variables.  On a connection failure the error
     text is returned as the response body (existing API behavior,
     preserved for compatibility).
     """
     try:
         oe = os.environ
         conn = psycopg2.connect(database=oe['DB_NAME'],
                                 user=oe['DB_USER'],
                                 password=oe['DB_PASSWORD'],
                                 host=oe['DB_HOST'])
     except Exception as e:
         return str(e)
     # Fix: close the connection when done -- previously it leaked on
     # every request.
     try:
         mc = Matcher(conn)
         return json.dumps(mc.query(user_id, max_return))
     finally:
         conn.close()
Esempio n. 2
0
 def get(self, user_id, max_return):
     """Look up matches for *user_id* and return them JSON-encoded.

     Database parameters come from the DB_* environment variables; if
     the connection attempt fails, the exception text is returned as
     the response body instead.
     """
     env = os.environ
     try:
         db_conn = psycopg2.connect(
             database=env['DB_NAME'],
             user=env['DB_USER'],
             password=env['DB_PASSWORD'],
             host=env['DB_HOST'],
         )
     except Exception as err:
         return str(err)
     return json.dumps(Matcher(db_conn).query(user_id, max_return))
Esempio n. 3
0
    def __init__(self, cfg):
        """Set up the detection / matching / tracking components.

        cfg must provide at least 'scale', 'w', 'h', 'K' (consumed by
        build_cfg) and 'pLK' (optical-flow params for the Tracker).
        """
        self.cfg_ = self.build_cfg(cfg)
        # FAST corners for detection, ORB descriptors for matching.
        self.detector_ = cv2.FastFeatureDetector_create(threshold=19,
                                                        nonmaxSuppression=True)
        self.extractor_ = cv2.ORB_create(2048, edgeThreshold=19)
        #self.extractor_ = cv2.xfeatures2d.SURF_create()
        self.matcher_ = Matcher(ex=self.extractor_)
        self.tracker_ = Tracker(pLK=cfg['pLK'])
        self.kf_ = build_ekf()
        self.db_ = self.build_db()
        self.state_ = PipelineState.INIT

        # higher-level handles?
        # NOTE(review): the initializer gets its own DB (build_db() is
        # called a second time), separate from self.db_ -- presumably
        # intentional so init state stays isolated; confirm.
        self.initializer_ = MapInitializer(db=self.build_db(),
                                           matcher=self.matcher_,
                                           tracker=self.tracker_,
                                           cfg=self.cfg_)
Esempio n. 4
0
    def __init__(self,
                 gpu_id=0,
                 track_model=None,
                 pose_model=None,
                 embedding_model=None):
        """Construct the tracker (optional), pose net and matcher.

        NOTE(review): reads self.tracker_flag (and the prints below use
        self.tracker_update_flag, self.descrease_tracker_flag and
        self.new_embedding_flag) which are not assigned in this method --
        presumably class attributes or set by a subclass; confirm before
        instantiating directly.
        NOTE(review): embedding_model is accepted but unused here.
        """
        if self.tracker_flag:
            self.tracker = SiamFCTracker(gpu_id, track_model)
        self.posenet = PoseNet(gpu_id, pose_model)

        self.matcher = Matcher()
        # Dump the flag configuration for debugging.
        print('----------------------------------------')
        print('Flag parameters are set as follow:')
        print('Tracker flag: {}'.format(self.tracker_flag))
        print('Tracker update flag: {}'.format(self.tracker_update_flag))
        print('Decrease tracker flag: {}'.format(self.descrease_tracker_flag))
        print('New embedding(with pose) flag: {}'.format(
            self.new_embedding_flag))
        print('----------------------------------------')
Esempio n. 5
0
def ontology_alignment(model, ontoTerms_a, ontoTerms_b, words, ceil = 0.5):
    """Align two ontologies' term lists via embeddings + stable matching.

    Args:
        model: embedding model exposing feedforward_function(x, m).
        ontoTerms_a, ontoTerms_b: paths to files with one term per line.
        words: vocabulary passed through to getSeq().
        ceil: maximum cosine distance for a matched pair to be kept.

    Returns:
        List of [term_a, term_b, distance] triples.  Terms that are
        identical after whitespace normalization are paired with
        distance 0.0 and excluded from the embedding-based matching.
    """
    with open(ontoTerms_a) as f:
        ontoText_a = f.readlines()
    with open(ontoTerms_b) as f:
        ontoText_b = f.readlines()
    # Remove whitespace characters like `\n` at the end of each line.
    ontoText_a = [x.strip() for x in ontoText_a]
    ontoText_b = [x.strip() for x in ontoText_b]

    whole = []
    # Fix: the original removed items from ontoText_a / ontoText_b while
    # iterating over those same lists, which silently skips elements
    # (and `except ValueError: pass` hid double removals).  Iterate over
    # snapshots and remove from the live lists instead.
    for text_a in list(ontoText_a):
        txt_a = re.sub(' +', ' ', text_a)
        for text_b in list(ontoText_b):
            txt_b = re.sub(' +', ' ', text_b)
            if txt_a == txt_b:
                whole.append([text_a, text_b, 0.0])
                ontoText_a.remove(text_a)
                ontoText_b.remove(text_b)
                break  # each term is matched at most once
    # Transform to Word & Mask vectors to apply "feedforward_function"
    ontoData_a = [getSeq(sentence, words) for sentence in ontoText_a]
    ontoData_b = [getSeq(sentence, words) for sentence in ontoText_b]
    x1, m1 = utils.prepare_data(ontoData_a)
    x2, m2 = utils.prepare_data(ontoData_b)
    OntoEmbg_a = model.feedforward_function(x1, m1)
    OntoEmbg_b = model.feedforward_function(x2, m2)
    # Compute the Cosine Distances:
    dist = cosine_distances(OntoEmbg_a, OntoEmbg_b)
    disT = np.transpose(dist)

    # Stable-marriage matching: rows of dist propose to columns.
    males = preferances(dist)
    females = preferances(disT)
    del disT
    match = Matcher(males, females)
    marriage = match()
    del males
    del females

    for key, value in marriage.items():
        man = ontoText_a[value]
        woman = ontoText_b[key]
        value = dist[value][key]
        # Keep only sufficiently close pairs.
        if value < ceil:
            whole.append([man, woman, value])
    return whole
Esempio n. 6
0
def do_match():
    """Print both preference tables, then run the stable matcher.

    Returns the mapping produced by calling the Matcher instance.
    """
    print("Mentors:")
    print(MR)
    print("Mentee:")
    print(ME)

    # Matcher pairs mentors with mentees given both preference lists.
    return Matcher(MR, ME)()
Esempio n. 7
0
 def __init__(
         self,
         gpu_id=(0, 0, 0, 0),
         flag=(True, False, True, False),
         track_model='/export/home/zby/SiamFC/models/output/siamfc_20.pth',
         detection_model='/export/home/zby/SiamFC/models/res101_old/pascal_voc/faster_rcnn_1_25_4379.pth',
         pose_model='/export/home/zby/SiamFC/data/models/final_new.pth.tar',
         embedding_model='/export/home/zby/SiamFC/models/embedding_model.pth'
 ):
     """Construct the selected sub-models.

     Args:
         gpu_id: per-component GPU ids, in order
             (tracker, detector, posenet, embedder).
         flag: which components to build, same order as gpu_id.
         track_model / detection_model / pose_model / embedding_model:
             checkpoint paths for the respective components.

     Fix: the default lists were replaced with tuples -- mutable default
     arguments are shared across calls; both are only indexed here, so
     tuples are a drop-in, backward-compatible replacement.
     """
     if flag[0]:
         self.tracker = SiamFCTracker(gpu_id[0], track_model)  #input RGB
     if flag[1]:
         self.detector = Detector(gpu_id[1], detection_model)  #input BGR
     if flag[2]:
         self.posenet = PoseNet(gpu_id[2], pose_model)  #input BGR
     if flag[3]:
         self.embedder = EmbeddingNet(gpu_id[3], embedding_model)
     self.matcher = Matcher()
     print('----------------------------------------')
 def __init__(
         self,
         gpu_id=0,
         track_model='/export/home/zby/SiamFC/models/output/siamfc_35_old.pth',
         #track_model = '/export/home/zby/SiamFC/data/models/siamfc_cpm_368_5.pth',
         pose_model='/export/home/zby/SiamFC/data/models/final_new.pth.tar',
         embedding_model='/export/home/zby/SiamFC/models/embedding_model.pth'
 ):
     """Build tracker / posenet / embedder / matcher on a single GPU.

     NOTE(review): reads self.tracker_flag and self.new_embedding_flag
     (and the prints below use self.tracker_update_flag and
     self.descrease_tracker_flag) which are not assigned in this
     method -- presumably class attributes or set earlier; confirm
     before calling directly.
     """
     if self.tracker_flag:
         self.tracker = SiamFCTracker(gpu_id, track_model)
     self.posenet = PoseNet(gpu_id, pose_model)
     if not self.new_embedding_flag:
         self.embedder = EmbeddingNet(gpu_id, embedding_model)
     self.matcher = Matcher()
     # Dump the flag configuration for debugging.
     print('----------------------------------------')
     print('Flag parameters are set as follow:')
     print('Tracker flag: {}'.format(self.tracker_flag))
     print('Tracker update flag: {}'.format(self.tracker_update_flag))
     print('Decrease tracker flag: {}'.format(self.descrease_tracker_flag))
     print('New embedding(with pose) flag: {}'.format(
         self.new_embedding_flag))
     print('----------------------------------------')
Esempio n. 9
0
 def get(self, user_id, max_return, needs):
     """Return JSON-encoded match results for *user_id*.

     NOTE(review): the *needs* parameter is currently ignored -- the
     query category is hard-coded to 'all' (the original call is kept
     below, commented out).  Confirm whether this is intentional.
     """
     mc = Matcher()
     # return json.dumps(mc.query(user_id, max_return, needs))
     return json.dumps(mc.query(user_id, max_return, 'all'))
Esempio n. 10
0
def main():
    """Train the matcher, apply it to the test pair, and write results."""
    matcher = Matcher()
    # Fit on the training pair plus its known matches, then score the
    # test pair and dump the output.
    matcher.fit(TRAIN_FILE0, TRAIN_FILE1, TRAIN_MATCH_FILE)
    matcher.transform(TEST_FILE0, TEST_FILE1)
    matcher.write(OUTPUT_FILE)
def setup(v_, j_, flipped):
    """Iteratively match volunteers to jobs with repeated stable matching.

    Args:
        v_: dict mapping volunteer name -> ordered job-preference list;
            the LAST element of each list is the volunteer's NA cutoff
            (index past which choices are forbidden).
        j_: dict mapping job name -> ordered volunteer-preference list,
            same trailing-NA convention.
        flipped: if True, emit ([job names], volunteer name) pairs
            instead of (volunteer name, [job names]).

    Returns:
        A jsonify() response of the accumulated assignments.

    NOTE(review): mutates v_ and j_ in place; heavy debug printing and
    commented-out experiments are kept as-is.
    """
    # the volunteers and their list of ordered job preferences
    # v_ = dict((m, prefs.split(', ')) for [m, prefs] in (
    #     line.rstrip().split(': ') for line in open('volunteers.short.txt')))
    # j_ = dict((m, prefs.split(', ')) for [m, prefs] in (
    #     line.rstrip().split(': ') for line in open('jobs.txt')))
    volunteers = list(v_.keys())
    jobs = list(j_.keys())

    # print('type v_["abe"]', type(v_['abe']), volunteers)
    # remove any missing job names from volunteers

    for v in volunteers:
        NA = v_[v][-1]
        v_[v] = list(filter(lambda j: j in jobs, v_[v][:-1]))
        v_[v].append(NA)

    # remove any missing volunteer names from jobs

    for j in jobs:
        NA = j_[j][-1]
        j_[j] = list(filter(lambda v: v in volunteers, j_[j][:-1]))
        j_[j].append(NA)

    # Re-key jobs as Person objects (name + integer NA cutoff); values
    # are still plain name lists at this point.
    J = {}
    prefs = v_[list(v_.keys())[0]]
    print('type(prefs)', type(prefs))

    # prefs = prefs_.split(', ')
    # NA = prefs[-1]
    for p in jobs:
        w__ = j_[p]
        J[Person(p, int(w__[-1]))] = w__[:-1]
    print(f'initial J keys {J.keys()}')

    # Re-key volunteers as Person objects; preference entries become
    # references to the Person-keyed jobs built above.
    V = {}
    prefs = j_[list(j_.keys())[0]]
    # prefs = prefs_.split(', ')
    for p in volunteers:
        m__ = v_.get(p, ['0'])
        person = Person(p, int(m__[-1]))
        V[person] = []
        for n in m__[:-1]:
            #            print('n', n)
            job = list(filter(lambda j: j.n == n, J.keys()))[0]
            V[person].append(job)

    # Likewise convert the job preference lists to Person references.
    for j, prefs in J.items():
        J[j] = []
        for n in prefs:
            volunteer = list(filter(lambda m: m.n == n, V.keys()))[0]
            J[j].append(volunteer)

    # for each volunteer construct a list of forbidden jobs
    forbidden = {}  # { 'dan': ['gay', 'eve', 'abi'], 'hal': ['eve'] }
    for v, prefs in V.items():
        NA = v.NA
        # all jobs at or over the NA index are forbidden
        forbidden[v] = prefs[NA:]
        # n = random.randint(0, len(prefs) - 1)
        # forbidden[m] = random.sample(prefs, n)  # random sample of n wives

    forbidden_v = {}  # { 'dan': ['gay', 'eve', 'abi'], 'hal': ['eve'] }
    for j, prefs in J.items():
        NA = j.NA
        # all volunteers at or over the NA index are forbidden
        forbidden_v[j] = prefs[NA:]

    # Main loop: run stable matching, record assignments in C, then
    # retry with whatever jobs remain unmatched until none are left.
    C = defaultdict(list)
    jKeys = set()
    loop = 0
    while len(J) > 0:
        print("V & J")
        print(V)
        print(J)
        match = Matcher(V, J, forbidden)  # , forbidden_v)

        # match volunteers and jobs; returns a mapping of jobs to volunteers
        matches = match()
        assert match.is_stable(matches)  # should be a stable matching
        print('stable match')

        print(f'loop {loop} list(matches keys) {list(matches.keys())}')
        loop += 1
        # if loop > 2:
        #     break
        # if len(C) == 0:
        #     C = dict((value, [key]) for key, value in enumerate(matches))
        #     print('Initial C.keys()', C.keys())
        # else:
        for _, key in enumerate(matches.items()):
            C[key[1]].append(key[0])
        print('Initial C.keys()', C.keys())
        print('Initial C.values()', C.values())
        jKeys |= set(matches.keys())

        print(f"len jKeys {len(jKeys)}  len(J) {len(J)} jKeys {jKeys}  ")
        # Keep only the jobs not yet matched in any round.
        J_ = copy.copy(J)
        J = {}
        for key, value in enumerate(J_.items()):
            print(
                f'J.items() key {key}  value[0] {value[0]}  type(value[0])  {type(value[0])} value[1] {value[1]}'
            )
            if value[0] in jKeys:
                print(f'value {value[0]} in jKeys)')
            else:
                print(f'value {value[0]} NOT in jKeys)')
                J[value[0]] = value[1]
        print(f'len filtered J {len(J)}  J {J}')
        if len(J) == 0:
            break
        # Prune volunteer preferences down to the still-unmatched jobs.
        V_ = copy.copy(V)
        for v, prefs in V_.items():
            # print(f'k,v in V k {k}  v {v}')
            prefs = [p for p in prefs if p in list(J.keys())]
            print(f'new prefs {prefs}')
            V[v] = prefs
        # V = {k: v for k, v in mydict.items() if k.startswith('foo')}

        # J = dict((key, value) in enumerate(J.items()))

        # J = dict((key, value) in enumerate(J.items()) if key not in jKeys)
        # J = dict(filter(lamba j, v: j not in jKeys, enumerate(J))
    # if len(J) > 0:
    #     print("len(J) > 0")
    #     V_ = sorted(C.items(), key=lambda kv: len(kv[1]))[:len(J)]
    #     match = Matcher(V_, J, forbidden, forbidden_v)

    #     # match volunteers and jobs; returns a mapping of jobs to volunteers
    #     matches = match()
    #     for key, value in enumerate(matches):
    #         C[value].append(key)

    # print('jobs', jobs)
    print('C.keys()', C.keys())
    print([(key, value) for key, value in enumerate(C)])
    # Serialize Person objects back to plain names for the response.
    if flipped:
        a = [([j.n for j in value[1]], value[0].n)
             for key, value in enumerate(C.items())]
    else:
        a = [(value[0].n, [j.n for j in value[1]])
             for key, value in enumerate(C.items())]
    # a = [(key.n, [j.n for j in C[key]]) for key in list(C.keys())]

    # a=[(matches[key].n, key.n) for key in list(matches.keys())]
    return jsonify(a)
Esempio n. 12
0
from match import Matcher

# The men and their list of ordered spousal preferences.
# Fix: use context managers so the preference files are closed -- the
# original open() handles were never closed.
with open('men.txt') as fp:
    M = dict((m, prefs.split(', '))
             for [m, prefs] in (line.rstrip().split(': ') for line in fp))

# the women and their list of ordered spousal preferences
with open('women.txt') as fp:
    W = dict((m, prefs.split(', '))
             for [m, prefs] in (line.rstrip().split(': ') for line in fp))

# initialize Matcher with preference lists for both men and women
match = Matcher(M, W)

# match men and women; returns a mapping of wives to husbands
wives = match()
assert match.is_stable()  # should be a stable matching

# swap the husbands of two wives, which should make the matching unstable
wives['fay'], wives['gay'] = wives['gay'], wives['fay']

assert match.is_stable(wives) is False  # should not be a stable matching

# with the perturbed matching we find that gav's marriage to fay is unstable:
#
#   * gav prefers gay over fay
#   * gay prefers gav over her current husband dan
Esempio n. 13
0
class Pipeline(object):
    def __init__(self, cfg):
        """Set up the detection / matching / tracking components.

        cfg must provide at least 'scale', 'w', 'h', 'K' (consumed by
        build_cfg) and 'pLK' (optical-flow params for the Tracker).
        """
        self.cfg_ = self.build_cfg(cfg)
        # FAST corners for detection, ORB descriptors for matching.
        self.detector_ = cv2.FastFeatureDetector_create(threshold=19,
                                                        nonmaxSuppression=True)
        self.extractor_ = cv2.ORB_create(2048, edgeThreshold=19)
        #self.extractor_ = cv2.xfeatures2d.SURF_create()
        self.matcher_ = Matcher(ex=self.extractor_)
        self.tracker_ = Tracker(pLK=cfg['pLK'])
        self.kf_ = build_ekf()
        self.db_ = self.build_db()
        self.state_ = PipelineState.INIT

        # higher-level handles?
        # NOTE(review): the initializer gets its own DB (build_db() is
        # called a second time), separate from self.db_ -- presumably
        # intentional so init state stays isolated; confirm.
        self.initializer_ = MapInitializer(db=self.build_db(),
                                           matcher=self.matcher_,
                                           tracker=self.tracker_,
                                           cfg=self.cfg_)

    def build_cfg(self, cfg):
        # build derived values

        # apply scale
        w = int(cfg['scale'] * cfg['w'])
        h = int(cfg['scale'] * cfg['h'])
        K0 = cfg['K']
        K = cfg['scale'] * cfg['K']
        K[2, 2] = 1.0

        # image shape
        shape = (h, w, 3)  # TODO : handle monochrome

        # first, make a copy from argument
        cfg = dict(cfg)

        # insert derived values
        cfg['w'] = w
        cfg['h'] = h
        cfg['shape'] = shape
        cfg['K0'] = K0
        cfg['K'] = K

        # create dynamic type
        #ks = cfg.keys()
        #cfg_t = namedtuple('PipelineConfig', ks)
        # setup dot-referenced aliases
        # for k, v in cfg.iteritems():
        #    setattr(cfg, k, v)
        return cfg

    def build_db(self):
        """Allocate a DB sized for this pipeline's images and descriptors."""
        cfg = self.cfg_
        extractor = self.extractor_
        # Descriptor dtype mirrors what the extractor emits.
        if extractor.descriptorType() == cv2.CV_8U:
            dsc_dtype = np.uint8
        else:
            dsc_dtype = np.float32
        return DB(img_fmt=(cfg['shape'], np.uint8),
                  dsc_fmt=(extractor.descriptorSize(), dsc_dtype))

    def motion_model(self, f0, f1, stamp, use_kalman=False):
        """Predict the pose (and covariance) at *stamp* from frames f0, f1.

        Args:
            f0, f1: consecutive frame records providing 'pose', 'cov'
                and (for the Kalman path) 'stamp'.
            stamp: target timestamp of the prediction.
            use_kalman: if True, run the EKF predict/update cycle;
                otherwise apply a constant-motion ("repetition") model.

        Returns:
            (x, P): predicted state vector and covariance.

        Fix: removed the unused locals R0/R1 (dead tx.euler_matrix
        calls that were never read).
        """
        if not use_kalman:
            # simple `repetition` model: assume the inter-frame motion
            # Tv repeats, i.e. T2 = Tv * T1 where Tv * T0 = T1.
            txn0, rxn0 = f0['pose'][L_POS], f0['pose'][A_POS]
            txn1, rxn1 = f1['pose'][L_POS], f1['pose'][A_POS]

            T0 = tx.compose_matrix(angles=rxn0, translate=txn0)
            T1 = tx.compose_matrix(angles=rxn1, translate=txn1)

            Tv = np.dot(T1, vm.inv(T0))  # Tv * T0 = T1
            T2 = np.dot(Tv, T1)

            txn = tx.translation_from_matrix(T2)
            rxn = tx.euler_from_matrix(T2)

            x = f1['pose'].copy()
            P = f1['cov'].copy()
            x[0:3] = txn
            x[9:12] = rxn
            return x, P
        else:
            # dt MUST NOT BE None
            self.kf_.x = f0['pose']
            self.kf_.P = f0['cov']
            dt = (f1['stamp'] - f0['stamp'])
            self.kf_.predict(dt)

            # Correct with f1's observed pose, then predict to *stamp*.
            txn, rxn = f1['pose'][L_POS], f1['pose'][A_POS]
            z = np.concatenate([txn, rxn])
            self.kf_.update(z)
            dt = (stamp - f1['stamp'])
            self.kf_.predict(dt)
            return self.kf_.x.copy(), self.kf_.P.copy()

    def is_keyframe(self, frame):
        # TODO : more robust keyframe heuristic
        # == possibly filter for richness of tracking features?
        feat = (frame['feat']).item()
        return len(feat.kpt) > 100  # TODO: arbitrary threshold

    def build_frame(self, img, stamp):
        """Assemble a frame record for *img* at time *stamp*.

        Detects FAST keypoints, computes descriptors, predicts the pose
        with the motion model (once at least two frames exist), and
        packs everything into a record matching self.db_.frame.dtype:
        (index, stamp, image, pose x, cov P, is_kf, feat).
        """
        # automatic index assignment
        # NOTE: multiple frames will have the same index
        # if not appended to self.db_.frame
        # TODO : separate out feature processing parts?
        index = self.db_.frame.size

        # by default, not a keyframe
        is_kf = False

        # extract features
        kpt = self.detector_.detect(img)
        kpt, dsc = self.extractor_.compute(img, kpt)
        # kpt, dsc = self.extractor_.detectAndCompute(img, None)
        feat = Feature(kpt, dsc, cv2.KeyPoint.convert(kpt))

        # apply motion model? initialize pose anyway
        # NOTE(review): `self.db_.frame.size` above vs `self.db_.frame_`
        # below -- presumably aliases on the DB object; confirm.
        if self.db_.frame_.size >= 2:
            print('motion-model')
            x, P = self.motion_model(f0=self.db_.frame_[-2],
                                     f1=self.db_.frame_[-1],
                                     stamp=stamp,
                                     use_kalman=True)
        else:
            # no motion history yet: zero pose, tiny prior covariance
            x = np.zeros(self.cfg_['state_size'])
            P = 1e-6 * np.eye(self.cfg_['state_size'])

        frame = (index, stamp, img, x, P, is_kf, feat)
        res = np.array(frame, dtype=self.db_.frame.dtype)
        return res

    def transition(self, new_state):
        print('[state] ({} -> {})'.format(self.state_, new_state))
        self.state_ = new_state

    def init_map(self, img, stamp, data):
        """Initialize the map from the incoming frame.

        Feeds the new frame to the MapInitializer; on success, merges
        the initializer's DB into self.db_, switches the pipeline to
        TRACK, builds debug visualizations into *data* ('viz',
        'cld_viz', 'col_viz'), and resets the initializer.
        Returns None either way; *data* is mutated in place.
        """
        # fetch prv+cur frames
        # populate frame from motion model
        frame = self.build_frame(img, stamp)
        suc = self.initializer_.compute(frame, data)
        # print(data['dbg-tv'])
        if not suc:
            return

        self.db_.extend(self.initializer_.db_)
        self.transition(PipelineState.TRACK)

        #print self.db_.landmark_['pos'][self.db_.landmark_['tri']][:5]
        #print self.initializer_.db_.landmark_['pos'][self.db_.landmark_['tri']][:5]

        # Debug visualization block (always on; `if True` kept from the
        # original so it is easy to toggle off).
        if True:
            frame0 = self.db_.frame_[0]
            frame1 = self.db_.frame_[1]
            img0, img1 = frame0['image'], frame1['image']
            feat0, feat1 = frame0['feat'], frame1['feat']
            pt0m, pt1m = feat0.pt[data['mi0']], feat1.pt[data['mi1']]
            msk = data['msk_cld']

            print('frame pair : {}-{}'.format(frame0['index'],
                                              frame1['index']))

            viz0 = cv2.drawKeypoints(img0, feat0.kpt, None)
            viz1 = cv2.drawKeypoints(img1, feat1.kpt, None)
            viz = draw_matches(viz0, viz1, pt0m[msk], pt1m[msk])
            data['viz'] = viz

            # == if cfg['dbg-cloud']:
            # Dense reconstruction for visualization; drop the farthest
            # 5% of points to keep the cloud display compact.
            dr_data = {}
            cld_viz, col_viz = DenseRec(self.cfg_['K']).compute(img0,
                                                                img1,
                                                                P1=data['P0'],
                                                                P2=data['P1'],
                                                                data=dr_data)
            cdist = vm.norm(cld_viz)
            data['cld_viz'] = cld_viz[cdist < np.percentile(cdist, 95)]
            data['col_viz'] = col_viz[cdist < np.percentile(cdist, 95)]
        self.initializer_.reset()
        return

    def bundle_adjust(self, frame0, frame1):
        """Run windowed bundle adjustment ending at *frame1*.

        The window covers at most the last 8 frames (never earlier than
        frame0).  Observations in the window are gathered, frame and
        landmark indices are remapped to a compact range, and on success
        the optimized poses and landmark positions are written back into
        self.db_ in place.
        """
        idx0, idx1 = max(frame0['index'], frame1['index'] - 8), frame1['index']
        #idx0, idx1 = keyframe['index'], frame1['index']
        obs = self.db_.observation
        msk = np.logical_and(idx0 <= obs['src_idx'], obs['src_idx'] <= idx1)

        # parse observation
        i_src = obs['src_idx'][msk]
        #print('i_src', i_src)
        i_lmk = obs['lmk_idx'][msk]
        p_obs = obs['point'][msk]

        # index pruning relevant sources
        i_src_alt, i_a2s, i_s2a = index_remap(i_src)
        i_lmk_alt, i_a2l, i_l2a = index_remap(i_lmk)

        # 1. select targets based on new index
        i_src = i_s2a
        i_lmk = i_l2a
        frames = self.db_.frame[i_a2s[i_src_alt]]
        landmarks = self.db_.landmark[i_a2l[i_lmk_alt]]

        # parse data
        txn = frames['pose'][:, L_POS]
        rxn = frames['pose'][:, A_POS]
        lmk = landmarks['pos']

        data_ba = {}
        # NOTE : txn/rxn will be internally inverted to reduce duplicate compute.
        suc = BundleAdjustment(
            i_src,
            i_lmk,
            p_obs,  # << observation
            txn,
            rxn,
            lmk,
            self.cfg_['K'],
            axa=True).compute(data=data_ba)  # << data

        if suc:
            # TODO : apply post-processing kalman filter?
            #print('{}->{}'.format(txn, data_ba['txn']))
            #print('{}->{}'.format(rxn, data_ba['rxn']))
            # Write the optimized results back through the same remapped
            # indices used to gather them.
            txn = data_ba['txn']
            rxn = data_ba['rxn']
            lmk = data_ba['lmk']
            self.db_.frame['pose'][i_a2s[i_src_alt], L_POS] = txn
            self.db_.frame['pose'][i_a2s[i_src_alt], A_POS] = rxn
            self.db_.landmark['pos'][i_a2l[i_lmk_alt]] = lmk

    @profile(sort='cumtime')
    def track(self, img, stamp, data={}):
        """ Track landmarks"""
        # unroll data
        # fetch frame pair
        # TODO : add landmarks along the way
        # TODO : update landmarks through optimization
        mapframe = self.db_.keyframe[0]  # first keyframe = map frame
        keyframe = self.db_.keyframe[-1]  # last **keyframe**
        frame0 = self.db_.frame[-1]  # last **frame**
        frame1 = self.build_frame(img, stamp)
        # print('prior position',
        #         frame1['pose'][L_POS], frame1['pose'][A_POS])
        landmark = self.db_.landmark

        img1 = frame1['image']
        feat1 = frame1['feat'].item()

        # bypass match_local for already tracking points ...
        pt0_l = landmark['pt'][landmark['track']]
        pt1_l, msk_t = self.tracker_.track(frame0['image'],
                                           img1,
                                           pt0_l,
                                           return_msk=True)

        # apply tracking mask
        pt0_l = pt0_l[msk_t]
        pt1_l = pt1_l[msk_t]

        # update tracking status
        landmark['track'][landmark['track'].nonzero()[0][~msk_t]] = False
        landmark['pt'][landmark['track']] = pt1_l

        # search additional points
        cld0_l = landmark['pos'][~landmark['track']]
        dsc_l = landmark['dsc'][~landmark['track']]

        msk_prj = None
        if len(cld0_l) >= 128:
            # merge with projections
            pt0_cld_l = project_to_frame(cld0_l,
                                         source_frame=mapframe,
                                         target_frame=frame1,
                                         K=self.cfg_['K'],
                                         D=self.cfg_['D'])

            # in-frame projection mask
            msk_prj = np.logical_and.reduce([
                0 <= pt0_cld_l[..., 0],
                pt0_cld_l[..., 0] < self.cfg_['w'],
                0 <= pt0_cld_l[..., 1],
                pt0_cld_l[..., 1] < self.cfg_['h'],
            ])

        pt0 = pt0_l
        pt1 = pt1_l
        cld0 = landmark['pos'][landmark['track']]
        obs_lmk_idx = landmark['index'][landmark['track']]

        search_map = (False and len(cld0) <= 256 and (msk_prj is not None)
                      and (msk_prj.sum() >= 16))

        if search_map:
            # sample points from the map
            mi0, mi1 = match_local(
                pt0_cld_l[msk_prj],
                feat1.pt,
                dsc_l[msk_prj],
                feat1.dsc,
                hamming=(not feat1.dsc.dtype == np.float32),
            )

            # collect all parts
            pt0 = np.concatenate([pt0, pt0_cld_l[msk_prj][mi0]], axis=0)
            pt1 = np.concatenate([pt1, feat1.pt[mi1]], axis=0)
            cld0 = np.concatenate(
                [cld0, landmark['pos'][~landmark['track']][msk_prj][mi0]],
                axis=0)

            obs_lmk_idx = np.concatenate([
                obs_lmk_idx,
                landmark['index'][~landmark['track']][msk_prj][mi0]
            ],
                                         axis=0)

        # debug ...
        # pt_dbg = project_to_frame(
        #        landmark['pos'][landmark['track']],
        #        frame1,
        #        self.cfg_['K'], self.cfg_['D'])
        ##img_dbg = draw_points(img1.copy(), pt_dbg, color=(255,0,0) )
        ##draw_points(img_dbg, pt1, color=(0,0,255) )
        #img_dbg = draw_matches(img1, img1, pt_dbg, pt1)
        #cv2.imshow('dbg', img_dbg)
        #print_ratio(len(pt0_l), len(pt0), name='point source')

        # if len(mi0) <= 0:
        #    viz1 = draw_points(img1.copy(), pt0)
        #    viz2 = draw_points(img1.copy(), feat1.pt)
        #    viz = np.concatenate([viz1, viz2], axis=1)
        #    cv2.imshow('pnp', viz)
        #    return False

        #print_ratio(len(mi0), len(pt0))
        # suc, rvec, tvec = cv2.solvePnP(
        #        cld0[:, None], pt1[:, None],
        #        self.cfg_['K'], self.cfg_['D'],
        #        flags = cv2.SOLVEPNP_EPNP
        #        ) # T(rv,tv) . cld = cam
        #inl = None
        #print 'euler', tx.euler_from_matrix(cv2.Rodrigues(rvec)[0])

        T_i = tx.inverse_matrix(
            tx.compose_matrix(translate=frame1['pose'][L_POS],
                              angles=frame1['pose'][A_POS]))
        rvec0 = cv2.Rodrigues(T_i[:3, :3])[0]
        tvec0 = T_i[:3, 3:]

        if len(pt1) >= 1024:
            # prune
            nmx_idx = non_max(pt1, landmark['rsp'][obs_lmk_idx])
            print_ratio(len(nmx_idx), len(pt1), name='non_max')
            cld_pnp, pt1_pnp = cld0[nmx_idx], pt1[nmx_idx]
        else:
            cld_pnp, pt1_pnp = cld0, pt1

        # hmm ... pose-only BA vs. PnP
        if False:
            data_pnp = {}
            #print('txn-prv', frame0['pose'][L_POS])
            #print('rxn-prv', frame0['pose'][A_POS])
            #print('txn-in', frame1['pose'][L_POS])
            #print('rxn-in', frame1['pose'][A_POS])
            suc = BundleAdjustment(i_src=np.full(len(cld_pnp), 0),
                                   i_lmk=np.arange(len(cld_pnp)),
                                   p_obs=pt1_pnp,
                                   txn=frame1['pose'][L_POS][None, ...],
                                   rxn=frame1['pose'][A_POS][None, ...],
                                   lmk=cld_pnp,
                                   K=self.cfg_['K'],
                                   pose_only=True).compute(crit=dict(
                                       loss='soft_l1',
                                       xtol=1e-8,
                                       f_scale=np.sqrt(5.991)),
                                                           data=data_pnp)
            #print('txn-out', data_pnp['txn'][0])
            #print('rxn-out', data_pnp['rxn'][0])
            T = tx.compose_matrix(translate=data_pnp['txn'][0],
                                  angles=data_pnp['rxn'][0])
            Ti = tx.inverse_matrix(T)
            rxn_pnp = np.float32(tx.euler_from_matrix(Ti))
            txn_pnp = tx.translation_from_matrix(Ti)
            rvec = cv2.Rodrigues(Ti[:3, :3])[0]
            tvec = txn_pnp
            prj = cvu.project_points(cld_pnp, rvec, tvec, self.cfg_['K'],
                                     self.cfg_['D'])
            err = vm.norm(prj - pt1_pnp)
            inl = np.where(err <= 1.0)[0]
        else:
            # == if(cfg['dbg_pnp']):
            #print 'frame1-pose', frame1['pose']
            # dbg = draw_matches(img1, img1,
            #        project_to_frame(cld_pnp, source_frame=mapframe, target_frame=frame1,
            #        K=self.cfg_['K'], D=self.cfg_['D']),
            #        pt1_pnp)
            #cv2.imshow('pnp', dbg)
            # cv2.waitKey(0)

            suc, rvec, tvec, inl = cv2.solvePnPRansac(
                cld_pnp[:, None],
                pt1_pnp[:, None],
                self.cfg_['K'],
                self.cfg_['D'],
                useExtrinsicGuess=True,
                rvec=rvec0,
                tvec=tvec0,
                iterationsCount=1024,
                reprojectionError=1.0,
                confidence=0.999,
                flags=cv2.SOLVEPNP_EPNP
                # flags=cv2.SOLVEPNP_DLS
                # flags=cv2.SOLVEPNP_ITERATIVE
                # minInliersCount=0.5*_['pt0']
            )

        n_pnp_in = len(cld_pnp)
        n_pnp_out = len(inl) if (inl is not None) else 0
        #print 'inl', inl
        print n_pnp_in
        print n_pnp_out

        suc = (suc and (inl is not None)
               and (n_pnp_out >= 128 or n_pnp_out >= 0.25 * n_pnp_in))
        print('pnp success : {}'.format(suc))
        if inl is not None:
            print_ratio(n_pnp_out, n_pnp_in, name='pnp')

        # visualize match statistics
        viz_pt0 = project_to_frame(
            cld0,
            source_frame=mapframe,
            target_frame=keyframe,  # TODO: keyframe may no longer be true?
            K=self.cfg_['K'],
            D=self.cfg_['D'])
        viz_msk = np.logical_and.reduce([
            0 <= viz_pt0[:, 0],
            viz_pt0[:, 0] < self.cfg_['w'],
            0 <= viz_pt0[:, 1],
            viz_pt0[:, 1] < self.cfg_['h'],
        ])
        viz1 = draw_points(img1.copy(), feat1.pt)
        viz = draw_matches(keyframe['image'], viz1, viz_pt0[viz_msk],
                           pt1[viz_msk])
        data['viz'] = viz

        # obtained position!
        R = cv2.Rodrigues(rvec)[0]
        t = np.float32(tvec)
        R, t = vm.Rti(R, t)
        rxn = np.reshape(tx.euler_from_matrix(R), 3)
        txn = t.ravel()

        if suc:
            # print('pnp-txn', t)
            # print('pnp-rxn', tx.euler_from_matrix(R))
            # motion_update()
            if self.cfg_['kalman']:
                # kalman_update()
                self.kf_.x = frame0['pose']
                self.kf_.P = frame0['cov']
                self.kf_.predict(frame1['stamp'] - frame0['stamp'])
                self.kf_.update(np.concatenate([txn, rxn]))
                frame1['pose'] = self.kf_.x
                frame1['cov'] = self.kf_.P
            else:
                # hard_update()
                frame1['pose'][L_POS] = t.ravel()
                frame1['pose'][A_POS] = tx.euler_from_matrix(R)

            self.db_.observation.extend(
                dict(
                    # observation frame source
                    src_idx=np.full_like(obs_lmk_idx, frame1['index']),
                    lmk_idx=obs_lmk_idx,  # landmark index
                    point=pt1))
        self.db_.frame.append(frame1)
        x = 1

        need_kf = np.logical_or.reduce([
            not suc,  # PNP failed -- try new keyframe
            # PNP was decent but would be better to have a new frame
            suc and (n_pnp_out < 128),
            # = frame is somewhat stale
            (frame1['index'] - keyframe['index'] > 32) and (msk_t.sum() < 256)
        ]) and self.is_keyframe(frame1)
        #need_kf = False

        # ?? better criteria for running BA?
        run_ba = (frame1['index'] % 8) == 0
        #run_ba = False
        #run_ba = need_kf

        if run_ba:
            self.bundle_adjust(keyframe, frame1)

        if need_kf:
            for index in reversed(range(keyframe['index'], frame1['index'])):
                feat0, feat1 = self.db_.frame[index]['feat'], frame1[
                    'feat'].item()
                mi0, mi1 = self.matcher_.match(feat0.dsc,
                                               feat1.dsc,
                                               lowe=0.8,
                                               fold=False)
                data_tv = {}
                suc_tv, det_tv = TwoView(feat0.pt[mi0], feat1.pt[mi1],
                                         self.cfg_['K']).compute(data=data_tv)

                if suc_tv:
                    print('======================= NEW KEYFRAME ===')
                    xfm0 = pose_to_xfm(self.db_.frame[index]['pose'])
                    xfm1 = pose_to_xfm(frame1['pose'])
                    scale_ref = np.linalg.norm(
                        tx.translation_from_matrix(vm.Ti(xfm1).dot(xfm0)))
                    scale_tv = np.linalg.norm(data_tv['t'])
                    # TODO : does NOT consider "duplicate" landmark identities

                    # IMPORTANT: frame1  is a `copy` of "last_frame"
                    #frame1['is_kf'] = True
                    self.db_.frame[-1]['is_kf'] = True

                    lmk_idx0 = self.db_.landmark.size
                    print('lmk_idx0', lmk_idx0)
                    msk_cld = data_tv['msk_cld']
                    cld1 = data_tv['cld1'][msk_cld] * (scale_ref / scale_tv)
                    cld = transform_cloud(
                        cld1,
                        source_frame=frame1,
                        target_frame=mapframe,
                    )
                    col = extract_color(frame1['image'],
                                        feat1.pt[mi1][msk_cld])

                    local_map = dict(
                        index=lmk_idx0 + np.arange(len(cld)),  # landmark index
                        src=np.full(len(cld), frame1['index']),  # source index
                        dsc=feat1.dsc[mi1][msk_cld],  # landmark descriptor
                        rsp=[
                            feat1.kpt[i].response
                            for i in np.arange(len(feat1.kpt))[mi1][msk_cld]
                        ],  # response "strength"

                        # tracking point initialization
                        pt0=feat1.pt[mi1][msk_cld],
                        invd=1.0 / cld1[..., 2],
                        depth=cld1[..., 2],
                        pos=cld,  # landmark position [[ map frame ]]
                        # tracking status
                        track=np.ones(len(cld), dtype=np.bool),

                        # tracking point initialization
                        pt=feat1.pt[mi1][msk_cld],
                        tri=np.ones(len(cld), dtype=np.bool),
                        col=col,  # debug : point color information
                    )
                    # hmm?
                    self.db_.landmark.extend(local_map)
                    break
            else:
                print('Attempted new keyframe but failed')

    def process(self, img, stamp, data=None):
        """Dispatch one incoming frame according to the pipeline state.

        img   : input image for this frame.
        stamp : timestamp of the frame.
        data  : optional dict that downstream stages fill with debug /
                visualization output (e.g. a 'viz' image).
        Returns whatever the state handler returns, or None when IDLE.

        BUG FIX: the old signature used a mutable default (`data={}`);
        downstream code writes into this dict, so the shared default
        would leak entries across unrelated calls.
        """
        if data is None:
            data = {}
        if self.state_ == PipelineState.IDLE:
            return
        if self.state_ == PipelineState.INIT:
            # map initialization phase (bootstraps the landmark database)
            return self.init_map(img, stamp, data)
        elif self.state_ == PipelineState.TRACK:
            # normal frame-to-frame tracking phase
            return self.track(img, stamp, data)

    def save(self, path):
        """Persist the pipeline configuration and backing database.

        path : output directory; created if it does not exist.
        Writes `config.npy` (the cfg dict) and delegates the rest to
        the database object's own `save`.
        """
        # exist_ok avoids the check-then-create race of the previous
        # `if not os.path.exists(path): os.makedirs(path)` pattern.
        os.makedirs(path, exist_ok=True)
        np.save(os.path.join(path, 'config.npy'), self.cfg_)
        self.db_.save(path)
Esempio n. 14
0
from match import Matcher


def _read_prefs(path):
    """Load "name: a, b, c" preference lines into {name: [a, b, c]}.

    Uses a context manager so the file handle is closed deterministically
    (the original `open(...)` expressions were never closed).
    """
    with open(path) as fh:
        return dict((name, choices.split(", "))
                    for name, choices in (line.rstrip().split(": ")
                                          for line in fh))


# the men and their list of ordered spousal preferences
M = _read_prefs("men.txt")

# the women and their list of ordered spousal preferences
W = _read_prefs("women.txt")

# initialize Matcher with preference lists for both men and women
match = Matcher(M, W)

# match men and women; returns a mapping of wives to husbands
wives = match()
assert match.is_stable()  # should be a stable matching

# swap the husbands of two wives, which should make the matching unstable
wives["fay"], wives["gay"] = wives["gay"], wives["fay"]

assert match.is_stable(wives) is False  # should not be a stable matching

# with the perturbed matching we find that gav's marriage to fay is unstable:
#
#   * gav prefers gay over fay
#   * gay prefers gav over her current husband dan
Esempio n. 15
0
class Track_And_Detect:
    """Multi-person tracking pipeline combining SiamFC tracking, box
    detection, pose estimation and appearance embeddings, with several
    alternative detection/track association strategies as methods."""

    # minimum tracker score for a tracked box to be trusted as a detection
    effective_track_thresh = 0.55
    # minimum detector score required before a new track id is created
    effective_detection_thresh = 0.4

    # a keypoint counts as "effective" above this confidence ...
    effective_keypoints_thresh = 0.6
    # ... and at least this many effective keypoints gate new-id creation
    effective_keypoints_number = 8

    iou_match_thresh = 0.5  # min IoU to associate a detection with a track
    embedding_match_thresh = 2  # max embedding distance for a match
    nms_thresh = 0.5  # box-IoU NMS threshold
    oks_thresh = 0.8  # OKS-NMS threshold for duplicate-pose suppression

    def __init__(
            self,
            gpu_id=(0, 0, 0, 0),
            flag=(True, False, True, False),
            track_model='/export/home/zby/SiamFC/models/output/siamfc_20.pth',
            detection_model='/export/home/zby/SiamFC/models/res101_old/pascal_voc/faster_rcnn_1_25_4379.pth',
            pose_model='/export/home/zby/SiamFC/data/models/final_new.pth.tar',
            embedding_model='/export/home/zby/SiamFC/models/embedding_model.pth'
    ):
        """Instantiate the requested sub-models.

        gpu_id  : GPU index per sub-model, ordered
                  (tracker, detector, posenet, embedder).
        flag    : which sub-models to build, same order as `gpu_id`.
        *_model : checkpoint path for the corresponding network.

        The list defaults were replaced with tuples: mutable default
        arguments are shared across calls, a classic Python pitfall
        (benign today since they are never mutated, but fragile).
        """
        if flag[0]:
            self.tracker = SiamFCTracker(gpu_id[0], track_model)  # input RGB
        if flag[1]:
            self.detector = Detector(gpu_id[1], detection_model)  # input BGR
        if flag[2]:
            self.posenet = PoseNet(gpu_id[2], pose_model)  # input BGR
        if flag[3]:
            self.embedder = EmbeddingNet(gpu_id[3], embedding_model)
        self.matcher = Matcher()
        print('----------------------------------------')

    # called once on the first frame of each video
    def init_tracker(self, frame, bbox_list):
        """Reset the id database and seed it from the first frame.

        frame     : BGR image.
        bbox_list : detections as [x1, y1, x2, y2, score].
        Returns [x1, y1, x2, y2, score, id] per created track.
        """
        self.new_id_flag = 0
        self.track_id_dict = {}
        # convert BGR (OpenCV) to RGB for the tracker side
        rgb_frame = frame[:, :, ::-1]
        # suppress duplicate person boxes via OKS-NMS before seeding ids
        filtered, _keypoints = self.oks_filter(bbox_list, frame)
        for det in filtered:
            self.create_id(frame, rgb_frame, det)
        return [
            entry['bbox_and_score'] + [tid]
            for tid, entry in self.track_id_dict.items()
        ]

    def oks_filter(self, det_list, frame):
        """Pose every detection and suppress duplicates with OKS-NMS.

        det_list : detections as [x1, y1, x2, y2, score].
        frame    : BGR image.
        Returns (kept detections, kept keypoint dicts), index-aligned.
        """
        keypoint_list = []
        for bbox in det_list:
            # box area in pixels, recovered from the center/scale encoding
            center, scale = self.posenet.x1y1x2y2_to_cs(bbox[0:4])
            area = np.prod(scale * 200, 1)
            pose_positions, pose_vals, pose_heatmaps = self.pose_detect(
                frame, bbox)
            # pack the 15 keypoints as rows of (x, y, confidence)
            pred = np.zeros((15, 3), dtype=np.float32)
            pred[:, 0:2] = pose_positions
            pred[:, 2] = pose_vals
            # rescore the box by the mean confidence of its "good" keypoints
            good = [pose_vals[i] for i in range(15) if pose_vals[i] >= 0.2]
            new_score = (sum(good) / len(good) * bbox[4]) if good else 0
            keypoint_list.append({
                'score': new_score,
                'area': area,
                'keypoints': pred,
            })
        keep = self.matcher.oks_nms(keypoint_list, thresh=self.oks_thresh)
        kept_dets = [det_list[i] for i in keep]
        kept_keypoints = [keypoint_list[i] for i in keep]
        return kept_dets, kept_keypoints

    def create_id(self, frame, rgb_frame, bbox):
        """Register a brand-new track for `bbox` = [x1, y1, x2, y2, score].

        The new id is the running counter `new_id_flag`, which is then
        advanced; the entry stores the box, its appearance embedding,
        a frame counter and the liveness flag.
        """
        coords, score = bbox[0:4], bbox[4]
        new_id = self.new_id_flag
        self.new_id_flag = new_id + 1
        # appearance embedding of the crop (computed on the BGR frame)
        feature = self.embedding(frame, coords)
        self.track_id_dict[new_id] = dict(
            bbox_and_score=coords + [score],
            feature=feature,
            frame_flag=1,
            exist=True,
        )

    def update_id(self, frame, rgb_frame, det_bbox, track_id):
        """Refresh an existing track with a matched detection.

        det_bbox : [x1, y1, x2, y2, score] of the matched detection.
        track_id : key into `track_id_dict`.
        The stored embedding is simply replaced by the new crop's
        embedding (running-average variants exist only in the history),
        the frame counter is advanced and the track is marked alive.
        """
        coords = det_bbox[0:4]
        new_feature = np.array(self.embedding(frame, coords)).tolist()
        previous = self.track_id_dict[track_id]
        self.track_id_dict[track_id] = dict(
            bbox_and_score=det_bbox,
            feature=new_feature,
            frame_flag=previous['frame_flag'] + 1,
            exist=True,
        )

    def multi_track(self, frame):
        """Advance every live track one frame with the SiamFC tracker.

        frame : BGR image (converted to RGB for the tracker).
        Returns [x1, y1, x2, y2, score, id] per still-existing track;
        also caches the raw tracker box under each entry's 'bbox' key.
        """
        rgb = frame[:, :, ::-1]
        results = []
        for tid, entry in self.track_id_dict.items():
            if not entry['exist']:
                continue
            bbox, score = self.tracker.track_id(rgb, tid)
            entry['bbox'] = bbox
            results.append(bbox + [score, tid])
        return results

    def match_and_track_embedding(self, detections, track_list, frame):
        """Associate detections with tracks: IoU first, then embeddings.

        detections : list of [x1, y1, x2, y2, score]
        track_list : list of [x1, y1, x2, y2, score, id] (tracker output)
        frame      : BGR image
        Returns [x1, y1, x2, y2, score, id] for every surviving track.

        Unlike the `match_detection_*` variants, tracks unmatched by both
        stages are only marked `exist=False` and kept in the database.
        """
        rgb_frame = frame[:, :, ::-1]
        matches, unmatched_detections, unmatched_trackers = self.matcher.associate_detections_to_trackers_iou(
            detections, track_list, iou_threshold=self.iou_match_thresh)

        # stage 1: IoU matches refresh their tracks directly
        has_tracked_id = set()
        for det_index, track_index in matches:
            det_bbox = detections[det_index]
            matched_id = track_list[track_index][5]
            # BUG FIX: the old code added the *index* into track_list here,
            # but the set is tested below against database ids -- record
            # the id itself so matched tracks are actually skipped.
            has_tracked_id.add(matched_id)
            self.update_id(frame, rgb_frame, det_bbox, matched_id)

        # stage 2 inputs: [embedding feature] + [bbox, score] per detection
        det_feature_list = []
        for new_index in unmatched_detections:
            det_bbox = detections[new_index]
            det_feature_list.append(self.embedding(frame, det_bbox) + det_bbox)

        # candidate tracks: every database entry not matched in stage 1,
        # encoded as [embedding feature] + [id]
        database_feature_list = []
        for database_id in self.track_id_dict:
            if database_id in has_tracked_id:
                continue
            database_feature_list.append(
                self.track_id_dict[database_id]['feature'] + [database_id])

        # stage 2: appearance association on the leftovers
        embedding_matches, \
        embedding_unmatched_detections,\
        embedding_unmatched_trackers = self.matcher.associate_detections_to_trackers_embedding(det_feature_list,
                              database_feature_list,
                              distance_threshold = self.embedding_match_thresh)

        # index 2048 splits the (assumed 2048-D) feature from the payload
        for det_index, track_index in embedding_matches:
            det_bbox = det_feature_list[det_index][2048:]
            matched_id = database_feature_list[track_index][2048]
            self.update_id(frame, rgb_frame, det_bbox, matched_id)

        # confident, well-posed unmatched detections spawn new ids
        for new_index in embedding_unmatched_detections:
            det_bbox = det_feature_list[new_index][2048:]
            det_score = det_bbox[4]
            pose_position, pose_value, pose_heatmap = self.pose_detect(
                frame, det_bbox)
            if det_score >= self.effective_detection_thresh and np.sum(
                    pose_value >= self.effective_keypoints_thresh
            ) >= self.effective_keypoints_number:
                self.create_id(frame, rgb_frame, det_bbox)

        # BUG FIX: this loop referenced the undefined name
        # `track_feature_list` (NameError as soon as any tracker went
        # unmatched); the candidates came from `database_feature_list`.
        for delete_index in embedding_unmatched_trackers:
            delete_id = database_feature_list[delete_index][2048]
            self.track_id_dict[delete_id]['exist'] = False

        bbox_list = []
        for tid, item in self.track_id_dict.items():
            if item['exist']:
                bbox_list.append(item['bbox_and_score'] + [tid])
        return bbox_list

    def match_detection_embedding(self, detections, frame):
        """Associate detections to tracks purely by appearance embedding.

        detections : list of [x1, y1, x2, y2, score]
        frame      : BGR image
        Matched detections refresh their tracks, confident well-posed
        leftovers create new ids, and tracks without any match are
        deleted from both the database and the tracker.
        Returns [x1, y1, x2, y2, score, id] per surviving track.
        """
        rgb_frame = frame[:, :, ::-1]

        # [embedding feature] + [bbox, score] for every detection
        det_feature_list = [
            self.embedding(frame, det_bbox) + det_bbox
            for det_bbox in detections
        ]

        # [embedding feature] + [id] for every known track
        database_feature_list = [
            self.track_id_dict[tid]['feature'] + [tid]
            for tid in self.track_id_dict
        ]

        embedding_matches, \
        embedding_unmatched_detections,\
        embedding_unmatched_trackers = self.matcher.associate_detections_to_trackers_embedding(det_feature_list,
                              database_feature_list,
                              distance_threshold = self.embedding_match_thresh)

        # matched detections refresh their tracks
        # (index 2048 splits the feature from the appended bbox / id)
        for det_index, track_index in embedding_matches:
            matched_bbox = det_feature_list[det_index][2048:]
            matched_id = database_feature_list[track_index][2048]
            self.update_id(frame, rgb_frame, matched_bbox, matched_id)

        # confident unmatched detections with enough good keypoints
        # become brand-new ids
        for new_index in embedding_unmatched_detections:
            cand_bbox = det_feature_list[new_index][2048:]
            cand_score = cand_bbox[4]
            pose_position, pose_value, pose_heatmap = self.pose_detect(
                frame, cand_bbox)
            well_posed = np.sum(
                pose_value >= self.effective_keypoints_thresh
            ) >= self.effective_keypoints_number
            if cand_score >= self.effective_detection_thresh and well_posed:
                self.create_id(frame, rgb_frame, cand_bbox)

        # tracks with no matching detection are dropped entirely
        for delete_index in embedding_unmatched_trackers:
            delete_id = database_feature_list[delete_index][2048]
            del self.track_id_dict[delete_id]
            self.tracker.delete_id(delete_id)

        return [
            item['bbox_and_score'] + [tid]
            for tid, item in self.track_id_dict.items() if item['exist']
        ]

    def match_detection_iou(self, detections, frame):
        """Associate detections to known tracks by IoU only.

        detections : list of [x1, y1, x2, y2, score]
        frame      : BGR image
        Matched tracks take the detection box verbatim (no re-embedding),
        confident well-posed unmatched detections become new ids, and
        unmatched tracks are purged from database and tracker.
        Returns [x1, y1, x2, y2, score, id] per surviving track.
        """
        rgb_frame = frame[:, :, ::-1]

        # snapshot of the database as [x1, y1, x2, y2, score, id] rows
        database_bbox_list = [
            self.track_id_dict[tid]['bbox_and_score'] + [tid]
            for tid in self.track_id_dict
        ]

        matches, unmatched_detections, unmatched_trackers = self.matcher.associate_detections_to_trackers_iou(
            detections,
            database_bbox_list,
            iou_threshold=self.iou_match_thresh)

        # matched: overwrite the stored box/score with the detection's
        for det_index, track_index in matches:
            matched_id = database_bbox_list[track_index][5]
            self.track_id_dict[matched_id]['bbox_and_score'] = \
                detections[det_index][0:5]

        # unmatched detections: keep only confident, well-posed ones
        for new_index in unmatched_detections:
            cand_bbox = detections[new_index]
            pose_position, pose_value, pose_heatmap = self.pose_detect(
                frame, cand_bbox)
            confident = cand_bbox[4] >= self.effective_detection_thresh
            well_posed = np.sum(
                pose_value >= self.effective_keypoints_thresh
            ) >= self.effective_keypoints_number
            if confident and well_posed:
                self.create_id(frame, rgb_frame, cand_bbox)

        # unmatched tracks: purge from both database and tracker
        for delete_index in unmatched_trackers:
            delete_id = database_bbox_list[delete_index][5]
            del self.track_id_dict[delete_id]
            self.tracker.delete_id(delete_id)

        return [
            item['bbox_and_score'] + [tid]
            for tid, item in self.track_id_dict.items() if item['exist']
        ]

    def match_detection_iou_embedding(self, detections, frame):
        """Two-stage association of detections against the id database.

        Stage 1 matches OKS-filtered detections to stored boxes by IoU;
        stage 2 tries to rescue the leftovers by appearance-embedding
        distance.  Confident, well-posed detections unmatched by both
        stages become new ids; database entries unmatched by both stages
        are deleted.

        detections : list of [x1, y1, x2, y2, score]
        frame      : BGR image (RGB view made for tracker-facing calls)
        Returns [x1, y1, x2, y2, score, id] per surviving track.
        """
        rgb_frame = frame[:, :, ::-1]

        #final_bbox = []
        # suppress duplicate person detections via OKS-NMS on their poses
        detections, keypoint_list = self.oks_filter(detections, frame)
        # snapshot the database as [x1, y1, x2, y2, score, id] rows
        database_bbox_list = []
        for database_id in self.track_id_dict:
            database_id_bbox = self.track_id_dict[database_id][
                'bbox_and_score'] + [database_id]
            database_bbox_list.append(database_id_bbox)

        # stage 1: IoU association
        matches, unmatched_detections, unmatched_trackers = self.matcher.associate_detections_to_trackers_iou(
            detections,
            database_bbox_list,
            iou_threshold=self.iou_match_thresh)

        # update the matched trackers with the detection bbox
        for match in matches:
            det_index, track_index = match
            det_bbox = detections[det_index]
            update_id = database_bbox_list[track_index][5]
            self.update_id(frame, rgb_frame, det_bbox, update_id)
            #final_bbox.append(det_bbox+[update_id])

        # stage 2 inputs: [embedding feature] + [bbox, score] per
        # leftover detection (feature assumed 2048-D, see slicing below)
        det_feature_list = []
        for new_index in unmatched_detections:
            det_bbox = detections[new_index]
            det_score = det_bbox[4]
            det_id_feature = self.embedding(frame, det_bbox) + det_bbox
            det_feature_list.append(det_id_feature)

        # ... and [embedding feature] + [id] per unmatched database entry
        track_feature_list = []
        for delete_index in unmatched_trackers:
            track_bbox = database_bbox_list[delete_index]
            track_score, delete_id = track_bbox[4], track_bbox[5]
            delete_id_feature = self.track_id_dict[delete_id]['feature'] + [
                delete_id
            ]
            track_feature_list.append(delete_id_feature)

        # stage 2: embedding association on the leftovers
        embedding_matches, \
        embedding_unmatched_detections,\
        embedding_unmatched_trackers = self.matcher.associate_detections_to_trackers_embedding(det_feature_list,
                              track_feature_list,
                              distance_threshold = self.embedding_match_thresh)

        # matched by appearance: refresh the track with the detection
        # (index 2048 splits the assumed-2048-D feature from the payload)
        for match in embedding_matches:
            det_index, track_index = match
            det_bbox = det_feature_list[det_index][2048:]
            update_id = track_feature_list[track_index][2048]
            self.update_id(frame, rgb_frame, det_bbox, update_id)

        # still unmatched: new id only for confident detections with
        # enough high-confidence keypoints
        for new_index in embedding_unmatched_detections:
            det_bbox = det_feature_list[new_index][2048:]
            det_score = det_bbox[4]
            pose_position, pose_value, pose_heatmap = self.pose_detect(
                frame, det_bbox)
            if det_score >= self.effective_detection_thresh and np.sum(
                    pose_value >= self.effective_keypoints_thresh
            ) >= self.effective_keypoints_number:
                self.create_id(frame, rgb_frame, det_bbox)

        # unmatched by both stages: remove from the database.
        # NOTE(review): unlike match_detection_iou, the SiamFC-side
        # deletion stays commented out here -- confirm this is intended.
        for delete_index in embedding_unmatched_trackers:
            delete_id = track_feature_list[delete_index][2048]
            del self.track_id_dict[delete_id]
            #self.tracker.delete_id(delete_id)

        bbox_list = []
        for id, item in self.track_id_dict.items():
            if item['exist'] == True:
                bbox_list.append(item['bbox_and_score'] + [id])
        return bbox_list

    def match_detection_tracking_iou_embedding(self, detections, track_list,
                                               frame):
        """Fuse tracker boxes into the detections, then run the two-stage
        (IoU, then embedding) association against the id database.

        detections : list of [x1, y1, x2, y2, score]
        track_list : list of [x1, y1, x2, y2, score, id] (tracker output)
        frame      : BGR image
        Returns [x1, y1, x2, y2, score, id] per surviving track.

        NOTE(review): confident tracker boxes are appended to the
        caller-supplied `detections` list in place -- confirm callers
        expect their list to be mutated.
        """
        rgb_frame = frame[:, :, ::-1]

        #print(detections)
        # promote high-scoring tracker boxes to pseudo-detections
        for track in track_list:
            track_score = track[4]
            if track_score >= self.effective_track_thresh:
                detections.append(track[0:5])
        #print(detections)
        # suppress duplicates (detector vs tracker overlap) via OKS-NMS
        detections, keypoint_list = self.oks_filter(detections, frame)
        # snapshot the database as [x1, y1, x2, y2, score, id] rows
        database_bbox_list = []
        for database_id in self.track_id_dict:
            database_id_bbox = self.track_id_dict[database_id][
                'bbox_and_score'] + [database_id]
            database_bbox_list.append(database_id_bbox)

        # stage 1: IoU association
        matches, unmatched_detections, unmatched_trackers = self.matcher.associate_detections_to_trackers_iou(
            detections,
            database_bbox_list,
            iou_threshold=self.iou_match_thresh)

        # update the matched trackers with the detection bbox
        for match in matches:
            det_index, track_index = match
            det_bbox = detections[det_index]
            update_id = database_bbox_list[track_index][5]
            self.update_id(frame, rgb_frame, det_bbox, update_id)
            #final_bbox.append(det_bbox+[update_id])

        # stage 2 inputs: [embedding feature] + [bbox, score] per
        # leftover detection (feature assumed 2048-D, see slicing below)
        det_feature_list = []
        for new_index in unmatched_detections:
            det_bbox = detections[new_index]
            det_score = det_bbox[4]
            det_id_feature = self.embedding(frame, det_bbox) + det_bbox
            det_feature_list.append(det_id_feature)

        # ... and [embedding feature] + [id] per unmatched database entry
        track_feature_list = []
        for delete_index in unmatched_trackers:
            track_bbox = database_bbox_list[delete_index]
            track_score, delete_id = track_bbox[4], track_bbox[5]
            delete_id_feature = self.track_id_dict[delete_id]['feature'] + [
                delete_id
            ]
            track_feature_list.append(delete_id_feature)

        # stage 2: embedding association on the leftovers
        embedding_matches, \
        embedding_unmatched_detections,\
        embedding_unmatched_trackers = self.matcher.associate_detections_to_trackers_embedding(det_feature_list,
                              track_feature_list,
                              distance_threshold = self.embedding_match_thresh)

        # matched by appearance: refresh the track with the detection
        for match in embedding_matches:
            det_index, track_index = match
            det_bbox = det_feature_list[det_index][2048:]
            update_id = track_feature_list[track_index][2048]
            self.update_id(frame, rgb_frame, det_bbox, update_id)

        # still unmatched: new id only for confident detections with
        # enough high-confidence keypoints
        for new_index in embedding_unmatched_detections:
            det_bbox = det_feature_list[new_index][2048:]
            det_score = det_bbox[4]
            pose_position, pose_value, pose_heatmap = self.pose_detect(
                frame, det_bbox)
            if det_score >= self.effective_detection_thresh and np.sum(
                    pose_value >= self.effective_keypoints_thresh
            ) >= self.effective_keypoints_number:
                self.create_id(frame, rgb_frame, det_bbox)

        # unmatched by both stages: purge from database and tracker
        for delete_index in embedding_unmatched_trackers:
            delete_id = track_feature_list[delete_index][2048]
            del self.track_id_dict[delete_id]
            self.tracker.delete_id(delete_id)

        bbox_list = []
        for id, item in self.track_id_dict.items():
            if item['exist'] == True:
                bbox_list.append(item['bbox_and_score'] + [id])
        return bbox_list

    def match_and_track_embedding_no_database(self, detections, track_list,
                                              frame):
        """IoU association against the tracker output, then embedding
        association of the leftovers against only the *unmatched* tracks
        (not the whole database, unlike match_and_track_embedding).

        detections : list of [x1, y1, x2, y2, score]
        track_list : list of [x1, y1, x2, y2, score, id] (tracker output)
        frame      : BGR image
        Returns [x1, y1, x2, y2, score, id] per surviving track.
        """
        # print(self.track_id_dict.keys())
        rgb_frame = frame[:, :, ::-1]
        matches, unmatched_detections, unmatched_trackers = self.matcher.associate_detections_to_trackers_iou(
            detections, track_list, iou_threshold=self.iou_match_thresh)

        # stage 1: IoU matches refresh their tracks directly.
        # NOTE(review): `has_tracked_id` collects track_list indices but is
        # never read afterwards in this variant -- appears vestigial.
        has_tracked_id = set()
        for match in matches:
            det_index, track_index = match
            has_tracked_id.add(track_index)
            det_bbox = detections[det_index]
            update_id = track_list[track_index][5]
            self.update_id(frame, rgb_frame, det_bbox, update_id)

        # stage 2 inputs: [embedding feature] + [bbox, score] per
        # leftover detection (feature assumed 2048-D, see slicing below)
        det_feature_list = []
        for new_index in unmatched_detections:
            det_bbox = detections[new_index]
            det_score = det_bbox[4]
            det_id_feature = self.embedding(frame, det_bbox) + det_bbox
            det_feature_list.append(det_id_feature)

        # ... and [embedding feature] + [id] per unmatched tracker box
        track_feature_list = []
        for delete_index in unmatched_trackers:
            track_bbox = track_list[delete_index]
            track_score, delete_id = track_bbox[4], track_bbox[5]
            delete_id_feature = self.track_id_dict[delete_id]['feature'] + [
                delete_id
            ]
            track_feature_list.append(delete_id_feature)
        # stage 2: embedding association on the leftovers
        embedding_matches, \
        embedding_unmatched_detections,\
        embedding_unmatched_trackers = self.matcher.associate_detections_to_trackers_embedding(det_feature_list,
                              track_feature_list,
                              distance_threshold = self.embedding_match_thresh)

        # matched by appearance: refresh the track with the detection
        # (index 2048 splits the assumed-2048-D feature from the payload)
        for match in embedding_matches:
            det_index, track_index = match
            det_bbox = det_feature_list[det_index][2048:]
            update_id = track_feature_list[track_index][2048]
            self.update_id(frame, rgb_frame, det_bbox, update_id)

        # still unmatched: new id only for confident detections with
        # enough high-confidence keypoints
        for new_index in embedding_unmatched_detections:
            det_bbox = det_feature_list[new_index][2048:]
            det_score = det_bbox[4]
            pose_position, pose_value, pose_heatmap = self.pose_detect(
                frame, det_bbox)
            if det_score >= self.effective_detection_thresh and np.sum(
                    pose_value >= self.effective_keypoints_thresh
            ) >= self.effective_keypoints_number:
                self.create_id(frame, rgb_frame, det_bbox)

        # unmatched by both stages: purge from database and tracker
        for delete_index in embedding_unmatched_trackers:
            delete_id = track_feature_list[delete_index][2048]
            del self.track_id_dict[delete_id]
            self.tracker.delete_id(delete_id)

        bbox_list = []
        for id, item in self.track_id_dict.items():
            if item['exist'] == True:
                bbox_list.append(item['bbox_and_score'] + [id])
        #print(bbox_list)
        return bbox_list

    def match_and_track_embedding_temporal_database(self, detections,
                                                    track_list, frame):
        """Like match_and_track_embedding_no_database, but each unmatched
        track contributes *multiple* stored features to the embedding
        stage, and conflicting matches to the same id are resolved by
        keeping the higher-scoring detection.

        detections : list of [x1, y1, x2, y2, score]
        track_list : list of [x1, y1, x2, y2, score, id] (tracker output)
        frame      : BGR image
        Returns [x1, y1, x2, y2, score, id] per surviving track.

        NOTE(review): this variant iterates entry['feature'] as a *list of
        feature vectors*, while update_id/create_id store a single flat
        vector -- confirm which representation the database actually uses
        before relying on this method.
        """
        # print(self.track_id_dict.keys())
        rgb_frame = frame[:, :, ::-1]
        matches, unmatched_detections, unmatched_trackers = self.matcher.associate_detections_to_trackers_iou(
            detections, track_list, iou_threshold=self.iou_match_thresh)

        # stage 1: IoU matches refresh their tracks directly
        has_tracked_id = set()
        for match in matches:
            det_index, track_index = match
            has_tracked_id.add(track_index)
            det_bbox = detections[det_index]
            update_id = track_list[track_index][5]
            self.update_id(frame, rgb_frame, det_bbox, update_id)

        # stage 2 inputs: [embedding feature] + [bbox, score] per
        # leftover detection (feature assumed 2048-D, see slicing below)
        det_feature_list = []
        for new_index in unmatched_detections:
            det_bbox = detections[new_index]
            det_score = det_bbox[4]
            det_id_feature = self.embedding(frame, det_bbox) + det_bbox
            det_feature_list.append(det_id_feature)

        # each unmatched track contributes one row per stored feature,
        # so the same id can appear several times in the candidate list
        track_feature_list = []
        for delete_index in unmatched_trackers:
            track_bbox = track_list[delete_index]
            track_score, delete_id = track_bbox[4], track_bbox[5]
            delete_id_features = self.track_id_dict[delete_id]['feature']
            for delete_id_feature in delete_id_features:
                track_feature_list.append(delete_id_feature + [delete_id])
        # stage 2: embedding association on the leftovers
        embedding_matches, \
        embedding_unmatched_detections,\
        embedding_unmatched_trackers = self.matcher.associate_detections_to_trackers_embedding(det_feature_list,
                              track_feature_list,
                              distance_threshold = self.embedding_match_thresh)

        # resolve conflicts: when several detections match the same id,
        # keep the detection with the higher score (last element)
        update_ids = dict()
        for match in embedding_matches:
            det_index, track_index = match
            update_id = track_feature_list[track_index][2048]
            if not update_id in update_ids:
                update_ids[update_id] = det_index
            else:
                score_former = det_feature_list[update_ids[update_id]][-1]
                score_now = det_feature_list[det_index][-1]
                if score_now >= score_former:
                    update_ids[update_id] = det_index

        # apply the winning detection per id
        for update_id, det_index in update_ids.items():
            det_bbox = det_feature_list[det_index][2048:]
            self.update_id(frame, rgb_frame, det_bbox, update_id)

        # still unmatched: new id only for confident detections with
        # enough high-confidence keypoints
        for new_index in embedding_unmatched_detections:
            det_bbox = det_feature_list[new_index][2048:]
            det_score = det_bbox[4]
            pose_position, pose_value, pose_heatmap = self.pose_detect(
                frame, det_bbox)
            if det_score >= self.effective_detection_thresh and np.sum(
                    pose_value >= self.effective_keypoints_thresh
            ) >= self.effective_keypoints_number:
                self.create_id(frame, rgb_frame, det_bbox)

        # purge ids unmatched by both stages; the guards are needed
        # because the same id can occur at several candidate indices
        for delete_index in embedding_unmatched_trackers:
            delete_id = track_feature_list[delete_index][2048]
            if delete_id not in update_ids and delete_id in self.track_id_dict:
                del self.track_id_dict[delete_id]
                self.tracker.delete_id(delete_id)

        bbox_list = []
        for id, item in self.track_id_dict.items():
            if item['exist'] == True:
                bbox_list.append(item['bbox_and_score'] + [id])
        #print(bbox_list)
        return bbox_list

    def match_and_track_embedding_no_database_nms(self, detections, track_list,
                                                  frame):
        """Associate detections with existing ids by embedding distance only.

        Detections and confident tracker boxes are pooled and de-duplicated
        with plain bbox NMS, then each surviving box is matched against the
        stored feature of every known id. Matched ids are updated, leftover
        boxes may become new ids, and every unmatched id is deleted.

        Boxes appear to be [x1, y1, x2, y2, score] (tracker rows carry their
        id at index 5) — TODO confirm against the caller.
        Returns the live tracks as [x1, y1, x2, y2, score, id].
        """
        # print(self.track_id_dict.keys())
        rgb_frame = frame[:, :, ::-1]  # BGR (OpenCV) -> RGB for the tracker
        all_bbox = []
        for det in detections:
            all_bbox.append(det)
        for track in track_list:
            # Drop tracker boxes whose score fell below the keep threshold.
            if not track[4] >= self.effective_track_thresh:
                continue
            all_bbox.append(track[0:4] + [track[5]])
        all_bbox = np.array(all_bbox)
        #print(all_bbox)
        keep = self.matcher.nms(all_bbox, self.nms_thresh)
        keep_bboxes = all_bbox[keep].tolist()
        #print(len(all_bbox), len(keep_bboxes))
        #print(keep_bboxes)

        #get feature unmatched_detections
        det_feature_list = []
        for keep_bbox in keep_bboxes:
            # Entry layout: feature vector (2048 values, apparently the
            # feature length) followed by the bbox values.
            det_id_feature = self.embedding(frame, keep_bbox) + keep_bbox
            det_feature_list.append(det_id_feature)

        #get feature for unmatched_trackers
        track_feature_list = []
        for former_id in self.track_id_dict:
            former_id_feature = self.track_id_dict[former_id]['feature'] + [
                former_id
            ]
            track_feature_list.append(former_id_feature)
        #match the detection and tracklist
        embedding_matches, \
        embedding_unmatched_detections,\
        embedding_unmatched_trackers = self.matcher.associate_detections_to_trackers_embedding(det_feature_list,
                              track_feature_list,
                              distance_threshold = self.embedding_match_thresh)

        #update matched embedding detections and former tracking feature
        for match in embedding_matches:
            det_index, track_index = match
            det_bbox = det_feature_list[det_index][2048:]  # bbox after the feature
            update_id = track_feature_list[track_index][2048]  # id after the feature
            self.update_id(frame, rgb_frame, det_bbox, update_id)

        #create new id for unmatched detections
        for new_index in embedding_unmatched_detections:
            det_bbox = det_feature_list[new_index][2048:]
            det_score = det_bbox[4]
            pose_position, pose_value, pose_heatmap = self.pose_detect(
                frame, det_bbox)
            # New ids require a confident box plus enough confident keypoints.
            if det_score >= self.effective_detection_thresh and np.sum(
                    pose_value >= self.effective_keypoints_thresh
            ) >= self.effective_keypoints_number:
                self.create_id(frame, rgb_frame, det_bbox)

        #delete unuseful index for unmatched_trackers
        for delete_index in embedding_unmatched_trackers:
            delete_id = track_feature_list[delete_index][2048]
            del self.track_id_dict[delete_id]
            self.tracker.delete_id(delete_id)

        bbox_list = []
        for id, item in self.track_id_dict.items():
            if item['exist'] == True:
                bbox_list.append(item['bbox_and_score'] + [id])
        #print(bbox_list)
        return bbox_list

    def match_and_track_iou(self, detections, track_list, frame):
        """Associate detections with tracker boxes by IoU only.

        Matched ids are updated with the detection box, leftover detections
        may become new ids, and every unmatched tracker id is deleted.
        Returns all known tracks as [x1, y1, x2, y2, score, id].
        """
        rgb_frame = frame[:, :, ::-1]  # BGR (OpenCV) -> RGB for the tracker
        matches, unmatched_detections, unmatched_trackers = self.matcher.associate_detections_to_trackers_iou(
            detections, track_list, iou_threshold=self.iou_match_thresh)

        #update the matched trackers with detection bbox
        for match in matches:
            det_index, track_index = match
            det_bbox = detections[det_index]
            update_id = track_list[track_index][5]
            #print('id {} has been updated'.format(update_id))
            self.track_id_dict[update_id]['bbox_and_score'] = det_bbox[0:5]
            self.update_tracker(rgb_frame, det_bbox, update_id)

        #create new index for unmatched_detections
        for new_index in unmatched_detections:
            det_bbox = detections[new_index]
            det_score = det_bbox[4]
            pose_position, pose_value, pose_heatmap = self.pose_detect(
                frame, det_bbox)
            # New ids require a confident box plus enough confident keypoints.
            if det_score >= self.effective_detection_thresh and np.sum(
                    pose_value >= self.effective_keypoints_thresh
            ) >= self.effective_keypoints_number:
                #print('add id{} now'.format(self.new_id_flag))
                self.create_id(frame, rgb_frame, det_bbox)

        #delete unuseful index for unmatched_trackers
        for delete_index in unmatched_trackers:
            track_bbox = track_list[delete_index]
            track_score, delete_id = track_bbox[4], track_bbox[5]
            #pose_position, pose_value, pose_heatmap = self.pose_detect(frame, track_bbox)
            # if track_score >= self.effective_track_thresh and np.sum(pose_value >= self.effective_keypoints_thresh) >= self.effective_keypoints_number:
            # self.track_id_dict[delete_id]['bbox_and_score']=track_bbox[0:5]
            # else:
            # #print('delete id{} now'.format(delete_id))
            del self.track_id_dict[delete_id]
            self.tracker.delete_id(delete_id)

        bbox_list = []
        # NOTE(review): unlike the embedding variants, this returns every id
        # without checking item['exist'] — confirm that is intended.
        for id, item in self.track_id_dict.items():
            bbox_list.append(item['bbox_and_score'] + [id])
        #print(bbox_list)
        return bbox_list

    #bbox must be format of x1y1x2y2
    def update_tracker(self, rgb_frame, bbox, track_id):
        """Refresh the tracker's template for track_id with bbox (x1y1x2y2)."""
        self.tracker.update_data_dict(rgb_frame, bbox, track_id)

    def embedding(self, frame, bbox):
        """Return the appearance feature for bbox in frame.

        Delegates to self.embedder.get_feature; downstream code appends the
        bbox after the returned feature, so this presumably returns a list —
        TODO confirm against the embedder implementation.
        """
        # Removed leftover timing scaffolding: the unused `start = time.time()`
        # local and the commented-out duration prints.
        return self.embedder.get_feature(frame, bbox)

    def detect(self, im):
        """Run the person detector on im with the class NMS threshold.

        NOTE(review): the literal 0.5 is presumably the detector's confidence
        threshold — confirm against self.detector.detect's signature.
        """
        return self.detector.detect(im, self.nms_thresh, 0.5)

    def pose_detect(self, im, bbox):
        """Run pose estimation on the bbox crop of im via the pose network."""
        return self.posenet.detect_pose(im, bbox)
Esempio n. 16
0
class Track_And_Detect(object):
    """Multi-person detection/tracking pipeline combining SiamFC tracking,
    pose estimation, OKS-NMS filtering and IoU + embedding association.

    State: self.track_id_dict maps track id -> {'bbox_and_score', 'feature',
    'frame_flag', 'exist'}; ids are allocated sequentially via new_id_flag.
    """

    # Score thresholds: tracker boxes must reach effective_track_thresh to be
    # kept; detections must reach effective_detection_thresh (plus enough
    # confident keypoints) before a new id is created.
    effective_track_thresh = 0.5
    effective_detection_thresh = 0.5
    # A keypoint counts as confident above this score; at least
    # effective_keypoints_number confident keypoints are needed for a new id.
    effective_keypoints_thresh = 0.6
    effective_keypoints_number = 8
    iou_match_thresh = 0.5
    nms_thresh = 0.5
    oks_thresh = 0.8
    # Maximum embedding distance accepted when matching by appearance.
    embedding_match_thresh = 2
    # Appearance feature dimensionality (bbox data is appended after it).
    feature_length = 2048

    # Feature toggles; their values are printed by __init__.
    oks_flag = True
    tracker_flag = True
    tracker_update_flag = True
    new_embedding_flag = True
    descrease_tracker_flag = True  # NOTE(review): sic — "decrease"

    #descrease_tracker_flag = False

    def __init__(self,
                 gpu_id=0,
                 track_model=None,
                 pose_model=None,
                 embedding_model=None):
        """Load the SiamFC tracker (if enabled), pose network and matcher.

        NOTE(review): embedding_model is accepted but never used here, and
        self.embedder (referenced when new_embedding_flag is False) is never
        created — with the current flags only self.embedding() is reached.
        """
        if self.tracker_flag:
            self.tracker = SiamFCTracker(gpu_id, track_model)
        self.posenet = PoseNet(gpu_id, pose_model)

        self.matcher = Matcher()
        print('----------------------------------------')
        print('Flag parameters are set as follow:')
        print('Tracker flag: {}'.format(self.tracker_flag))
        print('Tracker update flag: {}'.format(self.tracker_update_flag))
        print('Decrease tracker flag: {}'.format(self.descrease_tracker_flag))
        print('New embedding(with pose) flag: {}'.format(
            self.new_embedding_flag))
        print('----------------------------------------')

    # bbox must be format of x1y1x2y2
    def update_tracker(self, rgb_frame, bbox, track_id):
        """Refresh the SiamFC template for track_id with bbox (x1y1x2y2)."""
        self.tracker.update_data_dict(rgb_frame, bbox, track_id)

    def pose_detect(self, im, bbox):
        """Run pose estimation on the bbox crop; returns (positions, scores, heatmaps)."""
        return self.posenet.detect_pose(im, bbox)

    def embedding(self, frame, bbox):
        """Return the pose-network appearance feature for bbox in frame."""
        feature = self.posenet.embedding(frame, bbox)
        return feature

    # initialize the first frame of this video
    def init_tracker(self, frame, bbox_list):
        """Reset all state and create one id per OKS-filtered detection.

        Returns the created tracks as [x1, y1, x2, y2, score, id].
        """
        self.new_id_flag = 0
        self.track_id_dict = dict()
        if self.tracker_flag:
            self.tracker.clear_data()
        # convert bgr(opencv) to rgb
        rgb_frame = frame[:, :, ::-1]
        bbox_list, keypoint_list = self.oks_filter(bbox_list, frame)
        for bbox in bbox_list:
            self.create_id(frame, rgb_frame, bbox)
        bbox_list = []
        for id, item in self.track_id_dict.items():
            bbox = item['bbox_and_score'] + [id]
            bbox_list.append(bbox)
        return bbox_list

    def oks_filter(self, det_list, frame):
        """Suppress duplicate detections via OKS (object keypoint similarity) NMS.

        Each bbox score is rescaled by the mean confidence of its keypoints
        scoring >= 0.2, then oks_nms keeps the best non-overlapping poses.
        Returns (kept detections, their keypoint dicts).
        """
        keypoint_list = []
        for bbox in det_list:
            center, scale = self.posenet.x1y1x2y2_to_cs(bbox[0:4])
            # scale * 200 converts back to pixels (the usual 200px pose unit)
            area = np.prod(scale * 200, 1)
            # 15 keypoints x (x, y, score)
            pred = np.zeros((15, 3), dtype=np.float32)
            pose_positions, pose_vals, pose_heatmaps = self.pose_detect(
                frame, bbox)
            pred[:, 0:2] = pose_positions
            pred[:, 2] = pose_vals
            score_all, valid_num = 0, 0
            for i in range(15):
                score_i = pose_vals[i]
                if score_i >= 0.2:  # ignore low-confidence keypoints
                    score_all += score_i
                    valid_num += 1
            if valid_num != 0:
                new_score = score_all / valid_num * bbox[4]
            else:
                new_score = 0
            keypoint_dict = {
                'score': new_score,
                'area': area,
                'keypoints': pred
            }
            keypoint_list.append(keypoint_dict)
        keep = self.matcher.oks_nms(keypoint_list, thresh=self.oks_thresh)
        new_det_list = [det_list[i] for i in keep]
        new_keypoint_list = [keypoint_list[i] for i in keep]
        return new_det_list, new_keypoint_list

    def create_id(self, frame, rgb_frame, bbox):
        """Allocate a new track id for a detection [x1, y1, x2, y2, score]."""
        score = bbox[4]
        bbox = bbox[0:4]
        track_id = self.new_id_flag
        if self.new_embedding_flag:
            feature = self.embedding(frame, bbox)
        else:
            # NOTE(review): self.embedder is never created in __init__ —
            # this branch would raise AttributeError; confirm it is dead.
            feature = self.embedder.embedding(frame, bbox)
        self.track_id_dict[track_id] = {
            'bbox_and_score': bbox + [score],
            'feature': feature,
            'frame_flag': 1,
            'exist': True
        }
        if self.tracker_flag:
            self.update_tracker(rgb_frame, bbox, track_id)
        self.new_id_flag += 1

    def update_id(self, frame, rgb_frame, det_bbox, track_id):
        """Overwrite an existing id with a matched detection's bbox and feature."""
        bbox, score = det_bbox[0:4], det_bbox[4]
        if self.new_embedding_flag:
            feature = np.array(self.embedding(frame, bbox))
        else:
            feature = np.array(self.embedder.embedding(frame, bbox))

        former_track_dict = self.track_id_dict[track_id]
        former_frame_flag, former_feature = former_track_dict[
            'frame_flag'], np.array(former_track_dict['feature'])
        now_frame_flag = former_frame_flag + 1
        # NOTE(review): former_feature is loaded but not blended in — the
        # stored feature is simply replaced by the newest one.
        now_feature = feature.tolist()
        self.track_id_dict[track_id] = {
            'bbox_and_score': det_bbox,
            'feature': now_feature,
            'frame_flag': now_frame_flag,
            'exist': True
        }
        if self.tracker_flag and self.tracker_update_flag:
            self.update_tracker(rgb_frame, bbox, track_id)

    def multi_track(self, frame):
        """Advance every live SiamFC track one frame.

        Returns [x1, y1, x2, y2, score, id] per live track.
        """
        rgb_frame = frame[:, :, ::-1]
        bbox_list = []
        for id in self.track_id_dict:
            if self.track_id_dict[id]['exist'] == False:
                continue
            bbox, score = self.tracker.track_id(rgb_frame, id)
            bbox_list.append(bbox + [score] + [id])
            # NOTE(review): stored under key 'bbox', while other methods read
            # 'bbox_and_score' — confirm this key is intentional.
            self.track_id_dict[id]['bbox'] = bbox
        return bbox_list

    def match_detection_tracking_oks_iou_embedding(self, detections,
                                                   track_list, frame):
        """Full association pass.

        Pools detections with confident tracker boxes, OKS-filters them,
        matches them to known ids first by IoU and then by embedding
        distance, creates ids for remaining detections and deletes ids that
        matched nothing. Returns live tracks as [x1, y1, x2, y2, score, id].
        """
        rgb_frame = frame[:, :, ::-1]

        #print(detections)
        for track in track_list:
            track_score = track[4]
            if track_score >= self.effective_track_thresh:
                if self.descrease_tracker_flag:
                    # Penalise tracker boxes so fresh detections win ties.
                    track_score -= 0.35
                detections.append(track[0:4] + [track_score])
        #print(detections)
        if self.oks_flag:
            detections, keypoint_list = self.oks_filter(detections, frame)
        #decrease the tracking score

        #get feature for former trackers
        database_bbox_list = []
        for database_id in self.track_id_dict:
            database_id_bbox = self.track_id_dict[database_id][
                'bbox_and_score'] + [database_id]
            database_bbox_list.append(database_id_bbox)

        # First association round: IoU between pooled boxes and known ids.
        matches, unmatched_detections, unmatched_trackers = self.matcher.associate_detections_to_trackers_iou(
            detections,
            database_bbox_list,
            iou_threshold=self.iou_match_thresh)

        #update the matched trackers with detection bbox
        for match in matches:
            det_index, track_index = match
            det_bbox = detections[det_index]
            update_id = database_bbox_list[track_index][5]
            self.update_id(frame, rgb_frame, det_bbox, update_id)

        #create new index for unmatched_detections
        det_feature_list = []
        for new_index in unmatched_detections:
            det_bbox = detections[new_index]
            det_score = det_bbox[4]
            if self.new_embedding_flag:
                # Entry layout: feature vector (feature_length values)
                # followed by the bbox values.
                det_id_feature = self.embedding(frame, det_bbox) + det_bbox
            else:
                det_id_feature = self.embedder.embedding(frame,
                                                         det_bbox) + det_bbox
            det_feature_list.append(det_id_feature)

        track_feature_list = []
        for delete_index in unmatched_trackers:
            track_bbox = database_bbox_list[delete_index]
            track_score, delete_id = track_bbox[4], track_bbox[5]
            delete_id_feature = self.track_id_dict[delete_id]['feature'] + [
                delete_id
            ]
            track_feature_list.append(delete_id_feature)

        # Second association round: embedding distance between the leftovers.
        embedding_matches, \
        embedding_unmatched_detections,\
        embedding_unmatched_trackers = self.matcher.associate_detections_to_trackers_embedding(det_feature_list,
                              track_feature_list,
                              distance_threshold = self.embedding_match_thresh)

        #update matched embedding detections and former tracking feature
        for match in embedding_matches:
            det_index, track_index = match
            # index 2048 == feature_length: bbox/id sit right after the feature
            det_bbox = det_feature_list[det_index][2048:]
            update_id = track_feature_list[track_index][2048]
            self.update_id(frame, rgb_frame, det_bbox, update_id)

        #create new id for unmatched detections
        for new_index in embedding_unmatched_detections:
            det_bbox = det_feature_list[new_index][2048:]
            det_score = det_bbox[4]
            pose_position, pose_value, pose_heatmap = self.pose_detect(
                frame, det_bbox)
            # New ids require a confident box plus enough confident keypoints.
            if det_score >= self.effective_detection_thresh and np.sum(
                    pose_value >= self.effective_keypoints_thresh
            ) >= self.effective_keypoints_number:
                self.create_id(frame, rgb_frame, det_bbox)

        #delete unuseful index for unmatched_trackers
        for delete_index in embedding_unmatched_trackers:
            delete_id = track_feature_list[delete_index][2048]
            del self.track_id_dict[delete_id]
            if self.tracker_flag:
                self.tracker.delete_id(delete_id)

        bbox_list = []
        for id, item in self.track_id_dict.items():
            if item['exist'] == True:
                bbox_list.append(item['bbox_and_score'] + [id])
        return bbox_list
Esempio n. 17
0
# the mentors and their list of ordered mentee preferences,
# one "name: pref1, pref2, ..." line per mentor
MR = dict(
    (m, prefs.split(', ')) for [m, prefs] in (line.rstrip().split(': ')
                                              for line in open('mentors.txt')))

# the mentees and their list of ordered mentor preferences
ME = dict(
    (m, prefs.split(', ')) for [m, prefs] in (line.rstrip().split(': ')
                                              for line in open('mentees.txt')))

# Repeatedly run stable matching, removing paired mentees each round,
# until every mentee has a mentor.
previous_mentee_size = None
final_mentee_mentors = dict()
i = 0
while len(ME) > 0:
    i += 1
    previous_mentee_size = len(ME)
    match = Matcher(MR, ME)
    matches = match()  # mentee -> mentor pairs found this round

    final_mentee_mentors.update(matches)

    for mentee, mentor in matches.items():
        # This mentee already found a mentor for themselves. Delete from matching.
        ME.pop(mentee, True)

        # Delete this mentee from all mentor selections, so that mentors can fall back to their other
        # selections.
        # todo what to do with mentees that were not selected by any mentor?
        # NOTE(review): dict.copy() is shallow — the preference lists inside
        # new_MR are the very same list objects as in MR, so the remove()
        # below mutates MR too, and new_MR itself is never read in the
        # visible code; confirm this aliasing is intended.
        new_MR = MR.copy()
        for mrk, mrv in MR.items():
            if mentee in mrv:
                new_MR.get(mrk).remove(mentee)
Esempio n. 18
0
from match import Matcher

# the men and their list of ordered spousal preferences,
# parsed from "name: pref1, pref2, ..." lines
M = dict(
    (m, prefs.split(', '))
    for [m, prefs] in (line.rstrip().split(': ') for line in open('men.txt')))

# the women and their list of ordered spousal preferences
W = dict(
    (m, prefs.split(', ')) for [m, prefs] in (line.rstrip().split(': ')
                                              for line in open('women.txt')))

# initialize Matcher with preference lists for both men and women
match = Matcher(M, W)


# check if the mapping of wives to husbands is stable
def is_stable(wives, verbose=False):
    """Return True if `wives` (wife -> husband) is a stable matching.

    A matching is unstable when some man m prefers another woman p over his
    wife w, and p also prefers m over her current husband. Reads the
    module-level preference dicts M (men) and W (women).
    """
    for w, m in wives.items():
        i = M[m].index(w)
        preferred = M[m][:i]  # women m ranks above his current wife
        for p in preferred:
            h = wives[p]
            if W[p].index(m) < W[p].index(h):
                msg = "{}'s marriage to {} is unstable: " + \
                      "{} prefers {} over {} and {} prefers " + \
                      "{} over her current husband {}"
                if verbose:
                    # bug fix: Python 2 `print` statement was a syntax error
                    # under Python 3; use the print() call like the rest of
                    # the file.
                    print(msg.format(m, w, m, p, w, p, m, h))
                return False
    return True
Esempio n. 19
0
import cv2
import imutils
import numpy as np
import rospy
from geometry_msgs.msg import Point
from std_msgs.msg import String
from background_subtraction import BackgroundSubtractor

from match import Matcher

MIN_AREA = 10000  # minimum contour area to keep (pixels^2, presumably) — TODO confirm
MAX_DIST = 10  # maximum association distance between frames — TODO confirm units
# 5x5 dilation kernel anchored at its center, used to thicken foreground masks
k_dilate = cv2.getStructuringElement(cv2.MORPH_DILATE, (5, 5), (2, 2))

matcher = Matcher()


def within(low, x, high):
    """Return True when x lies strictly between low and high (exclusive)."""
    # Chained comparison is the idiomatic equivalent of `low < x and x < high`.
    return low < x < high


def x2t(x, dim, fov):
    """Map a pixel column x (out of dim columns) to a bearing angle for a
    camera with horizontal field of view fov (radians)."""
    half_fov = fov / 2
    sin_h = np.sin(half_fov)
    cos_h = np.cos(half_fov)
    # fraction of the frame width, remapped from [0, 1] to [-1, 1] in sine space
    frac = float(x) / dim
    return np.arctan2((2 * frac - 1) * sin_h, cos_h)


def cnt_avg_col(cnt, img):
    mask = np.zeros(img.shape[:-1], np.uint8)
Esempio n. 20
0
    for w, m in wives.items():
        i = M[m].index(w)
        preferred = M[m][:i]
        for p in preferred:
            h = wives[p]
            if W[p].index(m) < W[p].index(h):  
                msg = "{}'s marriage to {} is unstable: " + \
                      "{} prefers {} over {} and {} prefers " + \
                      "{} over her current husband {}"
                if verbose:
                    print msg.format(m, w, m, p, w, p, m, h) 
                return False
    return True

# initialize Matcher with preference lists for both men and women
match = Matcher(M, W)

# match men and women; returns a mapping of wives to husbands
wives = match()
assert match.is_stable()            # should be a stable matching
# bug fix: `is` tests object identity, not equality — the original
# `wives['w1'] is 'm3'` only passed by accident via CPython small-string
# interning; use == for value comparison.
assert wives['w1'] == 'm3'          # `w1` is married to `m3`

# now change prefs of woman `w1` and rematch
W['w1'] = ["m3", "m1", "m2"]

# re-initialize Matcher with revised preference lists
match = Matcher(M, W)
wives = match()
assert match.is_stable()            # should be a stable matching
assert wives['w1'] == 'm1'          # but `w1` is now married to `m1`
Esempio n. 21
0
import random

from match import Matcher


# the men and their list of ordered spousal preferences
M = dict((m, prefs.split(', ')) for [m, prefs] in (line.rstrip().split(': ')
                                for line in open('men.txt')))

# the women and their list of ordered spousal preferences
W = dict((m, prefs.split(', ')) for [m, prefs] in (line.rstrip().split(': ')
                                for line in open('women.txt')))

# for each man construct a random list of forbidden wives
forbidden = {}      # { 'dan': ['gay', 'eve', 'abi'], 'hal': ['eve'] }
for m, prefs in M.items():
    # bug fix: `random` was used without being imported (see import above)
    n = random.randint(0, len(prefs) - 1)
    forbidden[m] = random.sample(prefs, n)  # random sample of n wives

match = Matcher(M, W, forbidden)

# match men and women; returns a mapping of wives to husbands
wives = match()

assert match.is_stable(wives)           # should be a stable matching

# swap the husbands of two wives, which should make the matching unstable
# (random.sample requires a sequence, not a dict view, on Python 3)
a, b = random.sample(list(wives.keys()), 2)
wives[b], wives[a] = wives[a], wives[b]

match.is_stable(wives, verbose=True)