Example #1

import numpy as np
from munkres import Munkres
from scipy.optimize import linear_sum_assignment

class WGRAP(object):
    """
    Implements the weighted coverage group base reviewer assignment problem
    inference algorithm in Kou et. al. (2015)

    alpha - the maximum number of papers any reviewer can be assigned
    beta - the number of reviews required per paper
    """

    def __init__(self, rev_mat, pap_mat, alpha, beta):
        self.munkres = Munkres()
        # Store the reviewer-by-topic and paper-by-topic matrices as arrays
        # so fancy indexing (e.g. self.rev_mat[revs]) works below.
        self.rev_mat = np.array(rev_mat)
        self.pap_mat = np.array(pap_mat)

        # score_mat[r, p] = sum_t min(rev_mat[r, t], pap_mat[p, t]), i.e. the
        # topic coverage of paper p by reviewer r alone.
        reviewer_tensor = np.tile(self.rev_mat[:, np.newaxis, :], (1, len(self.pap_mat), 1))
        paper_tensor = np.tile(self.pap_mat[np.newaxis, :, :], (len(self.rev_mat), 1, 1))
        self.score_mat = np.sum(np.minimum(reviewer_tensor, paper_tensor), axis=2)

        self.alpha = alpha
        self.beta = beta
        # Number of papers each reviewer may pick up per matching round.
        self.nrevs_per_round = np.ceil(float(self.alpha) / self.beta)
        self.curr_assignment = np.zeros((np.size(self.rev_mat, axis=0),
                                         np.size(self.pap_mat, axis=0)))

    def group_score(self, revs, pap):
        # Coverage of paper `pap` by the reviewer group `revs`: element-wise
        # max over the group's topic vectors, capped by the paper's topic
        # vector and normalized by the paper's total topic weight.
        if len(revs) == 0:
            return 0.0
        group_max = np.amax(self.rev_mat[revs], axis=0)
        return np.sum(np.minimum(group_max, self.pap_mat[pap])) / float(np.sum(self.pap_mat[pap]))

    def marginal_gain(self, g1, g2, pap):
        return self.group_score(g2, pap) - self.group_score(g1, pap)

    # Build the rectangular marginal-gain matrix that the assignment
    # (Munkres / Hungarian) algorithm runs on.
    def curr_rev_group(self, paper):
        return np.nonzero(self.curr_assignment[:,paper])[0]

    def marg_gain_for_rev_pap(self, rev, paper):
        g1 = self.curr_rev_group(paper)
        assert rev not in set(g1)
        g2 = list(g1)
        g2.append(rev)
        return self.marginal_gain(g1, g2, paper)

    def marg_gain_for_rev(self, rev):
        mg = []
        for pap in range(np.size(self.pap_mat, axis=0)):
            if self.curr_assignment[rev, pap] == 1.0:
                mg.append(-1.0)   # should never reassign a reviewer to the same paper
            else:
                mg.append(self.marg_gain_for_rev_pap(rev, pap))
        return mg

    def curr_n_revs(self, rev):
        return np.sum(self.curr_assignment[rev,:])

    def _construct_matching_mat(self, post_refine=False):
        # Each reviewer contributes one identical row per remaining open slot,
        # so a single run of the assignment algorithm can give one reviewer
        # several papers.
        rows = []
        rows_to_revs = {}
        for rev in range(np.size(self.rev_mat, axis=0)):
            if post_refine:
                n_rows = int(self.alpha - self.curr_n_revs(rev))
            else:
                n_rows = int(np.min((self.alpha - self.curr_n_revs(rev), self.nrevs_per_round)))

            if n_rows > 0:
                rev_marg_gain = self.marg_gain_for_rev(rev)

            for row in range(n_rows):
                rows_to_revs[len(rows)] = rev
                rows.append(rev_marg_gain[:])
        return rows, rows_to_revs

    def refine(self, R=0.01, l=0.5, itr=0, show=False):
        """
        Implements 1 iteration of stochastic refinement: for each paper, one
        assigned reviewer is removed at random, with low-scoring reviewers
        more likely to be dropped.
        """
        for pap in range(np.size(self.pap_mat, axis=0)):
            assigned_revs = np.nonzero(self.curr_assignment[:, pap])[0]
            assert len(assigned_revs) > 0
            rev_scores = []
            for rev in assigned_revs:
                assert self.curr_assignment[rev, pap] == 1.0
                r_score = np.exp(-l * itr) * \
                          self.score_mat[rev, pap] / np.sum(self.score_mat[rev, :])
                # Floor the score at R so no reviewer ends up with zero weight.
                r_score = np.max((r_score, R))
                rev_scores.append(r_score)
            rev_scores = np.array(rev_scores)
            # Invert the scores: the lower a reviewer's score, the higher the
            # probability that reviewer is removed.
            inv_rev_scores = np.sum(rev_scores) - rev_scores
            normed_inv_rev_scores = inv_rev_scores / np.sum(inv_rev_scores)
            sample = np.argmax(np.random.multinomial(1, normed_inv_rev_scores))
            if show:
                print(normed_inv_rev_scores)
                print(sample)
            rev_to_remove = assigned_revs[sample]
            assert self.curr_assignment[rev_to_remove, pap] == 1.0
            self.curr_assignment[rev_to_remove, pap] = 0.0

    def _solve_assignment_and_update(self, rows, rows_to_revs, max_val=10.0, show=False):
        """
        Implements 1 iteration of stagewise deepening (no stochastic refinement)
        """
        if show:
            print(rows)
        # Convert marginal gains (to be maximized) into costs (to be minimized).
        cost_matrix = self.munkres.make_cost_matrix(rows, lambda v: max_val - v)
        row_inds, col_inds = linear_sum_assignment(np.array(cost_matrix))
        if show:
            print(cost_matrix)
        for row, col in zip(row_inds, col_inds):
            self.curr_assignment[rows_to_revs[row], col] = 1
            if show:
                value = rows[row][col]
                print('(%d, %d) -> %f' % (row, col, value))

    def solve(self):
        for i in range(self.beta):
            print("ITERATION %d" % i)
            rows, rows_to_revs = self._construct_matching_mat()
            print("MATRIX CONSTRUCTED")
            self._solve_assignment_and_update(rows, rows_to_revs)
        return self.curr_assignment

    def score_assignment(self):
        total = 0.0
        for pap in range(np.size(self.pap_mat, axis=0)):
            assigned_revs = np.nonzero(self.curr_assignment[:,pap])[0]
            total += self.group_score(assigned_revs, pap)
        return total
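
A minimal usage sketch, assuming rev_mat and pap_mat are nonnegative
reviewer-by-topic and paper-by-topic weight matrices; the shapes and toy
values below are illustrative, not from the original:

import numpy as np

# Hypothetical toy problem: 4 reviewers, 3 papers, 2 topics.
rev_mat = np.array([[1.0, 0.0],
                    [0.0, 1.0],
                    [0.5, 0.5],
                    [1.0, 1.0]])
pap_mat = np.array([[1.0, 1.0],
                    [2.0, 0.0],
                    [0.0, 2.0]])

# Each reviewer takes at most alpha=2 papers; each paper needs beta=2 reviews.
wgrap = WGRAP(rev_mat, pap_mat, alpha=2, beta=2)
assignment = wgrap.solve()       # binary reviewer-by-paper matrix
print(assignment)
print(wgrap.score_assignment())  # total weighted coverage over all papers

A refinement pass would alternate wgrap.refine(...) with another matching
round built from _construct_matching_mat(post_refine=True); the sketch above
only runs the stagewise solve.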
Example #2

# Method of a ROS leg-tracker node class (class body not shown, including the
# Person track class). Assumed imports for this snippet:
#   import timeit
#   import rospy
#   import numpy as np
#   import numpy.ma as ma
#   from munkres import Munkres
    def detected_leg_clusters_callback(self, msg):
        tic = timeit.default_timer()

        # Attempt to pair legs. Assign pairs or isolated legs to detected_persons.
        detected_persons = []
        matched_legs = set()
        detections_from_two_legs = set()

        # Construct a cost matrix of pairwise Euclidean distances between all
        # detected legs.
        p_pair = []
        for leg_index_1, leg_1 in enumerate(msg.poses):
            new_row = []
            for leg_index_2, leg_2 in enumerate(msg.poses):
                if leg_index_1 == leg_index_2:
                    # A leg can never be paired with itself: give it an
                    # effectively infinite cost.
                    new_row.append(999999.)
                else:
                    # Cost of pairing two legs is their Euclidean distance.
                    dist = ((leg_1.position.x - leg_2.position.x)**2 +
                            (leg_1.position.y - leg_2.position.y)**2)**0.5
                    new_row.append(dist)
            if new_row:
                p_pair.append(new_row)

        # Make sure the cost matrix is square for Munkres (see below). Newly
        # created elements get a prohibitively large cost.
        if p_pair:
            rows = len(p_pair)
            cols = len(p_pair[0])
            if rows > cols:  # more rows than cols
                for i in range(rows):
                    for j in range(rows - cols):
                        p_pair[i].append(999999.)
            else:  # more cols than rows
                for i in range(cols - rows):
                    p_pair.append([999999.] * cols)

        # Minimum-cost matching of legs into pairs.
        if p_pair:
            munkres = Munkres()
            indexes = munkres.compute(p_pair)
            for leg_index_1, leg_index_2 in indexes:
                if (p_pair[leg_index_1][leg_index_2] < self.MAX_LEG_PAIRING_DIST
                        and leg_index_1 not in matched_legs
                        and leg_index_2 not in matched_legs):
                    # Found a pair of legs closer together than MAX_LEG_PAIRING_DIST.
                    leg_1 = msg.poses[leg_index_1]
                    leg_2 = msg.poses[leg_index_2]
                    # Note: the z position is actually the detection
                    # confidence; take the max of the two legs.
                    # TODO: should probably combine the two confidences somehow.
                    detected_persons.append(
                        ((leg_1.position.x + leg_2.position.x) / 2.,
                         (leg_1.position.y + leg_2.position.y) / 2.,
                         max(leg_1.position.z, leg_2.position.z)))
                    matched_legs.add(leg_index_1)
                    matched_legs.add(leg_index_2)
                    detections_from_two_legs.add(len(detected_persons) - 1)

            # Legs that weren't paired are assigned to individual detections
            for leg_index, leg in enumerate(msg.poses):
                if leg_index not in matched_legs:  # no matching leg was found
                    detected_persons.append(
                        (leg.position.x, leg.position.y, leg.position.z))
                    matched_legs.add(leg_index)

        # Construct matrix of probability of matching between all persons and all detections.
        p_match = []
        for detect_index, detect in enumerate(detected_persons):
            new_row = []
            for person_index, person in enumerate(self.person_list):
                euclid_dist = ((detect[0] - person.pos_x)**2 +
                               (detect[1] - person.pos_y)**2)**0.5

                # p_euclid = 1.0 if euclid_dist == 0, p_euclid = 0.0 if euclid_dist > MAX_MATCH_DIST and it is linearly interpolated in between
                p_euclid = max(0., (self.MAX_MATCH_DIST - euclid_dist) /
                               self.MAX_MATCH_DIST)

                # p_confidence = 1.0 if detection confidence >= SURE_ITS_A_LEG, p_confidence = 0 if detection confidence == 0 and it is linearly interpolated in between
                p_confidence = min(1., detect[2] / self.SURE_ITS_A_LEG)
                p_combined = p_euclid * p_confidence
                new_row.append(p_combined)
            if new_row:
                p_match.append(new_row)

        # Make sure the cost matrix is square for Munkres (see below). Newly
        # created elements have probability 0.
        if p_match:
            rows = len(p_match)
            cols = len(p_match[0])
            if rows > cols:  # more rows than cols
                for i in range(rows):
                    for j in range(rows - cols):
                        p_match[i].append(0.)
            else:  # more cols than rows
                for i in range(cols - rows):
                    p_match.append([0.] * cols)

        observations = {}
        confidence = {}
        matched_persons = set()
        matched_detections = set()

        # Minimum-cost matching of person detections to existing person tracks.
        if p_match:
            munkres = Munkres()

            # Munkres minimizes cost, but we want the highest-probability
            # matching, so invert p_match element-wise.
            inv_p_match = munkres.make_cost_matrix(p_match, lambda cost: 1.0 - cost)

            indexes = munkres.compute(inv_p_match)
            for detect_index, person_index in indexes:
                if p_match[detect_index][person_index] > self.MIN_MATCH_PROBABILITY:
                    observations[person_index] = np.array([
                        detected_persons[detect_index][0],
                        detected_persons[detect_index][1]
                    ])
                    confidence[person_index] = detected_persons[detect_index][2]
                    matched_persons.add(person_index)
                    matched_detections.add(detect_index)

        # Update each person's position with the new observations.
        person_list_to_delete = []
        for person_index, person in enumerate(self.person_list):
            if person_index not in matched_persons:
                # If a person was not matched to a detection, give them a
                # masked "observation missing" but still update their Kalman filter.
                observations[person_index] = ma.masked_array(np.array([0, 0]),
                                                             mask=[1, 1])
                person.last_seen += 1
                person.confidences.append(0)
                if (person.last_seen > self.MAX_UNSEEN_FRAMES
                        or person.get_max_confidence() < self.SURE_ITS_A_LEG):
                    person_list_to_delete.insert(0, person_index)
            else:
                person.last_seen = 0
                person.times_seen += 1
                person.confidences.append(confidence[person_index])

            person.filtered_state_means, person.filtered_state_covariances = (
                person.kf.filter_update(person.filtered_state_means,
                                        person.filtered_state_covariances,
                                        observations[person_index]))
            person.pos_x = person.filtered_state_means[0]
            person.pos_y = person.filtered_state_means[1]
            person.vel_x = person.filtered_state_means[2]
            person.vel_y = person.filtered_state_means[3]

        # If a detection was not matched to an existing person, create a new person.
        for detect_index, detect in enumerate(detected_persons):
            if detect_index not in matched_detections:
                if (not self.PAIR_REQUIRED_TO_INITIATE
                        or detect_index in detections_from_two_legs):
                    if detect[2] > self.SURE_ITS_A_LEG:
                        new_person = Person(detect[0], detect[1],
                                            self.new_person_id_num)
                        new_person.confidences.append(detect[2])
                        self.person_list.append(new_person)
                        self.new_person_id_num += 1

        # delete persons that haven't been seen for a while
        for delete_index in person_list_to_delete:
            del self.person_list[delete_index]

        toc = timeit.default_timer()
        elapsed_time = toc - tic
        if elapsed_time > 0.1:
            rospy.loginfo("leg_tracker took a long time to run: %f",
                          elapsed_time)
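
The callback leans on the same Munkres pattern twice: pad the matrix square,
invert it when a maximum-probability matching is wanted, then read assignments
off compute(). A standalone sketch of that pattern with made-up values; the
pad_square helper and the threshold are illustrative, not part of the original:

from munkres import Munkres

def pad_square(matrix, fill):
    """Pad a rectangular matrix with `fill` so it becomes square."""
    rows, cols = len(matrix), len(matrix[0])
    size = max(rows, cols)
    for row in matrix:
        row.extend([fill] * (size - cols))
    for _ in range(size - rows):
        matrix.append([fill] * size)
    return matrix

# Hypothetical match probabilities: 3 detections (rows) x 2 tracks (cols).
p_match = [[0.9, 0.1],
           [0.2, 0.8],
           [0.4, 0.3]]
p_match = pad_square(p_match, 0.)  # dummy column with probability 0

munkres = Munkres()
# Munkres minimizes cost; invert probabilities to maximize them instead.
inv = munkres.make_cost_matrix(p_match, lambda p: 1.0 - p)
for det, track in munkres.compute(inv):
    if p_match[det][track] > 0.25:  # illustrative acceptance threshold
        print('detection %d -> track %d (p=%.1f)' % (det, track, p_match[det][track]))

Matches landing in a padded row or column fall below the threshold (their
probability is the fill value 0), which is how the dummy entries are rejected.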