Example #1
    def test_2_rank_pivots(self):
        vpicReader = VPICReader('.')
        rank = Rank(vpicReader, 2)
        rank.insert(range(20))
        pivots, pivot_width = rank.compute_pivots(5)

        self.assertEqual(pivots, [0, 5, 10, 15, 19])
        self.assertAlmostEqual(pivot_width, 5.0)

        return
Example #2
    def test_1_insert_init(self):
        self.vpicReader = VPICReader('.')
        self.rank = Rank(self.vpicReader, 2)

        self.rank.insert(range(20))
        self.assertEqual(len(self.rank.oob_left), 20,
                         msg="init insertion not okay")
        self.assertEqual(len(self.rank.oob_right), 0,
                         msg="init insertion not okay")
        self.assertIsNone(self.rank.pivots)
        self.assertIsNone(self.rank.pivot_counts)
Example #3
 def test_straight(self):
     card_ranks_set = self.rank_cards_set()
     for index in range(len(card_ranks_set) - 4):
         if ranks.index(card_ranks_set[index].value) - ranks.index(
                 card_ranks_set[index + 4].value) == 4:
             self.straight = True
             self.straight_lead = card_ranks_set[index].value
             return True
     if Rank('A') in card_ranks_set and card_ranks_set[len(card_ranks_set) -
                                                       4] == Rank('5'):
         self.straight = True
         self.straight_lead = '5'
         return True
     return False
Example #4
class TestRank(unittest.TestCase):
    def setUp(self):
        return

    def test_1_insert_init(self):
        self.vpicReader = VPICReader('.')
        self.rank = Rank(self.vpicReader, 2)

        self.rank.insert(range(20))
        self.assertEqual(len(self.rank.oob_left), 20,
                         msg="init insertion not okay")
        self.assertEqual(len(self.rank.oob_right), 0,
                         msg="init insertion not okay")
        self.assertIsNone(self.rank.pivots)
        self.assertIsNone(self.rank.pivot_counts)

    def test_2_rank_pivots(self):
        vpicReader = VPICReader('.')
        rank = Rank(vpicReader, 2)
        rank.insert(range(20))
        pivots, pivot_width = rank.compute_pivots(5)

        self.assertEqual(pivots, [0, 5, 10, 15, 19])
        self.assertAlmostEqual(pivot_width, 5.0)

        return

    def test_3_update_pivots(self):
        new_pivots = [3, 7, 11, 13]
        self.rank.update_pivots(new_pivots)
        self.rank.flush_oobs()
        self.assertEqual(self.rank.pivot_counts, [4, 4, 2])
        self.assertEqual(len(self.rank.oob_left), 3)
        self.assertEqual(len(self.rank.oob_right), 7)

    def test_4_get_pivots_again(self):
        pivots, pivot_width = self.rank.compute_pivots(3)
        self.assertEqual(pivots, [0, 10, 19])
        self.assertAlmostEqual(pivot_width, 10.0)

        pivots, pivot_width = self.rank.compute_pivots(7)
        self.assertEqual(pivots, [0, 3, 7, 10, 13, 17, 19])

    def test_repartition(self):
        pivots_old = [3, 10, 13, 19]
        counts_old = [2, 7, 3]

        pivots_new = [2, 4, 10, 13, 23]
        counts_new = []

        self.rank.__class__.repartition_bin_counts(pivots_old, counts_old,
                                                   pivots_new, counts_new)
        self.assertAlmostEqual(sum(counts_old), sum(counts_new))
        self.assertAlmostEqual(counts_new[0], 0.2857, 3)
        self.assertAlmostEqual(counts_new[3], 3.0, 3)
        return

    def tearDown(self):
        return
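Taken together, the TestRank methods above trace the full Rank pivot workflow. Below is a minimal sketch of that flow; the VPICReader('.') fixture and the expected values come from the tests, everything else is an assumption rather than a definitive API reference.

# Sketch of the pivot workflow exercised by TestRank above; values in comments
# are the ones the tests assert, the rest is assumed.
reader = VPICReader('.')
rank = Rank(reader, 2)

rank.insert(range(20))                   # new data first lands in the out-of-bounds buffers
pivots, width = rank.compute_pivots(5)   # e.g. [0, 5, 10, 15, 19] with width 5.0

rank.update_pivots([3, 7, 11, 13])       # install externally negotiated pivots
rank.flush_oobs()                        # drain the OOB buffers into the pivot bins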
Example #5
def topThreeNum(request):
    # Default payload when the request is not a POST or the lookups below fail.
    anchorDict = 0
    if request.method == 'POST':
        username = json.loads(request.body.decode()).get('username')

        try:
            anchorDict = {}
            userId = models.User.objects.get(userName=username, isAnchor=0)
            # rank data
            room = models.UserStatistics.objects.filter(
                userId_id=userId).annotate(rank=Rank('num'))
            # get related statistics
            for i in range(room.count()):
                name = models.User.objects.get(
                    userId=room[i].roomId_id).userName
                rank = room[i].rank
                num = room[i].num
                if (rank > 3):
                    break
                newDict = {name: {'rank': rank, 'num': num}}
                anchorDict.update(newDict)

        except Exception:
            anchorDict = 0

    ret = anchorDict

    return HttpResponse(json.dumps(ret), content_type="application/json")
Example #6
    def __init__(self, num_ranks: int, timestep: int,
                 vpic_reader: VPICReader) -> None:
        self.vpic_reader = vpic_reader
        self.num_ranks = num_ranks
        self.time_step = timestep

        self.ranks = [Rank(vpic_reader, ridx) for ridx in range(num_ranks)]
        # all data, including what's not yet produced
        self.ranks_data = []
        # data that has been produced. reset after reneg
        self.ranks_produced = [[] for ridx in range(num_ranks)]
        # data that has been produced, flattened. reset after reneg.
        self.ranks_produced_flattened = []
        self.ranks_cursors = []
        """
        number of bins finally returned by the renegotiation.
        usually equal to the number of ranks, but configurable in case we want to
        find out 'how much information' the global distribution contains
        """
        self.num_bins_final = num_ranks
        """
        number of pivots gathered from each rank. ideally, this wouldn't be a function
        of the scale. we hope that in practice, this is some logarithmic function of
        scale or something
        """
        self.num_pivots_sent = num_ranks * 4
        """
        number of counters maintained by each rank to construct the pivots to be sent
        in the previous step. 1x-2x should be sufficient if we expect the distribution
        to change slowly. we don't think (yet) that this needs to be a function of scale
        """
        self.num_pivots_stored = self.num_pivots_sent * 2

        return
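The comments in this __init__ explain how the pivot and bin counts are derived from the rank count. A small construction sketch follows; the enclosing class name Renegotiation is an assumption, since the snippet only shows its __init__.

# Hypothetical construction; only the parameter relationships come from the snippet above.
reader = VPICReader('.')
runner = Renegotiation(num_ranks=4, timestep=0, vpic_reader=reader)

assert runner.num_bins_final == 4       # one bin per rank by default
assert runner.num_pivots_sent == 16     # num_ranks * 4
assert runner.num_pivots_stored == 32   # twice the number of pivots sent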
Example #7
def process_comparison(db, venue_id, user, sorted_items, new_item,
                       alpha_annealing=0.6):
    """ Function updates quality distributions and rank of submissions (items).

    Arguments:
        - sorted_items is a list of submission ids, sorted by the user such that
        rank(sorted_items[i]) > rank(sorted_items[j]) for i > j

        - new_item is the id of the submission from sorted_items that was new
        to the user. If sorted_items contains only two elements, then
        new_item is None.
    """
    if sorted_items is None or len(sorted_items) <= 1:
        return None
    qdistr_param = get_qdistr_param(db, venue_id, sorted_items)
    # If qdistr_param is None then some submission does not have qualities yet,
    # therefore we cannot process comparison.
    if qdistr_param is None:
        return None
    rankobj = Rank.from_qdistr_param(sorted_items, qdistr_param,
                                     alpha=alpha_annealing)
    result = rankobj.update(sorted_items, new_item)
    # Updating the DB.
    for x in sorted_items:
        perc, avrg, stdev = result[x]
        # Updating submission table with its quality and error.
        db((db.submission.id == x) &
           (db.submission.venue_id == venue_id)).update(quality=avrg, error=stdev)
        # Saving the latest rank update date.
        db(db.venue.id == venue_id).update(latest_rank_update_date = datetime.utcnow())
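Per the docstring, sorted_items runs from lowest to highest rank, and new_item flags the submission the reviewer had not seen before. A hedged call sketch with placeholder ids:

# Hypothetical invocation of process_comparison above; the ids and venue_id are placeholders.
# sorted_items is ordered worst-to-best, so 23 is the reviewer's top pick here.
process_comparison(db, venue_id=42, user='reviewer@example.com',
                   sorted_items=[17, 5, 23], new_item=23)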
Example #8
def rerun_processing_comparisons(db,
                                 venue_id,
                                 alpha_annealing=0.5,
                                 run_twice=False):

    # We reset the ranking to the initial values.
    # Gets a ranker object to do the ranking, initialized with all the submissions with
    # their default stdev and avg.
    sub = db(db.submission.venue_id == venue_id).select(db.submission.id)
    items = []
    qdistr_param = []
    for x in sub:
        items.append(x.id)
        qdistr_param.append(AVRG)
        qdistr_param.append(STDEV)
    rankobj = Rank.from_qdistr_param(items,
                                     qdistr_param,
                                     alpha=alpha_annealing)

    # Processes the list of comparisons.
    result = None
    comparison_list = db(db.comparison.venue_id == venue_id).select(
        orderby=db.comparison.date)
    for comp in comparison_list:
        # Processes the comparison, if valid.
        if comp.is_valid is None or comp.is_valid == True:
            # Reverses the list.
            sorted_items = util.get_list(comp.ordering)[::-1]
            if len(sorted_items) < 2:
                continue
            result = rankobj.update(sorted_items, new_item=comp.new_item)
    if run_twice:
        comparison_list = db(db.comparison.venue_id == venue_id).select(
            orderby=~db.comparison.date)
        for comp in comparison_list:
            # Processes the comparison, if valid.
            if comp.is_valid is None or comp.is_valid == True:
                # Reverses the list.
                sorted_items = util.get_list(comp.ordering)[::-1]
                if len(sorted_items) < 2:
                    continue
                result = rankobj.update(sorted_items, new_item=comp.new_item)

    # Writes the updated statistics to the db.  Note that result contains the result for
    # all the ids, due to how the rankobj has been initialized.
    if result is None:
        return
    for x in items:
        perc, avrg, stdev = result[x]
        db((db.submission.id == x)
           & (db.submission.venue_id == venue_id)).update(quality=avrg,
                                                          error=stdev,
                                                          percentile=perc)
    # Saving the latest rank update date.
    description = "Ranking without reputation system. All comparisons are used in chronological order"
    db(db.venue.id == venue_id).update(
        latest_rank_update_date=datetime.utcnow(),
        ranking_algo_description=description)
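rerun_processing_comparisons replays every valid comparison in chronological order and, when run_twice is set, makes a second pass in reverse order before writing the aggregated result back. A hedged call sketch:

# Hypothetical invocation; venue_id is a placeholder.
rerun_processing_comparisons(db, venue_id=42, alpha_annealing=0.5, run_twice=True)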
Example #9
 def annotate_results_rank(self, results):
     """
     Annotate the results list with lower_rank and upper_rank.
     The result cannot be filtered, so use the get_result_id_rank_list function to get the rank list.
     """
     results = results.annotate(
         lower_rank=Rank("result"),
         upper_rank=UpperRank("result"),
     )
     return results
Example #10
 def test_straightflush(self):
     self.royal = False
     if self.test_flush():
         self.flush_ranks = []
         for card in self.cards:
             if card.suit.value == self.flush_suit:
                 self.flush_ranks.append(card.rank)
         if self.flush_ranks[4] == Rank('T'):
             self.royal = True
             return True
         for index in range(len(self.flush_ranks) - 4):
             if ranks.index(self.flush_ranks[index].value) - ranks.index(
                     self.flush_ranks[index + 4].value) == 4:
                 self.sf_lead = self.flush_ranks[index].value
                 return True
         if Rank('A') in self.flush_ranks and self.flush_ranks[
                 len(self.flush_ranks) - 4] == Rank('5'):
             self.sf_lead = '5'
             return True
     return False
Example #11
def cardsInOrder(ranks):
    sortedRanks = list(map(createRank, ranks))
    sortedRanks.sort()
    asRank = Rank('A')
    if (asRank in sortedRanks):
        sortedRanks.pop(0)
        for rank in sortedRanks:
            if (rank == sortedRanks[len(sortedRanks) - 1]):
                continue
            if (rank.getNextValue() not in sortedRanks):
                return False
        return (Rank('2') in sortedRanks or Rank('K') in sortedRanks)
    else:
        for rank in sortedRanks:
            if (rank == sortedRanks[len(sortedRanks) - 1]):
                continue
            if (rank.getNextValue() not in sortedRanks):
                return False

    return True
Example #12
class TestParseUrl(unittest.TestCase):

    rank = Rank()

    def test_CountRankPersonInPage(self):
        self.assertEqual(self.rank.counts_in(page, words_dict), {1: 4, 2: 4})

    def test_CountRankPersonInEmptyPage(self):
        self.assertEqual(self.rank.counts_in(empty_page, words_dict), {
            1: 0,
            2: 0
        })
Example #13
    def __init_status(self):

        # Decide the lead player
        random.shuffle(self.players)
        # self.first = self.players[0]

        # Hand rankings (jokbo)
        with open('ranks.csv', 'r', encoding='utf-8') as f:
            csv_reader = csv.reader(f)
            for row in csv_reader:
                comb = set(zip(row[2::4], row[3::4], row[4::4], row[5::4]))
                self.ranks.append(
                    Rank(name=row[0], ranking=row[1], combination=comb))
Example #14
def evaluate_contributors(db, venue_id):
    """This function evaluates reviewers for a venue.
    Currently, this is based on the last comparison made by each reviewer.
    TODO(luca,michael): should we use all comparisons instead?"""

    items, qdistr_param, _ = get_all_items_qdistr_param_and_users(db, venue_id)
    if items is None or len(items) == 0:
        return None
    # Obtaining list of users who did comparisons.
    comp_r = db(db.comparison.venue_id == venue_id).select(db.comparison.user)
    list_of_users = [x.user for x in comp_r]
    list_of_users = list(set(list_of_users))

    rankobj = Rank.from_qdistr_param(items, qdistr_param, cost_obj=None)
    for user in list_of_users:
        last_comparison = db((db.comparison.user == user)
                             & (db.comparison.venue_id == venue_id)).select(
                                 orderby=~db.comparison.date).first()
        if last_comparison is None:
            # Deleting the db.user_accuracy row for this venue_id and user.
            db((db.user_accuracy.venue_id == venue_id)
               & (db.user_accuracy.user == user)).delete()
            continue
        ordering = util.get_list(last_comparison.ordering)
        ordering = ordering[::-1]
        val = rankobj.evaluate_ordering(ordering)
        # Normalization
        num_subm_r = db(db.venue.id == venue_id).select(
            db.venue.number_of_submissions_per_reviewer).first()
        if num_subm_r is None or num_subm_r.number_of_submissions_per_reviewer is None:
            # For compatibility with venues that do not have the constant.
            num_subm = 5
        else:
            num_subm = num_subm_r.number_of_submissions_per_reviewer
        # TODO(michael): num_subm can be zero, take care of it.
        val = min(1, val / float(num_subm))
        # Writing to the DB.
        db.user_accuracy.update_or_insert(
            (db.user_accuracy.venue_id == venue_id) &
            (db.user_accuracy.user == user),
            venue_id=venue_id,
            user=user,
            accuracy=val,
            reputation=None,
            n_ratings=len(ordering))
    # Saving the latest user evaluation date.
    db(db.venue.id == venue_id).update(
        latest_reviewers_evaluation_date=datetime.utcnow())
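The normalization step caps a reviewer's accuracy at 1 relative to the venue's submissions-per-reviewer setting. A worked example with hypothetical numbers:

# Worked example of the normalization in evaluate_contributors above (numbers are made up).
val, num_subm = 3.2, 5
accuracy = min(1, val / float(num_subm))   # 0.64; capped at 1 for very accurate reviewers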
Example #15
def rerun_processing_comparisons(db, venue_id, alpha_annealing=0.5, run_twice=False):

    # We reset the ranking to the initial values.
    # Gets a ranker object to do the ranking, initialized with all the submissions with
    # their default stdev and avg. 
    sub = db(db.submission.venue_id == venue_id).select(db.submission.id)
    items = []
    qdistr_param = []
    for x in sub:
        items.append(x.id)
        qdistr_param.append(AVRG)
        qdistr_param.append(STDEV)
    rankobj = Rank.from_qdistr_param(items, qdistr_param, alpha=alpha_annealing)

    # Processes the list of comparisons.
    result = None
    comparison_list = db(db.comparison.venue_id == venue_id).select(orderby=db.comparison.date)
    for comp in comparison_list:
        # Processes the comparison, if valid.
        if comp.is_valid is None or comp.is_valid == True:
            # Reverses the list.
            sorted_items = util.get_list(comp.ordering)[::-1]
            if len(sorted_items) < 2:
                continue
            result = rankobj.update(sorted_items, new_item=comp.new_item)
    if run_twice:
        comparison_list = db(db.comparison.venue_id == venue_id).select(orderby=~db.comparison.date)
        for comp in comparison_list:
            # Processes the comparison, if valid.
            if comp.is_valid is None or comp.is_valid == True:
                # Reverses the list.
                sorted_items = util.get_list(comp.ordering)[::-1]
                if len(sorted_items) < 2:
                    continue
                result = rankobj.update(sorted_items, new_item=comp.new_item)

    # Writes the updated statistics to the db.  Note that result contains the result for
    # all the ids, due to how the rankobj has been initialized.
    if result is None:
        return
    for x in items:
        perc, avrg, stdev = result[x]
        db((db.submission.id == x) &
           (db.submission.venue_id == venue_id)).update(quality=avrg, error=stdev, percentile=perc)
    # Saving the latest rank update date.
    description = "Ranking without reputation system. All comparisons are used in chronological order"
    db(db.venue.id == venue_id).update(latest_rank_update_date=datetime.utcnow(),
                                       ranking_algo_description=description)
Example #16
def get_ranking_data(course_class, ranking_size):
    ranking = Grade.objects.values('enrollment__student__id').annotate(
        total=Sum(
            Case(When(is_canceled=True, then=0),
                 When(assignment_task__points=None, then=F('score')),
                 default=F('score') * F('assignment_task__points'),
                 output_field=IntegerField())),
        full_name=F('enrollment__student__full_name'),
        student_id=F('enrollment__student__id'),
    ).annotate(
        # this "dense_rank" was throwing an error sometimes, randomly
        # it was not finding the previous "total" annotation
        # so I put it in another "annotate" to respect the dependency
        dense_rank=Rank('total'), ).filter(
            enrollment__course_class=course_class).order_by(
                '-total', 'full_name')[:ranking_size]
    # print(ranking.query)

    return ranking
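Because the queryset starts from values(), each row of the ranking comes back as a dict keyed by the annotation names. A hedged usage sketch, where course_class stands in for a course-class instance from the surrounding app:

# Hypothetical usage of get_ranking_data above; course_class is assumed to exist.
top_ten = get_ranking_data(course_class, ranking_size=10)
for row in top_ten:
    print(row['dense_rank'], row['full_name'], row['total'])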
Example #17
def evaluate_contributors(db, venue_id):
    """This function evaluates reviewers for a venue.
    Currently, this is based on the last comparison made by each reviewer.
    TODO(luca,michael): should we use all comparisons instead?"""

    items, qdistr_param, _ = get_all_items_qdistr_param_and_users(db, venue_id)
    if items is None or len(items) == 0:
        return None
    # Obtaining list of users who did comparisons.
    comp_r = db(db.comparison.venue_id == venue_id).select(db.comparison.user)
    list_of_users = [x.user for x in comp_r]
    list_of_users = list(set(list_of_users))

    rankobj = Rank.from_qdistr_param(items, qdistr_param, cost_obj=None)
    for user in list_of_users:
        last_comparison = db((db.comparison.user == user)
            & (db.comparison.venue_id == venue_id)).select(orderby=~db.comparison.date).first()
        if last_comparison is None:
            # Deleting the db.user_accuracy row for this venue_id and user.
            db((db.user_accuracy.venue_id == venue_id) & (db.user_accuracy.user == user)).delete()
            continue
        ordering = util.get_list(last_comparison.ordering)
        ordering = ordering[::-1]
        val = rankobj.evaluate_ordering(ordering)
        # Normalization
        num_subm_r = db(db.venue.id == venue_id).select(db.venue.number_of_submissions_per_reviewer).first()
        if num_subm_r is None or num_subm_r.number_of_submissions_per_reviewer is None:
            # For compatibility with venues that do not have the constant.
            num_subm = 5
        else:
            num_subm = num_subm_r.number_of_submissions_per_reviewer
        # TODO(michael): num_subm can be zero, take care of it.
        val = min(1, val/float(num_subm))
        # Writing to the DB.
        db.user_accuracy.update_or_insert((db.user_accuracy.venue_id == venue_id) &
                                          (db.user_accuracy.user == user),
                                          venue_id=venue_id,
                                          user=user,
                                          accuracy=val,
                                          reputation=None,
                                          n_ratings=len(ordering))
    # Saving the latest user evaluation date.
    db(db.venue.id == venue_id).update(latest_reviewers_evaluation_date = datetime.utcnow())
Example #18
    def time_cube():
        logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
                            level=logging.DEBUG)
        import time
        from state import State
        with open("../test_sgf/test1.sgf", 'r') as fin:
            game = gomill.sgf.Sgf_game.from_string(fin.read())

        tt0 = 0
        tt1 = 0
        it = 0

        for i in xrange(2):
            board, movepairs = gomill.sgf_moves.get_setup_and_moves(game)
            history = []
            for color, move in movepairs:
                if move:
                    it += 1
                    row, col = move
                    board.play(row, col, color)

                    s = time.clock()
                    c1 = get_cube_tian_zhu_2015(
                        State(board, None, history,
                              BrWr(Rank.from_string('1p'), None)),
                        gomill.common.opponent_of(color))
                    #c1 = get_cube_clark_storkey_2014(State(board, None, history), gomill.common.opponent_of(color))
                    tt0 += time.clock() - s

                    #s = time.clock()
                    #c2 = get_cube_clark_storkey_2014_2(State(board, None, history), gomill.common.opponent_of(color))
                    #tt1 += time.clock() - s

                    #assert np.array_equal(c1, c2)

                    history.append((color, move))

        logging.debug("tt0 = %.3f, %.5f per one " % (tt0, tt0 / it))
        logging.debug("tt1 = %.3f, %.5f per one " % (tt1, tt1 / it))
Example #19
    def test_cube():
        import gomill.boards, gomill.ascii_boards, gomill.common

        from state import State

        logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
                            level=logging.DEBUG)

        b = gomill.boards.Board(3)
        moves = [('b', (0, 1)), ('w', (2, 0)), ('b', (1, 1))]

        b2 = gomill.boards.Board(3)
        for col, (x, y) in moves:
            b.play(x, y, col)
            b2.play(2 - x, y, col)  # mirror s.t. we have the same coords
            # with numpy arrays

        logging.debug("\n" + gomill.ascii_boards.render_board(b2))
        cube = get_cube_tian_zhu_2015(
            State(b, None, moves, BrWr(Rank.from_string('1p'), None)), 'w')
        for a in xrange(cube.shape[0]):
            logging.debug("%d\n%s" % (a, cube[a]))
        logging.debug("\n" + str(get_label_future3_exp(moves, 3)))
Example #20
def process_comparison(db,
                       venue_id,
                       user,
                       sorted_items,
                       new_item,
                       alpha_annealing=0.6):
    """ Function updates quality distributions and rank of submissions (items).

    Arguments:
        - sorted_items is a list of submission ids, sorted by the user such that
        rank(sorted_items[i]) > rank(sorted_items[j]) for i > j

        - new_item is the id of the submission from sorted_items that was new
        to the user. If sorted_items contains only two elements, then
        new_item is None.
    """
    if sorted_items is None or len(sorted_items) <= 1:
        return None
    qdistr_param = get_qdistr_param(db, venue_id, sorted_items)
    # If qdistr_param is None then some submission does not have qualities yet,
    # therefore we cannot process comparison.
    if qdistr_param is None:
        return None
    rankobj = Rank.from_qdistr_param(sorted_items,
                                     qdistr_param,
                                     alpha=alpha_annealing)
    result = rankobj.update(sorted_items, new_item)
    # Updating the DB.
    for x in sorted_items:
        perc, avrg, stdev = result[x]
        # Updating submission table with its quality and error.
        db((db.submission.id == x)
           & (db.submission.venue_id == venue_id)).update(quality=avrg,
                                                          error=stdev)
        # Saving the latest rank update date.
        db(db.venue.id == venue_id).update(
            latest_rank_update_date=datetime.utcnow())
Example #21
    def time_cube():
        logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
                            level=logging.DEBUG)
        import time
        from state import State
        with open("../test_sgf/test1.sgf", 'r') as fin:
            game = gomill.sgf.Sgf_game.from_string(fin.read())

        tt0 = 0
        tt1 = 0
        it = 0

        for i in xrange(2):
            board, movepairs = gomill.sgf_moves.get_setup_and_moves(game)
            history = []
            for color, move in movepairs:
                if move:
                    it += 1
                    row, col = move
                    board.play(row, col, color)


                    s = time.clock()
                    c1 = get_cube_tian_zhu_2015(State(board, None, history, BrWr(Rank.from_string('1p'), None)), gomill.common.opponent_of(color))
                    #c1 = get_cube_clark_storkey_2014(State(board, None, history), gomill.common.opponent_of(color))
                    tt0 += time.clock() - s

                    #s = time.clock()
                    #c2 = get_cube_clark_storkey_2014_2(State(board, None, history), gomill.common.opponent_of(color))
                    #tt1 += time.clock() - s

                    #assert np.array_equal(c1, c2)

                    history.append((color, move))

        logging.debug("tt0 = %.3f, %.5f per one "%(tt0, tt0/it))
        logging.debug("tt1 = %.3f, %.5f per one "%(tt1, tt1/it))
Example #22
    def test_cube():
        import gomill.boards,  gomill.ascii_boards, gomill.common

        from state import State

        logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
                        level=logging.DEBUG)

        b = gomill.boards.Board(3)
        moves = [('b', (0,1)),
                 ('w', (2,0)),
                 ('b', (1,1))]

        b2 = gomill.boards.Board(3)
        for col, (x,y) in moves:
            b.play(x,y, col)
            b2.play(2-x,y, col) # mirror s.t. we have the same coords
                                # with numpy arrays

        logging.debug("\n"+gomill.ascii_boards.render_board(b2))
        cube = get_cube_tian_zhu_2015(State(b, None, moves, BrWr(Rank.from_string('1p'), None)), 'w')
        for a in xrange(cube.shape[0]):
            logging.debug("%d\n%s"%(a,cube[a]))
        logging.debug("\n"+ str(get_label_future3_exp(moves, 3)))
Example #23
 def make_deck(self):
     for rank in self.card_ranks:
         for suit in self.card_suits:
             yield Card(Rank(rank), Suit(suit))
def test_10_equals():
    assert Rank("10") == Rank("10")
def test_2_equals():
    assert Rank("2") == Rank("2")
def test_sort():
    # list.sort() sorts in place and returns None, so compare against sorted() instead.
    assert sorted([Rank("2"), Rank("Q"), Rank("A"), Rank("4")]) == [Rank("A"), Rank("2"), Rank("4"), Rank("Q")]
def test_A_equals():
    assert Rank("A") == Rank("A")
Example #28
    outname = '../paper/figures/synth.wav'
    s = utilities.mk_synth(pf[0], 1, wp[2], pf, c)
    utilities.write_wav(s, wp[2], outname)
    wav, wp, pxx, fxx, p, c, pf = f(outname)
    print '%s: %s' % (outname, pf)

    pxx, fxx = utilities.get_psd(s, wp[2])
    p, c = utilities.get_peaks(pxx, n)
    pf = [fxx[i] for i in p]
    print 'synth: %s' % pf


if __name__ == '__main__':
    n = 5

    if len(sys.argv) == 2:
        n = int(sys.argv[1])

    if n < 2:
        n = 2

    ranks = []

    for wavdir, wavlist in wavfiles.iteritems():
        ranks.append(Rank(wavdir, wavlist, n))

    r = ranks[0]
    print r

    r.chuck()
Example #29
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import remote
import threading
import time
from video import Video
from vr import VR
from rank import Rank
import asyncio

if __name__ == '__main__':
    Video().start()
    VR().start()
    Rank().start()

    asyncio.get_event_loop().run_forever()

#     while input() != 'q':
#         time.sleep(1)
Example #30
def get_item(db, venue_id, user, old_items,
             can_rank_own_submissions=False,
             rank_cost_coefficient=0):
    """
    If a user did not have items to rank then old_items is None or empty string.

    If rank_cost_coefficient is equal zero then no cost function is used which
    corresponds to treating each submission equally.

    Description of a sampling method:
        For each submission we count how many time it was assigned as a task.
        Choose subset (rare_items) of submissions which have the smallest frequency.
        Then we compute probability of all possible mistaken comparisons
        between rare_items and old_items (item from previous tasks).
        After that we randomly sample an item proportional of probability of
        a mistake with the item.

        Note that code wich randomly samples item is in Rank.sample_item(...).
        To ensure that sampled item is from rare_items we initialize
        Rank object (rankobj) only with pool items which is a union of
        rare_items  and old_items.
        This way item is sampled as described above.
    """
    if old_items is None:
        old_items = []
    items, qdistr_param, _ = get_all_items_qdistr_param_and_users(db, venue_id)
    # If items is None then some submission does not have qualities yet;
    # we need to know the qualities of all submissions to correctly choose an
    # item.
    if items is None or len(items) == 0:
        return None
    # Specifying cost object which implements cost function.
    cost_obj = Cost(cost_type='rank_power_alpha',
                    rank_cost_coefficient=rank_cost_coefficient)
    if rank_cost_coefficient == 0:
        cost_obj = None
    if not can_rank_own_submissions:
        # Find submission that is authored by the user.
        submission_ids = db((db.submission.venue_id == venue_id) &
                                (db.submission.user == user)).select(db.submission.id)
        users_submission_ids = [x.id for x in submission_ids]
    else:
        users_submission_ids = []
    # Counting how many times each submission was assigned.
    # TODO(michael): use the field in the submissions, once we confirm that this works.
    frequency = []
    for subm_id in items:
        if (subm_id not in users_submission_ids and
            subm_id not in old_items):
            count = db((db.task.submission_id == subm_id) &
                       (db.task.venue_id == venue_id)).count()
            frequency.append((subm_id, count))
    # Do we have items to sample from?
    if len(frequency) == 0:
        return None
    # Now let's find submissions which have the smallest count number.
    min_count = min([x[1] for x in frequency])
    rare_items = [x[0] for x in frequency if x[1] == min_count]
    if len(rare_items) == 1:
        return rare_items[0]
    # Constructing pool of items.
    pool_items = rare_items[:]
    pool_items.extend(old_items)
    # Fetching quality distribution parameters.
    qdistr_param_pool = []
    for subm_id in pool_items:
        idx = items.index(subm_id)
        qdistr_param_pool.append(qdistr_param[2 * idx])
        qdistr_param_pool.append(qdistr_param[2 * idx + 1])
    rankobj = Rank.from_qdistr_param(pool_items, qdistr_param_pool,
                                     cost_obj=cost_obj)
    return rankobj.sample_item(old_items, black_items=[])
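The docstring's sampling method starts by restricting the pool to the least-assigned submissions. Below is a standalone sketch of that rare-item selection step, factored out of the loop above, with a plain dict standing in for the task counts:

# Sketch of the "rare items" step described in get_item's docstring; counts maps
# submission id -> number of times it was already assigned as a task.
def pick_rare_items(counts):
    if not counts:
        return []
    min_count = min(counts.values())
    return [subm_id for subm_id, c in counts.items() if c == min_count]

# e.g. pick_rare_items({11: 3, 12: 1, 13: 1, 14: 2}) -> [12, 13];
# choosing among the survivors is then left to Rank.sample_item(...).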
Example #31
def run_reputation_system(db, venue_id, alpha_annealing=0.5,
                          num_of_iterations=4):
    """ Function calculates:
            - reputation for each user
            - user precision (quality as a reviewer)
    """
    # Fetching submission id and authors.
    # TODO(michael): take care of a case when a user has multiple submission.
    sub = db(db.submission.venue_id == venue_id).select(db.submission.id,
                                                        db.submission.author)
    items = []
    author2item = {}
    qdistr_param_default = []
    for x in sub:
        items.append(x.id)
        author2item[x.author] = x.id
        qdistr_param_default.append(AVRG)
        qdistr_param_default.append(STDEV)
    # Fetching list of comparisons.
    comparison_list_r = db(db.comparison.venue_id == venue_id).select(orderby=db.comparison.date)
    # Creating dictionaries
    # author: reputation
    # author: accuracy (confidence or reviewer's grade)
    # author: last ordering
    author2rep, author2accuracy, author2ordering = {}, {}, {}
    # Initializing these dictionaries.
    for comp in comparison_list_r:
        # Check if comparison is valid.
        if comp.is_valid is None or comp.is_valid == True:
            # Reverses the list.
            sorted_items = util.get_list(comp.ordering)[::-1]
            if len(sorted_items) < 2:
                continue
            author2ordering[comp.author] = sorted_items
            # Initializing reviewers' reputation and accuracy.
            author2rep[comp.author] = alpha_annealing
            author2accuracy[comp.author] = -1
    # Okay, now we are ready to run main iterations.
    result = None
    for it in xrange(num_of_iterations):
        # At the beginning of each iteration, initialize rankobj with default
        # item qualities.
        rankobj = Rank.from_qdistr_param(items, qdistr_param_default,
                                         alpha=alpha_annealing)
        # Okay, now we update quality distributions with comparisons
        # using reputation of users as annealing coefficient.
        for comp in comparison_list_r:
            # Processes the comparison, if valid.
            if comp.is_valid is None or comp.is_valid == True:
                # Reverses the list.
                sorted_items = util.get_list(comp.ordering)[::-1]
                if len(sorted_items) < 2:
                    continue
                alpha = author2rep[comp.author]
                annealing_type='before_normalization_uniform'
                #annealing_type='after_normalization'
                #annealing_type='before_normalization_gauss'
                result = rankobj.update(sorted_items, new_item=comp.new_item,
                                        alpha_annealing=alpha,
                                        annealing_type=annealing_type)
        # At the end of each iteration, compute the reputation to use in the next iteration.
        if result is None:
            return
        for user_id in author2rep:
            if author2item.has_key(user_id):
                perc, avrg, stdev = result[author2item[user_id]]
                rank = perc / 100.0
            else:
                rank = 1 # TODO(michael): Should we trust unknown reviewer?
            ordering = author2ordering[user_id]
            accuracy = rankobj.evaluate_ordering_using_dirichlet(ordering)
            author2accuracy[user_id] = accuracy
            # Compute the user's reputation.
            author2rep[user_id] = (rank * accuracy) ** 0.5
    # Updating the DB with submission ranking, users' accuracy and reputation.
    for x in items:
        perc, avrg, stdev = result[x]
        db((db.submission.id == x) &
           (db.submission.venue_id == venue_id)).update(quality=avrg, error=stdev, percentile=perc)
    for user_id in author2accuracy:
        db.user_accuracy.update_or_insert((db.user_accuracy.venue_id == venue_id) &
                                          (db.user_accuracy.user_id == user_id),
                                          venue_id=venue_id,
                                          user_id=user_id,
                                          accuracy=author2accuracy[user_id],
                                          reputation=author2rep[user_id],
                                          n_ratings=len(author2ordering[user_id]))
    # Saving evaluation date.
    t = datetime.utcnow()
    db(db.venue.id == venue_id).update(latest_reviewers_evaluation_date = t,
                                       latest_rank_update_date = t)
    # Computing final grades for convenience.
    # TODO(michael): instead of calling method we can compute final directly
    # to optimize db access.
    compute_final_grades(db, venue_id)
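Each iteration of run_reputation_system ends by setting a reviewer's reputation to the geometric mean of their own submission's rank (percentile / 100) and the accuracy of their last ordering. A worked example of that update with hypothetical numbers:

# Worked example of the reputation update above (perc and accuracy are made up).
perc, accuracy = 81.0, 0.49
rank = perc / 100.0                    # 0.81
reputation = (rank * accuracy) ** 0.5  # sqrt(0.81 * 0.49) = 0.63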
Example #32
File: card.py Project: ygkn/ros2-porker
 def __init__(self, suit_number, rank_number):
     self.suit = Suit(suit_number)
     self.rank = Rank(rank_number)
def test_Q_equals():
    assert Rank("Q") == Rank("Q")
def test_K_equals():
    assert Rank("K") == Rank("K")
Example #35
def run_reputation_system(db, venue_id, alpha_annealing=0.5,
                          num_of_iterations=4, last_compar_param=10):
    """ Function calculates submission qualities, user's reputation, reviewer's
    quality and final grades.
    Arguments:
        - last_compar_param works as a switch between two types of reputation system.
        If the argument is None, then we update using all comparisons once, in chronological order.
        Otherwise we use the "small alpha" approach, where last_compar_param is the
        number of iterations.
    """
    # Reading the DB to get submission and user information.
    # Lists have l suffix, dictionaries user -> val have d suffix.
    user_l, subm_l, ordering_l, subm_d, ordering_d = read_db_for_rep_sys(db, venue_id, last_compar_param)
    # Initializing the rest of containers.
    qdistr_param_default = []
    for subm in subm_l:
        qdistr_param_default.append(AVRG)
        qdistr_param_default.append(STDEV)
    rep_d = {user: alpha_annealing for user in user_l}
    accuracy_d = {user: 0 for user in user_l}

    # Okay, now we are ready to run main iterations.
    result = None
    for it in xrange(num_of_iterations):
        # At the beginning of each iteration, initialize rankobj with default
        # submission qualities.
        rankobj = Rank.from_qdistr_param(subm_l, qdistr_param_default,
                                         alpha=alpha_annealing)
        # Okay, now we update quality distributions with comparisons
        # using reputation of users as annealing coefficient.
        if last_compar_param is None:
            # Using all comparisons in chronological order.
            for ordering, user in ordering_l:
                alpha = rep_d[user]
                result = rankobj.update(ordering, alpha_annealing=alpha)
        else:
            # Using only last comparisons and iterating many times with small alpha.
            for i in xrange(last_compar_param):
                # Generating a random permutation.
                idxs = range(len(ordering_l))
                random.shuffle(idxs)
                for idx in idxs:
                    ordering, user = ordering_l[idx]
                    alpha = rep_d[user]
                    alpha = 1 - (1 - alpha) ** (1.0/(4*last_compar_param))
                    #alpha = alpha / float(2*last_compar_param)
                    result = rankobj.update(ordering, alpha_annealing=alpha)
        if result is None:
            return
        # Computing reputation.
        for user in rep_d:
            if subm_d.has_key(user):
                perc, avrg, stdev = result[subm_d[user]]
                rank = perc / 100.0
            else:
                rank = 0.5 # TODO(michael): Should we trust unknown reviewer?
            if ordering_d.has_key(user):
                ordering = ordering_d[user]
                accuracy = rankobj.evaluate_ordering_using_dirichlet(ordering)
            else:
                accuracy = 0
            accuracy_d[user] = accuracy
            # Compute the user's reputation.
            rep_d[user] = (rank * accuracy) ** 0.5

    # Computing submission grades.
    subm_grade_d = {}
    for user, subm in subm_d.iteritems():
        perc, avrg, stdev = result[subm]
        subm_grade_d[subm] = perc / 100.0
    # Computing final grades.
    perc_final_d, final_grade_d = compute_final_grades_helper(user_l, subm_grade_d, rep_d)
    if last_compar_param is None:
        description = "Reputation system on all comparisons in chronological order"
        if num_of_iterations == 1:
            description = "Ranking without reputation system. All comparisons are used in chronological order"
    else:
        description = "Reputation system with small alpha and only last comparisons"
        if num_of_iterations == 1:
            description = "No reputation system and small alpha !?!?"
    # Writing to the DB.
    write_to_db_for_rep_sys(db, venue_id, result, subm_l, user_l, ordering_d,
                            accuracy_d, rep_d, perc_final_d, final_grade_d,
                            ranking_algo_description=description)
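In the "small alpha" branch, the per-update coefficient is derived from the reviewer's reputation so that 4 * last_compar_param such updates compose back to the original alpha. A quick numeric check of that formula:

# Numeric check of the per-pass alpha used in run_reputation_system above.
alpha, last_compar_param = 0.5, 10
alpha_pass = 1 - (1 - alpha) ** (1.0 / (4 * last_compar_param))
print(round(alpha_pass, 4))            # 0.0172
# Composing 4 * last_compar_param updates recovers alpha: 1 - (1 - alpha_pass) ** 40 is about 0.5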
Example #36
from menu import Menu
from play import Play
from fps import Fps
from rank import Rank
from difficulty import Difficulty
import GVar

window = Window(GVar.WIDTH, GVar.HEIGHT)
window.set_title("Space Invaders")
window.set_background_color((0, 0, 0))

keyboard = Keyboard()
menu = Menu(window)
play = Play(window, "./assets/lvl/level_1.txt")
difficulty_menu = Difficulty(window)
rank = Rank(window)
clock = Clock()
fps = Fps(window)

window.update()
while GVar.STATE != 4:
    window.set_background_color((0, 0, 0))
    if keyboard.key_pressed("esc"):
        GVar.STATE = 0
        play.__init__(window, "./assets/lvl/level_1.txt")
    if GVar.STATE == 0:
        menu.run()
    if GVar.STATE == 1:
        if GVar.DIFC_CHOSEN == True:
            play.__init__(window, "./assets/lvl/level_1.txt")
            GVar.DIFC_CHOSEN = False
Example #37
    # Pop the csv object to reduce memory usage
    del data_table
    print("Runtime : %.4f" % (runtime - startTime))

    # Create Column Combination
    column_combination = ColumnCombination(data_dict).create_combination()

    print("Column combination Created.")
    runtime2 = time.time()
    print("Runtime : %.4f" % (runtime2 - runtime))

    # Create Scenario Dictionary - Transformation + Guessing Scenario value
    scenario_dict = Transformation(data_dict,
                                   column_combination).transformation()

    print("Scenario dictionary created")
    runtime3 = time.time()
    print("Runtime : %.4f" % (runtime3 - runtime2))

    # Calculate Scenario score and Rank. Top 20 will be printed
    picked_scenario = Rank(scenario_dict).rank()

    # Final Time Check
    endTime = time.time() - startTime

    json = JsonGenerator(picked_scenario).generate_json()
    print("Program Runtime : %.4f" % endTime)

    print(json)
def test_J_equals():
    assert Rank("J") == Rank("J")