def add_states_toQ(states, Q_table):
    # Represent the hand, field, and graveyard as multisets, pad the remaining
    # (action-value) columns with zeros, and append the row to the Q-table.
    array = [ms.Multiset(states["hands"]), ms.Multiset(states["fields"]),
             ms.Multiset(states["cemetary"])]
    array += [0] * (Q_table.shape[1] - 3)
    s = pd.Series(array, index=Q_table.columns)
    Q_table = Q_table.append(s, ignore_index=True)
    return Q_table
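A minimal usage sketch, assuming `ms` aliases the `multiset` package, `pd` is pandas, and a pandas version that still provides `DataFrame.append` (removed in pandas 2.0); the column names and states below are hypothetical.

import multiset as ms
import pandas as pd

Q_table = pd.DataFrame(columns=["hands", "fields", "cemetary", "draw", "summon"])
states = {"hands": ["A", "A", "B"], "fields": [], "cemetary": ["C"]}
Q_table = add_states_toQ(states, Q_table)
print(Q_table.shape)  # (1, 5): three multiset columns plus two zero-initialised action values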
Example #2
def evaluate_reconstruction_accuracy(padded_true_batch_first,
                                     padded_suggestions_seq_first,
                                     tb_write=None):
    total_elems = 0
    total_bags = padded_true_batch_first.shape[0]

    matched_elems = 0
    matched_bags = 0

    table_rows = ['| True  |   Predicted |   ']

    for true_elems, suggested_elems in zip(
            padded_true_batch_first.cpu().numpy(),
            padded_suggestions_seq_first.cpu().numpy().T):
        true_elems = multiset.Multiset(
            true_elems[true_elems != mchef_config.PAD_VALUE].tolist())
        suggested_elems = multiset.Multiset(suggested_elems[
            suggested_elems != mchef_config.PAD_VALUE].tolist())
        table_rows.append('| ' + ' | '.join([
            ','.join(map(str, true_elems)), ','.join(map(str, suggested_elems))
        ]) + '|   ')

        total_elems += len(true_elems)
        matched_bags += int(true_elems == suggested_elems)
        matched_elems += len(true_elems.intersection(suggested_elems))

    if tb_write is not None:
        tb_write.add_text('Reconstruction Results', '\n'.join(table_rows))

    return float(matched_bags) / total_bags, float(matched_elems) / total_elems
Example #3
def _rst_parseval(preds, golds):
    """
    :type preds: list of list of (int, int, str, str)
    :type golds: list of list of (int, int, str, str)
    :rtype: {str: {str: Any}}
    """
    assert len(preds) == len(golds)

    scores = {}  # {str: {str: Any}}

    total_ok_dict = {}
    total_pred_dict = {}
    total_gold_dict = {}
    for key in ["S", "S+N", "S+R", "S+N+R"]:
        total_ok_dict[key] = 0.0
        total_pred_dict[key] = 0.0
        total_gold_dict[key] = 0.0

    for pred_spans, gold_spans in zip(preds, golds):
        pred_spans_dict = {}
        gold_spans_dict = {}
        pred_spans_dict["S"] = [(b, e) for b, e, r, n in pred_spans]
        gold_spans_dict["S"] = [(b, e) for b, e, r, n in gold_spans]
        pred_spans_dict["S+N"] = [(b, e, n) for b, e, r, n in pred_spans]
        gold_spans_dict["S+N"] = [(b, e, n) for b, e, r, n in gold_spans]
        pred_spans_dict["S+R"] = [(b, e, r) for b, e, r, n in pred_spans]
        gold_spans_dict["S+R"] = [(b, e, r) for b, e, r, n in gold_spans]
        pred_spans_dict["S+N+R"] = pred_spans
        gold_spans_dict["S+N+R"] = gold_spans

        for key in ["S", "S+N", "S+R", "S+N+R"]:
            a = multiset.Multiset(pred_spans_dict[key])
            b = multiset.Multiset(gold_spans_dict[key])
            n_ok = float(len(a & b))
            n_pred = float(len(a))
            n_gold = float(len(b))
            total_ok_dict[key] += n_ok
            total_pred_dict[key] += n_pred
            total_gold_dict[key] += n_gold

    for key in ["S", "S+N", "S+R", "S+N+R"]:
        precision = float(total_ok_dict[key]) / float(total_pred_dict[key])
        recall = float(total_ok_dict[key]) / float(total_gold_dict[key])
        f1 = (2 * precision * recall) / (precision + recall)
        precision_info = "%d/%d" % (total_ok_dict[key], total_pred_dict[key])
        recall_info = "%d/%d" % (total_ok_dict[key], total_gold_dict[key])
        scores[key] = {
            "Precision": precision,
            "Recall": recall,
            "Micro F1": f1,
            "Precision_info": precision_info,
            "Recall_info": recall_info
        }

    return scores
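A tiny check on a single toy tree (assuming `multiset` is imported): the unlabeled spans match exactly, while only one span survives once nuclearity and relation labels are added.

gold = [[(0, 1, "Elaboration", "NS"), (0, 3, "Joint", "NN")]]
pred = [[(0, 1, "Elaboration", "NS"), (0, 3, "Attribution", "NS")]]
scores = _rst_parseval(pred, gold)
print(scores["S"]["Micro F1"])      # 1.0: both spans match when labels are ignored
print(scores["S+N+R"]["Micro F1"])  # 0.5: only (0, 1, "Elaboration", "NS") matches fully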
Example #4
def computeOperonDifferences(operon1, operon2):
    set1 = multiset.Multiset()
    set2 = multiset.Multiset()

    for op in operon1:
        set1.add(op.split('_')[0].strip())
    for op in operon2:
        set2.add(op.split('_')[0].strip())
    set3 = set1.symmetric_difference(set2)

    return len(set3)
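A small sketch with hypothetical gene names, showing that the multiset symmetric difference counts unmatched copies rather than only distinct elements.

operon_a = ['geneA_1', 'geneA_2', 'geneB_1']   # set1 becomes {geneA: 2, geneB: 1}
operon_b = ['geneA_3', 'geneC_1']              # set2 becomes {geneA: 1, geneC: 1}
print(computeOperonDifferences(operon_a, operon_b))  # 3: one extra geneA copy plus geneB and geneC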
Example #5
def extract_distributions(data, max_imgs=conf.MAX_LOADED_IMAGES):
    log = section_logger(1)

    log('Extracting distributions ')

    global_objects = ms.Multiset()
    occurrences = ms.Multiset()
    images = []

    for i, image in enumerate(data['objects']):
        if i > max_imgs:
            break

        if not check_image_exists(image['image_id']):
            print('Skipping nonexistent image: {}.jpg'.format(
                image['image_id']))
            continue

        if i % 25 == 0:
            log("Processing image {}".format(i))

        image_objs = {}
        image_objs['id'] = image['image_id']

        objs = ms.Multiset()
        objs_pd = {}
        total = 0
        appeared = set()

        for j, obj in enumerate(image['objects']):
            if len(obj['names']) > 0:
                obj_name = obj['names'][0]
                obj_name = simplify(obj_name)

                if len(obj_name.strip()) == 0:
                    continue

                total += 1
                appeared.add(obj_name)

                global_objects.add(obj_name)
                objs.add(obj_name)

        for obj in appeared:
            occurrences.add(obj)

        for key in objs.distinct_elements():
            objs_pd[key] = objs.get(key, 0) / total

        image_objs['pds'] = objs_pd

        images.append(image_objs)

    return global_objects, occurrences, images
Example #6
def generateMutliProcessorConfigDFS(prevMultiConfig, prevProcessorIdx,
                                    L1Epsilon, VEpsilon):
    configsToReturn = []

    if multiset.Multiset(
            tuple(prevMultiConfig)) in testedMultiProcessorConfigs:
        # print('here ', prevMultiConfig)
        return configsToReturn

    # return if we have reached the max length of the multiprocessor config
    if prevProcessorIdx == NUM_PROCESSORS - 1:
        # print('here 2 ', prevMultiConfig)
        return configsToReturn

    for i in range(len(L1Epsilon)):
        currentMultiConfig = prevMultiConfig.copy()
        currentIdx = prevProcessorIdx + 1
        currentMultiConfig[currentIdx] = i
        # print(currentMultiConfig)

        configs = None
        currentUtilization = getMultiProcessorUtilization(
            currentMultiConfig, L1Epsilon, VEpsilon)

        if currentUtilization < NUM_PROCESSORS:

            if currentMultiConfig[-1] != -1:
                testedMultiProcessorConfigs.append(
                    multiset.Multiset(tuple(currentMultiConfig)))
                configsToReturn.append(currentMultiConfig)

            else:
                configs = generateMutliProcessorConfigDFS(
                    currentMultiConfig, currentIdx, L1Epsilon, VEpsilon)

                if len(configs) == 0:
                    configsToReturn.append(currentMultiConfig)
                    continue

                for j in range(len(configs)):
                    # if getSingleProcessorUtilization(configs[j], VEpsilon) > 1 - EPSILON:
                    if -1 not in configs[j]:
                        configsToReturn.append(configs[j])
                    # configsToReturn.append(configs[j])

        elif currentUtilization == NUM_PROCESSORS:
            configsToReturn.append(currentMultiConfig)

    testedMultiProcessorConfigs.append(
        multiset.Multiset(tuple(prevMultiConfig)))
    return configsToReturn
Example #7
def existed_state(states, Q_table):
    hands = ms.Multiset(states["hands"])
    fields = ms.Multiset(states["fields"])
    cemetary = ms.Multiset(states["cemetary"])
    # Note: this query string is built but never used below.
    cond = f"hands=='{hands}'"
    cond += f" & fields=='{fields}'"
    cond += f" & cemetary=='{cemetary}'"

    # Multiplying the boolean Series acts as a logical AND across the three column comparisons.
    tf = (Q_table.hands == hands) * (Q_table.fields == fields) * (Q_table.cemetary == cemetary)
    if tf.sum() == 0:
        return False
    else:
        ind = Q_table.index[tf.values][0]
        return ind
Example #8
 def test_score_all(self):
     cases = {
         '111111': (8000, ''),
         '234662': (0, '662234'),
         '222221': (900, ''),
         '444235': (450, '32'),
         '111222': (1200, ''),
         '115234': (250, '234'),
         '111554': (1100, '4'),
         '152346': (150, '2346')
     }
     for score in cases.keys():
         mset = ms.Multiset(score)
         self.assertEqual(game.score_all(mset),
                          (cases[score][0], ms.Multiset(cases[score][1])))
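For reference, a short sketch (assuming `ms` is the `multiset` package) of how a dice string such as those test keys becomes a multiset of digit characters.

import multiset as ms

dice = ms.Multiset('234662')
print(sorted(dice))      # ['2', '2', '3', '4', '6', '6']
print(dice.get('6', 0))  # 2: the multiplicity of the character '6'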
Example #9
    def __init__(self, spiel):
        pos = self.leadenPos()
        waren = multiset.Multiset(['honig', 'mandeln', 'orangen'])
        self.oase = Laden.Laden("oase", waren, 2, pos[0])
        waren = multiset.Multiset(['feigen', 'datteln', 'minze'])
        self.garten = Laden.Laden("garten", waren, 2, pos[1])
        waren = multiset.Multiset(['ring', 'halskette', 'ohrringe'])
        self.juwelier = Laden.Laden("juwelier", waren, 3, pos[2])
        waren = multiset.Multiset(['wolle', 'seide', 'leder'])
        self.faerberei = Laden.Laden("faerberei", waren, 3, pos[3])
        self.leaden = [self.oase, self.garten, self.juwelier, self.faerberei]

        pos = self.stadtPos()
        stadt0 = Stadt.Stadt(pos[0], spiel)
        stadt1 = Stadt.Stadt(pos[1], spiel)
        stadt2 = Stadt.Stadt(pos[2], spiel)
        stadt3 = Stadt.Stadt(pos[3], spiel)
        self.staedte = [stadt0, stadt1, stadt2, stadt3]

        self.gebirge = []

        positionenKategorieA = [(2, 0), (7, 0), (2, 3)]
        random.shuffle(positionenKategorieA)
        for i in range(0, 3):
            self.gebirge.append(
                Gebirge.Gebirge(i + 1,
                                self.offset_to_cube(positionenKategorieA[i])))

        positionenKategorieB = [(1, 7), (6, 7), (6, 4)]
        random.shuffle(positionenKategorieB)
        for i in range(0, 3):
            self.gebirge.append(
                Gebirge.Gebirge(i + 4,
                                self.offset_to_cube(positionenKategorieB[i])))

        positionenKategorieC = [(3, 0), (4, 3), (7, 4), (4, 5), (2, 7)]
        random.shuffle(positionenKategorieC)
        for i in range(0, 5):
            self.gebirge.append(
                Gebirge.Gebirge(i + 7,
                                self.offset_to_cube(positionenKategorieC[i])))

        positionenKategorieD = [(5, 1), (3, 3), (1, 4), (3, 5), (5, 8)]
        random.shuffle(positionenKategorieD)
        for i in range(0, 5):
            self.gebirge.append(
                Gebirge.Gebirge(i + 12,
                                self.offset_to_cube(positionenKategorieD[i])))
Example #10
 def intersection(self, other):
     '''
     (SkipList Object, SkipList Object) --> (SkipList Object)
     
     Given a SkipList (self) and another SkipList object (other), return a
     third SkipList object that represents the intersection of self and other.
     '''
     import multiset
     #create an empty skiplist
     temp = multiset.Multiset()
     #record start values of each skiplist
     node = self.start
     node2 = other.start
     #check None cases: if either list is empty, the intersection is empty
     if ((node == None) or (node2 == None)):
         return (temp)
     #move both nodes to the last row
     while (node.bot_link != None):
         node = node.bot_link
     while (node2.bot_link != None):
         node2 = node2.bot_link
     #loop through the values in the row of the first node
     while ((node.next_link.data) != None):
         val = node.next_link.data
         #count how many copies of the value each skiplist contains
         count = node.find_val(val)
         count2 = node2.find_val(val)
         #the intersection keeps the smaller of the two counts
         for i in range(0, min(count, count2)):
             temp.inserter(val)
         #advance to the next value in the bottom row
         node = node.next_link
     return temp
Example #11
def get_console_bag_for_dir(directory: Optional[str]) -> multiset.Multiset:
    bag = multiset.Multiset()
    for graph in graphs_in_dir(directory):
        try:
            node_types = nx.get_node_attributes(graph, "node type")
            console_node_candidates = [
                k for k, v in node_types.items() if v == "web API"
                and graph.nodes[k].get("method") == "console.log"
            ]
            if console_node_candidates:
                cln = console_node_candidates[0]
                for u, v, eid, args in graph.in_edges(cln,
                                                      data="args",
                                                      keys=True):
                    if args:
                        jargs = json.loads(args)
                        url = jargs.get("location", {}).get("url")
                        if url:
                            bits = urlparse(url)
                            hostname = bits.hostname
                            upath = bits.path
                        else:
                            hostname = upath = None
                        bag.add(
                            ConsoleTuple(
                                jargs.get("source"),
                                jargs.get("level"),
                                get_sld(hostname) if hostname else None,
                                upath,
                            ))
        except:
            logger.exception(
                f"error processing graph in {directory} (skipping)")
    return bag
Example #12
 def update_multisets(self):
     multiset = {}
     for node in self.graph.nodes:
         multiset[node] = m.Multiset()
         for neighbor in nx.all_neighbors(self.graph, node):
             multiset[node].add(self.graph.nodes[neighbor]["wl_colour"])
             pass
     nx.set_node_attributes(self.graph, multiset, "neighbour_multiset")
Example #13
 def __init__(self, freq, period):
     self.frequency = freq
     self.period = period
     self.next_scan_time_by_iface = {}
     self.next_scan_time = 0
     self.do_not_retry_before = {}
     self.interfaces = multiset.Multiset()
     self.last_hit = 0
Example #14
 def set_initial_multiset(self, g):
     multiset = {}
     for node in g.nodes:
         multiset[node] = m.Multiset()
         for neighbor in nx.all_neighbors(g, node):
             multiset[node].add(g.nodes[neighbor]["wl_colour"])
             pass
     nx.set_node_attributes(g, multiset, "neighbour_multiset")
     return g
Example #15
def compute_tree_distance(spans1, spans2, coef):
    """
    :type spans1: list of (int, int)
    :type spans2: list of (int, int)
    :type coef: float
    :rtype: float
    """
    assert len(spans1) == len(spans2)

    spans1 = multiset.Multiset(spans1)
    spans2 = multiset.Multiset(spans2)

    assert len(spans1) == len(spans2)
    dist = len(spans1) - len(spans1 & spans2)
    dist = float(dist)

    dist = coef * dist
    return dist
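A quick sanity check with two toy span lists (assuming `multiset` is imported).

spans_a = [(0, 1), (0, 3), (2, 3)]
spans_b = [(0, 1), (1, 3), (2, 3)]
print(compute_tree_distance(spans_a, spans_b, coef=1.0))  # 1.0: the trees differ in one span
print(compute_tree_distance(spans_a, spans_a, coef=0.5))  # 0.0: identical trees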
Example #16
def get_request_bag_for_dir(dirname: Optional[str]) -> multiset.Multiset:
    bag_map = multiset.Multiset()
    for graph in find_3p_nonad_graphs(dirname):
        resource_nodes = [k for k, v in nx.get_node_attributes(graph, "node type").items() if v == "resource"]
        for k, v in nx.get_node_attributes(graph, "node type").items():
            if v == 'resource':
                url_fields = urlparse(graph.nodes[k]["url"])
                etld1 = get_sld(url_fields.hostname)
                bag_map.update((etld1, rt) for n1, n2, eid, rt in graph.in_edges(k, data="request type", keys=True))
    return bag_map
Example #17
def get_node_bag_for_dir(dirname: Optional[str]) -> multiset.Multiset:
    bag_map = multiset.Multiset()
    for graph in find_3p_nonad_graphs(dirname):
        node_types = nx.get_node_attributes(graph, "node type")
        html_nodes = [
            graph.nodes[k].get("tag name") for k, v in node_types.items()
            if v == "HTML element"
        ]
        bag_map.update(Counter(html_nodes))
    return bag_map
Example #18
def fitness(lst):
    masked = list(itr.compress(wts, lst))
    msum = sum(masked)

    lstms = ms.Multiset(masked)

    for m in ans:
        if m.issubset(lstms):
            return msum

    return (maxsum + 1)
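A minimal sketch with hypothetical module-level globals (`wts`, `ans`, `maxsum` and the aliases `itr`/`ms`), which the original script is assumed to define elsewhere.

import itertools as itr
import multiset as ms

wts = [2, 3, 5, 7]            # candidate weights
ans = [ms.Multiset([2, 5])]   # target weight multisets that count as a solution
maxsum = sum(wts)             # sentinel for selections that solve nothing

print(fitness([1, 0, 1, 0]))  # 7: the selected weights {2, 5} cover ans[0]
print(fitness([0, 1, 0, 0]))  # 18: no target is covered, so maxsum + 1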
Example #19
 def __init__(self, name, position, spiel):
     self.name = name
     self.bedarfe = []
     self.position = position
     self.spiel = spiel
     self.weitenWuerfel = Wuerfel.Wuerfel("W6")
     self.hoehenWuerfel = Wuerfel.Wuerfel("W4")
     self.bewegungspunkte = 0
     self.flughoehe = 0
     self.inventory = multiset.Multiset()
     self.geld = 15
     self.letzterZug = []
     ki = Ki.Ki(self, spiel)
     print("erstelle Spieler: ", self.name, "in", self.position)
Example #20
    def bedarfMachbar(self, bedarf):
        gesamteAuslage = multiset.Multiset()
        for laden in self.spielfeld.leaden:
            for ware in laden.auslage:
                gesamteAuslage.add(ware)
        for ware in self.spieler.inventory:
            gesamteAuslage.add(ware)

        for ware in bedarf.waren:
            if not self.wareMachbar(ware, gesamteAuslage):
                # print("nicht machbar:",bedarf,"es fehlt:",ware)
                return False

        return True
Example #21
        def output_mapper(dict_in):
            prediction_tokenized = dict_in['tgt']
            prediction = MolTransformerTokenizer.from_tokens(prediction_tokenized)
            prediction_split = prediction.split('.')
            pred_res = []
            for smi in prediction_split:
                try:
                    pred_res.append(rdkit_general_ops.canconicalize(smi))
                except Exception:
                    pass

            pred_res = pred_res[:1]

            out = multiset.Multiset(pred_res)
            return out
Example #22
def formatAndComputeOperonDifferences(operon1, operon2):
    noDuplicatesSet1 = set()
    noDuplicatesSet2 = set()

    set1 = multiset.Multiset()
    set2 = multiset.Multiset()

    operon1 = operon1.replace('-', '')
    operon1 = operon1.replace('[', '')
    operon1 = operon1.replace(']', '')

    operon2 = operon2.replace('-', '')
    operon2 = operon2.replace('[', '')
    operon2 = operon2.replace(']', '')

    operon1List = operon1.split(',')
    operon2List = operon2.split(',')

    noWhiteSpaceOperon1List = []
    noWhiteSpaceOperon2List = []

    for op in operon1List:
        set1.add(op.split('_')[0].strip())
        noDuplicatesSet1.add(op.split('_')[0].strip())
        noWhiteSpaceOperon1List.append(op.strip())

    for op in operon2List:
        set2.add(op.split('_')[0].strip())
        noDuplicatesSet2.add(op.split('_')[0].strip())
        noWhiteSpaceOperon2List.append(op.strip())

    set3 = set1.symmetric_difference(set2)
    noDuplicatesSet3 = noDuplicatesSet1.symmetric_difference(noDuplicatesSet2)

    return len(set3), noWhiteSpaceOperon1List, noWhiteSpaceOperon2List, len(
        noDuplicatesSet3)
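A small sketch with hypothetical bracketed operon strings, illustrating how the multiset count differs from the deduplicated set count.

diff, ops1, ops2, unique_diff = formatAndComputeOperonDifferences(
    '[geneA_1, geneA_2, geneB_1]', '[geneA_3, geneC_1]')
print(diff)         # 3: the multiset symmetric difference counts the extra geneA copy
print(unique_diff)  # 2: as plain sets, only geneB and geneC differ
print(ops1)         # ['geneA_1', 'geneA_2', 'geneB_1']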
Example #23
def game_full():
    dice = roll()
    print(dice)
    score, d = score_all(dice)
    print('score: ' + str(score))
    if score == 0:
        print('round score: 0')
        return 0

    while True:
        s = 0
        case = input()
        if case == 'stop':
            score += s
            break
        elif case == 'fix':
            print('enter dice: ', end='')
            inp = input()
            bad_select = False  # TODO rewrite all input checks with exceptions
            for i in inp:
                if i not in dice.difference(d):
                    bad_select = True
                    break
            if bad_select:
                print('bad input, after "fix" you must enter scoring dice values from the current roll!')
                continue
            fix = ms.Multiset(inp)
            dice.difference_update(fix)
            s = score_all(fix)[0]
            print('fixed: ' + str(fix))
            if len(dice) == 0:
                dice = roll()
            else:
                dice = roll(len(dice))
            print(dice)
            s, d = score_all(dice)
            if s == 0:
                score = 0
                print('fire!')
                break
            else:
                print('score: ' + str(s))
                score += s
        else:
            print('bad input, use: "fix" or "stop" command')
    print('round score: ' + str(score))
    return score
Example #24
    def is_expected_legal_notices(self, legal_notices_path):
        """
        :param legal_notices_path: Path to legal notices
        Return True if legal notices are as expected, otherwise assert.
        :return: True if legal notices are as expected
        """

        default_legal_notices = set(self._config["DEFAULT"]["legal_notices"].split())
        legal_notices = multiset.Multiset(self._config[self.product_type]["legal_notices"].split())
        legal_notices += default_legal_notices

        stdout, stderr = kubectl.exec(' '.join([self.name, '-- ls -R ' + legal_notices_path]))
        for line in stdout:
            if line:
                logger.debug('Found legal notice ' + line)
                legal_notices.remove(line, 1)
        assert len(legal_notices) == 0, 'Did not find all legal notices'
        return True
Example #25
    def combine_list(self, other):
        '''
        (SkipList Object, SkipList Object) --> SkipList Object
        
        Given two SkipLists self and other, return a third SkipList object
        which contains a union of the node values of each SkipList.
        '''
        import multiset

        #create new SkipList
        temp = multiset.Multiset()
        #identify start points
        node = self.start
        node2 = other.start
        #check for empty lists and return early accordingly
        if ((node == None) and (node2 == None)):
            return temp
        elif ((node == None) and (node2 != None)):
            temp.start = other.start
            temp.end = other.end
            return temp
        elif ((node2 == None) and (node != None)):
            temp.start = self.start
            temp.end = self.end
            return temp
        #both nodes non-empty
        #move both nodes to the last row
        while (node.bot_link != None):
            node = node.bot_link
        while (node2.bot_link != None):
            node2 = node2.bot_link
        #loop through the first row of the node
        while ((node.next_link.data) != None):
            val = (node.next_link.data)
            #insert first lists values into temp
            temp.inserter(val)
            node = node.next_link
        #loop through the second node's row
        while ((node2.next_link.data) != None):
            val = (node2.next_link.data)
            #insert second list values into temp
            temp.inserter(val)
            node2 = node2.next_link
        return temp
Example #26
    def remove_shared(self, other):
        '''
        (SkipList Object, SkipList Object) --> (SkipList Object)
        
        Take all values in the first SkipList (self), and remove the ones that
        are shared in the second SkipList (other).
        Returns a SkipList object
        '''
        import multiset

        node = self.start
        node2 = other.start
        #create a new skiplist
        temp = multiset.Multiset()
        #exit early in special cases where either node or node2 is None
        if node == None:
            return temp
        elif node2 == None:
            return self
        #move both nodes to the last row
        while (node.bot_link != None):
            node = node.bot_link
        while (node2.bot_link != None):
            node2 = node2.bot_link
        #for each value in the first list's bottom row, compare counts with the second list
        while ((node.next_link.data) != None):
            #count how many of the same value the first skip list contains
            val = (node.next_link.data)
            count = node.find_val(val)
            count2 = node2.find_val(val)
            if (count > count2):
                #insert the data into the skip list
                iterations = (count - count2)
                for i in range(0, iterations):
                    temp.inserter(val)
            elif (count == 0):
                #insert the data into the skip list
                temp.inserter(val)
            node = node.next_link
        return (temp)
Example #27
def _old_rst_parseval(preds, golds, input_lengths):
    """
    :type preds: list of list of (int, int, str, str)
    :type golds: list of list of (int, int, str, str)
    :type input_lengths: list of int
    :rtype: {str: {str: Any}}
    """
    assert len(preds) == len(golds) == len(input_lengths)

    scores = {}  # {str: {str: Any}}

    total_ok_dict = {}
    total_pred_dict = {}
    total_gold_dict = {}
    for key in ["S", "S+N", "S+R", "S+N+R"]:
        total_ok_dict[key] = 0.0
        total_pred_dict[key] = 0.0
        total_gold_dict[key] = 0.0

    for pred_spans, gold_spans, input_length in zip(preds, golds,
                                                    input_lengths):
        pred_spans_dict = {}
        gold_spans_dict = {}
        pred_spans_dict["S"] = [(b, e) for b, e, r, n in pred_spans]
        gold_spans_dict["S"] = [(b, e) for b, e, r, n in gold_spans]
        pred_spans_dict["S+N"] = [(b, e, n) for b, e, r, n in pred_spans]
        gold_spans_dict["S+N"] = [(b, e, n) for b, e, r, n in gold_spans]
        pred_spans_dict["S+R"] = [(b, e, r) for b, e, r, n in pred_spans]
        gold_spans_dict["S+R"] = [(b, e, r) for b, e, r, n in gold_spans]
        pred_spans_dict["S+N+R"] = pred_spans
        gold_spans_dict["S+N+R"] = gold_spans

        for key in ["S", "S+N", "S+R", "S+N+R"]:
            a = multiset.Multiset(pred_spans_dict[key])
            b = multiset.Multiset(gold_spans_dict[key])
            n_ok = float(len(a & b))
            n_pred = float(len(a))
            n_gold = float(len(b))

            # NOTE: Pre-terminal spans (i.e., spans of length=1) cannot be incorrect in unlabeled constituency metrics
            if key == "S":
                n_ok += input_length
                n_pred += input_length
                n_gold += input_length

            total_ok_dict[key] += n_ok
            total_pred_dict[key] += n_pred
            total_gold_dict[key] += n_gold

    for key in ["S", "S+N", "S+R", "S+N+R"]:
        precision = float(total_ok_dict[key]) / float(total_pred_dict[key])
        recall = float(total_ok_dict[key]) / float(total_gold_dict[key])
        f1 = (2 * precision * recall) / (precision + recall)
        precision_info = "%d/%d" % (total_ok_dict[key], total_pred_dict[key])
        recall_info = "%d/%d" % (total_ok_dict[key], total_gold_dict[key])
        scores[key] = {
            "Precision": precision,
            "Recall": recall,
            "Micro F1": f1,
            "Precision_info": precision_info,
            "Recall_info": recall_info
        }

    return scores
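A toy illustration (one tree, three EDUs, `multiset` imported): even when the single predicted non-terminal span is wrong, crediting the pre-terminal spans keeps the unlabeled score well above zero.

gold = [[(0, 1, "Elaboration", "NS")]]
pred = [[(0, 2, "Elaboration", "NS")]]
scores = _old_rst_parseval(pred, gold, input_lengths=[3])
print(scores["S"]["Micro F1"])        # 0.75: 3 pre-terminal spans credited out of 4 total
print(scores["S"]["Precision_info"])  # '3/4'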
Example #28
    def __init__(self, roll=[]):
        '''Initializes a roll to the given list'''
        self.dice = mset.Multiset(roll)
Example #29
            new_states = get_states()
            Q_table = renew_Q(best_action, prev_val,
                Q_table, original_states, new_states)
            tf = not(complete_exodia() or len(decks) == 0 or j >= 30)
            j += 1
        Q_tables.append(Q_table)

    except :
        pass

print(n_success)

Q_table.sum()
Q_table.shape

sum(Q_table.hands == ms.Multiset(["チキン・レース",
    "テラ・フォーミング", "チキン・レース", "成金ゴブリン", "封印されしエグゾディア"]))


Q_table.loc[Q_table.hands == ms.Multiset(["チキン・レース", "テラ・フォーミング", "チキン・レース", "成金ゴブリン", "封印されしエグゾディア"]), :]
Q_table.drop(columns=["hands", "fields", "cemetary"]).sum(axis=1).sort_values()
Q_table.iloc[2491,]["hands"]
Q_table.iloc[690,]["hands"]
Q_table.iloc[1422,]["hands"]
Q_table.iloc[4153,]
Q_table.iloc[56, ]
Q_table.query("テラ・フォーミング >= 10")
Q_table[Q_table["テラ・フォーミング"]>=10]


#### Duel simulation
duel = Duel()
Example #30
def roll(dice_count=6):
    diceset = ''
    for i in range(dice_count):
        diceset += str(rnd.randint(1, 6))
    return ms.Multiset(diceset)
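A short usage sketch, assuming `rnd` aliases the standard `random` module and `ms` the `multiset` package, as in the snippets above.

import random as rnd
import multiset as ms

dice = roll()           # six dice as a multiset of digit characters
print(dice, len(dice))  # len counts multiplicity, so the length printed is 6
print(roll(3))          # reroll only three dice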