Code Example #1
File: query.py Project: zero804/kajongg
 def cleanPlayerTable(self):
     """remove now unneeded columns host, password and make names unique"""
     playerCounts = IntDict()
     names = {}
     keep = {}
     for nameId, name in Query('select id,name from player').records:
         playerCounts[name] += 1
         names[int(nameId)] = name
     for name, counter in defaultdict.items(playerCounts):
         nameIds = [x[0] for x in names.items() if x[1] == name]
         keepId = nameIds[0]
         keep[keepId] = name
         if counter > 1:
             for nameId in nameIds[1:]:
                 Query(
                     'update score set player=%d where player=%d' %
                     (keepId, nameId))
                 Query(
                     'update game set p0=%d where p0=%d' %
                     (keepId, nameId))
                 Query(
                     'update game set p1=%d where p1=%d' %
                     (keepId, nameId))
                 Query(
                     'update game set p2=%d where p2=%d' %
                     (keepId, nameId))
                 Query(
                     'update game set p3=%d where p3=%d' %
                     (keepId, nameId))
                 Query('delete from player where id=%d' % nameId)
     Query('drop table player')
     self.createTable('player')
     for nameId, name in keep.items():
         Query('insert into player(id,name) values(?,?)', (nameId, name))
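Note: IntDict above is used as a counting dictionary; the same duplicate-detection idiom with a plain collections.defaultdict(int) might look like the sketch below (the sample ids and names are invented for illustration).

    from collections import defaultdict

    names = {1: 'Wolfgang', 2: 'Petra', 3: 'Wolfgang'}   # id -> player name
    playerCounts = defaultdict(int)                      # name -> occurrence count
    for nameId, name in names.items():
        playerCounts[name] += 1
    duplicates = [name for name, count in playerCounts.items() if count > 1]
    print(duplicates)  # ['Wolfgang']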
Code Example #2
    def sorted_childviews(self, objectview=None):
        if objectview is None:
            objectview = self.objectview
        child_repr_values = {}
        for key, value in defaultdict.items(self):
            # it needs to be a string to work with _numeric_sortkey
            keylen = str(len(key))
            # these can be huge and we won't see all of it anyway
            abridged_key = key.value[:1000]
            child_repr_values[key] = self.ChildView(
                child_key_abridged=abridged_key,
                child_key_length=keylen,
                total_child_result_count=value.real_length(),
                relevant_child_result_count=len(value),
                child_value=value,
                child_pointer=self.pointer_to_me.copy().get_by_index(key))

        # "reversed" actually yields the expected (i.e. non-reversed-seeming) result...
        reverse = lambda seq: reversed(seq) if objectview["sort_reverse"] else seq
        if objectview["sort_by"] == "total":
            return OrderedDict(
                reverse(
                    sorted(child_repr_values.items(),
                           key=lambda tup: tup[1].total_child_result_count)))
        elif objectview["sort_by"] == "length":
            if objectview["sort_numeric"]:
                return OrderedDict(
                    reverse(
                        sorted(child_repr_values.items(),
                               key=lambda tup: self._numeric_sortkey(
                                   tup[1].child_key_length))))
            else:
                return OrderedDict(
                    reverse(
                        sorted(child_repr_values.items(),
                               key=lambda tup: tup[1].child_key_length)))
        elif objectview["sort_by"] == "attribute value":
            if objectview["sort_numeric"]:
                return OrderedDict(
                    reverse(
                        sorted(child_repr_values.items(),
                               key=lambda tup: self._numeric_sortkey(
                                   tup[1].child_key_abridged))))
            else:
                return OrderedDict(
                    reverse(
                        sorted(child_repr_values.items(),
                               key=lambda tup: tup[1].child_key_abridged)))
        else:  # objectview["sort_by"] == "relevant"
            return OrderedDict(
                reverse(
                    sorted(
                        child_repr_values.items(),
                        key=lambda tup: tup[1].relevant_child_result_count)))
Code Example #3
    def runTrails(self,
                  alphas=[0.001, 0.005, 0.00592],
                  betas=[0.0003, 0.0005, 0.00798],
                  lambdas=[0, 0.2, 0.4, 0.6],
                  degree=[3],
                  path="Result/"):
        print("actor_critic(lambda)\n")
        log = "alpha    beta     lambda2     degree    mean_return    max_return     last_return\n"
        dict = {}

        for alpha in alphas:
            for beta in betas:
                for lambda2 in lambdas:
                    trails = []
                    for i in range(self.trail_n):
                        if self.state_type == "continue":
                            result = self.run(alpha=alpha,
                                              beta=beta,
                                              lambda2=lambda2)
                        else:
                            result = self.run_Tabular(alpha=alpha, beta=beta)
                        trails.append(result)
                    path_temp = path + self.env_name + "/" + self.agent_name + "/"
                    name = (path_temp + str(alpha) + "_" + str(beta) + "_" +
                            str(lambda2) + "_" + str(self.degree))
                    name = name.replace(".", "")
                    pickle.dump(trails, open(name, "wb"))
                    plot_trails(trails, name)
                    trails = np.array(trails)
                    trails = trails[:, 90:]
                    dict.update({
                        str(alpha) + "_" + str(beta) + "_" + str(lambda2) + "_" + str(self.degree):
                        np.mean(trails)
                    })
                    print("done one trails", np.mean(trails))

        sorted_J = sorted(dict.items(),
                          key=operator.itemgetter(1),
                          reverse=True)
        print(str(sorted_J[0]))
        f = open(path_temp + 'log.txt', 'a')
        f.write(str(sorted_J))
        f.write("\n")
        f.close()
Code Example #4
    def runTrails(self,
                  alphas=[0.001, 0.002, 0.003, 0.008],
                  epsilons=[0.008, 0.01, 0.05],
                  lambda2s=[0, 0.3, 0.5, 0.8],
                  path="Result/"):
        log = "alpha    epsilon     lambuda       degree    mean_return    max_return     last_return\n"
        dict = {}

        for alpha in alphas:
            for epsilon in epsilons:
                for lambda2 in lambda2s:
                    trails = []
                    for i in range(self.trail_n):
                        if self.state_type == "continue":
                            result = self.run(alpha=alpha,
                                              epsilon=epsilon,
                                              lambda2=lambda2)
                        else:
                            result = self.run_tabular(alpha=alpha,
                                                      epsilon=epsilon,
                                                      lambda2=lambda2)
                            print("done one", i)
                        trails.append(result)
                    path_t = path + self.env_name + "/" + self.agent_name + "/"
                    name = (path_t + str(alpha) + "_" + str(epsilon) + "_" +
                            str(lambda2) + "_" + str(self.degree))
                    name = name.replace(".", "")
                    pickle.dump(trails, open(name, "wb"))
                    plot_trails(trails, name)
                    trails = np.array(trails)
                    trails = trails[:, 90:]
                    dict.update({
                        str(alpha) + "_" + str(epsilon) + "_" + str(lambda2) + "_" + str(self.degree):
                        np.mean(trails)
                    })
                    print("done one trails", np.mean(trails))
        sorted_J = sorted(dict.items(),
                          key=operator.itemgetter(1),
                          reverse=True)
        print(sorted_J[0])
        f = open(path_t + 'log.txt', 'a')
        f.write(str(sorted_J))
        f.write("\n")
        f.close()
Code Example #5
from collections import defaultdict


def keywithmaxvalue(d):
    '''Return the keys for the maximum value in a dictionary (all keys that tie for the maximum).'''
    newhash = defaultdict(list)  # value -> list of keys having that value
    for k, v in d.items():
        newhash[v].append(k)
    return newhash[max(newhash.keys())]
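A quick usage sketch (the dictionary below is made up for illustration); because ties are grouped per value, the result is a list of keys.

    # Hypothetical input: 7 is the maximum value and two keys share it.
    scores = {'a': 3, 'b': 7, 'c': 7, 'd': 1}
    print(keywithmaxvalue(scores))  # ['b', 'c']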
Code Example #6
File: common.py Project: ospalh/kajongg-fork
 def clear(self):
     """also update parent if given"""
     if self.parent is not None:
         for key, value in defaultdict.items(self):
             self.parent[key] -= value
     defaultdict.clear(self)
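This clear() appears to sit on a counting defaultdict subclass that mirrors its totals into an optional parent: defaultdict.items(self) reads the raw counts through the base class, and defaultdict.clear(self) at the end calls the base-class clear so the override does not recurse into itself. A minimal sketch of the pattern, with IntDict reconstructed here purely for illustration (not the project's actual class):

    from collections import defaultdict

    class IntDict(defaultdict):
        """Counting dict whose clear() subtracts its totals from an optional parent."""

        def __init__(self, parent=None):
            defaultdict.__init__(self, int)
            self.parent = parent

        def clear(self):
            """also update parent if given"""
            if self.parent is not None:
                for key, value in defaultdict.items(self):
                    self.parent[key] -= value
            defaultdict.clear(self)

    parent = IntDict()
    child = IntDict(parent)
    parent['east'] += 2      # parent tracks the combined total
    child['east'] += 2
    child.clear()            # removes the child's share from the parent
    print(parent['east'])    # 0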
Code Example #7
    def calculate_stats(self):
        self.stats = {
            'unique_word_counts': {},
            'document_word_counts': {},
            'unique_word_counts_per_year': {},
            'document_word_counts_per_year': {},
            'days_a_year': {}
        }

        # self.documents_word_counts maps each document id to its bag of words,
        # a list of (word_id, count) pairs; store the vocabulary size and the
        # total word count per document
        for d_id, bow in self.documents_word_counts.items():
            self.stats['unique_word_counts'][d_id] = len(bow)
            self.stats['document_word_counts'][d_id] = sum(
                [c for (w_id, c) in bow])

        # See load_tbmm_corpus for the date_mapping structure.
        # r_date_mappings is the reduced version of the date mappings:
        # it drops the 'interval' items and
        # uses file names as keys instead of HTTP locations
        r_date_mappings = {
            term: {
                addr.split('/')[-1][:-4]: _date
                for addr, _date in dd.items() if addr != 'interval'
            }
            for term, dd in self.date_mappings.items()
        }

        # Two different objects to navigate on

        # doc2id_time maps each file name to a list of
        # (document_id, date) tuples
        doc2id_time = {}

        # time2id_doc maps each date to a list of
        # (document_id, file name) tuples
        time2id_doc = {}
        for _id, v in self.documents_metadata.items():
            # Ex metadata item: (1, {'filepath': 'tbt/tbt-ty05/tbmm05005fih/'})
            # key is document_id and
            # value is a dictionary with 'filepath' item which has the value
            # <Type of Doc?>/<Term>/<Document Name>
            term, document_name = v['filepath'].split('/')[1:3]

            # Curation of doc2id_time and time2id_doc
            # it is possible to add term values or create different type of
            # dictionaries to work on...
            if term in r_date_mappings:
                if document_name in r_date_mappings[term]:
                    doc_t = r_date_mappings[term][document_name]

                    id_time = (_id, doc_t)
                    if document_name in doc2id_time:
                        doc2id_time[document_name].append(id_time)
                    else:
                        doc2id_time[document_name] = [id_time]

                    id_doc = (_id, document_name)
                    if doc_t in time2id_doc:
                        time2id_doc[doc_t].append(id_doc)
                    else:
                        time2id_doc[doc_t] = [id_doc]

        # Similar to unique_word_counts and document_word_counts, but with the
        # help of time2id_doc this part calculates days_a_year,
        # unique_word_counts_per_year and document_word_counts_per_year
        for document_date in sorted(list(time2id_doc)):
            current_year = document_date.year

            # days_a_year holds, per year, the number of distinct publication days
            # and the number of published documents, e.g.
            # {
            #   ...
            #   2013: {'day_count': 63, 'document_count': 93}
            # }
            # i.e. in 2013, 93 documents were published across 63 distinct days
            document_for_date = len(time2id_doc[document_date])
            if current_year in self.stats['days_a_year']:
                self.stats['days_a_year'][current_year]['day_count'] += 1
                self.stats['days_a_year'][current_year][
                    'document_count'] += document_for_date
            else:
                self.stats['days_a_year'][current_year] = {
                    'day_count': 1,
                    'document_count': document_for_date
                }

            for (d_id, doc) in time2id_doc[document_date]:
                bow = self.documents_word_counts[d_id]

                if current_year in self.stats['unique_word_counts_per_year']:
                    self.stats['unique_word_counts_per_year'][
                        current_year] += len(bow)
                    self.stats['document_word_counts_per_year'][
                        current_year] += sum([c for (w_id, c) in bow])
                else:
                    self.stats['unique_word_counts_per_year'][
                        current_year] = len(bow)
                    self.stats['document_word_counts_per_year'][
                        current_year] = sum([c for (w_id, c) in bow])
Code Example #8
File: util.py Project: Juicechuan/AMR_graph
 def items(self):
     """Flatten list values: yield one (key, element) pair for every element of every value list."""
     return [(k, v) for k, vv in defaultdict.items(self) for v in vv]
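For context, a sketch of how an items() override like this behaves when the class wraps defaultdict(list); the ListMap name and sample data are invented for illustration.

    from collections import defaultdict

    class ListMap(defaultdict):
        """defaultdict(list) whose items() yields one (key, element) pair per list element."""

        def __init__(self):
            defaultdict.__init__(self, list)

        def items(self):
            # the base-class items() still returns the raw key -> list pairs
            return [(k, v) for k, vv in defaultdict.items(self) for v in vv]

    m = ListMap()
    m['a'].extend([1, 2])
    m['b'].append(3)
    print(m.items())  # [('a', 1), ('a', 2), ('b', 3)]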