def _computeSimilarityMatrix(self, symbols):
        """Compute a pairwise similarity matrix for the given symbols.

        Delegates the scoring to the native `_libScoreComputation`
        extension and reshapes its flat result into a nested mapping.

        :param symbols: list of Symbol objects to compare
        :return: OrderedDict mapping symbol uid -> OrderedDict of
            (other uid -> similarity score); filled symmetrically.
        :raise TypeError: if symbols is None or contains a non-Symbol
        """
        if symbols is None:
            raise TypeError("Symbols cannot be None")
        for symbol in symbols:
            if not isinstance(symbol, Symbol):
                raise TypeError("At least one specified symbol is not a valid symbol")

        # Execute the Clustering part in C
        debug = False
        wrapper = WrapperArgsFactory("_libScoreComputation.computeSimilarityMatrix")
        # Serialize the symbols into the format the C layer expects.
        wrapper.typeList[wrapper.function](symbols)
        self._logger.debug("wrapper = {0}".format(wrapper))

        # listScores is a flat sequence of (uid_i, uid_j, score) triples.
        (listScores) = _libScoreComputation.computeSimilarityMatrix(
            self.internalSlick, self._cb_executionStatus, self._isFinish, debug, wrapper
        )
        # Retrieve the scores for each association of symbols
        scores = OrderedDict()
        for (iuid, juid, score) in listScores:
            if iuid not in scores.keys():
                scores[iuid] = OrderedDict()
            if juid not in scores.keys():
                scores[juid] = OrderedDict()
            scores[iuid][juid] = score
            # Mirror the score so lookups work in either direction.
            if iuid not in scores[juid].keys():
                scores[juid][iuid] = score
        return scores
Example #2
1
def read_lemmas(lemma_descriptions_list, multiclass=False, return_joint=False):
    """Collect paradigm information from lemma descriptions
    (as produced by process_lemmas_file).

    :param lemma_descriptions_list: iterable of (lemma, code, var_values)
    :param multiclass: keep every paradigm seen per lemma; when False,
        only the first paradigm of each lemma is retained
    :param return_joint: return (lemmas, list of (code, vars) pairs)
        instead of (lemmas, codes, variable values)
    """
    data = OrderedDict()
    for lemma, code, var_values in lemma_descriptions_list:
        entries = data.setdefault(lemma, [])
        # Without multiclass, only the first paradigm per lemma counts.
        if multiclass or not entries:
            entries.append((code, var_values))
    if return_joint:
        return list(data), list(data.values())
    codes = [[pair[0] for pair in entry] for entry in data.values()]
    variables = [[pair[1] for pair in entry] for entry in data.values()]
    return list(data), codes, variables
Example #3
1
def read_parameters():
    """Read environment definitions from environments.conf and return
    the connection string of the environment chosen via -e/--environment
    (the last environment listed in the file is the default)."""
    section = "default"

    cfg = configparser.ConfigParser()
    cfg.read("environments.conf")

    # Preserve file order so "last one listed" is well-defined.
    environments = OrderedDict(
        (opt, cfg.get(section, opt)) for opt in cfg.options(section)
    )

    last_env = next(reversed(environments))
    parser = OptionParser()
    parser.add_option(
        "-e",
        "--environment",
        dest="environment",
        default=last_env,
        type="choice",
        choices=list(environments.keys()),
        help="set default environment to: " + ", ".join(environments.keys()),
    )

    options, _args = parser.parse_args()

    return environments.get(options.environment, None)
    def get_map(file_name):
        """Build vocabulary index maps from a token file.

        Each non-comment line holds whitespace-separated tokens of the
        form word|pos|category.  Populates the enclosing-scope maps
        (word2idx, pos2idx, idx2word, idx2pos, cate2idx, idx2cate) and
        registers the OOV placeholders via insert2map.

        :param file_name: path to the input file
        """
        # Ordered "sets": OrderedDict keys preserve first-seen order.
        wd, pd, cd = OrderedDict(), OrderedDict(), OrderedDict()
        with open(file_name, "r") as f:
            for line in f:
                # Skip comments and blank lines.
                if line.startswith("#") or len(line) <= 1:
                    continue
                wpc = [wpc.split("|") for wpc in line.split()]
                # Transpose per-token triples into parallel sequences.
                words, poses, cates = zip(*wpc)
                wd.update([(word, None) for word in words])
                pd.update([(pos, None) for pos in poses])
                # Count category frequencies for the most_common cutoff.
                for cate in cates:
                    if cate in cd:
                        cd[cate] += 1
                    else:
                        cd[cate] = 1
        word2idx.update([(word, i) for i, word in enumerate(wd.keys())])
        pos2idx.update([(pos, i) for i, pos in enumerate(pd.keys())])
        idx2word.update([(i, word) for i, word in enumerate(wd.keys())])
        idx2pos.update([(i, pos) for i, pos in enumerate(pd.keys())])

        # Keep only categories seen at least `most_common` times
        # (most_common comes from the enclosing scope).
        cc = [c for c in cd.keys() if cd[c] >= most_common]
        cate2idx.update([(c, i) for i, c in enumerate(cc)])
        idx2cate.update([(i, c) for i, c in enumerate(cc)])

        # Register out-of-vocabulary entries (woov/poov/coov presumably
        # OOV placeholder tokens from the enclosing scope — TODO confirm).
        insert2map(woov, word2idx, idx2word)
        insert2map(poov, pos2idx, idx2pos)
        insert2map(coov, cate2idx, idx2cate)
Example #5
1
class QuestionValueTable(object):
    def __init__(self):
        self._valueTable = OrderedDict()
        self._expressionTable = OrderedDict()

    def __iter__(self):
        tempTable = {}
        tempTable.update(self._valueTable)
        tempTable.update(self._expressionTable)
        return tempTable.__iter__()

    def questions(self):
        return self._valueTable.keys() + self._expressionTable.keys()

    def add(self, question):
        if question.valueExpression:
            self._expressionTable[question] = question.valueExpression
        else:
            self._valueTable[question] = None

    def update(self, question, value):
        if question in self._valueTable:
            self._valueTable[question] = value

    def get(self, question):
        if question in self._expressionTable:
            return self._expressionTable[question].value
        return self._valueTable.get(question, None)
Example #6
1
class UnitGroup(object):
    """An ordered collection of (unit name, scale factor) pairs with
    index lookup by name, by scale factor, or by position."""

    def __init__(self, items, default_index=0):
        """
        :param items: iterable of (name, scale factor) pairs
        :param default_index: position of the default unit
        """
        self.units = OrderedDict(items)
        # Reverse maps: name -> position, scale factor -> position.
        self.by_name = dict(reversed(i) for i in enumerate(self.units.keys()))
        self.by_scalefactor = dict(reversed(i) for i in enumerate(self.units.values()))
        self.__default_index = default_index

    def idx_by_name(self, name):
        """Position of the unit with this name."""
        return self.by_name[name]

    def idx_by_scale(self, scale):
        """Position of the unit with this scale factor.

        Fixed: previously read the nonexistent attribute `by_scale`
        (AttributeError on every call); the map is `by_scalefactor`.
        """
        return self.by_scalefactor[scale]

    def get_scale(self, idx):
        """Scale factor at position idx."""
        return list(self.units.values())[idx]

    def get_name(self, idx):
        """Unit name at position idx."""
        return list(self.units.keys())[idx]

    @property
    def names(self):
        """All unit names, in insertion order."""
        return list(self.units.keys())

    @property
    def default_index(self):
        """Position of the default unit."""
        return self.__default_index
Example #7
0
def humans_per_hour(game, **kwargs):
    """Build per-dorm (plus overall) "humans remaining vs. hours
    elapsed" series for charting.

    :param game: game whose kills are tallied; only kills with a parent
        (i.e. excluding LZ seeds) count as human deaths
    :return: list of {"name": ..., "data": ...} dicts, one per dorm in
        DORMS and one "ALL" series; data is a list of
        (hour offset, humans remaining) pairs
    """
    data = []
    # Clamp the series window to "now" for games still in progress.
    end_date = min(timezone.now(), game.end_date)
    end_td = end_date - game.start_date
    end_hour = end_td.days * 24 + round(end_td.seconds / 3600, 0)
    for dorm, dormName in DORMS:
        sh = game.get_active_players().filter(dorm=dorm).count()  # starting humans in this dorm
        d = OrderedDict([(0, sh)])
        kills = Kill.objects.exclude(parent=None).filter(victim__game=game, victim__dorm=dorm).order_by("date")
        for index, kill in enumerate(kills, 1):
            # Kills dated before the game started count at hour 0.
            kd = max(kill.date, game.start_date) - game.start_date
            # NOTE(review): kills are bucketed at 0.1h but end_hour at
            # 1h granularity — confirm this asymmetry is intended.
            hours = kd.days * 24 + round(kd.seconds / 3600, 1)
            d[min(hours, end_hour)] = sh - index  # overwrite
        if end_hour not in d:
            # Extend the series flat to the end of the window.
            # Fixed: was d.keys()[-1], which breaks on Python 3 views.
            d[end_hour] = d[next(reversed(d))]
        data.append({"name": dormName, "data": list(d.items())})
        # add dataset for all dorms
    sh = game.get_active_players().count() - Kill.objects.filter(parent=None, killer__game=game).count()  # subtract LZs
    d = OrderedDict([(0, sh)])
    kills = Kill.objects.exclude(parent=None).filter(victim__game=game).order_by("date")
    for index, kill in enumerate(kills, 1):
        kd = max(kill.date, game.start_date) - game.start_date
        hours = kd.days * 24 + round(kd.seconds / 3600, 1)
        d[min(hours, end_hour)] = sh - index  # overwrite
    if end_hour not in d:
        d[end_hour] = d[next(reversed(d))]
    data.append({"name": "ALL", "data": list(d.items())})
    return data
Example #8
0
def join(keys, tables):
    """Merge a list of `Table` objects using `keys` to group rows"""

    # Union of all tables' fields, in first-seen order.
    fields = OrderedDict()
    for table in tables:
        fields.update(table.fields)
    # TODO: may raise an error if a same field is different in some tables

    # Every grouping key must exist in the merged field set.
    known_fields = set(fields.keys())
    for key in keys:
        if key not in known_fields:
            raise ValueError('Invalid key: "{}"'.format(key))

    def blank_row():
        # Template row: every merged field starts out as None.
        return OrderedDict((field, None) for field in fields.keys())

    # Group rows by key, without missing ordering.
    grouped = OrderedDict()
    for table in tables:
        for row in table:
            group = tuple(getattr(row, key) for key in keys)
            if group not in grouped:
                grouped[group] = blank_row()
            grouped[group].update(row._asdict())

    merged = Table(fields=fields)
    merged.extend(grouped.values())
    return merged
Example #9
0
    def convert_to_multipartite(self):
        """Split a monopartite network into one nodetype per distinct
        node._nodetype designation.

        Only acts when the network has exactly one nodetype whose id is
        "monopartite".  Nodes are regrouped under newly created
        nodetypes; the old "monopartite" nodetype is removed once empty.

        NOTE: Python 2 code (print statement on the final line).
        """
        from collections import OrderedDict

        # Bitwise & on the two booleans behaves like `and` here.
        if (len(self.nodetypes) == 1) & (self.nodetypes[0].id == "monopartite"):
            # Bucket nodes by their individual nodetype designation.
            nodetype_to_node_dict = OrderedDict()
            for the_nodetype in self.nodetypes:
                for the_node in the_nodetype.nodes:
                    the_nodetype_id = the_node._nodetype
                    if the_nodetype_id not in nodetype_to_node_dict.keys():
                        nodetype_to_node_dict[the_nodetype_id] = []
                    nodetype_to_node_dict[the_nodetype_id].append(the_node)
                    # Make sure all node_ids are unique to avoid problems here
                    # Warns in update_self

            existing_nodetype_ids = [x.id for x in self.nodetypes]

            if len(nodetype_to_node_dict.keys()) > 1:
                # might want to enforce this in the future
                # if 'monopartite' in nodetype_to_node_dict.keys():
                #     print "Error, cannot convert to multipartite if all nodes have monopartite nodetype designation."
                # else:
                old_nodetype = self.nodetypes.get_by_id("monopartite")
                # Move each bucket of nodes under its own new nodetype.
                for the_nodetype_id in nodetype_to_node_dict.keys():
                    the_nodetype = self.add_nodetype(the_nodetype_id)
                    the_nodes = nodetype_to_node_dict[the_nodetype_id]
                    old_nodetype.nodes.remove_subset(the_nodes)
                    the_nodetype.add_nodes(the_nodes)
                # Drop the old monopartite container once emptied.
                if len(old_nodetype.nodes) == 0:
                    setattr(old_nodetype, "_network", None)
                    self.nodetypes.remove(old_nodetype)
            else:
                print "Need more than one nodetype designation to convert to multipartite."
Example #10
0
class MemObject(object):
    """
    The superclass for all objects parsed from memory captures.
    """

    def __init__(self, offset):
        """Store the parse offset as the first field.

        @offset: the offset in the memory image of this object
        """
        # OrderedDict keeps fields in insertion order, which gives
        # stable, logical ordering when results are printed.
        self.fields = OrderedDict(offset=offset)

    def get_field_keys(self):
        """
        @return: an ordered list of fields keys for the memobj
        """
        return self.fields.keys()

    def __str__(self):
        """
        @return: string of all fields of a memobj
        """
        parts = ["%s: %s\t" % (key, value) for key, value in self.fields.items()]
        return "".join(parts)
Example #11
0
def fixgridsearch(hparamfile, generate):
    """Build "one-at-a-time" grid-search values: each block of rows
    varies a single hyper-parameter while all others keep defaults.

    :param hparamfile: file parsed by HparamReader
    :param generate: fallback generation mode for hyper-parameters that
        do not specify one
    :return: (hyper-parameter names, values matrix)

    NOTE: Python 2 code (print statements; relies on dict views
    behaving like lists, e.g. np.array(dhparams.values())).
    """

    hparams = OrderedDict()
    dhparams = OrderedDict()

    for hparam in HparamReader(hparamfile):

        # NOTE(review): if "generate" is missing entirely, the inner
        # lookup below raises KeyError — likely wants hparam.get().
        if "generate" not in hparam or hparam["generate"] in ["default", ""]:
            if hparam["generate"] == "":
                print "*** Warning ***"
                print "    Hyperparameter", hparam["hparam"]
                print "    Please set generation mode : default"

            hparam["generate"] = generate

        # Remember the default value of every hyper-parameter.
        dhparams[hparam["hparam"]] = hparam.pop("default")

        name = hparam.pop("hparam")
        hparams[name] = hparams.get(name, []) + list(make_hparams(**hparam))

    # One row per candidate value, one column per hyper-parameter.
    values = np.zeros((sum([len(hparam) for hparam in hparams.values()]), len(hparams.keys())))

    j = 0
    for i, hparam in enumerate(hparams.items()):
        # set all default values
        values[j : j + len(hparam[1])] = np.array(dhparams.values())
        # set the value of the current hyper-parameter
        values[j : j + len(hparam[1]), i] = np.array(hparam[1])

        j += len(hparam[1])

    return hparams.keys(), values
Example #12
0
def test_config_object_util(no_config_env):
    """Exercise ObjectConfig's dict-like API against an OrderedDict.

    NOTE(review): assumes ObjectConfig.items()/values()/keys() return
    lists and that iteration yields keys — the equality checks below
    rely on it (stdlib dict views would not compare equal to lists).
    """
    project = Project.objects.first()
    conf = ObjectConfig(project)
    # A fresh config starts empty.
    assert conf.items() == conf.values() == conf.keys() == []
    # keys are returned order by key
    other_dict = OrderedDict()
    other_dict["foo.a"] = "bar"
    other_dict["foo.b"] = dict(bar=23)
    other_dict["foo.c"] = [1, 2, 3]
    conf["foo.a"] = "bar"
    conf["foo.b"] = dict(bar=23)
    conf["foo.c"] = [1, 2, 3]
    assert conf.items() == other_dict.items()
    assert conf.values() == other_dict.values()
    assert conf.keys() == other_dict.keys()
    assert [x for x in conf] == other_dict.keys()
    assert all(x in conf for x in other_dict)
    assert all(conf[k] == v for k, v in other_dict.items())
    assert all(conf.get(k) == v for k, v in other_dict.items())
    assert conf.get("DOESNOTEXIST") is None
    assert conf.get("DOESNOTEXIST", "foo") == "foo"
    with pytest.raises(KeyError):
        conf["DOESNOTEXIST"]
    # A fresh ObjectConfig for the same project sees persisted values.
    assert ObjectConfig(project).items() == other_dict.items()
    assert ObjectConfig(project).values() == other_dict.values()
    assert ObjectConfig(project).keys() == other_dict.keys()
    assert [x for x in ObjectConfig(project)] == other_dict.keys()
    assert all(x in ObjectConfig(project) for x in other_dict)
    assert all(ObjectConfig(project)[k] == v for k, v in other_dict.items())
    assert all(ObjectConfig(project).get(k) == v for k, v in other_dict.items())
    assert ObjectConfig(project).get("DOESNOTEXIST") is None
    assert ObjectConfig(project).get("DOESNOTEXIST", "foo") == "foo"
    with pytest.raises(KeyError):
        ObjectConfig(project)["DOESNOTEXIST"]
Example #13
0
class Cost:
    """Wraps a Theano cost expression with the parameters it is
    differentiated against; optionally smooths gradients with momentum.

    NOTE(review): relies on module-level `T` (theano.tensor), `sharedX`
    and `numpy`; Python 2 era code (dict views passed where lists are
    expected).
    """

    def __init__(self, cost, params, constants=None):
        # cost: scalar expression to differentiate.
        self.cost = cost
        self.grads = OrderedDict()  # param -> gradient expression
        self.computed_cost = False

        # Ordered "set" of parameters, preserving caller order.
        self.params = OrderedDict()
        for p in params:
            self.params[p] = True

        # Inputs held constant during differentiation.
        self.constants = OrderedDict()
        constants = [] if constants is None else constants
        for c in constants:
            self.constants[c] = True

    def compute_gradients(self, momentum_lambda=None):
        """Fill self.grads and return updates for the momentum buffers.

        :param momentum_lambda: when truthy, gradients are exponentially
            smoothed: new = lambda * momentum + (1 - lambda) * grad
        :return: OrderedDict of shared-variable updates (momentum only)
        """
        updates = OrderedDict()
        momentum = OrderedDict()

        grads = T.grad(
            self.cost, self.params.keys(), consider_constant=self.constants.keys(), disconnected_inputs="ignore"
        )
        for param, gparam in zip(self.params.keys(), grads):

            if momentum_lambda:
                # One zero-initialized momentum buffer per parameter.
                momentum[param] = sharedX(numpy.zeros_like(param.get_value()), name=param.name + "_mom")
                new_grad = momentum_lambda * momentum[param] + (1.0 - momentum_lambda) * gparam
                updates[momentum[param]] = new_grad
            else:
                new_grad = gparam

            self.grads[param] = new_grad

        self.computed_cost = True
        return updates
Example #14
0
def scalar_per_tract_mean_std(tractographies, scalar):
    """Per-file, distance-weighted mean and spread of a tract scalar.

    Each tract contributes its segment-length-weighted average scalar
    value; file-level statistics are then weighted by tract length.

    :param tractographies: iterable of tractography objects exposing
        tracts() and tracts_data()
    :param scalar: name of the scalar dataset
    :return: OrderedDict of three parallel lists (file index, mean, std)
    :raise ValueError: if a tractography lacks this scalar

    NOTE(review): the "std" column stores the weighted *variance* (no
    square root is taken) — kept as-is for output compatibility.
    """
    # Name the columns once; indexing results.keys() broke on Python 3
    # (dict views do not support subscripting).
    file_col = "tract file #"
    mean_col = "per tract distance weighted mean %s" % scalar
    std_col = "per tract distance weighted std %s" % scalar
    try:

        results = OrderedDict(((file_col, []), (mean_col, []), (std_col, [])))
        for j, tractography in enumerate(tractographies):
            scalars = tractography.tracts_data()[scalar]
            weighted_scalars = numpy.empty((len(tractography.tracts()), 2))
            for i, t in enumerate(tractography.tracts()):
                # Segment lengths along the tract polyline.
                tdiff = numpy.sqrt((numpy.diff(t, axis=0) ** 2).sum(-1))
                length = tdiff.sum()
                # Drop the first sample so values align with segments.
                values = scalars[i][1:].squeeze()
                average = numpy.average(values, weights=tdiff)
                weighted_scalars[i, 0] = average
                weighted_scalars[i, 1] = length
            mean = numpy.average(weighted_scalars[:, 0], weights=weighted_scalars[:, 1])
            std = numpy.average((weighted_scalars[:, 0] - mean) ** 2, weights=weighted_scalars[:, 1])
            results[file_col].append(j)
            results[mean_col].append(float(mean))
            results[std_col].append(float(std))

        return results
    except KeyError:
        raise ValueError("Tractography does not contain this scalar data")
Example #15
0
class akmers:
    """Bookkeeping for k-mers and their occurrence counts."""

    def __init__(self):
        # mer -> count, in insertion order.
        self.mers = OrderedDict()
        self.smers_set = set()

    # *******************************************
    def get_mers(self):
        """Return the current k-mers as a set."""
        return set(self.mers)

    # *******************************************
    def update_smer_set(self):
        """Snapshot the current k-mers into smers_set."""
        self.smers_set = set(self.mers)

    # *******************************************
    def add_mer(self, mer, count):
        """Record (or overwrite) the count for a k-mer."""
        self.mers[mer] = count

    # *******************************************
    def remove_mer(self, mer):
        """Drop a k-mer; raises KeyError if absent."""
        del self.mers[mer]

    # *******************************************
    def has_mers(self):
        """True when at least one k-mer has a count above 1."""
        return bool(self.mers) and max(self.mers.values()) > 1

    # *******************************************
    def get_count(self, mer):
        """Count stored for a k-mer; raises KeyError if absent."""
        return self.mers[mer]
Example #16
0
def makeHistOfDoms(infname='training_ids_with_their_domains.csv',outfname='HistOfDoms.csv'):
    """Compute, per domain, the fraction of training rows containing it
    ("yes") and its complement ("no"); write both to a CSV and return
    them as a dict.

    :param infname: CSV whose columns 2+ list the domains of each row
    :param outfname: CSV receiving domain ids, "no" and "yes" fractions

    NOTE: Python 2 code (print statements; dict views written as rows).
    """
    print 'makeHistOfDoms'+' '+infname
    outf=open(outfname,'w')
    writer=csv.writer(outf)
    inf=open(infname,'r')
    reader=csv.reader(inf)
    # All known domain identifiers, one histogram slot each.
    domList=getListOfTheColumn('all_domains.csv',0)
    nDoms=len(domList)
    yesList=OrderedDict()
    noList=OrderedDict()
    for i in domList:
        yesList[i]=0
    cnt=0
    # Columns 2+ of each row list the domains present in that row.
    for i in reader:
        cnt+=1
        for j in i[2:]:
            yesList[j]+=1
    # Convert counts to fractions; "no" is the complement.
    for i in domList:
        yesList[i]=yesList[i]/float(cnt)
        noList[i]=1-yesList[i]
    writer.writerow(noList.keys())
    writer.writerow(noList.values())
    writer.writerow(yesList.values())
    inf.close()
    outf.close()
    out=dict()
    out['domIndex']=noList.keys()
    out['no']=noList.values()
    out['yes']=yesList.values()
    print 'done'
    return out
Example #17
0
    def __init__(self, parent, serviceManager, recordService):
        """Build the dropdown choice lists for the record editor.

        :param parent: owning widget/controller
        :param serviceManager: provides cv/series services; when falsy,
            hard-coded sample choices are used instead
        :param recordService: record-level service kept for later use

        NOTE: Python 2 code — `[NULL] + dict.keys()` concatenation
        requires keys() to be a list.
        """
        self.parent = parent
        self.recordService = recordService
        if serviceManager:
            self.serviceManager = serviceManager
            self.cv_service = serviceManager.get_cv_service()
            self.series_service = serviceManager.get_series_service()
            # description -> id, ordered so the UI listing is stable.
            offsetChoices = OrderedDict((x.description, x.id) for x in self.cv_service.get_offset_type_cvs())
            self.offSetTypeChoices = [NULL] + offsetChoices.keys()

            labChoices = OrderedDict((x.lab_sample_code, x.id) for x in self.cv_service.get_samples())

            self.censorCodeChoices = [NULL] + [x.term for x in self.cv_service.get_censor_code_cvs()]
            self.labSampleChoices = [NULL] + labChoices.keys()

            # "code:description" labels, skipping incomplete qualifiers.
            self.qualifierChoices = OrderedDict(
                (x.code + ":" + x.description, x.id)
                for x in self.series_service.get_all_qualifiers()
                if x.code and x.description
            )
            self.qualifierCodeChoices = [NULL] + self.qualifierChoices.keys() + [NEW]

        else:
            # No backing services: placeholder sample values.
            self.censorCodeChoices = [NULL] + ["SampleCensorCode1"] + ["SampleCensorCode2"] + ["SampleCensorCode3"]
            self.labSampleChoices = [NULL] + ["SampleLabSample1"] + ["SampleLabSample2"] + ["SampleLabSample3"]
            self.offSetTypeChoices = [NULL] + ["SampleOffsetType1"] + ["SampleOffsetType2"] + ["SampleOffsetType3"]
            self.qualifierCodeChoices = (
                [NULL] + ["SampleQualifierCode1"] + ["SampleQualifierCode2"] + ["SampleQualifierCode3"]
            )
Example #18
0
    def next(self):
        """Return the next row of self.Data as an OrderedDict.

        Float/int cells are coerced to Python numbers; empty cells are
        dropped.  "subcollections" is split on commas, and any
        time/space columns are parsed with eval.

        :raise StopIteration: when the data is exhausted

        NOTE: Python 2 iterator protocol (`next`, not `__next__`).
        """
        if self.IND < len(self.Data):
            r = self.Data[self.IND]
            # Map column names to coerced cell values, skipping blanks.
            r = OrderedDict(
                [
                    (
                        self.Data.dtype.names[i],
                        float(xx) if isinstance(xx, float) else int(xx) if isinstance(xx, int) else xx,
                    )
                    for (i, xx) in enumerate(r)
                    if xx != ""
                ]
            )

            if "subcollections" in r.keys():
                r["subcollections"] = r["subcollections"].split(",")

            # SECURITY: eval on file-sourced cell values executes
            # arbitrary code — only safe for trusted inputs.
            for k in self.columnGroups.get("timeColumns", []) + self.columnGroups.get("spaceColumns", []):
                if k in r.keys():
                    r[k] = eval(r[k])

            self.IND += 1

            return r

        else:
            raise StopIteration
Example #19
0
def dict_sort():
    """Demonstrate dict sorting and ordered-dict construction idioms.

    NOTE: Python 2 code (print statements, iteritems).
    """
    # method 1 (sort by value)
    mdict = {"zhang": 1, "wang": 2, "yin": 8}
    dict_sorted = sorted(mdict.iteritems(), key=lambda asd: asd[1], reverse=False)
    print (mdict, dict_sorted)  # sorting yields a list of tuples, no longer a dict

    # method1 (sort by key)
    from operator import itemgetter

    dict_sorted = sorted(mdict.items(), key=itemgetter(0), reverse=True)
    for word, count in dict_sorted:
        print "%s %s" % (word, count)

    # method 2
    cd = dict([("a", 1), ("b", 2), ("c", 3)])  # build a dict from a pair list (unordered)
    print cd
    od = OrderedDict([("a", 1), ("c", 2), ("b", 3)])  # build an ordered dict (keeps the given order)
    print od

    # note: OrderedDict preserves insertion order
    od = OrderedDict()
    od["b"] = 1
    od["c"] = 2
    od["a"] = 3
    print od.keys()

    # constructing a dict from keyword arguments
    d = dict(name="Bob", age=20, score=88)
    print (d)
Example #20
0
 def clean_tally(self):
     """
     Cleans the tally. Scoping prevents us from modifying this in the studio
     and in the LMS the way we want to without undesirable side effects. So
     we just clean it up on first access within the LMS, in case the studio
     has made changes to the answers.
     """
     questions = OrderedDict(self.questions)
     answers = OrderedDict(self.answers)
     # Every question starts with a zero count for every known answer.
     default_answers = {answer: 0 for answer in answers.keys()}
     for key in questions.keys():
         if key not in self.tally:
             self.tally[key] = dict(default_answers)
         else:
             # Answers may have changed, requiring an update for each
             # question.
             new_answers = dict(default_answers)
             new_answers.update(self.tally[key])
             for existing_key in self.tally[key]:
                 if existing_key not in default_answers:
                     del new_answers[existing_key]
             self.tally[key] = new_answers
     # Keys for questions that no longer exist can break calculations.
     # Fixed: snapshot the keys with list() — deleting while iterating
     # a live dict view raises RuntimeError on Python 3.
     for key in list(self.tally.keys()):
         if key not in questions:
             del self.tally[key]
Example #21
0
def test_update():
    """test the update function"""
    # do we really add nothing if add==False ?
    d = poheader.update({}, test="hello")
    assert len(d) == 0
    # do we add if add==True ?
    d = poheader.update({}, add=True, Test="hello")
    assert len(d) == 1
    assert d["Test"] == "hello"
    # do we really update ?
    d = poheader.update({"Test": "hello"}, add=True, Test="World")
    assert len(d) == 1
    assert d["Test"] == "World"
    # does key rewrite work ?
    d = poheader.update({}, add=True, test_me="hello")
    assert d["Test-Me"] == "hello"
    # is the order correct ?
    d = OrderedDict()
    d["Project-Id-Version"] = "abc"
    d["POT-Creation-Date"] = "now"
    d = poheader.update(d, add=True, Test="hello", Report_Msgid_Bugs_To="bugs@list.org")
    # Fixed: dict views do not support indexing on Python 3
    # (d.keys()[0] was Python 2 only), so snapshot the keys first.
    keys = list(d.keys())
    assert keys[0] == "Project-Id-Version"
    assert keys[1] == "Report-Msgid-Bugs-To"
    assert keys[2] == "POT-Creation-Date"
    assert keys[3] == "Test"
Example #22
0
def get_upcoming_events(user=None):
    """Return upcoming events annotated with attendance info.

    Each event gets `num_coming` (count of HOLE/DIRECT signups) and,
    when `user` is given, `user_signup` (that user's Signup or None).

    :param user: optional user whose signups are attached
    :return: the events (values of an id-keyed OrderedDict)
    """
    # Id -> event, keeping the queryset's ordering.
    events = OrderedDict( (e.pk, e) for e in
        Event.objects.filter(
            date__gte=datetime.date.today())
        .annotate(Count('signup'))
        .select_subclasses())

    # get number coming
    tmp = (Event.objects.filter(pk__in=events.keys())
        .filter(signup__coming__in=[Signup.HOLE, Signup.DIRECT])
        .annotate(num_coming=Count('signup')))
    for e in tmp:
        events[e.pk].num_coming = e.num_coming

    if user is not None:
        # get if user has signed up
        tmp = user.signup_set.filter(
            event_id__in=events.keys())

        # Default to None, then overwrite for events the user joined.
        for e in events.values():
            e.user_signup = None
        for s in tmp:
            events[s.event_id].user_signup = s

    return events.values()
Example #23
0
def get_composition(atoms, basis=None):
    """Acquire the chemical composition of an atoms object.

    :param atoms: object exposing get_chemical_symbols()
    :param basis: optional symbol; when given, return only that
        symbol's fraction (0.0 if absent) instead of the full mapping
    :return: OrderedDict of symbol -> fraction, keys in first-seen
        order, or a single fraction when `basis` is given
    """
    symbols = atoms.get_chemical_symbols()
    count = len(symbols)

    # Collect the symbol and count of each atom type
    S = OrderedDict()
    for symbol in symbols:
        S[symbol] = S.get(symbol, 0.0) + 1.0

    # Convert counts to fractions.
    # Fixed: was S.iteritems(), which only exists on Python 2;
    # iterating keys and reassigning values is safe on both.
    for key in S:
        S[key] = S[key] / count

    if basis:
        return S[basis] if basis in S else 0.0
    return S
Example #24
0
class GPL_Lite(object):
    """Lightweight parser for a GEO platform (GPL) SOFT table dump.

    Reads the header key/value block into `cols` and the data table
    into `rows` (first-column value -> {column title: cell}), keeping
    file order.
    """

    def __init__(self, fp):
        """
        :param fp: iterable file object positioned at the platform line
        :raise AssertionError: on malformed header/column structure
        """
        self.cols = OrderedDict()
        self.rows = OrderedDict()

        # GPL ID.  Fixed: next(fp) replaces the Python-2-only fp.next().
        self.id = RX_PLATFORM.match(next(fp)).group(1)
        # Column Attribute Descriptions
        for line in fp:
            m = RX_HEADER.match(line)
            if not m:
                break
            key, value = m.groups()
            self.cols[key] = value
        s = line.strip("\r\n")
        assert s == HEAD_END_LINE, "[%s] != [%s]" % (s, HEAD_END_LINE)
        # Column titles must match the header descriptions, in order.
        # Fixed: list() the keys — on Python 3 a dict view never
        # compares equal to a list, so the assert always fired.
        titles = next(fp).strip("\r\n").split("\t")
        assert list(self.cols.keys()) == titles, "%s != %s" % (self.cols.keys(), titles)
        # Data rows
        for i, line in enumerate(fp):
            line = line.strip("\r\n")
            if line == "":
                continue
            if line == TABLE_END_LINE:
                break
            row = line.split("\t")
            self.rows[row[0]] = dict(zip(self.cols.keys(), row))
            self.rows[row[0]]["n"] = i + 1  # 1-based position in the table

    def get_col_list(self, coltitle):
        """Return an ordered list of a particular column."""
        return [d[coltitle] for d in self.rows.values()]
Example #25
0
def main(
    plot=False,
    savefig="None",
    geoms="fcs",
    nNs=7,
    Ns=None,
    nspecies="1,2,3",
    nstencils="3,5,7",
    verbose=False,
    **kwargs
):
    """Run `integrate` over the cartesian product of the varied
    parameters and pickle the gzip'd results next to this script.

    Comma-separated string arguments (Ns, nspecies, nstencils) are
    parsed into int lists; geoms is iterated per character.
    """
    nstencils = [int(_) for _ in nstencils.split(",")]
    if Ns is None:
        # Default grid sizes: 8, 16, ..., doubling nNs times.
        Ns = [8 * (2 ** i) for i in range(nNs)]
    else:
        Ns = list(map(int, Ns.split(",")))
        nNs = len(Ns)
    nspecies = list(map(int, nspecies.split(",")))
    varied = OrderedDict([("nspecies", nspecies), ("N", Ns), ("geom", geoms), ("nstencil", nstencils)])

    # Base keyword set: defaults overridden by caller kwargs.
    kw1 = default_constant.copy()
    kw1.update(kwargs)

    results = {"varied_keys": list(varied.keys()), "varied_values": list(varied.values())}

    # One integration per combination of varied parameter values.
    all_params = list(product(*varied.values()))
    for params in progress(all_params) if verbose else all_params:
        kw2 = kw1.copy()
        kw2.update(dict(zip(varied.keys(), params)))
        results[params] = integrate(**kw2)
    basename = os.path.splitext(os.path.basename(__file__))[0]
    pickle.dump(results, gzip.open(basename + ".pkl", "wb"))
Example #26
0
    def updateTable(self):
        """Rebuild the emulator-systems table from self.coreInfo.

        Collects the (prettified) system name of every core categorized
        as "Emulator", de-duplicates names and their SHA-1 hashes while
        preserving order, and inserts one (hash, name) row per system.

        NOTE: Python 2 code (`iteritems`, sha1 over a str).
        """

        with SqlDatabase(self.dbFile, autoCommit=True) as db:
            self.updateColumns(db)

            systems = []
            hashes = []
            for k, v in self.coreInfo["cores"].iteritems():

                # Only emulator cores contribute systems.
                if "categories" not in v or v["categories"] != "Emulator":
                    continue

                # Prefer the explicit system name over the display name.
                name = ""
                if "systemname" in v:
                    name = v["systemname"]
                else:
                    if "display_name" in v:
                        name = v["display_name"]

                name = self.prettifySystem(name)
                systems.append(name)
                h = hashlib.sha1(name).hexdigest()
                hashes.append(h)

            # OrderedDicts as ordered sets: first occurrence wins,
            # keeping systems and hashes aligned after de-duplication.
            sysDict = OrderedDict({})
            hashDict = OrderedDict({})
            for i in systems:
                sysDict[i] = None
            for a in hashes:
                hashDict[a] = None

            for s, h in zip(sysDict.keys(), hashDict.keys()):
                db.insert(self.tableName, self.rowsDict.keys(), values=[h, s])
Example #27
0
class LaborExchange(object):
    """A thread-safe work queue: repos wait in `q` until a worker takes
    them; `wip` tracks repos currently being processed."""

    def __init__(self):
        self.cond = threading.Condition(threading.Lock())
        self.q = OrderedDict()  # repo -> pending job, FIFO
        self.wip = set()        # repos currently in progress

    def __repr__(self):
        with self.cond:
            return "Queue: %s, WIP: %s" % (self.q.keys(), list(self.wip))

    def add(self, repo, job):
        """Queue a job for a repo (unless it is both queued and in
        progress) and wake any waiting workers."""
        with self.cond:
            already_active = repo in self.wip and repo in self.q.keys()
            if not already_active:
                self.q[repo] = job
                self.cond.notifyAll()

    def get_and_start(self):
        """Block until a queued repo is available, move it to WIP, and
        return (repo, job)."""
        with self.cond:
            while not [queued for queued in self.q if queued not in self.wip]:
                self.cond.wait()
            repo, job = self.q.popitem(False)  # FIFO: pop oldest entry
            self.wip.add(repo)
            return repo, job

    def finished(self, repo):
        """Mark a repo as no longer in progress."""
        with self.cond:
            self.wip.remove(repo)
def test():
    """Compare the project-local OrderedDict against the stdlib one:
    equal while same order, unequal after move_to_end, equal again once
    the stdlib dict is reordered to match.

    NOTE(review): assumes the custom OrderedDict's keys()/values()
    comparisons are order-sensitive (stdlib view comparisons are not).
    """
    from collections import OrderedDict as StdlibOrderedDict

    ordered_dict = OrderedDict(((1, "a"), (2, "b"), (3, "c")))
    stdlib_ordered_dict = StdlibOrderedDict(((1, "a"), (2, "b"), (3, "c")))

    assert ordered_dict == stdlib_ordered_dict
    assert stdlib_ordered_dict == ordered_dict
    assert ordered_dict.items() == stdlib_ordered_dict.items()
    assert ordered_dict.keys() == stdlib_ordered_dict.keys()
    assert ordered_dict.values() == stdlib_ordered_dict.values()

    # Reorder only the custom dict: key 1 moves to the back.
    ordered_dict.move_to_end(1)

    assert ordered_dict != stdlib_ordered_dict
    # assert stdlib_ordered_dict != ordered_dict
    assert ordered_dict.items() != stdlib_ordered_dict.items()
    assert ordered_dict.keys() != stdlib_ordered_dict.keys()
    assert ordered_dict.values() != stdlib_ordered_dict.values()

    # Recreate the same ordering in the stdlib dict: 1 re-inserted last.
    del stdlib_ordered_dict[1]
    stdlib_ordered_dict[1] = "a"

    assert ordered_dict == stdlib_ordered_dict
    assert stdlib_ordered_dict == ordered_dict
    assert ordered_dict.items() == stdlib_ordered_dict.items()
    assert ordered_dict.keys() == stdlib_ordered_dict.keys()
    assert ordered_dict.values() == stdlib_ordered_dict.values()

    # Round-trip conversions preserve equality in both directions.
    assert ordered_dict == OrderedDict(stdlib_ordered_dict) == stdlib_ordered_dict
    assert ordered_dict == StdlibOrderedDict(ordered_dict) == stdlib_ordered_dict
class Model:
    """In-memory representation of a 3D model; Compile() packs it into
    a zip archive for the game runtime.

    NOTE: Python 2 code (StringIO module); relies on module-level
    helpers Vector3, WriteVector3, WriteVector2, WriteUInt and the
    INVERT_Z_COMPONENT flag.
    """

    def __init__(self):
        # AABB corners, widened while scanning positions in Compile().
        self.MinPosition = Vector3([sys.float_info.max, sys.float_info.max, sys.float_info.max])
        self.MaxPosition = Vector3([-sys.float_info.max, -sys.float_info.max, -sys.float_info.max])
        self.Positions = []
        self.UVs = []
        self.Normals = []
        self.Meshes = []
        # Ordered "set" of unique vertices; each vertex carries indices
        # into Positions/UVs/Normals (Position/UV/Normal attributes).
        self.Vertices = OrderedDict()
        self.MaterialLib = None
        self.GenerateCollisionData = False

    def Compile(self, filename):
        """Write the model (meshes, interleaved vertex data, materials,
        optional collision data) into a ZIP archive at `filename`."""
        if INVERT_Z_COMPONENT:
            # Flip handedness by mirroring the Z axis in place.
            for i in range(len(self.Positions)):
                self.Positions[i].z = -self.Positions[i].z

                # AABB calculation
        for pos in self.Positions:
            self.MinPosition = self.MinPosition.Min(Vector3([pos.x, pos.y, pos.z]))
            self.MaxPosition = self.MaxPosition.Max(Vector3([pos.x, pos.y, pos.z]))

        with zipfile.ZipFile(filename, "w", zipfile.ZIP_DEFLATED) as zf:
            # TODO: Write out the vertex description so it's not hard coded in the game

            # Be very careful re-ordering anything below! Many of these Compile calls have
            # side effects.

            model = StringIO.StringIO()
            WriteVector3(model, self.MinPosition)
            WriteVector3(model, self.MaxPosition)

            # Write out each of the meshes
            meshes = StringIO.StringIO()
            WriteUInt(meshes, len(self.Meshes))
            for mesh in self.Meshes:
                mesh.Compile(meshes, self.Vertices, self.MaterialLib.Materials.keys())
            zf.writestr("__meshes__", meshes.getvalue())

            # Write out all the vertex data, interleaved
            WriteUInt(model, len(self.Vertices))
            for vtx in self.Vertices.keys():
                pos = self.Positions[vtx.Position]
                uv = self.UVs[vtx.UV]
                normal = self.Normals[vtx.Normal]

                WriteVector3(model, pos)
                WriteVector2(model, uv)
                WriteVector3(model, normal)
            zf.writestr("__model__", model.getvalue())

            self.MaterialLib.Compile(zf)

            # Optional flat position list for collision detection.
            collision = StringIO.StringIO()
            if self.GenerateCollisionData:
                WriteUInt(collision, len(self.Vertices))
                for vtx in self.Vertices.keys():
                    pos = self.Positions[vtx.Position]
                    WriteVector3(collision, pos)
                zf.writestr("__collision__", collision.getvalue())
Example #30
0
class Cost:
    """Wraps a Theano cost expression together with the parameters it
    is differentiated against; gradients are pre-scaled by a (possibly
    per-parameter) learning rate.

    NOTE(review): relies on module-level `T` (theano.tensor) when
    compute_gradients is called.
    """

    def __init__(self, cost, params, constants=None):
        """
        :param cost: scalar cost expression
        :param params: iterable of parameters to differentiate w.r.t.
        :param constants: optional iterable of inputs held constant
            during differentiation
        """
        self.cost = cost
        self.grads = OrderedDict()  # param -> lr-scaled gradient
        self.computed_cost = False

        # Ordered "set" of parameters, preserving caller order.
        self.params = OrderedDict()
        for p in params:
            self.params[p] = True

        self.constants = OrderedDict()
        constants = [] if constants is None else constants
        for c in constants:
            self.constants[c] = True

    def compute_gradients(self, lr, multipliers=None):
        """Populate self.grads with lr-scaled gradients.

        :param lr: base learning rate
        :param multipliers: optional {param name: factor} overriding the
            effective learning rate per parameter
        """
        multipliers = OrderedDict() if multipliers is None else multipliers
        # list() the views: dict views are not sequences on Python 3.
        grads = T.grad(
            self.cost,
            list(self.params.keys()),
            consider_constant=list(self.constants.keys()),
            disconnected_inputs="ignore",
        )
        for param, gparam in zip(self.params.keys(), grads):
            param_lr = multipliers.get(param.name, 1.0) * lr
            self.grads[param] = param_lr * gparam
        self.computed_cost = True

    def update_gradient(self, param, new_grad):
        """Replace a computed gradient (e.g. after clipping).

        Only valid after compute_gradients, and only for a parameter
        that already has a gradient entry.
        """
        assert self.computed_cost
        # Fixed: dict.has_key was removed in Python 3; `in` works on
        # both Python 2 and 3.
        assert param in self.grads
        self.grads[param] = new_grad