Example #1
def get_variable_counts(df, var_lookup, domain_code):
    """
    Parameters:
        df (pandas.DataFrame) - in the form of the `pivot_counts_df` output
        var_lookup (dict) - mapping of Variable ids to labels
        domain_code (str) - column accessor name for aggregation

    Returns:
        pandas.DataFrame or None
    """
    if domain_code not in df.columns:
        return None

    df2 = df[domain_code].reset_index()

    grouped = df2.groupby(['study', 'study_label', domain_code],
                          as_index=False).max()

    if len(grouped['count'].dropna()) == 0:
        return None

    grouped['var_code'] = grouped[domain_code].map(
        valmap(lambda x: x[0]['code'], var_lookup))
    grouped['var_label'] = grouped[domain_code].map(
        valmap(lambda x: x[0]['label'], var_lookup))

    return grouped
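A minimal sketch of the mapping pattern used above, with a hypothetical `var_lookup` in the shape the docstring describes (each value is a list of dicts carrying `code` and `label`):

from toolz import valmap

var_lookup = {'AGE': [{'code': 'AGE01', 'label': 'Age at enrollment'}],
              'SEX': [{'code': 'SEX01', 'label': 'Sex'}]}

codes = valmap(lambda x: x[0]['code'], var_lookup)
labels = valmap(lambda x: x[0]['label'], var_lookup)
# codes  == {'AGE': 'AGE01', 'SEX': 'SEX01'}
# labels == {'AGE': 'Age at enrollment', 'SEX': 'Sex'}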
Example #2
def pretty_json(item):
    if 'feature_maps' in item:
        item['feature_maps'] = valmap(__wrap_to_prevent_indentation,
                                      item['feature_maps'])
    if 'activations' in item:
        item['activations'] = valmap(__wrap_to_prevent_indentation,
                                     item['activations'])
    return json.dumps(item, indent=4, cls=NoIndentEncoder)
Example #3
 def parse(raw_form: str):
     form_data = valmap(first, parse_qs(raw_form, strict_parsing=True))
     return RsvpFormData(guest_id=form_data[RsvpFormData.GUEST_ID_FIELD],
                         party_id=form_data[RsvpFormData.PARTY_ID_FIELD],
                         attending=valmap(
                             _parse_bool,
                             dissoc(form_data, RsvpFormData.GUEST_ID_FIELD,
                                    RsvpFormData.PARTY_ID_FIELD)))
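`parse_qs` returns every value as a list; `valmap(first, ...)` flattens that to one value per key before the fields are split apart. A self-contained sketch with a made-up query string:

from urllib.parse import parse_qs
from toolz import first, valmap

raw_form = "guest_id=42&party_id=7&alice=yes"
form_data = valmap(first, parse_qs(raw_form, strict_parsing=True))
# {'guest_id': '42', 'party_id': '7', 'alice': 'yes'}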
Example #4
    def __init__(self, stage):
        self.stage = stage
        elements = stage.stage_elements()

        def rotation_for_element(e):
            data = stage.element_data(e)
            return data['rotation']

        element_pairs = list(zip(elements, elements[1:]+[elements[0]]))
        pair_dict = dict(zip(elements, element_pairs))
        rotation_dict = valmap(lambda es: (rotation_for_element(es[0]), rotation_for_element(es[1])), pair_dict)
        difference_dict = valmap(lambda ps: (ps[1] - ps[0]) % 360, rotation_dict)

        self.distance = difference_dict
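A small sketch of the two chained `valmap` calls, with hypothetical rotations for three elements (each element is paired with its clockwise neighbour, and the difference is taken modulo 360):

from toolz import valmap

rotations = {'a': 10, 'b': 350, 'c': 90}
pairs = {'a': ('a', 'b'), 'b': ('b', 'c'), 'c': ('c', 'a')}
rotation_dict = valmap(lambda es: (rotations[es[0]], rotations[es[1]]), pairs)
distance = valmap(lambda ps: (ps[1] - ps[0]) % 360, rotation_dict)
# {'a': 340, 'b': 100, 'c': 280}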
Example #5
 def decision_function(self, X, exposure=None):
     if not hasattr(self, 'estimator_'):
         raise NotFittedError()
     pred_args = valmap(growd(2),
                        valfilter(notnone, dict(X=X, exposure=exposure)))
     score = self.estimator_.predict(**pred_args)
     return score
Example #6
def _parse_metrics(metrics):
    # skipped metrics can sometimes be in unicode, replace unicode with NA if it exists
    metrics = dtz.valmap(lambda x: 'nan' if isinstance(x, unicode) else x, metrics)

    # missing = set(["Genes Detected", "Transcripts Detected", "Mean Per Base Cov."])
    correct = set(["rRNA", "rRNA_rate"])
    percentages = set(["Intergenic pct", "Intronic pct", "Exonic pct"])
    to_change = dict({"5'-3' bias": 1,
                      "Intergenic pct": "Intergenic Rate",
                      "Intronic pct": "Intronic Rate",
                      "Exonic pct": "Exonic Rate",
                      "Duplication Rate of Mapped": 1,
                      "Average_insert_size": 1,
                      })
    total = ["Not aligned", "Aligned to genes", "No feature assigned"]

    out = {}
    total_reads = sum([int(metrics[name]) for name in total])
    out.update({key: val for key, val in metrics.iteritems() if key in correct})
    [metrics.update({name: 1.0 * float(metrics[name]) / 100}) for name in
     percentages]

    for name in to_change:
        if not to_change[name]:
            continue
        try:
            if to_change[name] == 1:
                out.update({name: float(metrics[name])})
            else:
                out.update({to_change[name]: float(metrics[name])})
        # if we can't convert metrics[name] to float (?'s or other non-floats)
        except ValueError:
            continue
    return out
Example #7
def _to_pandas_code(reverse_categories, codes, missing_value):
    """更新字典
    确保missing_value其代码为-1
    原始代码 0 ~ len(categories)
    修改为 -1 ~ len(categories) - 1
    """
    zero_d = valfilter(lambda x: x == 0, reverse_categories)
    zero_d_key = list(zero_d.keys())[0]
    # swap zero <-> missing_value
    none_first_d = reverse_categories.copy()
    none_first_d[zero_d_key] = reverse_categories[missing_value]
    none_first_d[missing_value] = reverse_categories[zero_d_key]

    def dec_one(x):
        return x - 1

    none_first_d = valmap(dec_one, none_first_d)
    new_codes = codes.copy()
    # change missing_value codes directly to -1
    new_codes = np.where(new_codes == reverse_categories[missing_value], -1,
                         new_codes)

    # change the zero-key codes to the missing_value code
    new_codes = np.where(new_codes == reverse_categories[zero_d_key],
                         reverse_categories[missing_value], new_codes)
    return new_codes, none_first_d
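The function pairs `valfilter` (to locate the key currently coded 0) with `valmap` (to shift every code down by one). A minimal sketch of those two calls on a hypothetical category mapping:

from toolz import valfilter, valmap

reverse_categories = {'<NA>': 0, 'low': 1, 'high': 2}
zero_d_key = list(valfilter(lambda x: x == 0, reverse_categories).keys())[0]
shifted = valmap(lambda x: x - 1, reverse_categories)
# zero_d_key == '<NA>'; shifted == {'<NA>': -1, 'low': 0, 'high': 1}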
Example #8
def fetch_exercise_info(lang, exercise):
    url = "https://www.khanacademy.org/api/internal/translations/exercise_strings/{}?lang={}".format(exercise, lang)
    response = requests.get(url)
    exerciseInfo = response.json()
    # Assemble string lists
    aidToStrings = defaultdict(list)
    stringidRe = re.compile(r"crwdns(\d+):")
    for ti in exerciseInfo["translationItems"]:
        stringid = int(stringidRe.match(ti["jiptString"]).group(1))
        aidToStrings[ti["assessmentItem"]].append(stringid)
    # Assemble exercise type map
    aidToPtype = {}
    for ptype, aids in exerciseInfo["problemTypes"].items():
        for aid in aids:
            aidToPtype[aid] = ptype
    # Extract structure map
    aidToStructure = valmap(json.loads, exerciseInfo["translatedAssessmentItems"])
    # Assemble into one list of objects
    result = {}
    aids = set(itertools.chain(aidToStrings.keys(), aidToPtype.keys(), aidToStructure.keys()))
    for aid in aids:
        result[aid] = {
            "type": aidToPtype.get(aid, None),
            "strings": aidToStrings.get(aid, []),
            "structure": aidToStructure.get(aid, []),
        }
    return result
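`valmap(json.loads, ...)` does the heavy lifting for the structure map: each translated assessment item arrives as a JSON string and is decoded in place. A minimal sketch with made-up payloads:

import json
from toolz import valmap

raw = {'x1': '{"kind": "radio"}', 'x2': '{"kind": "numeric"}'}
aidToStructure = valmap(json.loads, raw)
# {'x1': {'kind': 'radio'}, 'x2': {'kind': 'numeric'}}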
Example #9
    def explore(self):
        """
        Function to conduct the exploration process
        """
        # Conduct max_iter iterations
        for iter_num in tqdm.tqdm(range(self.max_iter)):
            # If stat_collection_freq, run an exploit
            if (iter_num + 1) % self.stat_collection_freq == 0:
                logger.info("Collecting stats...")
                self.collect_stats(iter_num)

            self.single_iteration()

        self.collect_stats(iter_num)

        # Dump the results to file
        if self.fname:
            json.dump(
                {
                    "policy":
                    dicttoolz.keymap(
                        str,
                        dicttoolz.valmap(
                            lambda d: dicttoolz.keymap(str, d),
                            self.policy.policy,
                        ),
                    ),
                    "stats":
                    self.stats,
                },
                open(self.fname, "w"),
            )
        logger.info(msg=self.stats)
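The nested `keymap`/`valmap` combination exists because `json.dump` requires string keys at every level. A minimal sketch, assuming a policy keyed by tuples:

import json
from toolz import dicttoolz

policy = {(0, 0): {(0, 1): 0.5, (1, 0): 0.5}}
serializable = dicttoolz.keymap(
    str, dicttoolz.valmap(lambda d: dicttoolz.keymap(str, d), policy))
json.dumps(serializable)
# '{"(0, 0)": {"(0, 1)": 0.5, "(1, 0)": 0.5}}'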
Example #10
def _parse_metrics(metrics):
    # skipped metrics can sometimes be in unicode, replace unicode with NA if it exists
    metrics = dtz.valmap(lambda x: 'nan' if isinstance(x, unicode) else x, metrics)

    # missing = set(["Genes Detected", "Transcripts Detected", "Mean Per Base Cov."])
    correct = set(["rRNA", "rRNA_rate"])
    percentages = set(["Intergenic pct", "Intronic pct", "Exonic pct"])
    to_change = dict({"5'-3' bias": 1, "Intergenic pct": "Intergenic Rate",
                      "Intronic pct": "Intronic Rate",
                      "Exonic pct": "Exonic Rate",
                      "Duplication Rate of Mapped": 1,
                      "Average insert size": 1,
                      })
    total = ["Not aligned", "Aligned to genes", "No feature assigned"]

    out = {}
    total_reads = sum([int(metrics[name]) for name in total])
    out.update({key: val for key, val in metrics.iteritems() if key in correct})
    [metrics.update({name: 1.0 * float(metrics[name]) / 100}) for name in
     percentages]

    for name in to_change:
        if not to_change[name]:
            continue
        try:
            if to_change[name] == 1:
                out.update({name: float(metrics[name])})
            else:
                out.update({to_change[name]: float(metrics[name])})
        # if we can't convert metrics[name] to float (?'s or other non-floats)
        except ValueError:
            continue
    return out
Example #11
    def __init__(self, config: Dict):
        self._config_serializers: Dict[str, Type[Serializer]] = config["serializers"]
        self._config_bindings: Dict[Type, str] = config["serializer_bindings"]

        self._serializer_map: Dict[Type, Type[Serializer]] = dicttoolz.valmap(
            lambda v: self._config_serializers[v], self._config_bindings
        )

        # The bindings are sorted by specifity (meaning the lower the types
        # in the hierarchy occupy the first positions)
        self._bindings = list(sort((k, v) for k, v in self._serializer_map.items()))

        self._default_serializer: Optional[Type[Serializer]]
        try:
            self._default_serializer = self._config_serializers[config["default"]]
        except Exception:
            self._default_serializer = None

        self._serializers_by_id: Dict[int, Type[Serializer]] = {
            v.identifier(): v for k, v in self._config_serializers.items()  # type: ignore
        }
        self._quickserializer_by_identity: List[Optional[Type[Serializer]]] = [
            None
        ] * 1024

        for k, v in self._serializers_by_id.items():
            self._quickserializer_by_identity[k] = v
Example #12
def _parse_metrics(metrics):
    # skipped metrics can sometimes be in unicode, replace unicode with NA if it exists
    metrics = dtz.valmap(lambda x: 'nan' if isinstance(x, unicode) else x, metrics)

    missing = set(["Genes Detected", "Transcripts Detected",
                   "Mean Per Base Cov."])
    correct = set(["Intergenic pct", "Intronic pct", "Exonic pct"])
    to_change = dict({"5'-3' bias": 1, "Intergenic pct": "Intergenic Rate",
                      "Intronic pct": "Intronic Rate", "Exonic pct": "Exonic Rate",
                      "Not aligned": 0, 'Aligned to genes': 0, 'Non-unique alignment': 0,
                      "No feature assigned": 0, "Duplication Rate of Mapped": 1,
                      "Fragment Length Mean": 1,
                      "rRNA": 1, "Ambiguou alignment": 0})
    total = ["Not aligned", "Aligned to genes", "No feature assigned"]

    out = {}
    total_reads = sum([int(metrics[name]) for name in total])
    out['rRNA rate'] = 1.0 * int(metrics["rRNA"]) / total_reads
    out['Mapped'] = sum([int(metrics[name]) for name in total[1:]])
    out['Mapping Rate'] = 1.0 * int(out['Mapped']) / total_reads
    [out.update({name: 0}) for name in missing]
    [metrics.update({name: 1.0 * float(metrics[name]) / 100}) for name in correct]

    for name in to_change:
        if not to_change[name]:
            continue
        if to_change[name] == 1:
            out.update({name: float(metrics[name])})
        else:
            out.update({to_change[name]: float(metrics[name])})
    return out
Example #13
def binarySearchParamsParallel(X, y, model, params, paramRanges):
    with multiprocessing.Pool(multiprocessing.cpu_count()) as p:
        args_generator = ((X, 
                           y,
                           model,
                           params, 
                           *x) for x in paramRanges)
        results = p.starmap(compareValsBaseCase, args_generator)
        name_result_tuples = zip((x[0] for x in paramRanges), 
                                 results)
        valsAndScores = dict(name_result_tuples)
    
    topVals = valmap(getTopVals, valsAndScores)
    
    score = model(X, 
                  y, 
                  {**params, 
                   **topVals},
                  )
    
    return {"values": topVals,
           "score": score,
           "valsAndScores": valsAndScores,
           "n_iterations": sum(x.shape[0] 
                               for x in valsAndScores.values())}
Example #14
def _parse_metrics(metrics):
    # skipped metrics can sometimes be in unicode, replace unicode with NA if it exists
    metrics = dtz.valmap(lambda x: 'nan' if isinstance(x, unicode) else x, metrics)

    missing = set(["Genes Detected", "Transcripts Detected",
                   "Mean Per Base Cov."])
    correct = set(["Intergenic pct", "Intronic pct", "Exonic pct"])
    to_change = dict({"5'-3' bias": 1, "Intergenic pct": "Intergenic Rate",
                      "Intronic pct": "Intronic Rate", "Exonic pct": "Exonic Rate",
                      "Not aligned": 0, 'Aligned to genes': 0, 'Non-unique alignment': 0,
                      "No feature assigned": 0, "Duplication Rate of Mapped": 1,
                      "Fragment Length Mean": 1,
                      "rRNA": 1, "Ambiguou alignment": 0})
    total = ["Not aligned", "Aligned to genes", "No feature assigned"]

    out = {}
    total_reads = sum([int(metrics[name]) for name in total])
    out['rRNA rate'] = 1.0 * int(metrics["rRNA"]) / total_reads
    out['Mapped'] = sum([int(metrics[name]) for name in total[1:]])
    out['Mapping Rate'] = 1.0 * int(out['Mapped']) / total_reads
    [out.update({name: 0}) for name in missing]
    [metrics.update({name: 1.0 * float(metrics[name]) / 100}) for name in correct]

    for name in to_change:
        if not to_change[name]:
            continue
        try:
            if to_change[name] == 1:
                out.update({name: float(metrics[name])})
            else:
                out.update({to_change[name]: float(metrics[name])})
        # if we can't convert metrics[name] to float (?'s or other non-floats)
        except ValueError:
            continue
    return out
Example #15
 def parse(raw_form: str):
     form_data = valmap(first, parse_qs(raw_form, strict_parsing=True))
     return RideShareFormData(
         guest_id=form_data[RideShareFormData.GUEST_ID_FIELD],
         party_id=form_data[RideShareFormData.PARTY_ID_FIELD],
         rideshare=_parse_bool(
             form_data[RideShareFormData.RIDESHARE_FIELD]))
Example #16
def histogram(results):
    from toolz import recipes, dicttoolz
    import math
    counts = recipes.countby(lambda r: r.upstream_status, results.values())
    bars = dicttoolz.valmap(lambda v: "#" * int(math.ceil(float(v) / len(results) * 100)), counts)
    for k in bars:
        print("%-20s %s (%d)" % (k.capitalize() if k else "No status", bars[k], counts[k]))
Example #17
def chunk_maps(datamaps, factory=dict):
    if isinstance(datamaps, dict):
        return datamaps

    keys = datamaps[0].keys()
    datamap = {k: list(toolz.pluck(k, datamaps)) for k in keys}

    return dicttoolz.valmap(np.array, datamap, factory=factory)
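A worked example of `chunk_maps`: `toolz.pluck` gathers each key's values across the list of dicts, and `valmap(np.array, ...)` stacks them into arrays:

import numpy as np
import toolz
from toolz import dicttoolz

datamaps = [{'x': 1, 'y': 2}, {'x': 3, 'y': 4}]
keys = datamaps[0].keys()
datamap = {k: list(toolz.pluck(k, datamaps)) for k in keys}
result = dicttoolz.valmap(np.array, datamap)
# {'x': array([1, 3]), 'y': array([2, 4])}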
Example #18
 def score(self, X, y, sample_weight=None, exposure=None):
     partial_arguments = self._process_args(y=y, sample_weight=sample_weight, exposure=exposure)
     predict_arguments = self._process_args(X=X, exposure=exposure)
     loss_function = lambda pred: self.loss_function(pred=shrinkd(1, pred), **valmap(shrinkd(1), partial_arguments))
     prediction = shrinkd(1, self.predict(**predict_arguments))
     loss = loss_function(prediction)
     initial_prediction = shrinkd(1, self.coefficients_[0] * self.estimators_[0].predict(**predict_arguments))
     initial_loss = loss_function(initial_prediction)
     return (initial_loss - loss) / initial_loss
Example #19
 def compare_results(compa, cols, vals):
     vals = tuple(map(none_to_minus_inf, vals))
     res = set([row['id'] for row in engine.execute(select(table.columns).where(compa))])
     all_ = [valmap(none_to_minus_inf, row) for row in engine.execute(select(table.columns))]
     cor = set()
     for row in all_:
         if tuple(row[col.name] for col in cols) > vals:
             cor.add(row['id'])
     assert_equal(res, cor)
Example #20
def sample(stream, key, limit):
    items = ijson.items(sys.stdin, 'item')

    classes = groupby(key, items)
    samples = valmap(
        lambda xs: random.sample(xs, limit if len(xs) >= limit else len(xs)),
        classes)
    sample = list(concat(samples.values()))
    return sample
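The `limit if len(xs) >= limit else len(xs)` expression is just `min(limit, len(xs))`: each class is capped at `limit` items. A self-contained sketch with inline data instead of stdin:

import random
from toolz import concat, groupby, valmap

limit = 2
items = [('a', 1), ('a', 2), ('a', 3), ('b', 1)]
classes = groupby(lambda x: x[0], items)
samples = valmap(lambda xs: random.sample(xs, min(limit, len(xs))), classes)
balanced = list(concat(samples.values()))  # at most `limit` items per class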
Example #21
def histogram(results):
    from toolz import recipes, dicttoolz
    import math
    counts = recipes.countby(lambda r: r.upstream_status, results.values())
    bars = dicttoolz.valmap(
        lambda v: "#" * int(math.ceil(float(v) / len(results) * 100)), counts)
    for k in bars:
        print("%-20s %s (%d)" %
              (k.capitalize() if k else "No status", bars[k], counts[k]))
Example #22
def emptystr_to_none(item):
    """
    Replace empty strings with `None` recursively inside a JSON-like object.
    """
    if isinstance(item, list):
        return list(map(emptystr_to_none, item))
    if isinstance(item, dict):
        return valmap(emptystr_to_none, item)
    else:
        return None if item == "" else item
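Usage sketch: the recursion dispatches on lists and dicts and only rewrites leaf strings.

emptystr_to_none({'a': '', 'b': ['', 'x'], 'c': {'d': ''}})
# {'a': None, 'b': [None, 'x'], 'c': {'d': None}}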
Example #23
    def consolidate(group):
        """
        [List] group => [Dictionary] consolidated position

        group is a list of cash entries of the same currency; here we add up
        their amounts.
        """
        p = group[0].copy()
        p['balance'] = sum(map(lambda p: p['balance'], group))
        return valmap(lambda v: removeBOM(v) if isinstance(v, str) else v, p)
Example #24
 def load_from_file(self, filename):
     """
     Function to load policy from file
     """
     from_file = json.load(open(filename))
     self.policy.policy = dicttoolz.valmap(
         lambda d: dicttoolz.keymap(eval, d),
         dicttoolz.keymap(eval, from_file.get("policy")),
     )
     self.stats = from_file.get("stats")
Example #25
def getTotaledImportances(labels, forest):
    featDict = dict(zip(labels, forest.feature_importances_))
    uniqueLabels = set(x.split("Is")[0] for x in featDict.keys())

    featsAndOneHots = {y: [x for x in labels if y in x] for y in uniqueLabels}

    summedFeats = valmap(lambda x: sum(featDict[y] for y in x),
                         featsAndOneHots)

    return sorted(summedFeats.items(), key=lambda x: x[1], reverse=True)
Example #26
def rotationMatrix(path):
    viewVectors = getTranslationVectorPerCamera(path)
    viewVectors = valmap(lambda arr: arr[:3], viewVectors)
    camera3 = viewVectors['card3JPG']
    camera5 = viewVectors['card5JPG']
    angle = angleBetween(np.array(camera3), np.array(camera5))
    rotationMatrix = createYRotationMatric(np.deg2rad(180))

    print(np.rad2deg(-angle))

    return rotationMatrix
Example #27
def pofilesAPI():
    """Get a sorted list of all PO files"""
    response.content_type = "application/json"
    # Use the DE translation map as master (for no good reason, really)
    with open("cache/translation-filemap-de.json") as infile:
        filemap = json.load(infile)
        # Extract only paths
        pathdict = valmap(operator.itemgetter("path"), filemap)
        # Sort lexicographically
        pathlist = list(pathdict.values())
        pathlist = sorted(pathlist)
        return json.dumps(pathlist)
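`operator.itemgetter("path")` applied through `valmap` strips each file record down to its path. A minimal sketch with a hypothetical file map:

import operator
from toolz import valmap

filemap = {'about': {'path': 'de/about.po', 'id': 3},
           'index': {'path': 'de/index.po', 'id': 1}}
pathlist = sorted(valmap(operator.itemgetter('path'), filemap).values())
# ['de/about.po', 'de/index.po']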
Example #28
def pofilesAPI():
    """Get a sorted list of all PO files"""
    response.content_type = 'application/json'
    # Use the DE translation map as master (for no good reason, really)
    with open("cache/translation-filemap-de.json") as infile:
        filemap = json.load(infile)
        # Extract only paths
        pathdict = valmap(operator.itemgetter("path"), filemap)
        # Sort lexicographically
        pathlist = list(pathdict.values())
        pathlist = sorted(pathlist)
        return json.dumps(pathlist)
Example #29
def getCameraParameters(pathToBlockExchangeXML, pathToAgisoftXML):
    tree = ET.parse(pathToBlockExchangeXML)
    root = tree.getroot()
    block = root.find('Block')
    photoGroups = block.find('Photogroups').findall('Photogroup')
    counter = 1

    cardToPhotoGroup = {}

    for group in photoGroups:
        photos = group.findall('Photo')
        photos = map(getCameraName, photos)
        for photo in photos:
            cardToPhotoGroup[photo] = "photogoup{}".format(counter)
        counter += 1

    counter = 1

    photogoupToCameraParameters = {}

    for group in photoGroups:
        params = {}
        imageDimensions = list(map(lambda dim: dim.text,
                                   list(group.find('ImageDimensions'))))
        distortion = group.find('Distortion')
        distortions = "{} {}".format(
            distortion.find('K2').text,
            distortion.find('K3').text)

        params['ViewportPx'] = "{} {}".format(imageDimensions[0],
                                              imageDimensions[1])
        params['LensDistortion'] = distortions
        params['CenterPx'] = "{} {}".format(
            int(imageDimensions[0]) / 2,
            int(imageDimensions[1]) / 2)
        params['CameraType'] = '0'
        params['PixelSizeMm'] = "1 1"

        photogoupToCameraParameters["photogoup{}".format(counter)] = params

        counter += 1

    focalmmPerPhoto = getFocalFromAgisoftXml(pathToAgisoftXML)

    cardParams = valmap(
        lambda photogroup: photogoupToCameraParameters[photogroup],
        cardToPhotoGroup)

    for card in cardParams:
        focalLength = focalmmPerPhoto[card]
        cardParams[card]['FocalMm'] = focalLength

    return cardParams
Example #30
 def as_samplerate(self, samplerate):
     """
     Returns a copy of the current filter bank with a different samplerate.
     All filters are recomputed when calling this function.
     """
     # Fast path if samplerate has not changed
     if samplerate == self.samplerate:
         return self
     # Create new bank with recomputed filters.
     ret = FilterBank(samplerate)
     ret.filters = valmap(operator.methodcaller("as_samplerate", samplerate), self.filters)
     return ret
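`operator.methodcaller("as_samplerate", samplerate)` turns the method call into a plain function, which `valmap` can then apply to every filter. The same trick on strings:

import operator
from toolz import valmap

valmap(operator.methodcaller('upper'), {'a': 'low', 'b': 'high'})
# {'a': 'LOW', 'b': 'HIGH'}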
Example #31
    def __init__(self, stage):
        self.stage = stage
        elements = stage.stage_elements()

        def point_for_element(e):
            data = stage.element_data(e)
            # XXX: Y coordinate grows downwards on canvas
            return Vect2D(data['center_x'], -data['center_y'])

        element_pairs = list(zip(elements, elements[1:]+[elements[0]]))
        pair_dict = dict(zip(elements, element_pairs))
        #pair_dict = dict([(x[0], x) for x in element_pairs])
        point_dict = valmap(lambda es: (point_for_element(es[0]), point_for_element(es[1])), pair_dict)
        vector_dict = valmap(lambda ps: ps[1] - ps[0], point_dict)
        #vector_pairs_dict = valmap(lambda es: (vector_dict[es[0]], vector_dict[es[1]]), vector_dict)

        other_pairs = list(zip([elements[-1]] + elements[:-1], elements))
        other_dict = dict(zip(elements, other_pairs))
        other_pairs_dict = valmap(lambda es: (vector_dict[es[0]], vector_dict[es[1]]), other_dict)

        self.data = {
            'vector_angles': valmap(lambda vs: (-vs[0]).angleBetween(vs[1]), other_pairs_dict),
            'angles': valmap(lambda v: v.angle(), vector_dict),
            'length': valmap(lambda v: v.length(), vector_dict)
        }
Example #32
def find_matching_fn_abi(abi, fn_identifier=None, args=None, kwargs=None):
    args = args or tuple()
    kwargs = kwargs or dict()
    filters = []
    num_arguments = len(args) + len(kwargs)

    if fn_identifier is FallbackFn:
        return get_fallback_func_abi(abi)

    if not is_text(fn_identifier):
        raise TypeError("Unsupported function identifier")

    name_filter = functools.partial(filter_by_name, fn_identifier)
    arg_count_filter = functools.partial(filter_by_argument_count, num_arguments)
    encoding_filter = functools.partial(filter_by_encodability, args, kwargs)
    filters.extend([
        name_filter,
        arg_count_filter,
        encoding_filter,
    ])
    function_candidates = pipe(abi, *filters)
    if len(function_candidates) == 1:
        return function_candidates[0]
    else:
        matching_identifiers = name_filter(abi)
        matching_function_signatures = [abi_to_signature(func) for func in matching_identifiers]
        arg_count_matches = len(arg_count_filter(matching_identifiers))
        encoding_matches = len(encoding_filter(matching_identifiers))
        if arg_count_matches == 0:
            diagnosis = "\nFunction invocation failed due to improper number of arguments."
        elif encoding_matches == 0:
            diagnosis = "\nFunction invocation failed due to improper argument encoding."
        elif encoding_matches > 1:
            diagnosis = (
                "\nAmbiguous argument encoding. "
                "Provided arguments can be encoded to multiple functions matching this call."
            )
        message = (
            "\nCould not identify the intended function with name `{name}`, "
            "positional argument(s) of type `{arg_types}` and "
            "keyword argument(s) of type `{kwarg_types}`."
            "\nFound {num_candidates} function(s) with the name `{name}`: {candidates}"
            "{diagnosis}"
        ).format(
            name=fn_identifier,
            arg_types=tuple(map(type, args)),
            kwarg_types=valmap(type, kwargs),
            num_candidates=len(matching_identifiers),
            candidates=matching_function_signatures,
            diagnosis=diagnosis,
        )
        raise ValidationError(message)
Example #33
 def as_samplerate(self, samplerate):
     """
     Returns a copy of the current filter bank with a different samplerate.
     All filters are recomputed when calling this function.
     """
     # Fast path if samplerate has not changed
     if samplerate == self.samplerate:
         return self
     # Create new bank with recomputed filters.
     ret = FilterBank(samplerate)
     ret.filters = valmap(
         operator.methodcaller("as_samplerate", samplerate), self.filters)
     return ret
Example #34
 def __init__(
     self,
     adjacency_dict: Mapping[Vertex, Set[EdgeAsTuple]],
     edge_type = WeightedEdge,
 ):
     super().__init__()
     # the following line is the functional equivalent of
     # {k: {edge_type(x) for x in v} for k, v in adjacency_dict.items()}
     self.adjacencies: Dict[V, Set[E]] = valmap(
         compose(set, partial(starmap, edge_type)),
         adjacency_dict,
     )
     self.marks: Dict[Vertex, bool] = {}
     self.in_use: Dict[Vertex, bool] = {}
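A minimal sketch of the `compose(set, partial(starmap, edge_type))` transformation, using a namedtuple as a stand-in for `WeightedEdge`:

from collections import namedtuple
from functools import partial
from itertools import starmap
from toolz import compose, valmap

Edge = namedtuple('Edge', ['dest', 'weight'])
adjacency_dict = {'a': {('b', 1.0), ('c', 2.5)}}
adjacencies = valmap(compose(set, partial(starmap, Edge)), adjacency_dict)
# {'a': {Edge(dest='b', weight=1.0), Edge(dest='c', weight=2.5)}}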
Example #35
def decode(r: Any) -> Any:
    """
    Decodes a value from an intermediate representation `r`.

    Parameters
    ----------
    r
        An intermediate representation to be decoded.

    Returns
    -------
    Any
        A Python data structure corresponding to the decoded version of ``r``.

    See Also
    --------
    encode
        Inverse function.
    """

    # structural recursion over the possible shapes of r
    if type(r) == dict and "__kind__" in r:
        kind = r["__kind__"]
        cls = cast(Any, locate(r["class"]))

        assert cls is not None, f"Can not locate {r['class']}."

        if kind == Kind.Type:
            return cls

        args = decode(r.get("args", []))
        kwargs = decode(r.get("kwargs", {}))

        if kind == Kind.Instance:
            return cls(*args, **kwargs)

        if kind == Kind.Stateful:
            obj = cls.__new__(cls)
            obj.__dict__.update(kwargs)
            return obj

        raise ValueError(f"Unknown kind {kind}.")

    if type(r) == dict:
        return valmap(decode, r)

    if type(r) == list:
        return list(map(decode, r))

    return r
Example #36
def getAssetCountryAllocation(date, blpData, assetTypeTuples, countryGroups, positions):
	"""
	[String] date (yyyymmdd),
	[Dictionary] blpData,
	[Iterator] assetTypeTuples,
	[List] countryGroups,
	[Iterator] positions
		=> [Dictionary] assetypeTuple -> [Dictionary] countryGroup -> List of
			positions that fall into this asset type and this country group
	"""
	return \
	valmap( partial(getCountryGroupAllocation, date, blpData, countryGroups)
		  , getAssetTypeAllocation(date, blpData, assetTypeTuples, positions)
		  )
Example #37
    def _get_balance(self, cr, uid, ids, name, args, context=None):
        """Computed as following:
        A) Cleared Deposits, Credits, and Interest Amount: SUM of Amts of lines
           Cleared Deposits, Credits, and Interest # of Items: Number of lines

        B) Checks, Withdrawals, Debits, and Service Charges Amount:
           Checks, Withdrawals, Debits, and Service Charges Amount # of Items:

        Cleared Balance:
            (Total Sum of the Deposit Amount Cleared (A) –
             Total Sum of Checks Amount Cleared (B))
        Difference=
            (Ending Balance – Beginning Balance) - cleared balance
            should be zero.
        """
        res = {}
        account_precision = self.pool['decimal.precision'].precision_get(
            cr, uid, 'Account')
        for stmt in self.browse(cr, uid, ids, context=context):
            res[stmt.id] = {}
            cleared = lambda l: l.cleared_bank_account and 'Cleared' or 'Uncleared'
            get_amount = lambda l: [
                round(v.amount, account_precision) for v in l
            ]
            process_lines = compose(valmap(get_amount), groupby(cleared))

            for line_type in ('debit', 'credit'):
                r = process_lines(eval('stmt.%s_move_line_ids' % line_type))
                res[stmt.id].update({
                    'sum_of_%ss' % line_type:
                    sum(r.get('Cleared', [])),
                    'sum_of_%ss_lines' % line_type:
                    len(r.get('Cleared', [])),
                    'sum_of_%ss_unclear' % line_type:
                    sum(r.get('Uncleared', [])),
                    'sum_of_%ss_lines_unclear' % line_type:
                    len(r.get('Uncleared', []))
                })

            res[stmt.id]['cleared_balance'] = round(
                res[stmt.id]['sum_of_debits'] - res[stmt.id]['sum_of_credits'],
                account_precision)
            res[stmt.id]['uncleared_balance'] = round(
                res[stmt.id]['sum_of_debits_unclear'] -
                res[stmt.id]['sum_of_credits_unclear'], account_precision)
            res[stmt.id]['difference'] = round(
                (stmt.ending_balance - stmt.starting_balance) -
                res[stmt.id]['cleared_balance'], account_precision)
        return res
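Note that `valmap(get_amount)` and `groupby(cleared)` are each called with a single argument, which only works with the curried variants from `toolz.curried`. A minimal sketch of that composition, with plain dicts standing in for the browse records:

from toolz.curried import compose, groupby, valmap

cleared = lambda l: 'Cleared' if l['cleared'] else 'Uncleared'
get_amount = lambda lines: [round(l['amount'], 2) for l in lines]
process_lines = compose(valmap(get_amount), groupby(cleared))
process_lines([{'cleared': True, 'amount': 10.456},
               {'cleared': False, 'amount': 1.0}])
# {'Cleared': [10.46], 'Uncleared': [1.0]}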
Example #38
    def computeRuleHitsForFileSet(self, poFiles):
        """
        For each file in the given filename -> PO object dictionary,
        compute the Rule -> Hits dictonary.

        Stores the information in the current instance.
        Does not return anything
        """
        # Compute dict with sorted & prettified filenames
        self.files = sorted(poFiles.keys())
        # Add all futures to the executor
        futures = list(itertools.chain(*(self.computeRuleHits(po, filename)
                                         for filename, po in poFiles.items())))
        # Process the results in first-received order. Also keep track of rule performance
        self.fileRuleHits = collections.defaultdict(dict)
        n_finished = 0
        # Intermediate result storage
        raw_results = collections.defaultdict(dict) # filename -> {rule: result}
        for future in concurrent.futures.as_completed(futures):
            # Extract result
            filename, rule, result = future.result()
            self.fileRuleHits[filename][rule] = result
            # Track progress
            n_finished += 1
            if n_finished % 1000 == 0:
                percent_finished = n_finished * 100. / len(futures)
                print("Rule computation finished {0:.2f} %".format(percent_finished))

        # Compute total stats by file
        self.statsByFile = {
            filename: merge(self.ruleHitsToSeverityCountMap(ruleHits), {
                            "translation_url": self.translationURLs[filename]})
            for filename, ruleHits in self.fileRuleHits.items()
        }
        # Compute map filename -> {rule: numHits for rule}
        self.statsByFileAndRule = {
            filename: valmap(len, ruleHits)
            for filename, ruleHits in self.fileRuleHits.items()
        }
        # Compute map rule -> numHits for rule
        self.totalStatsByRule = merge_with(sum, *(self.statsByFileAndRule.values()))
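A minimal sketch of the two aggregation steps at the end, `valmap(len, ...)` per file and `merge_with(sum, ...)` across files, with made-up rule hits:

from toolz import merge_with, valmap

fileRuleHits = {'de.po': {'r1': [1, 2], 'r2': []},
                'fr.po': {'r1': [3], 'r2': [4, 5]}}
statsByFileAndRule = {f: valmap(len, hits) for f, hits in fileRuleHits.items()}
totalStatsByRule = merge_with(sum, *statsByFileAndRule.values())
# totalStatsByRule == {'r1': 3, 'r2': 2}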
Example #39
from toolz.dicttoolz import valmap

from state import goal

operations = {op.__name__: op for op in (
    namedtuple('partition', ['coll', 'by']),
    namedtuple('len', ['coll']),
    namedtuple('max', ['coll']),
    namedtuple('map', ['coll', 'fn']),
    namedtuple('collection', ['type']),
    namedtuple('attr', ['elem', 'attr']),
    namedtuple('nth', ['coll', 'n']),
)}

collections = valmap(operations['collection'], {
    'goals': goal,
})

globals().update(operations)
globals().update(collections)
__all__ = operations.keys() + collections.keys()


# TODO: Should isinstance(..., walker) work?
class walker(object):
    class __metaclass__(type):
        def __new__(cls, name, bases, attrs):
            # Make walker a regular class so it can be subclassed.
            if bases == (object,):
                return type.__new__(cls, name, bases, attrs)
Example #40
 def relatedness_each(self):
     # atomistic, contiguous, integrated_projected
     norm_factor = 6 * (len(self.elements) - 1)
     return valmap(lambda x: x['relatedness'] / norm_factor, self.results)
Example #41
 def dominance_each(self):
     # 0 - abscence, 2 - secondary, 4 - dominance
     norm_factor = 2 * (len(self.elements) - 1)
     return valmap(lambda x: x['dominance'] / norm_factor, self.results)
Example #42
def test_valmap():
    assert valmap(inc, {1: 1, 2: 2}) == {1: 2, 2: 3}
Example #43
File: map.py Project: tek/tryp.py
 def valmap(self, f: Callable[[B], C]) -> 'Map[A, C]':
     return Map(dicttoolz.valmap(f, dict(self)))
Example #44
 def test_valmap(self):
     D, kw = self.D, self.kw
     assert valmap(inc, D({1: 1, 2: 2}), **kw) == D({1: 2, 2: 3})
Example #45
init_class_file = config.get("Batch", "init_class_file")
online_class_file = config.get("Batch", "online_class_file")
journey_cluster_file = config.get("Batch", "journey_cluster_file")

logfile = config.get("Directories", "logfile")

logging.basicConfig(filename=logfile, level=logging.ERROR)

# Load models
init_class_models = joblib.load(storedmodel_directory + init_class_file)
online_class_models = joblib.load(storedmodel_directory + online_class_file)

# Load information from historical data
historical_journey_clusters = joblib.load(storedmodel_directory + journey_cluster_file)

end_locations = dicttoolz.valmap(
    lambda x: [journey_cluster.averages[["EndLat", "EndLong"]].values.tolist()
               for journey_cluster in x if journey_cluster.clusterID != -1],
    historical_journey_clusters)

mpg_insts = dicttoolz.valmap(
    lambda x: [journey_cluster.averages["MPG_from_MAF"]
               if "MPG_from_MAF" in journey_cluster.averages.keys()
               else float("NaN")
               for journey_cluster in x if journey_cluster.clusterID != -1],
    historical_journey_clusters)

cluster_ids = dicttoolz.valmap(
    lambda x: [str(journey_cluster.clusterID)
               for journey_cluster in x if journey_cluster.clusterID != -1],
    historical_journey_clusters)

journeys = {}
initial_predictions = {}
prob_dict = {}
time_dict = {}


def callback(body):
    try: