def __init__(self, args, gene_set):
     self.args = args
     self.gene_set = gene_set
     self.biotypes = gene_set.biotypes
     self.work_dir = os.path.join(args.workDir, gene_set.sourceGenome, gene_set.geneSet)
     self.metrics_dir = os.path.join(self.work_dir, 'transMap_gene_set_metrics')
     self.output_dir = os.path.join(args.outputDir, gene_set.sourceGenome, gene_set.geneSet)
     self.jobtree_dir = os.path.join(args.jobTreeDir, gene_set.sourceGenome, gene_set.geneSet)
     self.db = os.path.join(self.work_dir, 'comparativeAnnotator', 'classification.db')
     self.target_genomes = frozenset(set(args.targetGenomes) - set([gene_set.sourceGenome]))
     self.ordered_target_genomes = tuple([x for x in gene_set.orderedTargetGenomes if x in self.target_genomes])
     self.query_cfg = QueryCfg(self)
     self.query_target_cfgs = frozendict((genome, QueryTargetCfg(self, genome)) for genome in self.target_genomes)
     self.tm_plots = frozendict((biotype, TransMapPlotCfg(self, self.query_cfg, self.query_target_cfgs, biotype))
                                for biotype in self.biotypes)
     self.gene_set_plots = GeneSetPlotCfg(self, self.query_target_cfgs, self.metrics_dir, mode='transMap')
     if args.augustus is True:
         mode = 'AugustusTMR' if args.augustusHints is not None else 'AugustusTM'
         self.aug_metrics_dir = os.path.join(self.work_dir, '{}_consensus_gene_set_metrics'.format(mode))
         self.augustus_genomes = frozenset(set(args.augustusGenomes) - set([gene_set.sourceGenome]))
         self.ordered_augustus_genomes = tuple([x for x in gene_set.orderedTargetGenomes
                                                if x in self.augustus_genomes])
         self.augustus_cfgs = frozendict((genome, AugustusCfg(self.query_target_cfgs[genome], genome,
                                                              mode, self.aug_metrics_dir))
                                         for genome in self.augustus_genomes)
         self.augustus_gene_set_plots = GeneSetPlotCfg(self, self.query_target_cfgs, self.aug_metrics_dir, mode)
Example #2
 def __init__(self, cfg, target_genome, mode, metrics_dir):
     self.__dict__.update(vars(cfg))  # bring in all configurations from query-target
     self.query_target_cfg = cfg
     self.query_cfg = cfg.query_cfg
     self.query_genome = cfg.query_genome
     self.gene_set_name = cfg.gene_set_name
     self.mode = mode
     self.args = cfg.args
     self.target_genome = target_genome
     self.fasta = cfg.genome_fasta
     self.chrom_sizes = cfg.chrom_sizes
     self.transcript_fasta = cfg.transcript_fasta
     # files that will be produced
     self.augustus_gtf = os.path.join(cfg.work_dir, mode, target_genome + '.gtf')
     self.augustus_gp = os.path.join(cfg.work_dir, mode, target_genome + '.gp')
     self.augustus_bed = os.path.join(cfg.work_dir, mode, target_genome + '.bed')
     self.tmr_transcript_fasta = os.path.join(cfg.work_dir, mode, target_genome + '.transcripts.fa')
     self.vector_gp = os.path.join(cfg.work_dir, mode, target_genome + '.vector.gp')
     # run augustus
     tmr_jobtree = os.path.join(self.jobtree_dir, mode, self.target_genome)
     self.tmr = TMRJobTree(self, tmr_jobtree)
     # align augustus
     align_jobtree = os.path.join(self.jobtree_dir, 'align' + mode, self.target_genome)
     self.align = AlignAugustusJobTree(self, align_jobtree)
     # comparative Annotator
     aug_comp_ann_jobtree = os.path.join(self.jobtree_dir, 'augustusComparativeAnnotator', self.target_genome)
     self.comp_ann = CompAnnJobTree(self, aug_comp_ann_jobtree, mode='augustus')
     # consensus gene set
     self.gene_set_dir = os.path.join(self.output_dir, '{}_consensus_gene_set'.format(mode), target_genome)
     self.geneset_gps = frozendict((x, os.path.join(self.gene_set_dir, x + '.gp')) for x in self.biotypes)
     self.combined_gp = os.path.join(self.gene_set_dir, 'combined.gp')
     self.combined_gtf = os.path.join(self.gene_set_dir, 'combined.gtf')
     self.geneset_gtfs = frozendict((x, os.path.join(self.gene_set_dir, x + '.gtf')) for x in self.biotypes)
     self.metrics_dir = metrics_dir
     self.pickled_metrics = os.path.join(self.metrics_dir, '{}.pickle'.format(target_genome))
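The construction above leans on frozendict to make the per-genome configuration lookup immutable once the pipeline is wired up. A minimal sketch of the pattern, with a hypothetical DummyCfg standing in for the real config classes:

from frozendict import frozendict

class DummyCfg(object):
    def __init__(self, genome):
        self.genome = genome

target_genomes = frozenset(['mm10', 'rn6'])
cfgs = frozendict((g, DummyCfg(g)) for g in target_genomes)
assert cfgs['mm10'].genome == 'mm10'
# cfgs['mm10'] = DummyCfg('hg38') would raise TypeError: the mapping is read-only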
Example #3
    def __init__(self, _head, _bindings={}, _source=None, _id=None, **kwargs):
        self._head = _head
        assert isinstance(_head, str)  # Python 3: no separate unicode type
        self._bindings = frozendict(_bindings, **kwargs)
        for v in self._bindings.values():
            assert(isinstance(v, Term))

        if _source is None:
            self._source = construction(self._head, self._bindings)
        else:
            self._source = _source

        if _id is None:
            self._id = hash((
                self._head, 
                frozendict({k: v.id for k, v in self._bindings.items()})
            ))
        else:
            self._id = _id

        #TODO fix hash and id collisions (right now we just count on not running into any)
        #this is non-trivial, since something might collide with a DB entry
        #but it's also quite unlikely to cause trouble
        self._hash = hash((
            self._head,
            self._bindings,
            self._source
        ))
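Both `_id` and `_hash` above work only because frozendict is hashable, unlike a plain dict. A quick illustration (assuming the PyPI frozendict package):

from frozendict import frozendict

a = frozendict({'x': 1, 'y': 2})
b = frozendict({'y': 2, 'x': 1})
assert a == b and hash(a) == hash(b)  # equal contents hash equally, regardless of insertion order
# hash({'x': 1}) would raise TypeError: mutable dicts are unhashable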
Example #4
 def freeze(cls, other):
     """
     A frozen copy of a lex class
     """
     return LexClass(frozendict(other.word_to_subclass.items()),
                     frozendict((k, frozenset(v)) for k, v in
                                other.subclass_to_words.items()))
Example #5
def _google_maps_leg_costs(mode, places):
    """
    Looks up distances and durations on Google Maps.
    :param mode: A Google Maps mode, e.g. 'driving'.
    :param places: An iterable of Places.
    :return: A dict mapping each of 'duration' and 'distance' to
     a frozendict mapping Place pairs to relevant values.
    """
    for waypoint in places:
        assert isinstance(waypoint, Place)
    distance = dict()
    duration = dict()
    # Call Google Maps API
    response = _client_().distance_matrix(origins=[p.address for p in places],
                                          destinations=[p.address for p in places],
                                          mode=mode,
                                          units='metric')
    # Verify and parse response
    assert response['status'] == 'OK'
    rows = response['rows']
    assert len(rows) == len(places)
    # Populate the dicts distance and duration
    for row, origin in zip(rows, places):
        row_elements = row['elements']  # There's also data about exact addresses used
        assert len(row_elements) == len(places)
        for element, destination in zip(row_elements, places):
            assert element['status'] == 'OK'
            duration[(origin, destination)] = element['duration']['value']
            distance[(origin, destination)] = element['distance']['value']
    # Construct and return the dict
    return {'distance': frozendict(distance), 'duration': frozendict(duration)}
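Because both tables are frozen, callers can cache or share the result without defensive copies. A hedged sketch of consuming the return value, with placeholder names instead of real Place objects:

costs = {'distance': frozendict({('home', 'work'): 12000}),   # metres
         'duration': frozendict({('home', 'work'): 1500})}    # seconds
leg_seconds = costs['duration'][('home', 'work')]  # keyed by (origin, destination) pairs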
Example #6
    def __init__(self, callback,
                 args=None, kwargs=None, details_filter=None,
                 weak=False):
        """Initialize members

        :param callback: callback function
        :param details_filter: a callback that will be called before the
                               actual callback that can be used to discard
                               the event (thus avoiding the invocation of
                               the actual callback)
        :param args: non-keyworded arguments
        :type args: list/iterable/tuple
        :param kwargs: key-value pair arguments
        :type kwargs: dictionary
        :param weak: whether the callback provided is referenced via a
                     weak reference or a strong reference
        :type weak: bool
        """
        self._uuid = uuidutils.generate_uuid()
        self._callback = callback
        self._details_filter = details_filter
        self._weak = weak
        if not args:
            self._args = ()
        else:
            if not isinstance(args, tuple):
                self._args = tuple(args)
            else:
                self._args = args
        if not kwargs:
            self._kwargs = frozendict()
        else:
            self._kwargs = frozendict(kwargs)
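Freezing kwargs (and converting args to a tuple) shields the stored callback arguments from later mutation by the caller. For example:

shared = {'retries': 3}
frozen_kwargs = frozendict(shared)
shared['retries'] = 99                 # later mutation of the caller's dict...
assert frozen_kwargs['retries'] == 3   # ...does not leak into the stored copy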
Example #7
 def it_encodes_frozendict(self):
     test_dict = frozendict({
         'pgdir': 'playground',
         'services': ('default',),
         'aliases': frozendict({
             'default': ('')
         }),
     })
     result = JSONEncoder(sort_keys=True, indent=4).encode(test_dict)
     assert norm_trailing_whitespace_json(result) == '''\
Example #8
 def to_cache_value(self):
     return {
         'schema_id': self.schema_id,
         'schema_json': frozendict(self.schema_json),
         'topic_name': self.topic.name,
         'base_schema_id': self.base_schema_id,
         'status': self.status,
         'primary_keys': self.primary_keys,
         'note': frozendict(self.note.to_cache_value()) if self.note is not None else None,
         'created_at': self.created_at,
         'updated_at': self.updated_at
     }
Example #9
 def __init__(self, cfg, target_genome):
     # input arguments
     self.query_cfg = cfg.query_cfg
     self.mode = 'transMap'
     self.args = cfg.args
     self.hal = self.args.hal
     self.ref_fasta = cfg.query_cfg.ref_fasta
     self.ref_two_bit = cfg.query_cfg.ref_two_bit
     self.ref_sizes = cfg.query_cfg.ref_sizes
     self.ref_psl = cfg.query_cfg.psl
     self.annotation_gp = cfg.gene_set.genePred
     self.transcript_fasta = cfg.query_cfg.transcript_fasta
     self.flat_transcript_fasta = self.transcript_fasta + '.flat'
     self.target_genome = target_genome
     self.query_genome = cfg.gene_set.sourceGenome
     self.attrs_tsv = cfg.gene_set.attributesTsv
     self.gene_set_name = cfg.gene_set.geneSet
     self.db = cfg.db
     self.jobtree_dir = cfg.jobtree_dir
     self.output_dir = cfg.output_dir
     self.work_dir = cfg.work_dir
     self.metrics_dir = cfg.metrics_dir
     self.biotypes = cfg.biotypes
     # genome files
     self.genome_work_dir = os.path.join(cfg.work_dir, 'genome_files')
     self.genome_fasta = os.path.join(self.genome_work_dir, target_genome + '.fa')
     self.genome_two_bit = os.path.join(self.genome_work_dir, target_genome + '.2bit')
     self.chrom_sizes = os.path.join(self.genome_work_dir, target_genome + '.chrom.sizes')
     self.flat_fasta = self.genome_fasta + '.flat'
     # to make QueryCfg and TargetQueryCfg compatible for production of genome files
     self.genome = self.target_genome
     # chaining config (passed to jobTree)
     self.chaining = ChainingCfg(self)
     # transMap
     self.tm_work_dir = os.path.join(cfg.work_dir, 'transMap')
     self.psl = os.path.join(self.tm_work_dir, target_genome + '.psl')
     self.gp = os.path.join(self.tm_work_dir, target_genome + '.gp')
     self.bed = os.path.join(self.tm_work_dir, target_genome + '.bed')
     # comparativeAnnotator
     comp_ann_jobtree = os.path.join(self.jobtree_dir, 'comparativeAnnotator', self.target_genome)
     self.comp_ann = CompAnnJobTree(self, comp_ann_jobtree, mode=self.mode)
     # final gene set
     self.gene_set_dir = os.path.join(self.output_dir, 'transMap_gene_set', target_genome)
     self.geneset_gps = frozendict((x, os.path.join(self.gene_set_dir, x + '.gp')) for x in self.biotypes)
     self.combined_gp = os.path.join(self.gene_set_dir, 'combined.gp')
     self.combined_gtf = os.path.join(self.gene_set_dir, 'combined.gtf')
     self.geneset_gtfs = frozendict((x, os.path.join(self.gene_set_dir, x + '.gtf')) for x in self.biotypes)
     self.pickled_metrics = os.path.join(self.metrics_dir, '{}.pickle'.format(target_genome))
     self.filter_chroms = self.args.filterChroms
Example #10
def test_get_context_from_cmdln(t_config):
    path = os.path.join(os.path.dirname(__file__), "data", "good.json")
    c = deepcopy(dict(DEFAULT_CONFIG))
    with open(path) as fh:
        c.update(json.load(fh))
    expected_creds = frozendict(c['credentials'])
    del c['credentials']
    expected_config = frozendict(config.apply_product_config(c))

    def noop(*args, **kwargs):
        pass

    context, credentials = config.get_context_from_cmdln([path])
    assert credentials == expected_creds
    assert context.config == expected_config
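frozendict equality is by contents, so the assertions above would also pass against plain dicts holding the same items:

from frozendict import frozendict

assert frozendict({'a': 1}) == {'a': 1}  # contents, not type, drive equality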
Example #11
def discrete_finite_max(pmf, leave_unoptimized=None):
    neg_log_p = pmf.Param['NegLogP']
    if leave_unoptimized:
        comparison_bases = {}
        conditioned_and_unoptimized_vars = set(pmf.Cond) | set(leave_unoptimized)
        for var_names_and_values___frozen_dict in neg_log_p:
            comparison_basis = {}
            for var in (set(var_names_and_values___frozen_dict) & conditioned_and_unoptimized_vars):
                comparison_basis[var] = var_names_and_values___frozen_dict[var]
            comparison_bases[var_names_and_values___frozen_dict] = frozendict(comparison_basis)
    else:
        comparison_bases = pmf.CondInstances
    neg_log_mins = {}
    for var_names_and_values___frozen_dict, func_value in neg_log_p.items():
        comparison_basis = comparison_bases[var_names_and_values___frozen_dict]
        if comparison_basis in neg_log_mins:
            neg_log_mins[comparison_basis] = min(neg_log_mins[comparison_basis], func_value)
        else:
            neg_log_mins[comparison_basis] = func_value
    optims = {}
    for var_names_and_values___frozen_dict, func_value in neg_log_p.items():
        if func_value <= neg_log_mins[comparison_bases[var_names_and_values___frozen_dict]]:
            optims[var_names_and_values___frozen_dict] = func_value
    return DiscreteFinitePMF(var_names_and_syms=pmf.Vars.copy(), p_or_neg_log_p=optims, p=False,
                             cond=pmf.Cond.copy(), scope=pmf.Scope.copy())
Example #12
 def __init__(self, cfg, jobtree_dir):
     args = cfg.args
     self.__dict__.update(vars(args.jobTreeOptions))
     self.defaultMemory = 8 * 1024 ** 3
     self.jobTree = jobtree_dir
     tm_2_hints_script = 'submodules/comparativeAnnotator/augustus/transMap2hints.pl'  # TODO: don't hardcode
     assert os.path.exists(tm_2_hints_script)
     tm_2_hints_params = ("--ep_cutoff=0 --ep_margin=12 --min_intron_len=40 --start_stop_radius=5 --tss_tts_radius=5"
                          " --utrend_cutoff=6 --in=/dev/stdin --out=/dev/stdout")
     self.tm_2_hints_cmd = " ".join([tm_2_hints_script, tm_2_hints_params])
     # can we run TMR or just TM?
     self.cfgs = {1: "submodules/comparativeAnnotator/augustus/extrinsic.ETM1.cfg"}
     if args.augustusHints is not None:
         self.cfgs[2] = "submodules/comparativeAnnotator/augustus/extrinsic.ETM2.cfg"
     self.cfgs = frozendict(self.cfgs)
     assert all(os.path.exists(x) for x in self.cfgs.values())
     self.augustus_bin = 'submodules/augustus/bin/augustus'
     assert os.path.exists(self.augustus_bin)
     self.padding = 20000
     self.max_gene_size = 3000000
     self.hints_db = args.augustusHints
     self.out_gtf = cfg.augustus_gtf
     self.genome = cfg.target_genome
     self.fasta = cfg.fasta
     self.chrom_sizes = cfg.chrom_sizes
     self.input_gp = cfg.vector_gp
Example #13
def _unwrap_object(obj, nested=False):
    obj_type = obj['_type']
    value = obj.get('value', None)

    if obj_type == 'none':
        return None

    if obj_type in ('bool', 'str', 'int', 'float'):
        return locate(obj_type)(value)

    if obj_type == 'decimal':
        return Decimal(value)

    if obj_type == 'datetime':
        return datetime.datetime.utcfromtimestamp(value)

    if obj_type in ('list', 'dict'):
        return locate(obj_type)(unwraps(value)) if nested else value

    if obj_type in ('set', 'frozenset', 'tuple'):
        if nested:
            value = unwraps(value)
        return locate(obj_type)(value)

    if obj_type == 'frozendict':
        if nested:
            value = unwraps(value)
        return frozendict(value)

    raise ValueError(repr(obj) + ' cannot be decoded.')
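A few worked inputs in the wrapped format the decoder expects (payloads here are illustrative):

assert _unwrap_object({'_type': 'none'}) is None
assert _unwrap_object({'_type': 'int', 'value': 7}) == 7
assert _unwrap_object({'_type': 'frozendict', 'value': {'a': 1}}) == frozendict({'a': 1})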
Example #14
def _to_hashable_value(value):
    """Return a frozen, hashable copy of "value".

    mixed value - The pseudo-hashable value, as defined in the comments
        for cached_generator.
    return object - The frozen, hashable copy.
    """
    if isinstance(value, (list, tuple)):
        return tuple(_to_hashable_value(element) for element in value)
    elif isinstance(value, (frozenset, set)):
        return frozenset(_to_hashable_value(element) for element in value)
    elif (isinstance(
            value,
            (dict, frozendict.frozendict, frozendict.FrozenOrderedDict))):
        if (isinstance(
                value,
                (collections.OrderedDict, frozendict.FrozenOrderedDict))):
            hashable_value = collections.OrderedDict()
        else:
            hashable_value = {}
        for key, sub_value in value.items():
            hashable_value[key] = _to_hashable_value(sub_value)
        # ordered inputs stay ordered; plain dicts become plain frozendicts
        if isinstance(
                value,
                (collections.OrderedDict, frozendict.FrozenOrderedDict)):
            return frozendict.FrozenOrderedDict(hashable_value)
        else:
            return frozendict.frozendict(hashable_value)
    else:
        return value
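With the ordered/unordered branches fixed above, nested containers freeze recursively and the result can serve as a cache key:

nested = {'a': [1, {2, 3}], 'b': {'c': 4}}
frozen = _to_hashable_value(nested)
cache = {frozen: 'computed-result'}  # using the raw dict here would raise TypeError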
Example #15
 def op(self, op=mul, other=None, r=False, **kwargs):
     math_dict = MathDict()
     if hasattr(other, 'keys'):
         for item_0, item_1 in product(self.items(), other.items()):
             vars_and_values_0___frozen_dict, func_value_0 = item_0
             vars_and_values_1___frozen_dict, func_value_1 = item_1
             same_vars_same_values = True
             for var in (set(vars_and_values_0___frozen_dict) & set(vars_and_values_1___frozen_dict)):
                 if vars_and_values_0___frozen_dict[var] != vars_and_values_1___frozen_dict[var]:
                     same_vars_same_values = False
                     break
             if same_vars_same_values:
                 if r:
                     value = op(func_value_1, func_value_0, **kwargs)
                 else:
                     value = op(func_value_0, func_value_1, **kwargs)
                 math_dict[frozendict(set(vars_and_values_0___frozen_dict.items()) |
                                      set(vars_and_values_1___frozen_dict.items()))] = value
     elif other is None:
         for k, v in self.items():
             math_dict[k] = op(v, **kwargs)
     elif r:
         for k, v in self.items():
             math_dict[k] = op(other, v, **kwargs)
     else:
         for k, v in self.items():
             math_dict[k] = op(v, other, **kwargs)
     return math_dict
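A sketch of joining two tables on their shared variables with op(), assuming MathDict can be built from a plain dict like its dict base:

from operator import mul

f = MathDict({frozendict({'x': 0}): 0.5, frozendict({'x': 1}): 0.5})
g = MathDict({frozendict({'x': 0, 'y': 0}): 0.2})
h = f.op(mul, g)
# only the x=0 rows agree on their shared variable, so h has a single entry:
# {frozendict({'x': 0, 'y': 0}): 0.1}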
Example #16
 def genReport(self, data):
     """deduplicate gathered case data"""
     dupecount = 0
     output = dict()
     for change in data["records"]:
         for line in change["FeedTrackedChanges"]["records"]:
             if line is not None:
                 if line["NewValue"] in (
                         "Ready For Close",
                         "Closed",
                         "Cancelled",
                         "Closed as Duplicate"):
                     caseid = nestedGet(["Parent", "CaseNumber"], change)
                     changedate = dateparser.parse(change["CreatedDate"])
                     # need to account for >1 escalation per case
                     if caseid in output:
                         # chronological order - latest gets it
                         if output[caseid]["Date"] > changedate:
                             dupecount += 1
                             continue
                     if nestedGet(["Parent", "Cancel_Effective_Date__c"],
                                  change) is not None:
                         teardown = True
                     else:
                         teardown = False
                     output[caseid] = frozendict(
                         Name=nestedGet(["CreatedBy", "Name"], change),
                         Case=caseid,
                         Status=line["NewValue"],
                         Teardown=teardown,
                         Date=changedate)
     print("Found and removed", dupecount, "cases handled more than once.")
     print("Credit for duplicates given to latest resolver.")
     return output
Example #17
def compile_dags(rules, forest):
  trees = split_dags(forest)

  prev_schedules = defaultdict(list)
  prev_schedules[frozendict()] = []
  for component in trees:
    schedules = {}
    for out_resources in all_out_resources(component, rules):
      best = [None, None]
      def update_best(sched):
        cost = sum(match.rule.cost for match in sched)
        if not best[0] or best[1] > cost:
          best[0] = (sched, externs)
          best[1] = cost

      for externs in prev_schedules:
        matches = {}
        for expr in component:
          matches.update(find_matches(rules, expr, externs))

        sched = compile_exprs(matches, component, out_resources, frozenset(), set(), {}, externs)
        if sched is not None:
          update_best(sched)

      if not best[0]:
        return None
      sched, externs = best[0]
      schedules[out_resources] = prev_schedules[externs] + sched

    prev_schedules = schedules

  best_externs = min(prev_schedules, key = lambda r: sum(match.rule.cost for match in prev_schedules[r]))
  return prev_schedules[best_externs]
Example #18
def _decode_state_dict(input):
    """Decodes a state dict encoded using `_encode_state_dict` above
    """
    if input is None:
        return None

    return frozendict({(etype, state_key,): v for etype, state_key, v in input})
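For context, the implied encoder flattens each ((event_type, state_key) -> event_id) entry into a triple; a minimal round-trip sketch (the real _encode_state_dict lives alongside this function):

def _encode_state_dict(state_dict):
    if state_dict is None:
        return None
    return [(etype, state_key, v) for (etype, state_key), v in state_dict.items()]

sd = frozendict({('m.room.member', '@alice:example.org'): '$event_a'})
assert _decode_state_dict(_encode_state_dict(sd)) == sd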
Example #19
 def optim(self, max_or_min=max, leave_unoptimized=None):
     if max_or_min is max:
         comp = ge
     else:
         comp = le
     if leave_unoptimized:
         comparison_bases = {}
         conditioned_and_unoptimized_vars = set(self.Cond) | set(leave_unoptimized)
         for vars_and_values___frozen_dict in self.Mapping:
             comparison_basis = {}
             for var in (set(vars_and_values___frozen_dict) & conditioned_and_unoptimized_vars):
                 comparison_basis[var] = vars_and_values___frozen_dict[var]
             comparison_bases[vars_and_values___frozen_dict] = frozendict(comparison_basis)
     else:
         comparison_bases = self.CondInstances
     optim_values = {}
     for vars_and_values___frozen_dict, func_value in self.Mapping.items():
         comparison_basis = comparison_bases[vars_and_values___frozen_dict]
         if comparison_basis in optim_values:
             optim_values[comparison_basis] = max_or_min(optim_values[comparison_basis], func_value)
         else:
             optim_values[comparison_basis] = func_value
     optims = {}
     for vars_and_values___frozen_dict, func_value in self.Mapping.items():
         if comp(func_value, optim_values[comparison_bases[vars_and_values___frozen_dict]]):
             optims[vars_and_values___frozen_dict] = func_value
     return MathFunc(self.Vars.copy(), optims, cond=self.Cond.copy(), scope=self.Scope.copy())
Example #20
    def __init__(self, var_names_and_syms={}, mapping={}, param={}, cond={}, scope={}, compile=False):
        if not hasattr(self, 'Vars'):
            self.Vars = var_names_and_syms   # {var_name: var_symbol} dict
        if not hasattr(self, 'Param'):
            self.Param = param
        self.Cond = cond   # {var_name: var_value} dict, var_value can be None if conditioning is generic
        self.Scope = dict.fromkeys(set(var_names_and_syms) - set(cond))
        vars_with_fixed_scope_values = {}   # to keep track of scope variables with fixed values (i.e. points in space)
        for var, value in scope.items():
            if (var in self.Scope) and (value is not None):
                self.Scope[var] = value    # "points-in-space"
                vars_with_fixed_scope_values[var] = value
        s0 = set(vars_with_fixed_scope_values.items())
        if hasattr(mapping, 'keys'):
            self.Mapping = MathDict()
            self.CondInstances = {}
            for vars_and_values___frozen_dict, func_value in mapping.items():
                if set(vars_and_values___frozen_dict.items()) >= s0:
                    self.Mapping[vars_and_values___frozen_dict] = func_value
                    condition_instance = {}
                    for var in (set(vars_and_values___frozen_dict) & set(cond)):
                        condition_instance[var] = vars_and_values___frozen_dict[var]
                    self.CondInstances[vars_and_values___frozen_dict] = frozendict(condition_instance)
        else:
            self.Mapping = mapping

        if not hasattr(self, 'CompyledFunc'):
            self.CompyledFunc = None
        if compile:
            self.compile()
Example #21
 def marg(self, *marginalized_vars, **kwargs):
     itself = lambda x: x
     if 'transf' in kwargs:
         transf_func = kwargs['transf']
     else:
         transf_func = itself
     if 'reduce_func' in kwargs:
         reduce_func = kwargs['reduce_func']
     else:
         reduce_func = add
     if 'rev_transf' in kwargs:
         rev_transf_func = kwargs['rev_transf']
     else:
         rev_transf_func = itself
     var_names_and_symbols___dict = self.Vars.copy()   # just to be careful
     scope = self.Scope.copy()   # just to be careful
     mapping = self.Mapping.copy()   # just to be careful
     for marginalized_var in marginalized_vars:
         del var_names_and_symbols___dict[marginalized_var]
         del scope[marginalized_var]
         d = {}
         for vars_and_values___frozen_dict, func_value in mapping.items():
             marginalized_var_value = vars_and_values___frozen_dict[marginalized_var]
             fd = frozendict(set(vars_and_values___frozen_dict.items()) -
                             {(marginalized_var, marginalized_var_value)})
             if fd in d:
                 d[fd] = reduce_func(d[fd], transf_func(func_value))
             else:
                 d[fd] = transf_func(func_value)
         mapping = {k: rev_transf_func(v) for k, v in d.items()}
     return MathFunc(var_names_and_symbols___dict, mapping, cond=self.Cond.copy(), scope=scope)
Example #22
def shift_time_subscripts(obj, t, *matrix_symbols_to_shift):
    if isinstance(obj, MathDict):
        return MathDict({shift_time_subscripts(key, t): shift_time_subscripts(value, t)
                           for key, value in obj.items()})
    elif isinstance(obj, frozendict):
        return frozendict({shift_time_subscripts(key, t): shift_time_subscripts(value, t)
                           for key, value in obj.items()})
    elif isinstance(obj, tuple):
        if len(obj) == 2 and not(isinstance(obj[0], (int, float))) and isinstance(obj[1], int):
            return shift_time_subscripts(obj[0], t), obj[1] + t
        else:
            return tuple(shift_time_subscripts(item, t) for item in obj)
    elif isinstance(obj, list):
        return [shift_time_subscripts(item, t) for item in obj]
    elif isinstance(obj, set):
        return {shift_time_subscripts(item, t) for item in obj}
    elif isinstance(obj, dict):
        return {shift_time_subscripts(key, t): shift_time_subscripts(value, t) for key, value in obj.items()}
    elif isinstance(obj, MatrixSymbol):
        args = obj.args
        if isinstance(args[0], tuple):
            return MatrixSymbol(shift_time_subscripts(args[0], t), args[1], args[2])
        else:
            return deepcopy(obj)
    elif is_non_atomic_sympy_expr(obj):
        return obj.xreplace({matrix_symbol: shift_time_subscripts(matrix_symbol, t)
                             for matrix_symbol in matrix_symbols_to_shift})
    else:
        return deepcopy(obj)
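The frozendict branch rewrites time-indexed keys recursively; for a (variable, time) key the tuple branch bumps the subscript. Assuming the module's helpers are in scope:

fd = frozendict({('x', 0): 1.0, ('x', 1): 2.0})
assert shift_time_subscripts(fd, 2) == frozendict({('x', 2): 1.0, ('x', 3): 2.0})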
Example #23
 def backward_factor(self, t___list, observations___dict={}, max_t=0):
     T = max(max(observations___dict.keys()), max_t)
     if isinstance(t___list, int):
         t = t___list
         if t == T:
             state_var_symbol = {(self.state_var, t): self.observation_pdf(t).Vars[(self.state_var, t)]}
             if self.observation_pdf_template.is_discrete_finite():
                 var_values___frozen_dicts = self.observation_pdf(t).Param['NegLogP'].keys()
                 state_var_domain =\
                     set(frozendict({(self.state_var, t): var_values___frozen_dict[(self.state_var, t)]})
                         for var_values___frozen_dict in var_values___frozen_dicts)
                 return OnePMF(var_names_and_syms=state_var_symbol, var_names_and_values=state_var_domain,
                               cond={(self.state_var, t): None})
             else:
                 return OnePDF(cond={(self.observation_var, t): None})
         else:
             b = self.transition_pdf(t + 1) * self.observation_pdf(t + 1)
             if (t + 1) in observations___dict:
                 b = b.at({(self.observation_var, t + 1): observations___dict[t + 1]})
             b = (b * self.backward_factor(t + 1, observations___dict))\
                 .marg((self.state_var, t + 1))
             return b
     elif isinstance(t___list, (list, range, tuple)):
         d = {}
         t = T
         state_var_symbol = {(self.state_var, t): self.observation_pdf(t).Vars[(self.state_var, t)]}
         if self.observation_pdf_template.is_discrete_finite():
             var_values___frozen_dicts = self.observation_pdf(t).Param['NegLogP'].keys()
             state_var_domain =\
                 set(frozendict({(self.state_var, t): var_values___frozen_dict[(self.state_var, t)]})
                     for var_values___frozen_dict in var_values___frozen_dicts)
             b = {t: OnePMF(var_names_and_syms=state_var_symbol, var_names_and_values=state_var_domain,
                            cond={(self.state_var, t): None})}
         else:
             b = {t: OnePDF(cond={(self.observation_var, t): None})}
         if t in t___list:
             d[t] = b[t]
         for t in reversed(range(min(t___list), T)):
             b[t] = self.transition_pdf(t + 1) * self.observation_pdf(t + 1)
             if (t + 1) in observations___dict:
                 b[t] = b[t].at({(self.observation_var, t + 1): observations___dict[t + 1]})
             b[t] = (b[t] * b[t + 1])\
                 .marg((self.state_var, t + 1))
             if t in t___list:
                 d[t] = b[t]
         return d
Example #24
def make_observations_joinable(daily_observations):
  return (
    frozendict({
      "Date": daily_observations["Date"],
      "WBAN": daily_observations["WBAN"],
    }),
    daily_observations
  )
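The frozen (Date, WBAN) pair is the join key; being hashable, it can drive a group-by or join stage:

obs = {'Date': '2016-01-01', 'WBAN': '94846', 'Tmax': 31}
key, record = make_observations_joinable(obs)
buckets = {}
buckets.setdefault(key, []).append(record)  # a frozendict works as a dict key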
Example #25
 def to_hashable(self, x):
     if type(x) == dict:
         return frozendict(x)
     elif type(x) == list:
         #return set((self.to_hashable(i) for i in x))
         return frozenset(x)
     else:
         return x
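Note that the list branch returns a frozenset, so ordering and duplicates are deliberately discarded (the commented-out line shows the rejected recursive alternative). Given an instance obj of the enclosing class:

assert obj.to_hashable({'a': 1}) == frozendict({'a': 1})
assert obj.to_hashable([1, 2, 2, 3]) == frozenset({1, 2, 3})  # order and duplicates dropped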
Example #26
def all_out_resources(exprs, rules):
  resources = {rule.resource for rule in rules} - {'m'}
  memory_cells = islice(repeat('m'), len(exprs))
  for i in range(len(exprs) + 1):
    for resource_list in permutations(resources, i):
      all_resource_lists = permutations(chain(resource_list, repeat('m', len(exprs) - i)))
      for all_resource_list in all_resource_lists:
        yield frozendict({expr: resource for (expr, resource) in zip(exprs, all_resource_list)})
Example #27
def make(head, bindings=None, **kwargs):
    if bindings is None:
        bindings = {}
    bindings = frozendict(bindings, **kwargs)
    return quoted_term(
        head=quote(head),
        #can't use quote because the values of bindings are already quotations
        bindings=term.Term.from_dict_of_str(bindings)
    )
Example #28
 def test_backrelate(self):
     class A(DataObject):
         pass
     
     @backrelate({'b':(A,True)})
     class B(DataObject):
         pass
     
     class C(B):
         pass
     
     class D(A):
         pass
     
     eq_(B.relationships, frozendict({}))
     eq_(A.relationships, frozendict({'b':(B,True)}))
     eq_(C.relationships, frozendict({}))
     eq_(D.relationships, frozendict({'b':(B,True)}))
Example #29
 def bindings(self):
     return frozendict(
          head=quote(self.represents.head),
          bindings=quote(self.represents.bindings),
          _modifier=properties.both(
             has_id(quote(self.represents.id)),
             has_source(quote(self.represents.source))
         )
     )
Example #30
    def __init__(self, state, state_group, prev_group=None, delta_ids=None):
        self.state = frozendict(state)
        self.state_group = state_group

        self.prev_group = prev_group
        self.delta_ids = frozendict(delta_ids) if delta_ids is not None else None

        # The `state_id` is a unique ID we generate that can be used as ID for
        # this collection of state. Usually this would be the same as the
        # state group, but on worker instances we can't generate a new state
        # group each time we resolve state, so we generate a separate one that
        # isn't persisted and is used solely for caches.
        # `state_id` is either a state_group (and so an int) or a string. This
        # ensures we don't accidentally persist a state_id as a state_group
        if state_group:
            self.state_id = state_group
        else:
            self.state_id = _gen_state_id()
Example #31
    par_dict={
        tau_leaf: 0.0027,
        tau_wood: 5e-05,
        tau_root: 0.002,
        tau_excessC: 0.05,
        tau_litter: 0.029,
        tau_cwd: 0.001,
        tau_soil: 1e-04,
        Q_h: 1.4,
        m_resp_frac: 0.5,
        DON_leach_prop: 0.0015,
        leach_rate: 0.00001,  # day^{-1}
        nitr_rate: 0.0001,  # day^{-1}
    },
    func_dict=frozendict({})
    # state_var_units= gC*m^{-2}
    # time_unit=day
)
in_fl_c = carbon_in_fluxes_by_symbol_1(c_in_t, xc)
internal_fl_c = carbon_internal_fluxes_by_symbol_1(A_c, xc)
out_fl_c = carbon_out_fluxes_by_symbol_1(A_c, xc)

in_fl_n = {N_labile: U_Nfix, N_NH4: Ndep_NH4, N_NO3: Ndep_NO3}
out_fl_n = {N_soil: L_DON, N_NH4: U_NH4, N_NO3: U_NO3 + L_NO3}
internal_fl_n = {
    (N_bud, N_leaf): a_budN2leaf,
    (N_bud, N_labile): a_budN2Ramain,
    (N_labile, N_bud): a_budN,
    (N_leaf, N_litter): t_leafN,
    (N_leaf, N_labile): t_retransN,
Example #32
def freeze(item):
    item['fields'] = frozendict(item['fields'])
    return frozendict(item)
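Note that freeze() mutates the passed-in item (its 'fields' value is replaced in place) before returning the frozen copy:

item = {'id': 7, 'fields': {'title': 'hello'}}
frozen = freeze(item)
assert frozen == frozendict({'id': 7, 'fields': frozendict({'title': 'hello'})})
hash(frozen)  # works, since both levels are now immutable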
Example #33
class SerializerForm(forms.Form):
    """
    Sync api serializer form errors to django form errors.
    """
    task_id = None
    action = None
    _serializer = None
    _api_method = frozendict({
        'create': 'POST',
        'update': 'PUT',
        'delete': 'DELETE',
        'get': 'GET'
    })
    _api_call = NotImplemented  # Set in descendant class
    _api_response = None
    _exclude_fields = frozenset()
    _custom_fields = frozendict()
    _custom_field_opts = frozendict()
    _custom_widgets = frozendict()
    _custom_widget_attrs = frozendict()
    _ignore_empty_fields = frozenset()
    _field_text_class = 'narrow input-transparent'
    _field_checkbox_class = 'normal-check'
    _field_select_class = 'narrow input-select2'
    _field_readonly_class = 'uneditable-input'

    def __init__(self, request, obj, *args, **kwargs):
        self._request = request
        self._obj = obj
        self._read_only_fields = set()
        init = kwargs.pop('init', False)

        # Initial data is only useful for updates, or when enabled manually via the init param
        if (obj and request.POST.get('action', None) == 'update') or init:
            kwargs['initial'] = self._initial_data(request, obj)

        # Parent constructor
        super(SerializerForm, self).__init__(*args, **kwargs)

        # Copy serializer fields
        if self._serializer:
            for name, field in iteritems(self._serializer.base_fields):
                field_not_defined = name not in self.fields
                field_not_excluded = name not in self._exclude_fields
                if field_not_defined and field_not_excluded:
                    self.fields[name] = self._serializer_field(name, field)

        # Set fancy placeholder
        for key, field in self.fields.items():
            field.widget.attrs['placeholder'] = self._get_placeholder(
                field, key)

    def _get_placeholder(self, field, field_name, default=''):
        # Python circular imports
        from gui.utils import tags_to_string

        try:
            value = self.initial[field_name]
        except KeyError:
            value = field.widget.attrs.get('placeholder', default)
        else:
            if isinstance(value, (list, tuple)):
                value = tags_to_string(value)
            if value is None:
                return ''

        return text_type(value).replace('\r\n', ' ').replace('\n', ' ')

    # noinspection PyMethodMayBeStatic
    def _build_field(self, name, serializer_field, form_field_class,
                     **form_field_options):
        """Process converted field information and returns form field.
        Suitable for overriding in descendant classes.
        """
        return form_field_class(**form_field_options)

    def _serializer_field(self, name, field):
        """
        Convert serializer field to django form field.
        """
        opts = {
            'label': field.label,
            'help_text': field.help_text,
            'required': field.required,
        }

        if isinstance(
                field,
            (fields.ChoiceField, fields.IntegerChoiceField, RelatedField)):
            field_class = forms.ChoiceField
            widget_class = forms.Select
            widget_attrs = {'class': self._field_select_class}
            opts['required'] = False
            try:
                opts['choices'] = field.choices
            except AttributeError:
                pass

            try:
                if isinstance(field, fields.IntegerChoiceField) or isinstance(
                        opts['choices'][0][0], int):
                    field_class = forms.TypedChoiceField
                    opts['coerce'] = int
                    opts['empty_value'] = None
            except (KeyError, IndexError, TypeError):
                pass

        elif isinstance(field, fields.BooleanField):
            field_class = forms.BooleanField
            widget_class = forms.CheckboxInput
            widget_attrs = {'class': self._field_checkbox_class}
            opts['required'] = False

        else:
            widget_class = forms.TextInput
            widget_attrs = {'class': self._field_text_class}
            field_class = forms.CharField

            if isinstance(field, fields.IntegerField):
                field_class = forms.IntegerField
                widget_class = NumberInput

                if field.help_text and '(MB)' in field.help_text:
                    widget_class = ByteSizeInput
                    widget_attrs['class'] += ' ' + 'input-mbytes'

            elif isinstance(field, fields.BaseArrayField):
                if isinstance(field, fields.IntegerArrayField):
                    field_class = IntegerArrayField
                else:
                    field_class = ArrayField
                widget_class = ArrayWidget
            elif isinstance(field, fields.BaseDictField):
                field_class = DictField
                widget_class = DictWidget

            if field.read_only:
                widget_attrs['class'] += ' ' + self._field_readonly_class

        if opts['required']:
            widget_attrs['required'] = 'required'

        if field.read_only:
            widget_attrs['disabled'] = 'disabled'
            self._read_only_fields.add(name)

        field_class = self._custom_fields.get(name, field_class)
        widget_class = self._custom_widgets.get(name, widget_class)
        widget_attrs.update(self._custom_widget_attrs.get(name, {}))
        opts.update(self._custom_field_opts.get(name, {}))
        opts['widget'] = widget_class(attrs=widget_attrs)

        return self._build_field(name, field, field_class, **opts)

    @staticmethod
    def _blank(value):
        """Return empty string instead of None"""
        if not value:
            return ''
        return value

    @staticmethod
    def _null(value):
        """Return None instead of empty string"""
        if not value:
            return None
        return value

    def _initial_data(self, request, obj):
        """Data initialized from DB model object"""
        if self._serializer:
            if hasattr(self._serializer,
                       '_model_'):  # InstanceSerializer(request, instance)
                # noinspection PyCallingNonCallable
                return self._serializer(self._request, obj).data
            else:  # class Serializer(instance)
                # noinspection PyCallingNonCallable
                return self._serializer(obj).data
        return {}

    def _input_data(self):
        """Data collected from form"""
        return self.cleaned_data

    def _final_data(self, data=None):
        """Data that are send to API for validation (in create _input_data, in update _has_changed data)"""
        if data is None:
            data = self._input_data()
        return data

    def _has_changed(self):
        """Parse _input_data from form and compare with _initial_data from DB and return data that has changed"""
        ret = {}

        for key, val in self._input_data().items():
            if key in self._read_only_fields:
                continue

            if key in self._ignore_empty_fields and not val:
                logger.debug(
                    'SerializerForm._has_changed [%s]: %s (%s) is empty and will be ignored',
                    key, val, type(val))
                continue

            try:
                initial_val = self.initial[key]
            except KeyError:
                ret[key] = val
                logger.debug(
                    'SerializerForm._has_changed [%s]: %s (%s) is missing in initial data',
                    key, val, type(val))
            else:
                if initial_val != val:
                    ret[key] = val
                    logger.debug(
                        'SerializerForm._has_changed [%s]: %s (%s) != %s (%s)',
                        key, initial_val, type(initial_val), val, type(val))

        return ret

    def _set_api_task_id(self, data):
        """Set task_id from TaskResponse"""
        # noinspection PyBroadException
        try:
            self.task_id = data['task_id']
        except:
            pass

    def _set_custom_api_errors(self, errors):
        pass

    def _add_error(self, field_name, error):
        if field_name not in self._errors:
            self._errors[field_name] = self.error_class()

        if isinstance(error, (list, tuple)):
            self._errors[field_name].extend(error)
        else:
            self._errors[field_name].append(error)

    def _set_api_errors(self, data):
        # errors is a dict error output from API
        if not data or not isinstance(data, dict):
            return None

        errors = data.get('result', data)

        if isinstance(errors, dict):  # Classic serializer error task output
            # Pair API errors to Django form errors
            for field in self.fields:
                if field in errors:
                    self._add_error(field, errors[field])  # should be lazy
                    try:
                        del self.cleaned_data[field]
                    except KeyError:
                        pass

            if 'non_field_errors' in errors:
                self._add_error(NON_FIELD_ERRORS,
                                errors['non_field_errors'])  # should be lazy
            elif 'detail' in errors:
                self._add_error(NON_FIELD_ERRORS,
                                ugettext(errors['detail']))  # should be noop

        else:  # More serious api error
            if isinstance(
                    errors,
                    list):  # Maybe we have errors from multiple serializers
                for err in errors:
                    self._set_api_errors(err)
            elif errors:
                self._add_error(NON_FIELD_ERRORS, errors)

        self._set_custom_api_errors(errors)

    # noinspection PyUnusedLocal
    @classmethod
    def api_call(cls, action, obj, request, args=(), data=()):
        method = cls._api_method[action]
        logger.info('Calling API view %s %s(%s, data=%s) by user %s in DC %s',
                    method, cls._api_call.__name__, args, data, request.user,
                    request.dc)
        return call_api_view(request,
                             method,
                             cls._api_call.__func__,
                             *args,
                             data=dict(data),
                             log_response=True)

    def save(self, action=None, args=()):
        # For security reasons you can limit action from view
        if not action:
            action = self.data.get('action')

        # Save action
        self.action = action

        # For security reasons data must have only cleaned_data for this form
        if action == 'update':
            data = self._final_data(self._has_changed())
            if not data:  # Nothing changed
                return 204
        else:
            data = self._final_data()

        # Save the socket.io session ID. It is used when sending messages about a hostname
        # change, so we don't send a useless message to this user.
        self._request.siosid = self.data.get('siosid', None)

        # Call API function (also updates tasklog)
        res = self.api_call(action,
                            self._obj,
                            self._request,
                            args=args,
                            data=data)
        self._api_response = res.data

        if res.status_code in (200, 201):
            self._set_api_task_id(res.data)
        else:
            self._set_api_errors(res.data)

        return res.status_code

    def set_error(self, key, value):
        """Set custom error"""
        if self._errors is None:
            self._errors = {}

        self._add_error(key, value)
Example #34
    sign_omnija
from signingscript.exceptions import SigningServerError
from signingscript.utils import is_autograph_signing_format

log = logging.getLogger(__name__)

FORMAT_TO_SIGNING_FUNCTION = frozendict({
    # TODO: Remove the next item (in favor of the regex one), once Focus is migrated
    "autograph_focus": sign_jar,
    "autograph_apk_.+": sign_jar,
    "autograph_hash_only_mar384(:\\w+)?": sign_mar384_with_autograph_hash,
    "autograph_stage_mar384(:\\w+)?": sign_mar384_with_autograph_hash,
    "gpg": sign_gpg,
    "autograph_gpg": sign_gpg_with_autograph,
    "jar": sign_jar,
    "focus-jar": sign_jar,
    "macapp": sign_macapp,
    "osslsigncode": sign_signcode,
    "sha2signcode": sign_signcode,
    # sha2signcodestub uses a generic sign_file
    "signcode": sign_signcode,
    "widevine": sign_widevine,
    "autograph_widevine": sign_widevine,
    "autograph_omnija": sign_omnija,
    "default": sign_file,
})
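The regex-style keys above suggest formats are resolved by pattern matching rather than direct indexing. A hedged sketch of such a resolver (hypothetical helper name; signingscript's real lookup may differ):

import re

def _get_signing_function(fmt):
    if fmt in FORMAT_TO_SIGNING_FUNCTION:       # exact key, e.g. "gpg"
        return FORMAT_TO_SIGNING_FUNCTION[fmt]
    for pattern, func in FORMAT_TO_SIGNING_FUNCTION.items():
        if re.fullmatch(pattern, fmt):          # regex key, e.g. "autograph_apk_.+"
            return func
    return FORMAT_TO_SIGNING_FUNCTION['default']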


# task_cert_type {{{1
def task_cert_type(context):
    """Extract task certificate type.
Example #35
    def upload_image(
        self: api_client.Api,
        body: typing.Union[SchemaForRequestBodyMultipartFormData,
                           Unset] = unset,
        path_params: RequestPathParams = frozendict(),
        content_type: str = 'multipart/form-data',
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = False,
    ) -> typing.Union[ApiResponseFor200,
                      api_client.ApiResponseWithoutDeserialization]:
        """
        uploads an image
        :param skip_deserialization: If true then api_response.response will be set but
            api_response.body and api_response.headers will not be deserialized into schema
            class instances
        """
        self._verify_typed_dict_inputs(RequestPathParams, path_params)

        _path_params = {}
        for parameter in (request_path_pet_id, ):
            parameter_data = path_params.get(parameter.name, unset)
            if parameter_data is unset:
                continue
            serialized_data = parameter.serialize(parameter_data)
            _path_params.update(serialized_data)

        _headers = HTTPHeaderDict()
        # TODO add cookie handling
        if accept_content_types:
            for accept_content_type in accept_content_types:
                _headers.add('Accept', accept_content_type)

        _fields = None
        _body = None
        if body is not unset:
            serialized_data = request_body_body.serialize(body, content_type)
            _headers.add('Content-Type', content_type)
            if 'fields' in serialized_data:
                _fields = serialized_data['fields']
            elif 'body' in serialized_data:
                _body = serialized_data['body']
        response = self.api_client.call_api(
            resource_path=_path,
            method=_method,
            path_params=_path_params,
            headers=_headers,
            fields=_fields,
            body=_body,
            auth_settings=_auth,
            stream=stream,
            timeout=timeout,
        )

        if skip_deserialization:
            api_response = api_client.ApiResponseWithoutDeserialization(
                response=response)
        else:
            response_for_status = _status_code_to_response.get(
                str(response.status))
            if response_for_status:
                api_response = response_for_status.deserialize(
                    response, self.api_client.configuration)
            else:
                api_response = api_client.ApiResponseWithoutDeserialization(
                    response=response)

        if not 200 <= response.status <= 299:
            raise exceptions.ApiException(api_response=api_response)

        return api_response
Example #36
DEFAULT_CONFIG = frozendict({
    "taskcluster_root_url": "https://taskcluster.net",
    # Worker identification
    "provisioner_id": "test-dummy-provisioner",
    "worker_group": "test-dummy-workers",
    "worker_type": "dummy-worker-myname",
    "worker_id": os.environ.get("SCRIPTWORKER_WORKER_ID", "dummy-worker-myname1"),

    "credentials": frozendict({
        "clientId": "...",
        "accessToken": "...",
        "certificate": "...",
    }),

    # Worker log settings
    "log_datefmt": "%Y-%m-%dT%H:%M:%S",
    "log_fmt": "%(asctime)s %(levelname)8s - %(message)s",
    "watch_log_file": False,

    # intervals are expressed in seconds
    "task_max_timeout": 60 * 20,
    "reclaim_interval": 300,
    "poll_interval": 10,
    "sign_key_timeout": 60 * 2,

    "reversed_statuses": frozendict({
        -11: STATUSES['intermittent-task'],
        -15: STATUSES['intermittent-task'],
    }),
    # Report this status on max_timeout. `intermittent-task` will rerun the
    # task automatically. `internal-error` or other will require manual
    # intervention.
    "task_max_timeout_status": STATUSES['intermittent-task'],
    "invalid_reclaim_status": STATUSES['intermittent-task'],

    "task_script": ("bash", "-c", "echo foo && sleep 19 && exit 1"),

    "verbose": True,

    # Task settings
    "work_dir": "...",
    "log_dir": "...",
    "artifact_dir": "...",
    "task_log_dir": "...",  # set this to ARTIFACT_DIR/public/logs
    "artifact_upload_timeout": 60 * 20,
    "aiohttp_max_connections": 15,

    # chain of trust settings
    "sign_chain_of_trust": True,
    "verify_chain_of_trust": False,  # TODO True
    "verify_cot_signature": False,
    "cot_job_type": "unknown",  # e.g., signing
    "cot_product": "firefox",
    "cot_version": 3,
    "min_cot_version": 2,
    "max_chain_length": 20,
    # Calls to Github API are limited to 60 an hour. Using an API token allows to raise the limit to
    # 5000 per hour. https://developer.github.com/v3/#rate-limiting
    "github_oauth_token": "",

    # ed25519 settings
    "ed25519_private_key_path": "...",
    "ed25519_public_keys": frozendict({
        "docker-worker": tuple([
            'J+PAKmq3jkS2uCpBk5WU2ycrnTFPwZujJT4OHAxm38I=',
        ]),
        "generic-worker": tuple([
            '6UPrVTyw0EPQV7bCEMXo+5jNR4clbK55JWG74bBJHZQ=',
        ]),
        "scriptworker": tuple([
            'DaEKQ79ZC/X+7O8zwm8iyhwTlgyjRSi/TDd63fh2JG0=',
        ]),
    }),

    "project_configuration_url": "https://hg.mozilla.org/ci/ci-configuration/raw-file/default/projects.yml",
    "pushlog_url": "{repo}/json-pushes?changeset={revision}&tipsonly=1&version=2&full=1",

    "chain_of_trust_hash_algorithm": "sha256",
    "cot_schema_path": os.path.join(os.path.dirname(__file__), "data", "cot_v1_schema.json"),

    # for download url validation.  The regexes need to define a 'filepath'.
    'valid_artifact_rules': (frozendict({
        "schemes": ("https", ),
        "netlocs": ("queue.taskcluster.net", ),
        "path_regexes": (r"^/v1/task/(?P<taskId>[^/]+)(/runs/\\d+)?/artifacts/(?P<filepath>.*)$", ),
    }), ),

    # scriptworker identification
    "scriptworker_worker_pools": (
        "scriptworker-prov-v1/balrogworker-v1",
        "scriptworker-prov-v1/beetmoverworker-v1",
        "scriptworker-prov-v1/pushapk-v1",
        "scriptworker-prov-v1/signing-linux-v1",
        "scriptworker-prov-v1/treescriptworker-v1",
        "scriptworker-k8s/gecko-1-balrog",
        "scriptworker-k8s/gecko-3-balrog",
        "scriptworker-k8s/gecko-1-beetmover",
        "scriptworker-k8s/gecko-3-beetmover",
        "scriptworker-k8s/gecko-1-pushapk",
        "scriptworker-k8s/gecko-3-pushapk",
        "scriptworker-k8s/gecko-1-signing",
        "scriptworker-k8s/gecko-3-signing",
        "scriptworker-k8s/gecko-t-signing",
        "scriptworker-k8s/gecko-1-tree",
        "scriptworker-k8s/gecko-3-tree",
    ),
    "scriptworker_provisioners": (
        "scriptworker-prov-v1",
        "scriptworker-k8s",
    ),

    # valid hash algorithms for chain of trust artifacts
    "valid_hash_algorithms": (
        "sha256",
        "sha512",
    ),

    # decision task cot
    "valid_decision_worker_pools": frozendict({
        "by-cot-product": frozendict({
            "firefox": (
                "aws-provisioner-v1/gecko-1-decision",
                "aws-provisioner-v1/gecko-2-decision",
                "aws-provisioner-v1/gecko-3-decision",
            ),
            "thunderbird": (
                "aws-provisioner-v1/gecko-1-decision",
                "aws-provisioner-v1/gecko-2-decision",
                "aws-provisioner-v1/gecko-3-decision",
            ),
            "mobile": (
                # gecko-focus was for mozilla-mobile releases (bug 1455290) for more details.
                # TODO: Remove it once not used anymore
                "aws-provisioner-v1/gecko-focus",
                "aws-provisioner-v1/mobile-1-decision",
                # We haven't had the need for mobile-2-decision yet
                # https://bugzilla.mozilla.org/show_bug.cgi?id=1512631#c6
                "aws-provisioner-v1/mobile-3-decision",
            ),
            "application-services": (
                "aws-provisioner-v1/app-services-1-decision",
                "aws-provisioner-v1/app-services-3-decision",
            ),
        }),
    }),

    # docker-image cot
    "valid_docker_image_worker_pools": frozendict({
        "by-cot-product": frozendict({
            "firefox": (
                "aws-provisioner-v1/gecko-1-images",
                "aws-provisioner-v1/gecko-2-images",
                "aws-provisioner-v1/gecko-3-images",
            ),
            "thunderbird": (
                "aws-provisioner-v1/gecko-1-images",
                "aws-provisioner-v1/gecko-2-images",
                "aws-provisioner-v1/gecko-3-images",
            ),
            "mobile": (
                "aws-provisioner-v1/mobile-1-images",  # there is no mobile level 2.
                "aws-provisioner-v1/mobile-3-images",
            ),
            "application-services": (
                "aws-provisioner-v1/app-services-1-images",
                "aws-provisioner-v1/app-services-3-images",
            ),
        }),
    }),


    # for trace_back_to_*_tree.  These repos have access to restricted scopes;
    # all other repos are relegated to CI scopes.
    'trusted_vcs_rules': {
        'by-cot-product': frozendict({
            'firefox': (frozendict({
                "schemes": ("https", "ssh", ),
                "netlocs": ("hg.mozilla.org", ),
                "path_regexes": (
                    r"^(?P<path>/mozilla-(central|unified))(/|$)",
                    r"^(?P<path>/integration/(autoland|fx-team|mozilla-inbound))(/|$)",
                    r"^(?P<path>/releases/mozilla-(beta|release|esr\d+))(/|$)",
                    r"^(?P<path>/projects/(birch|jamun|maple|oak))(/|$)",
                ),
            }),),
            # XXX We should also check the mozilla-central tree that is being used.
            'thunderbird': (frozendict({
                "schemes": ("https", "ssh", ),
                "netlocs": ("hg.mozilla.org", ),
                "path_regexes": (
                    r"^(?P<path>/comm-central)(/|$)",
                    r"^(?P<path>/releases/comm-(beta|esr\d+))(/|$)",
                ),
            }),),
            'mobile': (frozendict({
                "schemes": ("https", "ssh", ),
                "netlocs": ("github.com", ),
                "path_regexes": tuple([
                    r"^(?P<path>/mozilla-mobile/(?:android-components|focus-android|reference-browser|fenix|firefox-tv))(/|.git|$)",
                ]),
            }),),
            'application-services': (frozendict({
                "schemes": ("https", "ssh", ),
                "netlocs": ("github.com", ),
                "path_regexes": (
                    r"^(?P<path>/mozilla/application-services)(/|.git|$)",
                ),
            }),),
        }),
    },

    'valid_tasks_for': {
        'by-cot-product': frozendict({
            'firefox': ('hg-push', 'cron', 'action',),
            'thunderbird': ('hg-push', 'cron', 'action',),
            'mobile': (
                'cron',
                # On staging releases, level 1 docker images may be built in the pull-request graph
                'github-pull-request',
                # Similarly, docker images can be built on regular push. This is usually the case
                # for level 3 images
                'github-push',
                'github-release',
            ),
            'application-services': (
                # On staging releases, level 1 docker images may be built in the pull-request graph
                'github-pull-request',
                # Similarly, docker images can be built on regular push. This is usually the case
                # for level 3 images
                'github-release',
            ),
        }),
    },

    'official_github_repos_owner': {
        'by-cot-product': frozendict({
            'firefox': '',
            'thunderbird': '',
            'mobile': 'mozilla-mobile',
            'application-services': 'mozilla',
        }),
    },

    # Map scopes to restricted-level
    'cot_restricted_scopes': {
        'by-cot-product': frozendict({
            'firefox': frozendict({
                'project:releng:addons.mozilla.org:server:production': 'all-release-branches',

                'project:releng:balrog:server:nightly': 'all-nightly-branches',
                'project:releng:balrog:server:beta': 'beta',
                'project:releng:balrog:server:release': 'release',
                'project:releng:balrog:server:esr': 'esr',

                'project:releng:beetmover:bucket:nightly': 'all-nightly-branches',
                'project:releng:beetmover:bucket:release': 'all-release-branches',

                'project:releng:bouncer:server:production': 'all-production-branches',

                # Fennec rides the 68 train. This means Fennec Nightly will first ship from
                # mozilla-central and then from mozilla-esr68. Fennec Beta and Release will move to
                # mozilla-esr68 too.
                'project:releng:googleplay:aurora': 'esr68',
                'project:releng:googleplay:beta': 'esr68',
                'project:releng:googleplay:release': 'esr68',

                'project:releng:signing:cert:nightly-signing': 'all-nightly-branches',
                'project:releng:signing:cert:release-signing': 'all-release-branches',

                'project:releng:snapcraft:firefox:beta': 'beta-or-release',     # Needed on release for RCs
                'project:releng:snapcraft:firefox:candidate': 'release',
                'project:releng:snapcraft:firefox:esr': 'esr',

                'project:releng:ship-it:production': 'all-production-branches',

                'project:releng:treescript:action:push': 'all-release-branches',
            }),
            'thunderbird': frozendict({
                'project:comm:thunderbird:releng:balrog:server:nightly': 'all-nightly-branches',
                'project:comm:thunderbird:releng:balrog:server:beta': 'beta',
                'project:comm:thunderbird:releng:balrog:server:esr': 'esr',

                'project:comm:thunderbird:releng:beetmover:bucket:nightly': 'all-nightly-branches',
                'project:comm:thunderbird:releng:beetmover:bucket:release': 'all-release-branches',

                'project:comm:thunderbird:releng:bouncer:server:production': 'all-release-branches',

                'project:comm:thunderbird:releng:signing:cert:nightly-signing': 'all-nightly-branches',
                'project:comm:thunderbird:releng:signing:cert:release-signing': 'all-release-branches',
            }),
            'mobile': frozendict({
                'project:mobile:android-components:releng:beetmover:bucket:maven-production': 'android-components-repo',
                'project:mobile:android-components:releng:beetmover:bucket:maven-snapshot-production': 'android-components-repo',

                'project:mobile:fenix:releng:googleplay:product:fenix': 'fenix-repo',
                'project:mobile:fenix:releng:signing:cert:nightly-signing': 'fenix-repo',
                'project:mobile:fenix:releng:signing:cert:beta-signing': 'fenix-repo',
                'project:mobile:fenix:releng:signing:cert:production-signing': 'fenix-repo',

                'project:mobile:focus:googleplay:product:focus': 'focus-repo',
                'project:mobile:focus:releng:signing:cert:release-signing': 'focus-repo',

                'project:mobile:reference-browser:releng:googleplay:product:reference-browser': 'reference-browser-repo',
                'project:mobile:reference-browser:releng:signing:cert:release-signing': 'reference-browser-repo',

                'project:mobile:firefox-tv:releng:googleplay:product:firefox-tv': 'firefox-tv-repo',
                'project:mobile:firefox-tv:releng:signing:cert:production-signing': 'firefox-tv-repo',
            }),
            'application-services': frozendict({
                'project:mozilla:application-services:releng:beetmover:bucket:maven-production': 'application-services-repo',
            }),
        }),
    },
    # Map restricted-level to trees
    'cot_restricted_trees': {
        'by-cot-product': frozendict({
            'firefox': frozendict({
                # Which repos can perform release actions?
                # XXX remove /projects/maple and birch when taskcluster relpro
                #     migration is tier1 and landed on mozilla-central
                # XXX remove /projects/jamun when we no longer run staging releases
                #     from it
                'all-release-branches': (
                    "/releases/mozilla-beta",
                    "/releases/mozilla-release",
                    "/releases/mozilla-esr60",
                    "/releases/mozilla-esr68",
                    "/projects/birch",
                    "/projects/jamun",
                    "/projects/maple",
                ),
                # Limit things like pushapk to just these branches
                'release': (
                    "/releases/mozilla-release",
                ),
                'beta': (
                    "/releases/mozilla-beta",
                ),
                'beta-or-release': (
                    "/releases/mozilla-beta",
                    "/releases/mozilla-release",
                ),
                'esr': (
                    "/releases/mozilla-esr60",
                    "/releases/mozilla-esr68",
                ),
                'esr68': (
                    "/releases/mozilla-esr68",
                ),
                'nightly': (
                    "/mozilla-central",
                ),

                # Which repos can do nightly signing?
                # XXX remove /projects/maple and birch when taskcluster relpro
                #     migration is tier1 and landed on mozilla-central
                # XXX remove /projects/jamun when we no longer run staging releases
                #     from it
                # XXX remove /projects/oak when we no longer test updates against it
                'all-nightly-branches': (
                    "/mozilla-central",
                    "/releases/mozilla-unified",
                    "/releases/mozilla-beta",
                    "/releases/mozilla-release",
                    "/releases/mozilla-esr60",
                    "/releases/mozilla-esr68",
                    "/projects/birch",
                    "/projects/jamun",
                    "/projects/oak",
                    "/projects/maple",
                ),

                'all-production-branches': (
                    "/mozilla-central",
                    "/releases/mozilla-beta",
                    "/releases/mozilla-release",
                    "/releases/mozilla-esr60",
                    "/releases/mozilla-esr68",
                ),

                'all-staging-branches': (
                    "/projects/birch",
                    "/projects/jamun",
                    "/projects/maple",
                ),
            }),
            'thunderbird': frozendict({
                'all-release-branches': (
                    "/releases/comm-beta",
                    "/releases/comm-esr60",
                    "/releases/comm-esr68",
                ),
                'beta': (
                    "/releases/comm-beta",
                ),
                'esr': (
                    "/releases/comm-esr60",
                    "/releases/comm-esr68",
                ),
                'all-nightly-branches': (
                    "/comm-central",
                    "/releases/comm-beta",
                    "/releases/comm-esr60",
                    "/releases/comm-esr68",
                ),
                'nightly': (
                    "/comm-central",
                ),
            }),
            'mobile': frozendict({
                'fenix-repo': (
                    '/mozilla-mobile/fenix',
                ),
                'focus-repo': (
                    '/mozilla-mobile/focus-android',
                ),
                'android-components-repo': (
                    '/mozilla-mobile/android-components',
                ),
                'reference-browser-repo': (
                    '/mozilla-mobile/reference-browser',
                ),
                'firefox-tv-repo': (
                    '/mozilla-mobile/firefox-tv',
                ),
            }),
            'application-services': frozendict({
                'application-services-repo': (
                    '/mozilla/application-services',
                )
            }),
        }),
    },
    'prebuilt_docker_image_task_types': {
        'by-cot-product': frozendict({
            'firefox': ('decision', 'action', 'docker-image'),
            'thunderbird': ('decision', 'action', 'docker-image'),
            'mobile': 'any',  # all allowed
            'application-services': 'any',  # all allowed
        }),
    },
    'source_env_prefix': {
        'by-cot-product': frozendict({
            'firefox': 'GECKO',
            'thunderbird': 'COMM',
            'mobile': 'MOBILE',
            'application-services': 'APPSERVICES',
        })
    },
})
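
The 'by-cot-product' maps above are resolved per product at runtime. Below is a minimal sketch of such a lookup, assuming the frozendict above is bound to a name like config; the helper name resolve_by_cot_product is hypothetical, not scriptworker's actual API.

from collections.abc import Mapping


def resolve_by_cot_product(config, key, cot_product):
    """Return the per-product value for ``key`` from a 'by-cot-product' map."""
    value = config[key]
    if isinstance(value, Mapping) and "by-cot-product" in value:
        return value["by-cot-product"][cot_product]
    return value

# e.g. resolve_by_cot_product(config, "source_env_prefix", "firefox") -> "GECKO"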
Example #37
 def _draw_contour(self, x, y, a=1., b=1., c=0.,
                   levels=(1,), level_kwargs: EllipseCommand._LevelKwargsType = (frozendict(),),
                   _options=frozendict(), zorder=0, **options: Optionable._OptionsType):
     return EllipseCommand(*map(self._num_fmt, (x, y, a, b, c)),
                           levels=levels, level_kwargs=level_kwargs, options={**options, **_options}, zorder=zorder)
Example #38
File: utils.py Project: fly2mars/TO
def freeze_dict(obj):
    return frozendict((k, freeze(v)) for k, v in obj.items())
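
freeze_dict relies on a companion freeze function that is not shown in this snippet. A plausible recursive implementation is sketched below as an assumption; the project's actual freeze may differ.

from frozendict import frozendict


def freeze(obj):
    """Recursively convert mutable containers into immutable equivalents (assumed behavior)."""
    if isinstance(obj, dict):
        return frozendict((k, freeze(v)) for k, v in obj.items())
    if isinstance(obj, (list, tuple)):
        return tuple(freeze(v) for v in obj)
    if isinstance(obj, set):
        return frozenset(freeze(v) for v in obj)
    return obj  # assume scalars (str, int, ...) are already immutable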
Example #39
from random import randint, shuffle

from frozendict import frozendict


def solve_mouselab(formula, env, to_what=None):
    """
    Use the program found by the PLP method to act once in a particular 
    instatiation of the Mouselab MDP.

    Parameters
    ----------
    formula : [ StateActionProgram ]
    env : Trial
    to_what : [ int ]
        To what state reset the environment

    Returns
    -------
    actions : ( int )
        A tuple of taken actions
    reward : int
    state_actions_distribution : dict
        frozendict([ int ] : [ int ]): [ int ]
            A set of admissable actions for observed states written as [node_id] 
            : [value]
    """
    env.reset_observations(to_what=to_what)

    state = env
    actions = list(range(NUM_ACTIONS))
    clicks = tuple()
    state_actions_distribution = {}

    while True:

        if (len(clicks) > 0 and clicks[-1] == TERM) or \
            len(clicks) > NUM_ACTIONS:
            break

        all_bad = True
        admissible_actions = []
        shuffle(actions)

        for act in actions:
            if formula(state, act):
                all_bad = False
                admissible_actions.append(act)

        if all_bad:
            admissible_actions = [0]

        choice = randint(0, len(admissible_actions) - 1)
        act = admissible_actions[choice]
        clicks = clicks + (act, )
        observed = [n.label for n in state.observed_nodes if n.label != 0]
        observed_vals = [n.value for n in state.observed_nodes if n.label != 0]
        froze_state = frozendict(
            {k: v
             for k, v in zip(observed, observed_vals)})
        froze_action = frozenset(admissible_actions)
        state_actions_distribution[froze_state] = froze_action

        if act not in [n.label for n in state.observed_nodes]:
            state.node_map[act].observe()

    best_path = [
        node for node in env.node_map.values()
        if is_on_highest_expected_value_path(state, node.label)
    ]
    depths = {i: [] for i in state.level_map.keys()}

    for node in best_path:
        depths[node.depth].append(node)

    best_path = []
    for dep, nodes in depths.items():
        best_path.append(nodes[0].value)

    reward = sum(best_path) - len(clicks) + 1
    if len(set(clicks)) != len(clicks):
        reward = 1e-10  ## penalty for clicking observed nodes;
        ## the formula is no good if it does that

    return clicks, reward, state_actions_distribution
Example #40

from enum import Enum

from frozendict import frozendict
# The transform classes below match the albumentations API (an assumption
# based on the names used in this snippet).
from albumentations import (Equalize, HueSaturationValue, RandomBrightness,
                            RandomContrast, RandomCrop, Resize, Rotate,
                            ShiftScaleRotate, ToGray)


class SlowAugs(str, Enum):
    rotate = "rotate"
    shift_scale_rotate = "shift_scale_rotate"
    shift_hsv = "shift_hsv"
    equalize = "equalize"
    to_gray = "to_gray"
    resize512 = "resize512"
    resize300 = "resize300"
    resize256 = "resize256"
    resize224 = "resize224"
    contrast = "contrast"
    crop = "crop",
    bright = "bright"


SLOW_AUGS_DICT = frozendict(
    rotate=Rotate(always_apply=True),
    shift_scale_rotate=ShiftScaleRotate(always_apply=True),
    shift_hsv=HueSaturationValue(always_apply=True),
    equalize=Equalize(always_apply=True),
    to_gray=ToGray(always_apply=True),
    resize512=Resize(512, 512, always_apply=True),
    resize300=Resize(300, 300, always_apply=True),
    resize256=Resize(256, 256, always_apply=True),
    resize224=Resize(224, 224, always_apply=True),
    contrast=RandomContrast(always_apply=True),
    crop=RandomCrop(64, 64, always_apply=True),
    bright=RandomBrightness(always_apply=True))
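
Each value in SLOW_AUGS_DICT is a ready-to-call albumentations transform, so applying an augmentation is a dict lookup plus a call. A minimal usage sketch (the dummy image is illustrative):

import numpy as np

# albumentations transforms take keyword arguments and return a dict
# holding the transformed data under the "image" key.
img = np.zeros((256, 256, 3), dtype=np.uint8)  # dummy image
rotated = SLOW_AUGS_DICT["rotate"](image=img)["image"]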
Example #41
def get_permission_groups() -> frozendict:
    """Get all defined permission groups.
    """
    return frozendict(_groups)
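
Returning a frozendict snapshot keeps callers from mutating the module's registry in place. A small usage sketch, with hypothetical _groups contents:

_groups = {"admin": ("read", "write"), "viewer": ("read",)}  # hypothetical registry

groups = get_permission_groups()
print(groups["admin"])   # ('read', 'write')
# groups["admin"] = ()   # would raise TypeError: frozendict is immutable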
Example #42
 def parse_input(my_input):
     return frozendict({
         int(tile_data.split(':\n')[0].split(' ')[1]):
         tuple(tile_data.split(':\n')[1].split('\n'))
         for tile_data in my_input.strip('\n').split('\n\n')
     })
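
For context, parse_input expects blocks of the form "Tile <id>:" followed by grid rows, with blank lines between tiles. A small illustrative call with made-up data:

sample = "Tile 2311:\n..##\n##..\n\nTile 1951:\n#.#.\n.#.#"
tiles = parse_input(sample)
# tiles == frozendict({2311: ('..##', '##..'), 1951: ('#.#.', '.#.#')})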
Example #43
import json
from logging import getLogger

from django.http import Http404  # assumed import; Http404 is raised below in this Django project

from vms.models import Vm, Image, SnapshotDefine, Snapshot, BackupDefine, Backup, TagVm, Subnet
from gui.utils import get_order_by, get_pager
from gui.exceptions import HttpRedirectException
from gui.dc.views import dc_switch
from api.vm.utils import get_vms as api_get_vms, get_vm as api_get_vm
from api.vm.define.serializers import VmDefineSerializer, VmDefineDiskSerializer, VmDefineNicSerializer
from api.vm.define.views import vm_define, vm_define_nic, vm_define_disk
from api.vm.snapshot.vm_snapshot_list import VmSnapshotList
from api.vm.backup.vm_backup_list import VmBackupList
from api.utils.views import call_api_view

logger = getLogger(__name__)

REVERSE_OSTYPES = frozendict((ostype, key) for key, ostype in Vm.OSTYPE)


def get_vm(request, hostname, exists_ok=True, noexists_fail=True, auto_dc_switch=True, sr=('dc', 'owner')):
    """
    Get VM object or raise 404.
    """
    if auto_dc_switch:
        try:
            vm = Vm.objects.filter(slavevm__isnull=True).select_related(*sr).get(hostname=hostname)
        except Vm.DoesNotExist:
            raise Http404
        else:
            if vm.dc != request.dc:
                # Switch to VM's DC -> will return False if user is not allowed to switch to DC
                if dc_switch(request, vm.dc.name):
Example #44
        return self.get_hexes_of_distance(3, exclude_river)


@dataclass
class BridgeSlot(Location):
    connected_hexes: Tuple[Hex, Hex]


@dataclass
class Map:
    locations: frozendict = frozendict()
    hexes: Tuple[Hex, ...] = ()
    bridge_slots: Tuple[BridgeSlot, ...] = ()

    def __init__(self, hexes: Tuple[Hex, ...], bridge_slots: Tuple[BridgeSlot,
                                                                   ...]):
        self.hexes = hexes
        self.bridge_slots = bridge_slots
        # freeze the lookup table so it matches the declared frozendict type
        self.locations = frozendict({x.location_id: x for x in hexes + bridge_slots})


_adjacent_terrains = frozendict({
    Terrain.MOUNTAIN: (Terrain.WASTELAND, Terrain.DESERT),
    Terrain.DESERT: (Terrain.MOUNTAIN, Terrain.FIELD),
    Terrain.FIELD: (Terrain.DESERT, Terrain.SWAMP),
    Terrain.SWAMP: (Terrain.FIELD, Terrain.LAKE),
    Terrain.LAKE: (Terrain.SWAMP, Terrain.FOREST),
    Terrain.FOREST: (Terrain.LAKE, Terrain.WASTELAND),
    Terrain.WASTELAND: (Terrain.FOREST, Terrain.MOUNTAIN)
})
Example #45
def interpret_config_file(filename: str) -> ConfigInfo:
    """
    Returns a ConfigInfo.
    """
    try:
        basename = os.path.basename(filename)
        base = basename.replace(SUFFIX, "")
        # now we have something like
        #   package-node.config_name.date
        # or
        #   package-node.config_name
        if not "." in base:
            msg = f"Invalid filename {filename!r}."
            raise dtu.DTConfigException(msg)

        tokens = base.split(".")
        if len(tokens) > 3:
            msg = f"Too many periods/tokens (tokens={tokens})"
            raise dtu.DTConfigException(msg)

        if len(tokens) <= 2:
            #  package-node.config_name
            package_node = tokens[0]
            if not "-" in package_node:
                msg = f'Expected a "-" in "{package_node}".'
                raise dtu.DTConfigException(msg)
            i = package_node.index("-")
            package_name = package_node[:i]
            node_name = package_node[i + 1 :]
        else:
            package_name = node_name = None  # FIXME: should we bail?

        config_name = tokens[1]

        if len(tokens) == 3:
            # package-node.config_name.date
            date_effective = tokens[2]
        else:
            date_effective = "20170101"

        try:
            date_effective = parse(date_effective)
        except Exception:
            msg = f'Cannot interpret "{date_effective}" as a date.'
            raise dtu.DTConfigException(msg)

        # now read file

        with open(filename) as f:
            contents = f.read()
        try:
            try:
                data = yaml.load(contents, Loader=yaml.Loader)
            except YAMLError as e:
                dtu.raise_wrapped(dtu.DTConfigException, e, "Invalid YAML", compact=True)
                raise
            if not isinstance(data, dict):
                msg = "Expected a dictionary inside."
                raise dtu.DTConfigException(msg)

            for field in ["description", "values"]:
                if field not in data:
                    msg = f'Missing field "{field}".'
                    raise dtu.DTConfigException(msg)

            description = data.pop("description")
            if not isinstance(description, str):
                msg = f'I expected that "description" is a string, obtained {description!r}.'
                raise dtu.DTConfigException(msg)

            extends = data.pop("extends", [])
            if not isinstance(extends, list):
                msg = f'I expected that "extends" is a list, obtained {extends!r}.'
                raise dtu.DTConfigException(msg)

            values = data.pop("values")
            if not isinstance(values, dict):
                msg = f'I expected that "values" is a dictionary, obtained {type(values)}.'
                raise dtu.DTConfigException(msg)

            # Freeze the data
            extends = tuple(extends)
            values = frozendict(values)

        except dtu.DTConfigException as e:
            msg = "Could not interpret the contents of the file\n"
            msg += f"   {dtu.friendly_path(filename)}\n"
            msg += "Contents:\n" + dtu.indent(contents, " > ")
            dtu.raise_wrapped(dtu.DTConfigException, e, msg, compact=True)
            raise

        return ConfigInfo(
            filename=filename,
            package_name=package_name,
            node_name=node_name,
            config_name=config_name,
            date_effective=date_effective,
            extends=extends,
            description=description,
            values=values,
            # not decided
            valid=None,
            error_if_invalid=None,
        )

    except dtu.DTConfigException as e:
        msg = f"Invalid file {dtu.friendly_path(filename)}"
        dtu.raise_wrapped(dtu.DTConfigException, e, msg, compact=True)
Example #46
car = { 'brand':'Honda', 'model': 'Jazz', 'year' : 2017}
type(car)
car
#access
car['brand']
car['year']
car.get('year')
dir(car)
car['brand'] = 'MARUTI'
car  # mutable: value can be changed

#%%frozen Dictionaries
#pip install frozendict   #install this library
from frozendict import frozendict
fd = frozendict({ 'brand': 'Honda' })
fd
fd['brand'] = 'HYUNDAI'   # immutable: this assignment raises TypeError
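#to observe the failure without stopping the script, wrap the assignment (a small sketch)
try:
    fd['brand'] = 'HYUNDAI'
except TypeError as err:
    print('frozendict is immutable:', err)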

#%% { curly bracket, comma}
#Set - unordered collection of unique items; elements must be hashable
set1 = set(['india', 'pakistan', 'england', 'australia','india'])
set1
type(set1)

set2 = {'INDIA', 'PAKISTAN', "INDIA"}  #better way; duplicates are removed
set2
set3 = {'Australia', 'South Africa', 'INDIA'}
set3
sorted(set3)
set2.union(set3) #set2 | set3
Example #47
from frozendict import frozendict

RAW_SENTIMENT_LABELS = ('negative', 'neutral', 'positive')
NN_RSL_TO_INT = frozendict({v: i for i, v in enumerate(RAW_SENTIMENT_LABELS)})


class VocabUtil:
    def __init__(self):
        self.raw_sentiment_labels = RAW_SENTIMENT_LABELS
        self.nn_rsl_to_int = NN_RSL_TO_INT

    def get_output_vocab_size(self) -> int:
        return len(self.nn_rsl_to_int)
Example #48
 def build(cls, fmapper, honored):
     obj = object.__new__(HaloScheme)
     obj._mapper = frozendict(fmapper)
     obj._honored = frozendict(honored)
     return obj
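
build constructs a HaloScheme without running __init__, via object.__new__. This is a common alternate-constructor pattern when the instance state is already computed; a generic sketch of the idea (not Devito's actual class):

class Point:
    def __init__(self, x, y):
        # imagine expensive validation or computation here
        self.x, self.y = x, y

    @classmethod
    def build(cls, x, y):
        obj = object.__new__(cls)  # allocate without calling __init__
        obj.x, obj.y = x, y        # set precomputed state directly
        return obj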
Example #49
 def _draw_hist(self, m, v, _options=frozendict(), **options):
     return AddplotExpression(f'exp(-(x-{self._num_fmt(m)})^2 / 2 / {self._num_fmt(v)}) / {self._num_fmt(v**0.5)}',
                              options={**options, **_options})
Example #50
    def omapper(self):
        """
        Logical decomposition of the DOMAIN region into OWNED and CORE sub-regions.

        This is "cumulative" over all DiscreteFunctions in the HaloScheme; it also
        takes into account IterationSpace offsets induced by SubDomains/SubDimensions.

        Examples
        --------
        Consider a HaloScheme comprising two one-dimensional Functions, ``u``
        and ``v``.  ``u``'s halo, on the LEFT and RIGHT DataSides respectively,
        is (2, 2), while ``v``'s is (4, 4). The situation is depicted below.

              ^^oo----------------oo^^     u
            ^^^^oooo------------oooo^^^^   v

        Where '^' represents a HALO point, 'o' an OWNED point, and '-' a CORE point.
        Together, the 'o' and '-' points constitute the DOMAIN region.

        In this example, the "cumulative" OWNED size is (left=4, right=4), that is
        the max on each DataSide across all Functions, namely ``u`` and ``v``.

        The ``omapper`` will contain the following entries:

            [(((d, CORE, CENTER),), {d: (d_m + 4, d_M - 4)}),
             (((d, OWNED, LEFT),), {d: (d_m, min(d_m + 3, d_M))}),
             (((d, OWNED, RIGHT),), {d: (max(d_M - 3, d_m), d_M)})]

        In presence of SubDomains (or, more generally, iteration over SubDimensions),
        the "true" DOMAIN is actually smaller. For example, consider again the
        example above, but now with a SubDomain that excludes the first ``nl``
        and the last ``nr`` DOMAIN points, where ``nl >= 0`` and ``nr >= 0``. Often,
        ``nl`` and ``nr`` are referred to as the "thickness" of the SubDimension (see
        also SubDimension.__doc__). For example, the situation could be as below

              ^^ooXXX----------XXXoo^^     u
            ^^^^ooooX----------Xoooo^^^^   v

        Where 'X' is a CORE point excluded by the computation due to the SubDomain.
        Here, the 'o' points are outside of the SubDomain, but in general they could
        also be inside. The ``omapper`` is constructed taking into account that
        SubDomains are iterated over with min point ``d_m + nl`` and max point
        ``d_M - nr``. Here, the ``omapper`` is:

            [(((d, CORE, CENTER),), {d: (d_m + 4, d_M - 4),
                                     nl: (max(nl - 4, 0),),
                                     nr: (max(nr - 4, 0),)}),
             (((d, OWNED, LEFT),), {d: (d_m, min(d_m + 3, d_M - nr)),
                                    nl: (nl,),
                                    nr: (0,)}),
             (((d, OWNED, RIGHT),), {d: (max(d_M - 3, d_m + nl), d_M),
                                     nl: (0,),
                                     nr: (nr,)})]

        To convince ourselves that this makes sense, we consider a number of cases.
        For now, we assume ``|d_M - d_m| > HALO``, that is the left-HALO and right-HALO
        regions do not overlap.

            1. The SubDomain thickness is 0, which is as if there were no SubDomains.
               By instantiating the template above with ``nl = 0`` and ``nr = 0``,
               it is trivial to see that we fall back to the non-SubDomain case.

            2. The SubDomain thickness is as big as the HALO region size, that is
               ``nl = 4`` and ``nr = 4``. The ``omapper`` is such that no iterations
               will be performed in the OWNED regions (i.e., "everything is CORE").

            3. The SubDomain left-thickness is smaller than the left-HALO region size,
               while the SubDomain right-thickness is larger than the right-Halo region
               size. This means that some left-OWNED points are within the SubDomain,
               while the RIGHT-OWNED are outside. For example, take ``nl = 1`` and
               ``nr = 5``; the iteration regions will then be:

                - (CORE, CENTER): {d: (d_m + 4, d_M - 4), nl: (0,), nr: (1,)}, so
                  the min point is ``d_m + 4``, while the max point is ``d_M - 5``.

                - (OWNED, LEFT): {d: (d_m, d_m + 3), nl: (1,), nr: (0,)}, so the
                  min point is ``d_m + 1``, while the max point is ``d_m + 3``.

                - (OWNED, RIGHT): {d: (d_M - 3, d_M), nl: (0,), nr: (5,)}, so the
                  min point is ``d_M - 3``, while the max point is ``d_M - 5``,
                  which implies zero iterations in this region.

        Let's now assume that the left-HALO and right-HALO regions overlap. For example,
        ``d_m = 0`` and ``d_M = 1`` (i.e., the DOMAIN only has two points), with the HALO
        size that is still (4, 4).

            4. Let's take ``nl = 1`` and ``nr = 0``. That is, only one point is in
               the SubDomain and should be updated. We again instantiate the iteration
               regions and obtain:

                - (CORE, CENTER): {d: (d_m + 4, d_M - 4), nl: (0,), nr: (0,)}, so
                  the min point is ``d_m + 4 = 4``, while the max point is
                  ``d_M - 4 = -3``, which implies zero iterations in this region.

                - (OWNED, LEFT): {d: (d_m, min(d_m + 3, d_M - nr)), nl: (1,), nr: (0,)},
                  so the min point is ``d_m + 1 = 1``, while the max point is
                  ``min(d_m + 3, d_M - nr) = min(3, 1) = 1``, which implies that there
                  is exactly one point in this region.

                - (OWNED, RIGHT): {d: (max(d_M - 3, d_m + nl), d_M), nl: (0,), nr: (0,)},
                  so the min point is ``max(d_M - 3, d_m + nl) = max(-2, 1) = 1``, while
                  the max point is ``d_M = 1``, which implies that there is exactly one
                  point in this region, and this point is redundantly computed as it's
                  logically the same as that in the (OWNED, LEFT) region.

        Notes
        -----
        For each Function, the '^' and 'o' are exactly the same on *all MPI
        ranks*, so the output of this method is guaranteed to be consistent
        across *all MPI ranks*.
        """
        items = [((d, CENTER), (d, LEFT), (d, RIGHT)) for d in self.dimensions]

        processed = []
        for item in product(*items):
            where = []
            mapper = {}
            for d, s in item:
                osl, osr = self.owned_size[d]

                # Handle SubDomain/SubDimensions to-honor offsets
                nl = Max(0, *[i for i, _ in self.honored.get(d, [])])
                nr = Max(0, *[i for _, i in self.honored.get(d, [])])

                if s is CENTER:
                    where.append((d, CORE, s))
                    mapper[d] = (d.symbolic_min + osl, d.symbolic_max - osr)
                    if nl != 0:
                        mapper[nl] = (Max(nl - osl, 0), )
                    if nr != 0:
                        mapper[nr] = (Max(nr - osr, 0), )
                else:
                    where.append((d, OWNED, s))
                    if s is LEFT:
                        mapper[d] = (d.symbolic_min,
                                     Min(d.symbolic_min + osl - 1,
                                         d.symbolic_max - nr))
                        if nl != 0:
                            mapper[nl] = (nl, )
                            mapper[nr] = (0, )
                    else:
                        mapper[d] = (Max(d.symbolic_max - osr + 1,
                                         d.symbolic_min + nl), d.symbolic_max)
                        if nr != 0:
                            mapper[nl] = (0, )
                            mapper[nr] = (nr, )
            processed.append((tuple(where), frozendict(mapper)))

        _, core = processed.pop(0)
        owned = processed

        return OMapper(core, owned)
Example #51
def test_illustration():
    """
    A `Illustration` can be cast from a dict.
    """
    class _Section(DocElem):
        """Test Section"""
        def __init__(self, title, *args, **kwargs):
            super().__init__(*args, title=title, **kwargs)

        @field.cast(Narrative)
        def narrative(self):
            raise FieldIsRequired

        @field.cast(Data)
        def data(self):
            return NA

        @field.cast(Illustration)
        def illustration(self):
            return NA

        def __call__(self, adapter, model):
            return Record(
                narrative=self.narrative(adapter, model),
                data=self.data(adapter, model),
                illustration=self.illustration(adapter, model))


    p = Bars(xvar="layer", xlabel="Layer",
             yvar="cell_density", ylabel="Cell Density")
    d = Document("test")
    s = _Section("Illustration", parent=d,
                 narrative="Test",
                 data=mock_cell_density(),
                 illustration=frozendict(
                     caption="Caption",
                     figures=p(mock_cell_density())))
    assert isinstance(s.narrative, Narrative), type(s.narrative)
    assert isinstance(s.data, Data), type(s.data)
    try:
        illustration = s.illustration
    except AttributeError:
        assert False

    assert isinstance(illustration, Illustration), type(illustration)

    v = s(MockAdapter(), MockModel())
    
    assert v.narrative == "Test"
    assert isinstance(v.data, pd.DataFrame)
    assert "layer" in v.data.columns
    assert "cell_density" in v.data.columns
    try:
        vill = v.illustration
    except AttributeError:
        assert False

    try:
        caption = vill.caption
    except AttributeError:
        assert False
    assert caption == "Caption"
    
    try:
        figures = vill.figures
    except AttributeError:
        assert False
Example #52
 def sumweights(weights: Sequence[Tuple[float, frozendict]]):
     # assuming each element is a (weight, frozendict) pair per the annotation,
     # collect the keys of every frozendict
     ts = [item for _, d in weights for item in d.keys()]
     if ts:
         return frozendict({min(ts): 0})
     else:
         return frozendict()
Example #53
 def __torch_function__(self, func, types, args=(), kwargs=frozendict()):
     ret = super().__torch_function__(func, types, args, kwargs)
     return self._meta_update(ret) if isinstance(ret, type(self)) else ret
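
Using frozendict() as the kwargs default sidesteps Python's shared-mutable-default pitfall: an immutable default cannot be accidentally modified across calls. A minimal illustration, independent of torch:

from frozendict import frozendict


def bad(acc={}):               # one dict object shared across all calls
    acc['n'] = acc.get('n', 0) + 1
    return acc['n']            # bad() -> 1, bad() -> 2: state leaks between calls

def good(acc=frozendict()):    # immutable default cannot be mutated in place
    acc = dict(acc)            # copy before modifying
    acc['n'] = acc.get('n', 0) + 1
    return acc['n']            # always 1 when called with the default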
Example #54
    def test_get_state_for_event(self):
        # this defaults to a linear DAG as each new injection defaults to whatever
        # forward extremities are currently in the DB for this room.
        e1 = self.inject_state_event(self.room, self.u_alice,
                                     EventTypes.Create, "", {})
        e2 = self.inject_state_event(self.room, self.u_alice, EventTypes.Name,
                                     "", {"name": "test room"})
        e3 = self.inject_state_event(
            self.room,
            self.u_alice,
            EventTypes.Member,
            self.u_alice.to_string(),
            {"membership": Membership.JOIN},
        )
        e4 = self.inject_state_event(
            self.room,
            self.u_bob,
            EventTypes.Member,
            self.u_bob.to_string(),
            {"membership": Membership.JOIN},
        )
        e5 = self.inject_state_event(
            self.room,
            self.u_bob,
            EventTypes.Member,
            self.u_bob.to_string(),
            {"membership": Membership.LEAVE},
        )

        # check we get the full state as of the final event
        state = self.get_success(
            self.storage.state.get_state_for_event(e5.event_id))

        self.assertIsNotNone(e4)

        self.assertStateMapEqual(
            {
                (e1.type, e1.state_key): e1,
                (e2.type, e2.state_key): e2,
                (e3.type, e3.state_key): e3,
                # e4 is overwritten by e5
                (e5.type, e5.state_key): e5,
            },
            state,
        )

        # check we can filter to the m.room.name event (with a '' state key)
        state = self.get_success(
            self.storage.state.get_state_for_event(
                e5.event_id, StateFilter.from_types([(EventTypes.Name, "")])))

        self.assertStateMapEqual({(e2.type, e2.state_key): e2}, state)

        # check we can filter to the m.room.name event (with a wildcard None state key)
        state = self.get_success(
            self.storage.state.get_state_for_event(
                e5.event_id,
                StateFilter.from_types([(EventTypes.Name, None)])))

        self.assertStateMapEqual({(e2.type, e2.state_key): e2}, state)

        # check we can grab the m.room.member events (with a wildcard None state key)
        state = self.get_success(
            self.storage.state.get_state_for_event(
                e5.event_id,
                StateFilter.from_types([(EventTypes.Member, None)])))

        self.assertStateMapEqual(
            {
                (e3.type, e3.state_key): e3,
                (e5.type, e5.state_key): e5
            }, state)

        # check we can grab a specific room member without filtering out the
        # other event types
        state = self.get_success(
            self.storage.state.get_state_for_event(
                e5.event_id,
                state_filter=StateFilter(
                    types=frozendict({
                        EventTypes.Member:
                        frozenset({self.u_alice.to_string()})
                    }),
                    include_others=True,
                ),
            ))

        self.assertStateMapEqual(
            {
                (e1.type, e1.state_key): e1,
                (e2.type, e2.state_key): e2,
                (e3.type, e3.state_key): e3,
            },
            state,
        )

        # check that we can grab everything except members
        state = self.get_success(
            self.storage.state.get_state_for_event(
                e5.event_id,
                state_filter=StateFilter(
                    types=frozendict({EventTypes.Member: frozenset()}),
                    include_others=True,
                ),
            ))

        self.assertStateMapEqual(
            {
                (e1.type, e1.state_key): e1,
                (e2.type, e2.state_key): e2
            }, state)

        #######################################################
        # _get_state_for_group_using_cache tests against a full cache
        #######################################################

        room_id = self.room.to_string()
        group_ids = self.get_success(
            self.storage.state.get_state_groups_ids(room_id, [e5.event_id]))
        group = list(group_ids.keys())[0]

        # test _get_state_for_group_using_cache correctly filters out members
        # with types=[]
        (
            state_dict,
            is_all,
        ) = self.state_datastore._get_state_for_group_using_cache(
            self.state_datastore._state_group_cache,
            group,
            state_filter=StateFilter(types=frozendict(
                {EventTypes.Member: frozenset()}),
                                     include_others=True),
        )

        self.assertEqual(is_all, True)
        self.assertDictEqual(
            {
                (e1.type, e1.state_key): e1.event_id,
                (e2.type, e2.state_key): e2.event_id,
            },
            state_dict,
        )

        (
            state_dict,
            is_all,
        ) = self.state_datastore._get_state_for_group_using_cache(
            self.state_datastore._state_group_members_cache,
            group,
            state_filter=StateFilter(types=frozendict(
                {EventTypes.Member: frozenset()}),
                                     include_others=True),
        )

        self.assertEqual(is_all, True)
        self.assertDictEqual({}, state_dict)

        # test _get_state_for_group_using_cache correctly filters in members
        # with wildcard types
        (
            state_dict,
            is_all,
        ) = self.state_datastore._get_state_for_group_using_cache(
            self.state_datastore._state_group_cache,
            group,
            state_filter=StateFilter(types=frozendict(
                {EventTypes.Member: None}),
                                     include_others=True),
        )

        self.assertEqual(is_all, True)
        self.assertDictEqual(
            {
                (e1.type, e1.state_key): e1.event_id,
                (e2.type, e2.state_key): e2.event_id,
            },
            state_dict,
        )

        (
            state_dict,
            is_all,
        ) = self.state_datastore._get_state_for_group_using_cache(
            self.state_datastore._state_group_members_cache,
            group,
            state_filter=StateFilter(types=frozendict(
                {EventTypes.Member: None}),
                                     include_others=True),
        )

        self.assertEqual(is_all, True)
        self.assertDictEqual(
            {
                (e3.type, e3.state_key): e3.event_id,
                # e4 is overwritten by e5
                (e5.type, e5.state_key): e5.event_id,
            },
            state_dict,
        )

        # test _get_state_for_group_using_cache correctly filters in members
        # with specific types
        (
            state_dict,
            is_all,
        ) = self.state_datastore._get_state_for_group_using_cache(
            self.state_datastore._state_group_cache,
            group,
            state_filter=StateFilter(
                types=frozendict(
                    {EventTypes.Member: frozenset({e5.state_key})}),
                include_others=True,
            ),
        )

        self.assertEqual(is_all, True)
        self.assertDictEqual(
            {
                (e1.type, e1.state_key): e1.event_id,
                (e2.type, e2.state_key): e2.event_id,
            },
            state_dict,
        )

        (
            state_dict,
            is_all,
        ) = self.state_datastore._get_state_for_group_using_cache(
            self.state_datastore._state_group_members_cache,
            group,
            state_filter=StateFilter(
                types=frozendict(
                    {EventTypes.Member: frozenset({e5.state_key})}),
                include_others=True,
            ),
        )

        self.assertEqual(is_all, True)
        self.assertDictEqual({(e5.type, e5.state_key): e5.event_id},
                             state_dict)

        # test _get_state_for_group_using_cache correctly filters in members
        # with specific types
        (
            state_dict,
            is_all,
        ) = self.state_datastore._get_state_for_group_using_cache(
            self.state_datastore._state_group_members_cache,
            group,
            state_filter=StateFilter(
                types=frozendict(
                    {EventTypes.Member: frozenset({e5.state_key})}),
                include_others=False,
            ),
        )

        self.assertEqual(is_all, True)
        self.assertDictEqual({(e5.type, e5.state_key): e5.event_id},
                             state_dict)

        #######################################################
        # deliberately remove e2 (room name) from the _state_group_cache

        cache_entry = self.state_datastore._state_group_cache.get(group)
        state_dict_ids = cache_entry.value

        self.assertEqual(cache_entry.full, True)
        self.assertEqual(cache_entry.known_absent, set())
        self.assertDictEqual(
            state_dict_ids,
            {
                (e1.type, e1.state_key): e1.event_id,
                (e2.type, e2.state_key): e2.event_id,
            },
        )

        state_dict_ids.pop((e2.type, e2.state_key))
        self.state_datastore._state_group_cache.invalidate(group)
        self.state_datastore._state_group_cache.update(
            sequence=self.state_datastore._state_group_cache.sequence,
            key=group,
            value=state_dict_ids,
            # list fetched keys so it knows it's partial
            fetched_keys=((e1.type, e1.state_key), ),
        )

        cache_entry = self.state_datastore._state_group_cache.get(group)
        state_dict_ids = cache_entry.value

        self.assertEqual(cache_entry.full, False)
        self.assertEqual(cache_entry.known_absent, {(e1.type, e1.state_key)})
        self.assertDictEqual(state_dict_ids,
                             {(e1.type, e1.state_key): e1.event_id})

        ############################################
        # test that things work with a partial cache

        # test _get_state_for_group_using_cache correctly filters out members
        # with types=[]
        room_id = self.room.to_string()
        (
            state_dict,
            is_all,
        ) = self.state_datastore._get_state_for_group_using_cache(
            self.state_datastore._state_group_cache,
            group,
            state_filter=StateFilter(types=frozendict(
                {EventTypes.Member: frozenset()}),
                                     include_others=True),
        )

        self.assertEqual(is_all, False)
        self.assertDictEqual({(e1.type, e1.state_key): e1.event_id},
                             state_dict)

        room_id = self.room.to_string()
        (
            state_dict,
            is_all,
        ) = self.state_datastore._get_state_for_group_using_cache(
            self.state_datastore._state_group_members_cache,
            group,
            state_filter=StateFilter(types=frozendict(
                {EventTypes.Member: frozenset()}),
                                     include_others=True),
        )

        self.assertEqual(is_all, True)
        self.assertDictEqual({}, state_dict)

        # test _get_state_for_group_using_cache correctly filters in members
        # wildcard types
        (
            state_dict,
            is_all,
        ) = self.state_datastore._get_state_for_group_using_cache(
            self.state_datastore._state_group_cache,
            group,
            state_filter=StateFilter(types=frozendict(
                {EventTypes.Member: None}),
                                     include_others=True),
        )

        self.assertEqual(is_all, False)
        self.assertDictEqual({(e1.type, e1.state_key): e1.event_id},
                             state_dict)

        (
            state_dict,
            is_all,
        ) = self.state_datastore._get_state_for_group_using_cache(
            self.state_datastore._state_group_members_cache,
            group,
            state_filter=StateFilter(types=frozendict(
                {EventTypes.Member: None}),
                                     include_others=True),
        )

        self.assertEqual(is_all, True)
        self.assertDictEqual(
            {
                (e3.type, e3.state_key): e3.event_id,
                (e5.type, e5.state_key): e5.event_id,
            },
            state_dict,
        )

        # test _get_state_for_group_using_cache correctly filters in members
        # with specific types
        (
            state_dict,
            is_all,
        ) = self.state_datastore._get_state_for_group_using_cache(
            self.state_datastore._state_group_cache,
            group,
            state_filter=StateFilter(
                types=frozendict(
                    {EventTypes.Member: frozenset({e5.state_key})}),
                include_others=True,
            ),
        )

        self.assertEqual(is_all, False)
        self.assertDictEqual({(e1.type, e1.state_key): e1.event_id},
                             state_dict)

        (
            state_dict,
            is_all,
        ) = self.state_datastore._get_state_for_group_using_cache(
            self.state_datastore._state_group_members_cache,
            group,
            state_filter=StateFilter(
                types=frozendict(
                    {EventTypes.Member: frozenset({e5.state_key})}),
                include_others=True,
            ),
        )

        self.assertEqual(is_all, True)
        self.assertDictEqual({(e5.type, e5.state_key): e5.event_id},
                             state_dict)

        # test _get_state_for_group_using_cache correctly filters in members
        # with specific types
        (
            state_dict,
            is_all,
        ) = self.state_datastore._get_state_for_group_using_cache(
            self.state_datastore._state_group_cache,
            group,
            state_filter=StateFilter(
                types=frozendict(
                    {EventTypes.Member: frozenset({e5.state_key})}),
                include_others=False,
            ),
        )

        self.assertEqual(is_all, False)
        self.assertDictEqual({}, state_dict)

        (
            state_dict,
            is_all,
        ) = self.state_datastore._get_state_for_group_using_cache(
            self.state_datastore._state_group_members_cache,
            group,
            state_filter=StateFilter(
                types=frozendict(
                    {EventTypes.Member: frozenset({e5.state_key})}),
                include_others=False,
            ),
        )

        self.assertEqual(is_all, True)
        self.assertDictEqual({(e5.type, e5.state_key): e5.event_id},
                             state_dict)
Example #55
DEFAULT_CONFIG = frozendict({
    # Worker identification
    "provisioner_id": "test-dummy-provisioner",
    "worker_group": "test-dummy-workers",
    "worker_type": "dummy-worker-myname",
    "worker_id": os.environ.get("SCRIPTWORKER_WORKER_ID", "dummy-worker-myname1"),
    "credentials": frozendict({
        "clientId": "...",
        "accessToken": "...",
        "certificate": "...",
    }),

    # Worker log settings
    "log_datefmt": "%Y-%m-%dT%H:%M:%S",
    "log_fmt": "%(asctime)s %(levelname)8s - %(message)s",
    "log_max_bytes": 1024 * 1024 * 512,
    "log_num_backups": 10,

    # intervals are expressed in seconds
    "artifact_expiration_hours": 24,
    "task_max_timeout": 60 * 20,
    "reclaim_interval": 300,
    "poll_interval": 5,
    "sign_key_timeout": 60 * 2,
    "task_script": ("bash", "-c", "echo foo && sleep 19 && exit 1"),
    "verbose": True,

    # Task settings
    "work_dir": "...",
    "log_dir": "...",
    "artifact_dir": "...",
    "task_log_dir": "...",  # set this to ARTIFACT_DIR/public/logs
    "git_commit_signing_pubkey_dir": "...",
    "artifact_upload_timeout": 60 * 20,
    "aiohttp_max_connections": 15,

    # chain of trust settings
    "sign_chain_of_trust": True,
    "verify_chain_of_trust": False,  # TODO True
    "verify_cot_signature": False,
    "cot_job_type": "unknown",  # e.g., signing
    "cot_product": "firefox",

    # Specify a default gpg home other than ~/.gnupg
    "gpg_home": None,
    # A list of additional gpg cmdline options
    "gpg_options": None,
    # The path to the gpg executable.
    "gpg_path": None,
    # The path to the public/secret keyrings, if we're not using the default
    "gpg_public_keyring": '%(gpg_home)s/pubring.gpg',
    "gpg_secret_keyring": '%(gpg_home)s/secring.gpg',
    # Boolean to use the gpg agent
    "gpg_use_agent": False,
    "gpg_encoding": 'utf-8',
    "base_gpg_home_dir": "...",
    "gpg_lockfile": os.path.join(os.getcwd(), "gpg_homedir.lock"),
    "git_key_repo_dir": "...",
    "git_key_repo_url": "https://github.com/mozilla-releng/cot-gpg-keys.git",
    "last_good_git_revision_file": os.path.join(os.getcwd(), "git_revision"),
    "pubkey_path": "...",
    "privkey_path": "...",
    "my_email": "*****@*****.**",
    "chain_of_trust_hash_algorithm": "sha256",
    "cot_schema_path": os.path.join(os.path.dirname(__file__), "data", "cot_v1_schema.json"),

    # for download url validation.  The regexes need to define a 'filepath'.
    'valid_artifact_rules': (frozendict({
        "schemes": ("https", ),
        "netlocs": ("queue.taskcluster.net", ),
        "path_regexes": (
            "^/v1/task/(?P<taskId>[^/]+)(/runs/\\d+)?/artifacts/(?P<filepath>.*)$",
        ),
    }), ),

    # docker image shas
    "docker_image_allowlists":
    frozendict({
        "decision": (
            "sha256:31035ed23eba3ede02b988be39027668d965b9fc45b74b932b2338a4e7936cf9",
            "sha256:7320c720c770e9f93df26f7da742db72b334b7ded77539fb240fc4a28363de5a",
            "sha256:9db282317340838f0015335d74ed56c4ee0dbad588be33e6999928a181548587",
            "sha256:a22b90c7e16191a701760ef4f9159e86289ba598bf8ff5b22b7b94867530460d",
        ),
        "docker-image": (
            "sha256:74c5a18ce1768605ce9b1b5f009abac1ff11b55a007e2d03733cd6e95847c747",
            "sha256:d438d7818b6a47a0b1d49943ab12b5c504b65161806658e4c28f5f2aac821b9e",
            "sha256:13b80a7a6b8e10c6096aba5a435529fbc99b405f56012e57cc6835facf4b40fb",
        )
    }),

    # git gpg homedir layout
    "gpg_homedirs":
    frozendict({
        "docker-worker":
        frozendict({
            "type": "flat",
            "ignore_suffixes": (".md", )
        }),
        "generic-worker":
        frozendict({
            "type": "flat",
            "ignore_suffixes": (".md", )
        }),
        "scriptworker":
        frozendict({
            "type": "signed",
            "ignore_suffixes": (".md", )
        }),
    }),

    # scriptworker identification
    "scriptworker_worker_types": (
        "balrogworker-v1",
        "beetmoverworker-v1",
        "pushapk-v1",
        "signing-linux-v1",
    ),
    "scriptworker_provisioners": ("scriptworker-prov-v1", ),

    # valid hash algorithms for chain of trust artifacts
    "valid_hash_algorithms": (
        "sha256",
        "sha512",
    ),

    # decision task cot
    "valid_decision_worker_types": ("gecko-decision", ),

    # docker-image cot
    "valid_docker_image_worker_types": (
        "taskcluster-images",  # TODO: Remove this image once docker-images is the only valid worker type
        "gecko-images",
    ),

    # for trace_back_to_*_tree.  These repos have access to restricted scopes;
    # all other repos are relegated to CI scopes.
    'valid_vcs_rules': (
        frozendict({
            # TODO index by cot_product
            "schemes": (
                "https",
                "ssh",
            ),
            "netlocs": ("hg.mozilla.org", ),
            "path_regexes": (
                "^(?P<path>/mozilla-(central|unified))(/|$)",
                "^(?P<path>/integration/(autoland|fx-team|mozilla-inbound))(/|$)",
                "^(?P<path>/releases/mozilla-(aurora|beta|release|esr45|esr52))(/|$)",
                # XXX remove /projects/date when taskcluster nightly migration is
                #     tier1 and landed on mozilla-central
                # XXX remove /projects/jamun when we no longer release firefox
                #     from it
                "^(?P<path>/projects/(date|jamun))(/|$)",
            ),
        }), ),

    # Map scopes to restricted-level
    'cot_restricted_scopes': frozendict({
        'firefox': frozendict({
            'project:releng:balrog:server:nightly': 'nightly',
            'project:releng:balrog:server:aurora': 'aurora',
            'project:releng:balrog:server:beta': 'beta',
            'project:releng:balrog:server:release': 'release',
            'project:releng:balrog:server:esr': 'esr',
            'project:releng:balrog:nightly': 'all-nightly-branches',
            'project:releng:beetmover:bucket:release': 'all-release-branches',
            'project:releng:googleplay:release': 'release',
            'project:releng:signing:cert:release-signing': 'all-release-branches',
            'project:releng:googleplay:beta': 'betatest',
            'project:releng:googleplay:aurora': 'auroratest',
            'project:releng:beetmover:bucket:nightly': 'all-nightly-branches',
            'project:releng:signing:cert:nightly-signing': 'all-nightly-branches',
        })
    }),
    # Map restricted-level to trees
    'cot_restricted_trees': frozendict({
        'firefox': frozendict({
            # Which repos can perform release actions?
            # Allow aurora for staging betas.
            # XXX remove /projects/jamun when we no longer release firefox
            #     from it
            'all-release-branches': (
                "/releases/mozilla-aurora",
                "/releases/mozilla-beta",
                "/releases/mozilla-release",
                "/releases/mozilla-esr45",
                "/releases/mozilla-esr52",
                "/projects/jamun",
            ),
            # Limit things like pushapk to just these branches
            'release': ("/releases/mozilla-release", ),
            'beta': ("/releases/mozilla-beta", ),
            # TODO remove it once pushapk is landed on beta
            'betatest': (
                "/releases/mozilla-beta",
                "/projects/jamun",
            ),
            'aurora': ("/releases/mozilla-aurora", ),
            # TODO remove it once pushapk is landed on aurora
            'auroratest': (
                "/releases/mozilla-aurora",
                "/projects/date",
            ),
            'esr': (
                "/releases/mozilla-esr45",
                "/releases/mozilla-esr52",
            ),
            'nightly': ("/mozilla-central", ),

            # Which repos can do nightly signing?
            # XXX remove /projects/date when taskcluster nightly migration is
            #     tier1 and landed on mozilla-central
            # XXX remove /projects/jamun when we no longer release firefox
            #     from it
            'all-nightly-branches': (
                "/mozilla-central",
                "/releases/mozilla-unified",
                "/releases/mozilla-aurora",
                "/releases/mozilla-beta",
                "/releases/mozilla-release",
                "/releases/mozilla-esr45",
                "/releases/mozilla-esr52",
                "/projects/jamun",
                "/projects/date",
            ),
        }),
    }),
})
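These frozendict constants form a read-only chain-of-trust policy table. As a minimal sketch of how the valid_vcs_rules tuple above could be matched against a repository URL (the helper name repo_path_from_url is hypothetical, not scriptworker's actual API):

import re
from urllib.parse import urlparse

def repo_path_from_url(rules, url):
    """Return the matched repo path for ``url``, or None.

    ``rules`` is a tuple of frozendicts shaped like ``valid_vcs_rules`` above.
    """
    parsed = urlparse(url)
    for rule in rules:
        if parsed.scheme not in rule["schemes"]:
            continue
        if parsed.netloc not in rule["netlocs"]:
            continue
        for regex in rule["path_regexes"]:
            match = re.match(regex, parsed.path)
            if match:
                return match.group("path")
    return None

# repo_path_from_url(rules, "https://hg.mozilla.org/mozilla-central/rev/abc123")
# -> "/mozilla-central"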
Example #56
    def test_state_filter_difference_no_include_other_minus_include_other(
            self):
        """
        Tests the StateFilter.approx_difference method
        where, in a.approx_difference(b), only b has the include_others flag set.
        """
        # (wildcard on state keys) - (wildcard on state keys):
        self.assert_difference(
            StateFilter.freeze(
                {
                    EventTypes.Member: None,
                    EventTypes.Create: None
                },
                include_others=False,
            ),
            StateFilter.freeze(
                {
                    EventTypes.Member: None,
                    EventTypes.CanonicalAlias: None
                },
                include_others=True,
            ),
            StateFilter(types=frozendict(), include_others=False),
        )

        # (wildcard on state keys) - (specific state keys)
        # This one is an over-approximation because we can't represent
        # 'all state keys except a few named examples'
        self.assert_difference(
            StateFilter.freeze({EventTypes.Member: None},
                               include_others=False),
            StateFilter.freeze(
                {EventTypes.Member: {"@wombat:spqr"}},
                include_others=True,
            ),
            StateFilter.freeze({EventTypes.Member: None},
                               include_others=False),
        )

        # (wildcard on state keys) - (no state keys)
        self.assert_difference(
            StateFilter.freeze(
                {EventTypes.Member: None},
                include_others=False,
            ),
            StateFilter.freeze(
                {
                    EventTypes.Member: set(),
                },
                include_others=True,
            ),
            StateFilter.freeze(
                {EventTypes.Member: None},
                include_others=False,
            ),
        )

        # (specific state keys) - (wildcard on state keys):
        self.assert_difference(
            StateFilter.freeze(
                {
                    EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"},
                    EventTypes.CanonicalAlias: {""},
                },
                include_others=False,
            ),
            StateFilter.freeze(
                {EventTypes.Member: None},
                include_others=True,
            ),
            StateFilter(
                types=frozendict(),
                include_others=False,
            ),
        )

        # (specific state keys) - (specific state keys)
        # This one is an over-approximation because we can't represent
        # 'all state keys except a few named examples'
        self.assert_difference(
            StateFilter.freeze(
                {
                    EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"},
                    EventTypes.CanonicalAlias: {""},
                },
                include_others=False,
            ),
            StateFilter.freeze(
                {
                    EventTypes.Member: {"@wombat:spqr"},
                },
                include_others=True,
            ),
            StateFilter.freeze(
                {
                    EventTypes.Member: {"@spqr:spqr"},
                },
                include_others=False,
            ),
        )

        # (specific state keys) - (no state keys)
        self.assert_difference(
            StateFilter.freeze(
                {
                    EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"},
                    EventTypes.CanonicalAlias: {""},
                },
                include_others=False,
            ),
            StateFilter.freeze(
                {
                    EventTypes.Member: set(),
                },
                include_others=True,
            ),
            StateFilter.freeze(
                {
                    EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"},
                },
                include_others=False,
            ),
        )
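The cases above pin down the key contract exercised here: approx_difference may over-approximate the true difference, but must never under-approximate it. A minimal sketch of that contract (import paths vary across Synapse versions; synapse.types.state is assumed here):

from frozendict import frozendict
from synapse.api.constants import EventTypes
from synapse.types.state import StateFilter  # assumed import path

a = StateFilter.freeze({EventTypes.Member: {"@spqr:spqr"}}, include_others=False)
b = StateFilter.freeze({EventTypes.Member: None}, include_others=True)

# b matches every state event, so the true difference is empty; here the
# approximation returns exactly the empty filter.
assert a.approx_difference(b) == StateFilter(types=frozendict(), include_others=False)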
Example #57
def _isotype_annotation_dagfuncs(
    isotypemap: Mapping[str, str] = None,
    isotypemap_file: str = None,
    idmap: Mapping[str, Set[str]] = None,
    idmap_file: str = None,
    isotype_names: Sequence[str] = None,
) -> hdag.utils.AddFuncDict:
    """Return functions for annotating history DAG nodes with inferred
    isotypes.

    Args:
        isotypemap: A dictionary mapping original IDs to observed isotype names
        isotypemap_file: A csv file providing an `isotypemap`
        idmap: A dictionary mapping unique sequence IDs to sets of original IDs of observed sequences
        idmap_file: A csv file providing an `idmap`
        isotype_names: A sequence of isotype names containing values in `isotypemap`, in the correct switching order

    Returns:
        A :class:`historydag.utils.AddFuncDict` which may be passed as keyword arguments
        to the :meth:`historydag.HistoryDag.weight_count`, :meth:`historydag.HistoryDag.trim_optimal_weight`,
        or :meth:`historydag.HistoryDag.optimal_weight_annotate`
        methods to trim or annotate a :class:`historydag.HistoryDag` according to isotype parsimony.
        Weight format is ``frozendict[Isotype, int]``, where each value is the count of sequences
        observed with that isotype, and 0 for unobserved (internal) nodes.
    """
    if isotype_names is None:
        isotype_names = default_isotype_order
    if isotypemap_file and isotypemap is None:
        with open(isotypemap_file, "r") as fh:
            isotypemap = dict(
                map(lambda x: x.strip(), line.split(",")) for line in fh)
    elif isotypemap is None:
        # Neither a mapping nor a file was provided.
        raise ValueError("Either isotypemap or isotypemap_file is required")

    if idmap is None:
        if idmap_file is None:
            raise TypeError(
                "either idmap or idmap_file is required for isotyping")
        else:
            with open(idmap_file, "r") as fh:
                idmap = {}
                for line in fh:
                    seqid, cell_ids = line.rstrip().split(",")
                    cell_idset = {
                        cell_id
                        for cell_id in cell_ids.split(":") if cell_id
                    }
                    if len(cell_idset) > 0:
                        idmap[seqid] = cell_idset
    newidmap = explode_idmap(idmap, isotypemap)
    newisotype = IsotypeTemplate(isotype_names, weight_matrix=None).new

    def start_func(n: hdag.HistoryDagNode):
        # Leaf weight: how many original sequence IDs were observed with each
        # inferred isotype for this node's sequence ID.
        seqid = n.attr["name"]
        if seqid in newidmap:
            return frozendict({
                newisotype(name): len(oldidset)
                for name, oldidset in newidmap[seqid].items()
            })
        else:
            return frozendict()

    def sumweights(weights: Sequence[frozendict]):
        # Accumulate child weights by keeping the minimal isotype in switching
        # order; the count 0 marks an inferred (unobserved) internal node.
        ts = [item for i in weights for item in i.keys()]
        if ts:
            return frozendict({min(ts): 0})
        else:
            return frozendict()

    return hdag.utils.AddFuncDict(
        {
            "start_func": start_func,
            "edge_weight_func": lambda n1, n2: frozendict(),
            "accum_func": sumweights,
        },
        name="Inferred Isotype",
    )
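As the docstring notes, the returned AddFuncDict unpacks directly into historydag's weight-counting methods. A minimal usage sketch (dag is assumed to be a pre-built historydag.HistoryDag, and the CSV paths are placeholders):

funcs = _isotype_annotation_dagfuncs(
    isotypemap_file="isotypemap.csv",  # placeholder path
    idmap_file="idmap.csv",            # placeholder path
)
# AddFuncDict is a dict of the three callbacks built above, so it can be
# splatted straight into weight_count:
isotype_weights = dag.weight_count(**funcs)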
Example #58
 def __init__(self, intervals, parts):
     super(DataSpace, self).__init__(intervals)
     self._parts = frozendict(parts)
Example #59
 def __init__(self, attr_map=None):
     if attr_map is None:
         attr_map = {}
     self._attr_map = frozendict(attr_map)
Example #60
 def __init__(self, intervals, sub_iterators=None, directions=None):
     super(IterationSpace, self).__init__(intervals)
     self._sub_iterators = frozendict(sub_iterators or {})
     self._directions = frozendict(directions or {})
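Examples #58 through #60 (Devito's DataSpace, a generic attribute holder, and IterationSpace) all use the same idiom: copy the caller's mapping into a frozendict at construction time, so later mutation of the caller's dict cannot leak in, and the stored state is itself immutable and hashable. A minimal standalone sketch of that defensive-copy idiom (the Config class is hypothetical):

from frozendict import frozendict

class Config:
    def __init__(self, options=None):
        # Defensive copy: freeze whatever mapping the caller hands us.
        self._options = frozendict(options or {})

opts = {"verbose": True}
cfg = Config(opts)
opts["verbose"] = False                 # mutating the caller's dict...
assert cfg._options["verbose"] is True  # ...does not affect the frozen copy
try:
    cfg._options["verbose"] = False
except TypeError:
    pass  # frozendict rejects item assignment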