Example #1
File: test_gala.py Project: jefferis/gala
def test_generate_examples_4_channel():
    """Run a flat epoch and an active epoch of learning, compare learned sets.

    The *order* of the edges learned by learn_flat is not guaranteed, so we
    test the *set* of learned edges for the flat epoch. The learned epoch
    *should* have a fixed order, so we test array equality.

    Uses 4 channel probabilities.
    """
    g_train = agglo.Rag(ws_train, p4_train, feature_manager=fc)
    _, alldata = g_train.learn_agglomerate(gt_train, fc, classifier="naive bayes")
    testfn = (
        "example-data/train-naive-bayes-merges4-py3.pck"
        if PYTHON_VERSION == 3
        else "example-data/train-naive-bayes-merges4-py2.pck"
    )
    exp0, exp1 = load_pickle(os.path.join(rundir, testfn))
    expected_edges = set(map(tuple, exp0))
    edges = set(map(tuple, alldata[0][3]))
    merges = alldata[1][3]
    assert edges == expected_edges
    assert_array_equal(merges, exp1)
    nb = GaussianNB().fit(alldata[0][0], alldata[0][1][:, 0])
    nbexp = joblib.load(os.path.join(rundir, "example-data/naive-bayes-4.joblib"))
    assert_allclose(nb.theta_, nbexp.theta_, atol=1e-10)
    assert_allclose(nb.sigma_, nbexp.sigma_, atol=1e-4)
    assert_allclose(nb.class_prior_, nbexp.class_prior_, atol=1e-7)
Example #2
    def assemble(cls, user_id, methods, scope_id, expires_at, audit_ids,
                 federated_info):
        """Assemble the project-scoped payload of a federated token.

        :param user_id: ID of the user in the token request
        :param methods: list of authentication methods used
        :param scope_id: ID of the project or domain to scope to
        :param expires_at: datetime of the token's expiration
        :param audit_ids: list of the token's audit IDs
        :param federated_info: dictionary containing the identity provider ID,
                               protocol ID, federated domain ID and group IDs
        :returns: the payload of a federated token

        """
        b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id)
        methods = auth_plugins.convert_method_list_to_integer(methods)
        b_scope_id = cls.attempt_convert_uuid_hex_to_bytes(scope_id)
        b_group_ids = list(map(cls.pack_group_id,
                               federated_info['group_ids']))
        b_idp_id = cls.attempt_convert_uuid_hex_to_bytes(
            federated_info['idp_id'])
        protocol_id = federated_info['protocol_id']
        expires_at_int = cls._convert_time_string_to_float(expires_at)
        b_audit_ids = list(map(provider.random_urlsafe_str_to_bytes,
                               audit_ids))

        return (b_user_id, methods, b_scope_id, b_group_ids, b_idp_id,
                protocol_id, expires_at_int, b_audit_ids)
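The assemble/disassemble pair above relies on packing UUID hex strings into raw bytes. A minimal standalone sketch of that trick (not the Keystone implementation; the helper's internals are an assumption, but the tuple return shape matches the usage above):

import uuid

def attempt_convert_uuid_hex_to_bytes(value):
    # A 32-character UUID hex string shrinks to 16 raw bytes; the flag records
    # whether the conversion happened so disassembly knows how to undo it.
    try:
        return True, uuid.UUID(hex=value).bytes
    except ValueError:
        # Not UUID-shaped (e.g. an LDAP DN); keep the original string.
        return False, value

print(attempt_convert_uuid_hex_to_bytes(uuid.uuid4().hex))  # (True, b'...16 bytes...')
print(attempt_convert_uuid_hex_to_bytes('cn=admin'))        # (False, 'cn=admin')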
Example #3
    def build(self, outputs, rule, inputs=None, implicit=None, order_only=None,
              variables=None):
        outputs = self._as_list(outputs)
        all_inputs = self._as_list(inputs)[:]
        out_outputs = list(map(escape_path, outputs))
        all_inputs = list(map(escape_path, all_inputs))

        if implicit:
            implicit = list(map(escape_path, self._as_list(implicit)))
            all_inputs.append('|')
            all_inputs.extend(implicit)
        if order_only:
            order_only = list(map(escape_path, self._as_list(order_only)))
            all_inputs.append('||')
            all_inputs.extend(order_only)

        self._line('build %s: %s' % (' '.join(out_outputs),
                                     ' '.join([rule] + all_inputs)))

        if variables:
            if isinstance(variables, dict):
                iterator = iter(list(variables.items()))
            else:
                iterator = iter(variables)

            for key, val in iterator:
                self.variable(key, val, indent=1)

        return outputs
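A hedged usage sketch of the method above, assuming `w` is an instance of the ninja_syntax-style Writer class this method belongs to, wrapping an open text file; the rule and file names are made up. `|` marks implicit inputs and `||` order-only inputs:

w.build('out.o', 'cc', inputs='src.c', implicit='gen.h', order_only='prep',
        variables={'cflags': '-O2'})
# expected output, roughly:
#   build out.o: cc src.c | gen.h || prep
#     cflags = -O2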
Example #4
File: test_gala.py Project: jefferis/gala
def test_generate_examples_1_channel():
    """Run a flat epoch and an active epoch of learning, compare learned sets.

    The *order* of the edges learned by learn_flat is not guaranteed, so we
    test the *set* of learned edges for the flat epoch. The learned epoch
    *should* have a fixed order, so we test array equality.

    Uses 1 channel probabilities.
    """
    g_train = agglo.Rag(ws_train, pr_train, feature_manager=fc)
    _, alldata = g_train.learn_agglomerate(gt_train, fc, classifier="naive bayes")
    testfn = (
        "example-data/train-naive-bayes-merges1-py3.pck"
        if PYTHON_VERSION == 3
        else "example-data/train-naive-bayes-merges1-py2.pck"
    )
    exp0, exp1 = load_pickle(os.path.join(rundir, testfn))
    expected_edges = set(map(tuple, exp0))
    edges = set(map(tuple, alldata[0][3]))
    merges = alldata[1][3]
    assert edges == expected_edges
    # concordant is the maximum edges concordant in the Python 2.7 version.
    # The remaining edges diverge because of apparent differences
    # between Linux and OSX floating point handling.
    concordant = slice(None, 171) if PYTHON_VERSION == 2 else slice(None)
    assert_array_equal(merges[concordant], exp1[concordant])
    nb = GaussianNB().fit(alldata[0][0], alldata[0][1][:, 0])
    nbexp = joblib.load(os.path.join(rundir, "example-data/naive-bayes-1.joblib"))
    assert_allclose(nb.theta_, nbexp.theta_, atol=1e-10)
    assert_allclose(nb.sigma_, nbexp.sigma_, atol=1e-4)
    assert_allclose(nb.class_prior_, nbexp.class_prior_, atol=1e-7)
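A self-contained sketch of the two comparison styles the docstring describes (plain numpy, no gala objects): an order-insensitive check on the *set* of edges versus an order-sensitive array equality.

import numpy as np
from numpy.testing import assert_array_equal

a = np.array([[1, 2], [3, 4]])
b = np.array([[3, 4], [1, 2]])
assert set(map(tuple, a)) == set(map(tuple, b))   # same edges, order ignored
assert_array_equal(a, a)                          # exact order required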
Example #5
File: django.py Project: fenildf/fabricio
    def get_revert_migrations(self, current_migrations, backup_migrations):
        current_migrations, all_migrations = itertools.tee(reversed(list(map(
            Migration,
            filter(None, current_migrations.splitlines()),
        ))))
        all_migrations = utils.OrderedSet(all_migrations)

        backup_migrations = reversed(list(map(
            Migration,
            filter(None, backup_migrations.splitlines()),
        )))

        revert_migrations = collections.OrderedDict()

        while True:
            while True:
                backup_migration = next(backup_migrations, None)
                if not backup_migration or backup_migration in all_migrations:
                    break
            for current_migration in current_migrations:
                if current_migration == backup_migration:
                    break
                revert_migration = self._get_parent_migration(
                    current_migration,
                    migrations=all_migrations,
                )
                revert_migrations[current_migration.app] = revert_migration

            if backup_migration is None:
                return revert_migrations.values()
Example #6
    def disassemble(cls, payload):
        """Validate a project-scoped federated payload.

        :param payload: the payload of a federated token
        :returns: a tuple containing the user_id, auth methods, scope_id,
                  expiration time (as str), audit_ids, and a dictionary
                  containing federated information such as the identity
                  provider ID, the protocol ID, the federated domain ID and
                  group IDs

        """
        (is_stored_as_bytes, user_id) = payload[0]
        if is_stored_as_bytes:
            user_id = cls.attempt_convert_uuid_bytes_to_hex(user_id)
        methods = auth_plugins.convert_integer_to_method_list(payload[1])
        (is_stored_as_bytes, scope_id) = payload[2]
        if is_stored_as_bytes:
            scope_id = cls.attempt_convert_uuid_bytes_to_hex(scope_id)
        group_ids = list(map(cls.unpack_group_id, payload[3]))
        (is_stored_as_bytes, idp_id) = payload[4]
        if is_stored_as_bytes:
            idp_id = cls.attempt_convert_uuid_bytes_to_hex(idp_id)
        protocol_id = payload[5]
        expires_at_str = cls._convert_int_to_time_string(payload[6])
        audit_ids = list(map(provider.base64_encode, payload[7]))
        federated_info = dict(idp_id=idp_id, protocol_id=protocol_id,
                              group_ids=group_ids)
        return (user_id, methods, scope_id, expires_at_str, audit_ids,
                federated_info)
Example #7
    def test_xform_parsing_with_stock_questions(self):
        form_xml = self.get_xml('stock_form')
        schema = FormExportDataSchema._generate_schema_from_xform(
            XForm(form_xml),
            ['en'],
            self.app_id,
            1
        )
        self.assertEqual(len(schema.group_schemas), 1)
        group_schema = schema.group_schemas[0]

        self.assertEqual(len(group_schema.items), 6)
        self.assertTrue(all([item.doc_type == 'StockItem' for item in group_schema.items]))
        for parent_attr in ['@type', '@entity-id', '@date', '@section-id']:
            self.assertTrue(any(map(
                lambda item: item.path == [
                    PathNode(name='form'),
                    PathNode(name='balance:balance_one'),
                    PathNode(name=parent_attr),
                ],
                group_schema.items,
            )))

        for entry_attr in ['@id', '@quantity']:
            self.assertTrue(any(map(
                lambda item: item.path == [
                    PathNode(name='form'),
                    PathNode(name='balance:balance_one'),
                    PathNode(name='entry'),
                    PathNode(name=entry_attr),
                ],
                group_schema.items,
            )))
Example #8
File: argon_calc.py Project: NMGRL/pychron
    def setUp(self):
        rid = '60754-10'
        config = ConfigParser()
        p = '/Users/ross/Sandbox/pychron_validation_data.cfg'
        config.read(p)

        signals = [list(map(float, x.split(','))) for x in [config.get('Signals-{}'.format(rid), k)
                        for k in ['ar40', 'ar39', 'ar38', 'ar37', 'ar36']]]

        blanks = [list(map(float, x.split(','))) for x in [config.get('Blanks-{}'.format(rid), k)
                        for k in ['ar40', 'ar39', 'ar38', 'ar37', 'ar36']]]

        irradinfo = [list(map(float, x.split(','))) for x in [config.get('irrad-{}'.format(rid), k) for k in ['k4039', 'k3839', 'ca3937', 'ca3837', 'ca3637', 'cl3638']]]

        j = config.get('irrad-{}'.format(rid), 'j')
        j = [float(x) for x in j.split(',')]
        baselines = [(0, 0), (0, 0), (0, 0), (0, 0), (0, 0)]
        backgrounds = [(0, 0), (0, 0), (0, 0), (0, 0), (0, 0)]

        ar37df = config.getfloat('irrad-{}'.format(rid), 'ar37df')
        t = math.log(ar37df) / (constants.lambda_37.nominal_value * 365.25)
        irradinfo.append(t)

        # load results
        r = 'results-{}'.format(rid)
        self.age = config.getfloat(r, 'age')
        self.rad4039 = config.getfloat(r, 'rad4039')
        self.ca37k39 = config.getfloat(r, 'ca37k39')


        self.age_dict = calculate_arar_age(signals, baselines, blanks, backgrounds, j, irradinfo)
Example #9
File: base.py Project: merll/docker-map
    def get_network_create_endpoint_kwargs(self, action, endpoint_config, kwargs=None):
        """
        Generates keyword arguments for Docker's ``create_endpoint_config`` utility / ``EndpointConfig`` type as well
        as for ``connect_container_to_network``.

        :param action: Action configuration.
        :type action: ActionConfig
        :param endpoint_config: Network endpoint configuration.
        :type endpoint_config: dockermap.map.input.NetworkEndpoint
        :param kwargs: Additional keyword arguments to complement or override the configuration-based values.
        :type kwargs: dict
        :return: Resulting keyword arguments.
        :rtype: dict
        """
        map_name = action.config_id.map_name
        policy = self._policy
        c_kwargs = dict(
            ipv4_address=resolve_value(endpoint_config.ipv4_address),
            ipv6_address=resolve_value(endpoint_config.ipv6_address),
        )
        if endpoint_config.aliases:
            c_kwargs['aliases'] = list(map(resolve_value, endpoint_config.aliases))
        if endpoint_config.links:
            c_kwargs['links'] = [(policy.cname(map_name, l_name), alias or policy.get_hostname(l_name))
                                 for l_name, alias in endpoint_config.links]
        if endpoint_config.link_local_ips:
            c_kwargs['link_local_ips'] = list(map(resolve_value, endpoint_config.link_local_ips))
        update_kwargs(c_kwargs, kwargs)
        return c_kwargs
Example #10
File: newick.py Project: Ward9250/ete
def _get_features_string(self, features=None):
    """ Generates the extended newick string NHX with extra data about
    a node. """
    string = ""
    if features is None:
        features = []
    elif features == []:
        features = self.features

    for pr in features:
        if hasattr(self, pr):
            raw = getattr(self, pr)
            if type(raw) in ITERABLE_TYPES:
                raw = '|'.join(map(str, raw))
            elif type(raw) == dict:
                raw = '|'.join("%s-%s" % (k, v) for k, v in six.iteritems(raw))
            elif type(raw) == str:
                pass
            else:
                raw = str(raw)

            value = re.sub("[" + _ILEGAL_NEWICK_CHARS + "]", "_", raw)
            if string != "":
                string += ":"
            string += "%s=%s" % (pr, str(value))
    if string != "":
        string = "[&&NHX:"+string+"]"

    return string
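A standalone check of the dict branch above (each key/value pair is rendered as "key-value" and joined with '|'), plus a rough illustration of the NHX block the function returns; the node features used are made up.

import six

raw = {'a': 1, 'b': 2}
print('|'.join("%s-%s" % (k, v) for k, v in six.iteritems(raw)))   # a-1|b-2
# For a node carrying features name='A' and support=0.9 the returned block
# would look roughly like:  [&&NHX:name=A:support=0.9]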
Example #11
File: plot.py Project: AntoinedDMO/iris
def _get_plot_defn(cube, mode, ndims=2):
    """
    Return data and plot-axis coords given a cube & a mode of either
    POINT_MODE or BOUND_MODE.

    """
    if cube.ndim != ndims:
        msg = 'Cube must be %s-dimensional. Got %s dimensions.'
        raise ValueError(msg % (ndims, cube.ndim))

    # Start by taking the DimCoords from each dimension.
    coords = [None] * ndims
    for dim_coord in cube.dim_coords:
        dim = cube.coord_dims(dim_coord)[0]
        coords[dim] = dim_coord

    # When appropriate, restrict to 1D with bounds.
    if mode == iris.coords.BOUND_MODE:
        coords = list(map(_valid_bound_coord, coords))

    def guess_axis(coord):
        axis = None
        if coord is not None:
            axis = iris.util.guess_coord_axis(coord)
        return axis

    # Allow DimCoords in aux_coords to fill in for missing dim_coords.
    for dim, coord in enumerate(coords):
        if coord is None:
            aux_coords = cube.coords(dimensions=dim)
            aux_coords = [coord for coord in aux_coords
                          if isinstance(coord, iris.coords.DimCoord)]
            if aux_coords:
                key_func = lambda coord: coord._as_defn()
                aux_coords.sort(key=key_func)
                coords[dim] = aux_coords[0]

    if mode == iris.coords.POINT_MODE:
        # Allow multi-dimensional aux_coords to override the dim_coords
        # along the Z axis. This results in a preference for using the
        # derived altitude over model_level_number or level_height.
        # Limit to Z axis to avoid preferring latitude over grid_latitude etc.
        axes = list(map(guess_axis, coords))
        axis = 'Z'
        if axis in axes:
            for coord in cube.coords(dim_coords=False):
                if max(coord.shape) > 1 and \
                        iris.util.guess_coord_axis(coord) == axis:
                    coords[axes.index(axis)] = coord

    # Re-order the coordinates to achieve the preferred
    # horizontal/vertical associations.
    def sort_key(coord):
        order = {'X': 2, 'T': 1, 'Y': -1, 'Z': -2}
        axis = guess_axis(coord)
        return (order.get(axis, 0), coord and coord.name())
    sorted_coords = sorted(coords, key=sort_key)

    transpose = (sorted_coords != coords)
    return PlotDefn(sorted_coords, transpose)
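A toy illustration (plain strings, no iris coordinates) of the ordering that sort_key imposes above: Z and Y axes sort to the front (negative keys), unknown axes stay in the middle, and T and X go last.

order = {'X': 2, 'T': 1, 'Y': -1, 'Z': -2}
print(sorted(['X', 'Y', 'T', 'Z', None], key=lambda axis: order.get(axis, 0)))
# ['Z', 'Y', None, 'T', 'X']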
Example #12
def build_clause(query, attributes):
    # Iterate through each query segment.
    clause = None
    last = None
    for seg in query.segments:
        # Get the attribute in question.
        attribute = attributes[seg.path[0]]

        # Replace the initial path segment with the expanded
        # attribute path.
        seg.path[0:1] = attribute.path.split('.')

        # Boolean's should use `exact` rather than `iexact`.
        if attribute.type is bool:
            op = '__exact'
        else:
            op = OPERATOR_MAP[seg.operator]

        # Build the path from the segment.
        path = '__'.join(seg.path) + op

        # Construct a Q-object from the segment.
        q = reduce(operator.or_,
                   map(lambda x: Q((path, x)),
                       map(attribute.try_clean, seg.values)))

        # Combine the segment with the last.
        clause = last.combinator(clause, q) if last is not None else q
        last = seg

    # Return the constructed clause.
    return clause
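A standalone sketch of the reduce(operator.or_, map(...)) pattern used above, with sets standing in for Django Q objects (both support the `|` operator): every candidate value contributes one term, and the terms collapse into a single combined object.

import operator
from functools import reduce

values = ['alice', 'bob']
terms = map(lambda x: {('name__iexact', x)}, values)   # one "Q" per value
print(reduce(operator.or_, terms))
# {('name__iexact', 'alice'), ('name__iexact', 'bob')}  (set order may vary)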
Example #13
 def disassemble(cls, payload):
     (is_stored_as_bytes, user_id) = payload[0]
     if is_stored_as_bytes:
         user_id = cls.convert_uuid_bytes_to_hex(user_id)
     methods = auth_plugins.convert_integer_to_method_list(payload[1])
     (is_stored_as_bytes, scope_id) = payload[2]
     if is_stored_as_bytes:
         scope_id = cls.convert_uuid_bytes_to_hex(scope_id)
     project_id = (
         scope_id
         if cls.version == FederatedProjectScopedPayload.version else None)
     domain_id = (
         scope_id
         if cls.version == FederatedDomainScopedPayload.version else None)
     group_ids = list(map(cls.unpack_group_id, payload[3]))
     (is_stored_as_bytes, idp_id) = payload[4]
     if is_stored_as_bytes:
         idp_id = cls.convert_uuid_bytes_to_hex(idp_id)
     protocol_id = payload[5]
     expires_at_str = cls._convert_float_to_time_string(payload[6])
     audit_ids = list(map(provider.base64_encode, payload[7]))
     federated_info = dict(idp_id=idp_id, protocol_id=protocol_id,
                           group_ids=group_ids)
     trust_id = None
     access_token_id = None
     return (user_id, methods, project_id, domain_id, expires_at_str,
             audit_ids, trust_id, federated_info, access_token_id)
Example #14
 def _from_word2vec_text(fname):
   with _open(fname, 'rb') as fin:
     words = []
     header = unicode(fin.readline())
     vocab_size, layer1_size = list(map(int, header.split())) # throws for invalid file format
     vectors = []
     for line_no, line in enumerate(fin):
       try:
         parts = unicode(line, encoding="utf-8").strip().split()
       except TypeError as e:
         parts = line.strip().split()
       except Exception as e:
         logger.warning("We ignored line number {} because of erros in parsing"
                         "\n{}".format(line_no, e))
         continue
        # We differ from the Gensim implementation.
        # We assume that one extra column means the word itself
        # contains a space.
       if len(parts) == layer1_size + 1:
         word, weights = parts[0], list(map(float32, parts[1:]))
       elif len(parts) == layer1_size + 2:
         word, weights = parts[:2], list(map(float32, parts[2:]))
         word = u" ".join(word)
       else:
         logger.warning("We ignored line number {} because of unrecognized "
                         "number of columns {}".format(line_no, parts[:-layer1_size]))
         continue
       index = line_no
       words.append(word)
       vectors.append(weights)
     vectors = np.asarray(vectors, dtype=np.float32)
     return words, vectors
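A toy check (three-dimensional vectors assumed) of the space-in-word branch above: one extra column beyond layer1_size is interpreted as a two-token word such as "New York".

layer1_size = 3
parts = u"New York 0.1 0.2 0.3".strip().split()
assert len(parts) == layer1_size + 2
word, weights = u" ".join(parts[:2]), [float(w) for w in parts[2:]]
print(word, weights)   # New York [0.1, 0.2, 0.3]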
Example #15
def normboolclass(typename='NormalizedBool', true=None, false=None,
                 ignore='', caseless=True, spaceless=True,
                 base=NormalizedBool):

    if not issubclass(base, NormalizedBool):
        raise TypeError("'base' is no subclass of normboolclass.base: %s"
                        % base)

    # to be stored as .normalize method of created class
    def normalizer(value):
        """Normalize `value` based on normalizing options
           given to :func:`normboolclass`.

        - Any non-string values are just passed through.
        """
        if not isinstance(value, string_types):
            return value
        return normalize(value, ignore=normalizer.ignore,
          caseless=normalizer.caseless, spaceless=normalizer.spaceless)

    # store the normalizing options
    normalizer.ignore = ignore
    normalizer.caseless = caseless
    normalizer.spaceless = spaceless

    if true:
        true = list(map(normalizer, true))
    if false:
        false = list(map(normalizer, false))

    Bool = boolclass(typename, true=true, false=false, base=base)
    type(Bool).normalize = staticmethod(normalizer)
    return Bool
Example #16
 def update(self):
     """Display the train and dev metrics as they come in."""
     reset_screen(self.print_count)
     self.print_count = 0
     labels = self.results.get_labels(self.experiment_hash)
     if not labels:
         return
     max_len = max(map(len, map(lambda x: x.name, labels)))
     data = {}
     for label in labels:
         data[label] = {
             'state': color_state(self.results.get_state(label)),
             'train_stat': self.results.get_recent(label, 'Train', self.train),
             'train_tick': self.results.get_recent(label, 'Train', 'tick'),
             'dev_stat': self.results.get_recent(label, 'Valid', self.dev),
             'dev_tick': self.results.get_recent(label, 'Valid', 'tick'),
         }
     for label in labels:
         print('{state} {name:{width}} - train ({train_metric}): {train_stat:.3f} at {train_tick} dev ({metric}): {dev_stat:.3f} at {dev_tick}'.format(
             name=label.name,
             train_metric=self.train,
             metric=self.dev,
             width=max_len,
             **data[label])
         )
         self.print_count += 1
Example #17
File: api.py Project: J4LP/evesrp
def list_entities():
    """Return a JSON object with a list of all of the specified entity type.

    Example output::
        {
          entities: [
            {name: 'Bar', id: 1, source: 'Auth Source', type: 'User'},
            {name: 'Foo', id: 0, source: 'Another Auth Source', type: 'Group'},
            {name: 'Baz', id: 20, source: 'Auth Source', type: 'Group'}
          ]
        }

    This method is only accessible to administrators.

    :param str entity_type: Either ``'user'`` or ``'group'``.
    """
    if not current_user.admin and not \
            current_user.has_permission(PermissionType.admin):
        abort(403)
    user_query = db.session.query(User.id, User.name, User.authmethod)
    group_query = db.session.query(Group.id, Group.name, Group.authmethod)
    users = map(lambda e: {
            u'id': e.id,
            u'name': e.name,
            u'type': u'User',
            u'source': e.authmethod}, user_query)
    groups = map(lambda e: {
            u'id': e.id,
            u'name': e.name,
            u'type': u'Group',
            u'source': e.authmethod}, group_query)
    return jsonify(entities=chain(users, groups))
Example #18
def _argsdicts( args, mydict ):
    """A utility generator that pads argument list and dictionary values, will only be called with len( args ) = 0, 1."""
    
    if len( args ) == 0:
        args = None, 
    elif len( args ) == 1:
        args = _totuple( args[0] )
    else:
        raise Exception( "We should have never gotten here." )

    mykeys = list( mydict.keys( ) )
    myvalues = list( map( _totuple, list( mydict.values( ) ) ) )

    maxlength = max( list( map( len, [ args ] + myvalues ) ) )

    for i in range( maxlength ):
        thisdict = { }
        for key, value in zip( mykeys, myvalues ):
            try:
                thisdict[ key ] = value[i]
            except IndexError:
                thisdict[ key ] = value[-1]
        try:
            thisarg = args[i]
        except IndexError:
            thisarg = args[-1]

        yield thisarg, thisdict
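A self-contained illustration of the padding rule the generator implements: shorter value tuples are extended by repeating their last element until every key yields a value on each iteration.

values = {'a': (1, 2, 3), 'b': (9,)}
maxlength = max(map(len, values.values()))
for i in range(maxlength):
    print({key: val[i] if i < len(val) else val[-1] for key, val in values.items()})
# {'a': 1, 'b': 9}
# {'a': 2, 'b': 9}
# {'a': 3, 'b': 9}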
Example #19
File: defaults.py Project: NMGRL/pychron
def parse_irradiation_tray_map(p):
    """
        return list of  x,y,r tuples or None if exception
    """
    try:
        with open(p, 'r') as rfile:
            h = rfile.readline()
            _, diam = list(map(str.strip, h.split(',')))
            holes = []
            for i, l in enumerate(rfile):
                try:
                    args = list(map(float, l.strip().split(',')))
                    if len(args) == 2:
                        r = diam
                    else:
                        r = args[2]

                    holes.append((args[0], args[1], float(r)))

                except ValueError:
                    break

            return holes
    except Exception as e:
        traceback.print_exc()
        return
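A hedged sketch of the text format this parser appears to expect (the layout is inferred from the code, not from pychron documentation): a "name, diameter" header followed by one "x, y[, r]" line per hole, where a missing radius falls back to the header diameter.

example = "tray-A, 1.5\n0.0, 0.0\n1.0, 2.0, 0.75\n"
# parse_irradiation_tray_map on a file with this content should yield
#   [(0.0, 0.0, 1.5), (1.0, 2.0, 0.75)]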
Example #20
File: sorted.py Project: DMOJ/judge
def check(process_output, judge_output, split_on='lines', **kwargs):
    split_pattern = {
        'lines': b'[\r\n]',
        'whitespace': b'[\s]',
    }.get(split_on)

    if not split_pattern:
        raise InternalError('invalid `split_on` mode')

    process_lines = list(filter(None, resplit(split_pattern, utf8bytes(process_output))))
    judge_lines = list(filter(None, resplit(split_pattern, utf8bytes(judge_output))))

    if len(process_lines) != len(judge_lines):
        return False

    if split_on == 'lines':
        process_lines = list(map(six.binary_type.split, process_lines))
        judge_lines = list(map(six.binary_type.split, judge_lines))

    process_lines.sort()
    judge_lines.sort()

    for process_line, judge_line in zip(process_lines, judge_lines):
        if process_line != judge_line:
            return False

    return True
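A standalone illustration of the order-insensitive comparison above, using plain re.split in place of the judge's resplit/utf8bytes helpers: both outputs are split, sorted, and compared element-wise, so any permutation of the lines is accepted.

import re

process = sorted(filter(None, re.split(b'[\r\n]', b'2 b\r\n1 a\n')))
judge = sorted(filter(None, re.split(b'[\r\n]', b'1 a\n2 b\n')))
print(process == judge)   # True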
Example #21
    def each(self, *funcs):
        """
        Call `func` on each element in the collection.

        If multiple functions are provided, each item
        in the output will be a tuple of each
        func(item) in self.

        Returns a new Collection.

        Example:

            >>> col = Collection([Scalar(1), Scalar(2)])
            >>> col.each(Q * 10)
            Collection([Scalar(10), Scalar(20)])
            >>> col.each(Q * 10, Q - 1)
            Collection([Scalar((10, 0)), Scalar((20, 1))])
        """

        funcs = list(map(_make_callable, funcs))

        if len(funcs) == 1:
            return Collection(map(funcs[0], self._items))

        tupler = lambda item: Scalar(
            tuple(_unwrap(func(item)) for func in funcs))
        return Collection(map(tupler, self._items))
Example #22
File: models.py Project: BlueNexus/evesrp
 def _json(self, extended=False):
     try:
         parent = super(Request, self)._json(extended)
     except AttributeError:
         parent = {}
     parent[u'href'] = url_for('requests.get_request_details',
             request_id=self.id)
     attrs = (u'killmail_url', u'kill_timestamp', u'pilot',
              u'alliance', u'corporation', u'submitter',
              u'division', u'status', u'base_payout', u'payout',
              u'details', u'id', u'ship_type', u'system', u'constellation',
              u'region')
     for attr in attrs:
         if attr == u'ship_type':
             parent['ship'] = self.ship_type
         elif u'payout' in attr:
             payout = getattr(self, attr)
             parent[attr] = payout.currency()
         else:
             parent[attr] = getattr(self, attr)
     parent[u'submit_timestamp'] = self.timestamp
     if extended:
         parent[u'actions'] = map(lambda a: a._json(True), self.actions)
         parent[u'modifiers'] = map(lambda m: m._json(True), self.modifiers)
         parent[u'valid_actions'] = self.valid_actions(current_user)
         parent[u'transformed'] = dict(self.transformed)
     return parent
Example #23
    def print_kernel_info(self, output_file=sys.stdout):
        table = ('     idx |        min        max       step\n' +
                 '---------+---------------------------------\n')
        for l in self._loop_stack:
            table += '{:>8} | {!s:>10} {!s:>10} {:>+10}\n'.format(*l)
        print(prefix_indent('loop stack:        ', table), file=output_file)

        table = ('    name |  offsets   ...\n' +
                 '---------+------------...\n')
        for name, offsets in list(self._sources.items()):
            prefix = '{:>8} | '.format(name)
            right_side = '\n'.join([', '.join(map(tuple.__repr__, o)) for o in offsets])
            table += prefix_indent(prefix, right_side, later_prefix='         | ')
        print(prefix_indent('data sources:      ', table), file=output_file)

        table = ('    name |  offsets   ...\n' +
                 '---------+------------...\n')
        for name, offsets in list(self._destinations.items()):
            prefix = '{:>8} | '.format(name)
            right_side = '\n'.join([', '.join(map(tuple.__repr__, o)) for o in offsets])
            table += prefix_indent(prefix, right_side, later_prefix='         | ')
        print(prefix_indent('data destinations: ', table), file=output_file)

        table = (' op | count \n' +
                 '----+-------\n')
        for op, count in list(self._flops.items()):
            table += '{:>3} | {:>4}\n'.format(op, count)
        table += '     =======\n'
        table += '      {:>4}'.format(sum(self._flops.values()))
        print(prefix_indent('FLOPs:     ', table), file=output_file)
Example #24
File: glyphset.py Project: Vasyka/hat
    def __call__(self, vals):
        if not self.colMajor:
            shapes = [list(map(lambda f: f(val), self.fns)) for val in vals]
        else:
            shapes = [list(map(lambda f: f(val), self.fns)) for val in zip(*vals)]

        return shapes
Example #25
def sub_miller(segments):
    '''
    Calculates Miller indices from segments.

    Algorithm:

    1. Obtain the inverted fractions from the segments.

    2. Find the common denominator of the inverted fractions.

    3. Bring the fractions to the common denominator and discard the denominator.

    4. Return the obtained values.

    Args:
        segments: list of 3 floats, the distances the plane cuts on the x, y, z axes.
            Any nonzero value means the plane does not pass through the origin,
            i.e. the shift of the origin has already been done.

    Returns:
        String that represents Miller indices, e.g: (-6,3,-6) or (2,2,2)
    '''
    fracts = [segment_to_fraction(segment) for segment in segments]
    common_denominator = reduce(lcm, [fract.denominator for fract in fracts])
    miller_indices = ([
        fract.numerator * math.fabs(common_denominator) / fract.denominator
        for fract in fracts
    ])
    return '(' + ','.join(map(str, list(map(decimal.Decimal, miller_indices)))) + ')'
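A hedged worked example, assuming segment_to_fraction(s) returns the inverted fraction 1/s: a plane cutting the axes at 1, 2 and 3 gives inverted fractions 1, 1/2 and 1/3, their common denominator is 6, and scaling each numerator by it yields the Miller indices (6,3,2).

print(sub_miller([1, 2, 3]))   # expected: (6,3,2)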
Example #26
File: util_hash.py Project: Erotemic/utool
def augment_uuid(uuid_, *hashables):
    #from six.moves import reprlib
    #uuidhex_data   = uuid_.get_bytes()
    uuidhex_data   = uuid_.bytes
    #hashable_str    = ''.join(map(repr, hashables))
    # Python 2 and 3 diverge here because repr returns
    # ascii data in python2 and unicode text in python3
    # it would be nice to
    warnings.warn('[ut] should not use repr when hashing', RuntimeWarning)
    def tmprepr(x):
        y = repr(x)
        # hack to remove u prefix
        if isinstance(x, six.string_types):
            if y.startswith('u'):
                y = y[1:]
        return y
    if six.PY2:
        hashable_text = ''.join(map(tmprepr, hashables))
        hashable_data = hashable_text.encode('utf-8')
        #hashable_data = b''.join(map(bytes, hashables))
    elif six.PY3:
        hashable_text    = ''.join(map(tmprepr, hashables))
        hashable_data = hashable_text.encode('utf-8')
        #hashable_data = b''.join(map(bytes, hashables))
    augmented_data   = uuidhex_data + hashable_data
    augmented_uuid_ = hashable_to_uuid(augmented_data)
    return augmented_uuid_
Example #27
 def _printHRInfo(self, dat):
   """
     Produce human-readable summary of available information.
     
     Parameters
     ----------
     dat : dictionary
         Output of `selectByPlanetName`.
   """
   # Search keys with/without errors
   kwe, kwoe = [], []
   for k in six.iterkeys(dat):
     if k.find("_error") != -1:
       # Ignore keys containing the '_error' phrase
       continue
     if k.find("_err_") != -1:
        # Ignore keys containing the '_err_' phrase
       continue        
     if ((k + "_error_min") in dat) or ((k + "_err_min") in dat):
       # Key has error
       kwe.append(k)
     else:
       # Key has no error
       kwoe.append(k)
   
   # Maximal length of column name
   maxlen = max(list(smo.map(lambda x:len(x), self.getColnames())))
   # Get units
   units = {c:self.getUnitOf(c) for c in self.getColnames()}
   # Maximum length of unit
   mlu = max(list(smo.map(lambda x:len(x), units.values())))
   
   lines = []
   
   for k in self.getColnames():
     if (k.find("_error") != -1) or (k.find("_err_") != -1):
       # Ignore keys containing the '_error' phrase
       continue
     if k in kwoe:
       lines.append( ("%" + str(maxlen) + "s") % k + ("  [%" + str(mlu) + "s]  ") % units[k] + str(dat[k]))
     else:
       try:
         # Try _error_ to locate errors
         ep = "_error_"
         lines.append(("%" + str(maxlen) + "s") % k + ("  [%" + str(mlu) + "s]  ") % units[k] + str(dat[k]) + "(+" + str(dat[k+ep+"max"]) + ", -" + str(dat[k+ep+"min"]) + ")")
       except KeyError:
         # Try _err_ to locate errors
         ep = "_err_"
         lines.append(("%" + str(maxlen) + "s") % k + ("  [%" + str(mlu) + "s]  ") % units[k] + str(dat[k]) + "(+" + str(dat[k+ep+"max"]) + ", -" + str(dat[k+ep+"min"]) + ")")
       except:
         raise
   
   # Maximum length of output line
   mll = max(list(smo.map(lambda x:len(x), lines)))
   
   # Print to screen
   print("-"*mll)
   for l in lines :
     print(l)   
   print("-"*mll)    
Example #28
 def disassemble(cls, payload):
     (is_stored_as_bytes, user_id) = payload[0]
     if is_stored_as_bytes:
         user_id = cls.convert_uuid_bytes_to_hex(user_id)
     methods = auth_plugins.convert_integer_to_method_list(payload[1])
     group_ids = list(map(cls.unpack_group_id, payload[2]))
     (is_stored_as_bytes, idp_id) = payload[3]
     if is_stored_as_bytes:
         idp_id = cls.convert_uuid_bytes_to_hex(idp_id)
     else:
         idp_id = idp_id.decode('utf-8')
     protocol_id = payload[4]
     if isinstance(protocol_id, six.binary_type):
         protocol_id = protocol_id.decode('utf-8')
     expires_at_str = cls._convert_float_to_time_string(payload[5])
     audit_ids = list(map(cls.base64_encode, payload[6]))
     federated_info = dict(group_ids=group_ids, idp_id=idp_id,
                           protocol_id=protocol_id)
     system = None
     project_id = None
     domain_id = None
     trust_id = None
     access_token_id = None
     app_cred_id = None
     return (user_id, methods, system, project_id, domain_id,
             expires_at_str, audit_ids, trust_id, federated_info,
             access_token_id, app_cred_id)
Example #29
    def dump_power_calibration(self, coefficients, calibration_path=None):

        #        calibration_path = self._get_calibration_path(calibration_path)
        #        self.info('dumping power calibration {}'.format(calibration_path))

        coeffstr = lambda c: 'calibration coefficients= {}'.format(', '.join(map('{:0.3e}'.format, c)))
        self.info(coeffstr(coefficients))
        #        if bounds:
        #            for coeffs, bi in zip(coefficients, bounds):
        #                self.info('calibration coefficient')
        #            self.info('{} min={:0.2f}, max={:0.2f}'.format(coeffstr(coeffs, *bi)))
        #        else:
        #            self.info(coeffstr(coefficients))
        #
        #        pc = MeterCalibration(coefficients)
        #        pc.bounds = bounds
        #        try:
        #            with open(calibration_path, 'wb') as f:
        #                pickle.dump(pc, f)
        #        except  (pickle.PickleError, EOFError, OSError), e:
        #            self.warning('pickling error {}'.format(e))

        # also update logic board configuration file
        if self.parent is not None:
            lb = self.parent.laser_controller
            config = lb.get_configuration()
            section = 'PowerOutput'
            if not config.has_section(section):
                config.add_section(section)

            config.set(section,
                       'coefficients',
                       ','.join(map('{:0.3e}'.format, coefficients))
                       )
            lb.write_configuration(config)
Example #30
 def disassemble(cls, payload):
     (is_stored_as_bytes, user_id) = payload[0]
     if is_stored_as_bytes:
         user_id = cls.convert_uuid_bytes_to_hex(user_id)
     else:
         # NOTE(cmurphy): The user ID of shadowed federated users is no
         # longer a UUID but a sha256 hash string, and so it should not be
         # converted to a byte string since it is not a UUID format.
          # However, on python3 msgpack returns the serialized input as a
         # byte string anyway. Similar to other msgpack'd values in the
         # payload, we need to explicitly decode it to a string value.
         if six.PY3 and isinstance(user_id, six.binary_type):
             user_id = user_id.decode('utf-8')
     methods = auth_plugins.convert_integer_to_method_list(payload[1])
     group_ids = list(map(cls.unpack_group_id, payload[2]))
     (is_stored_as_bytes, idp_id) = payload[3]
     if is_stored_as_bytes:
         idp_id = cls.convert_uuid_bytes_to_hex(idp_id)
     else:
         idp_id = idp_id.decode('utf-8')
     protocol_id = payload[4]
     if isinstance(protocol_id, six.binary_type):
         protocol_id = protocol_id.decode('utf-8')
     expires_at_str = cls._convert_float_to_time_string(payload[5])
     audit_ids = list(map(cls.base64_encode, payload[6]))
     system = None
     project_id = None
     domain_id = None
     trust_id = None
     access_token_id = None
     app_cred_id = None
     return (user_id, methods, system, project_id, domain_id,
             expires_at_str, audit_ids, trust_id, group_ids, idp_id,
             protocol_id, access_token_id, app_cred_id)
Example #31
 def __call__(self, value):
     return tuple(map(self._dtype, value))
Example #32
    def __init__(self, fig, *args, **kwargs):
        """
        Parameters
        ----------
        fig : :class:`matplotlib.figure.Figure`
        args : tuple (*numRows*, *numCols*, *plotNum*)
            The array of subplots in the figure has dimensions *numRows*,
            *numCols*, and *plotNum* is the number of the subplot
            being created.  *plotNum* starts at 1 in the upper left
            corner and increases to the right.

            If *numRows* <= *numCols* <= *plotNum* < 10, *args* can be the
            decimal integer *numRows* * 100 + *numCols* * 10 + *plotNum*.
        """

        self.figure = fig

        if len(args) == 1:
            if isinstance(args[0], SubplotSpec):
                self._subplotspec = args[0]
            else:
                try:
                    s = str(int(args[0]))
                    rows, cols, num = map(int, s)
                except ValueError:
                    raise ValueError(
                        'Single argument to subplot must be a 3-digit integer')
                self._subplotspec = GridSpec(rows, cols)[num - 1]
                # num - 1 for converting from MATLAB to python indexing
        elif len(args) == 3:
            rows, cols, num = args
            rows = int(rows)
            cols = int(cols)
            if isinstance(num, tuple) and len(num) == 2:
                num = [int(n) for n in num]
                self._subplotspec = GridSpec(rows, cols)[num[0] - 1:num[1]]
            else:
                self._subplotspec = GridSpec(rows, cols)[int(num) - 1]
                # num - 1 for converting from MATLAB to python indexing
        else:
            raise ValueError('Illegal argument(s) to subplot: %s' % (args, ))

        # total = rows*cols
        # num -= 1    # convert from matlab to python indexing
        #             # i.e., num in range(0,total)
        # if num >= total:
        #     raise ValueError( 'Subplot number exceeds total subplots')
        # self._rows = rows
        # self._cols = cols
        # self._num = num

        # self.update_params()

        # sets self.fixbox
        self.update_params()

        pos = self.figbox.bounds

        horizontal = kwargs.pop("horizontal", [])
        vertical = kwargs.pop("vertical", [])
        aspect = kwargs.pop("aspect", None)
        anchor = kwargs.pop("anchor", "C")

        if kwargs:
            raise Exception("")

        Divider.__init__(self,
                         fig,
                         pos,
                         horizontal,
                         vertical,
                         aspect=aspect,
                         anchor=anchor)
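A quick check of the 3-digit shorthand described in the docstring, mirroring the str/map(int, ...) unpacking used above: 223 means a 2x2 grid, third subplot.

rows, cols, num = map(int, str(int(223)))
print(rows, cols, num)   # 2 2 3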
Example #33
def check_UML_module():
    all_classes = list(map(getattr, [UML] * len(dir(UML)), dir(UML)))
    for c in all_classes:
        if not isinstance(c, uml2.Element):
            continue
Example #34
 def serialize(self, data):
     if isinstance(data, (list, AttrList, tuple)):
         return list(map(self._serialize, data))
     return self._serialize(data)
Example #35
def make_score_tabular(row_lbls,
                       col_lbls,
                       values,
                       title=None,
                       out_of=None,
                       bold_best=False,
                       flip=False,
                       bigger_is_better=True,
                       multicol_lbls=None,
                       FORCE_INT=False,
                       precision=None,
                       SHORTEN_ROW_LBLS=False,
                       col_align='l',
                       col_sep='|',
                       multicol_sep='|',
                       centerline=True,
                       astable=False,
                       table_position='',
                       AUTOFIX_LATEX=True,
                       **kwargs):
    r"""
    makes a LaTeX tabular for displaying scores or errors

    Args:
        row_lbls (list of str):
        col_lbls (list of str):
        values (ndarray):
        title (str):  (default = None)
        out_of (None): (default = None)
        bold_best (bool): (default = True)
        flip (bool): (default = False)
        table_position (str) : eg '[h]'

    Returns:
        str: tabular_str

    CommandLine:
        python -m utool.util_latex --test-make_score_tabular:0 --show
        python -m utool.util_latex --test-make_score_tabular:1 --show
        python -m utool.util_latex --test-make_score_tabular:2 --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_latex import *  # NOQA
        >>> import utool as ut
        >>> row_lbls = ['config1', 'config2']
        >>> col_lbls = ['score \leq 1', 'metric2']
        >>> values = np.array([[1.2, 2], [3.2, 4]])
        >>> title = 'title'
        >>> out_of = 10
        >>> bold_best = True
        >>> flip = False
        >>> tabular_str = make_score_tabular(row_lbls, col_lbls, values, title, out_of, bold_best, flip)
        >>> result = tabular_str
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> render_latex_text(tabular_str)

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_latex import *  # NOQA
        >>> import utool as ut
        >>> row_lbls = ['config1']
        >>> col_lbls = ['score \leq 1', 'metric2']
        >>> values = np.array([[1.2, 2]])
        >>> title = 'title'
        >>> out_of = 10
        >>> bold_best = True
        >>> flip = False
        >>> tabular_str = make_score_tabular(row_lbls, col_lbls, values, title, out_of, bold_best, flip)
        >>> result = tabular_str
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> render_latex_text(tabular_str)

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_latex import *  # NOQA
        >>> import utool as ut
        >>> row_lbls = ['config1', 'config2']
        >>> col_lbls = ['score \leq 1', 'metric2', 'foobar']
        >>> multicol_lbls = [('spam', 1), ('eggs', 2)]
        >>> values = np.array([[1.2, 2, -3], [3.2, 4, -2]])
        >>> title = 'title'
        >>> out_of = 10
        >>> bold_best = True
        >>> flip = False
        >>> tabular_str = make_score_tabular(row_lbls, col_lbls, values, title, out_of, bold_best, flip, multicol_lbls=multicol_lbls)
        >>> result = tabular_str
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> render_latex_text(tabular_str)
    """
    import utool as ut
    if flip:
        bigger_is_better = not bigger_is_better
        flip_repltups = [('<=', '>'), ('>', '<='), ('\\leq', '\\gt'),
                         ('\\geq', '\\lt'), ('score', 'error')]
        col_lbls = [replace_all(lbl, flip_repltups) for lbl in col_lbls]
        if title is not None:
            title = replace_all(title, flip_repltups)
        if out_of is not None:
            values = out_of - values

    # Abbreviate based on common substrings
    common_rowlbl = None
    if SHORTEN_ROW_LBLS:
        if isinstance(row_lbls, list):
            row_lbl_list = row_lbls
        else:
            row_lbl_list = row_lbls.flatten().tolist()
        # Split the row labels into the alg components
        #algcomp_list = [lbl.split(')_') for lbl in row_lbl_list]
        longest = long_substr(row_lbl_list)
        common_strs = []
        while len(longest) > 10:
            common_strs += [longest]
            row_lbl_list = [
                row.replace(longest, '...') for row in row_lbl_list
            ]
            longest = long_substr(row_lbl_list)
        common_rowlbl = ('...'.join(common_strs)).replace(')_', ')_\n')
        row_lbls = row_lbl_list
        if len(row_lbl_list) == 1:
            common_rowlbl = row_lbl_list[0]
            row_lbls = ['0']

    # Stack values into a tabular body
    # TODO: need ability to specify datatypes
    def ensurelist(row_values):
        try:
            return row_values.tolist()
        except AttributeError:
            return row_values

    if False:
        # Numpy formatting
        def padvec(shape=(1, 1)):
            pad = np.array([[' ' for c in range(shape[1])]
                            for r in range(shape[0])])
            return pad

        col_lbls = ensure_rowvec(col_lbls)
        row_lbls = ensure_colvec(row_lbls)
        _0 = np.vstack([padvec(), row_lbls])
        _1 = np.vstack([col_lbls, values])
        body = np.hstack([_0, _1])
        body = [[str_ for str_ in row] for row in body]
    else:
        assert len(row_lbls) == len(values)
        body = [[' '] + col_lbls]
        body += [[row_lbl] + ensurelist(row_values)
                 for row_lbl, row_values in zip(row_lbls, values)]
    #import utool as ut
    # Fix things in each body cell
    DO_PERCENT = True
    try:
        for r in range(len(body)):
            for c in range(len(body[0])):
                # In data land
                if r > 0 and c > 0:
                    if precision is not None:
                        # Hack
                        if ut.is_float(body[r][c]):
                            fmtstr = '%.' + str(precision) + 'f'
                            body[r][c] = fmtstr % (float(body[r][c]), )
                    # Force integer
                    if FORCE_INT:
                        body[r][c] = str(int(float(body[r][c])))
                body[r][c] = str(body[r][c])
                # Remove bad formatting;
                if AUTOFIX_LATEX:
                    body[r][c] = escape_latex(body[r][c])
    except Exception as ex:
        import utool as ut
        print('len(row_lbls) = %r' % (len(row_lbls), ))
        print('len(col_lbls) = %r' % (len(col_lbls), ))
        print('values = %r' % (values, ))
        print('ut.depth_profile(values) = %r' % (ut.depth_profile(values), ))
        util_dbg.printex(ex, keys=['r', 'c'])
        raise

    # Bold the best values
    if bold_best:
        best_col_scores = values.max(0) if bigger_is_better else values.min(0)
        rows_to_bold = [
            np.where(values[:, colx] == best_col_scores[colx])[0]
            for colx in range(len(values.T))
        ]
        for colx, rowx_list in enumerate(rows_to_bold):
            for rowx in rowx_list:
                body[rowx + 1][colx +
                               1] = '\\txtbf{' + body[rowx + 1][colx + 1] + '}'

    # More fixing after the bold is in place
    for r in range(len(body)):
        for c in range(len(body[0])):
            # In data land
            if r > 0 and c > 0:
                if out_of is not None:
                    body[r][c] = body[r][c] + '/' + str(out_of)
                    if DO_PERCENT:
                        percent = ' = %.1f%%' % float(
                            100 * values[r - 1, c - 1] / out_of)
                        body[r][c] += escape_latex(percent)

    # Align columns for pretty printing
    body = np.array(body)
    ALIGN_BODY = True
    if ALIGN_BODY:
        new_body_cols = []
        for col in body.T:
            colstrs = list(map(str, ensurelist(col)))
            collens = list(map(len, colstrs))
            maxlen = max(collens)
            newcols = [str_ + (' ' * (maxlen - len(str_))) for str_ in colstrs]
            new_body_cols += [newcols]
        body = np.array(new_body_cols).T

    # Build Body (and row layout)
    HLINE_SEP = True
    rowvalsep = ''
    colvalsep = ' & '
    endl = '\\\\\n'
    hline = r'\hline'
    #extra_rowsep_pos_list = [1]  # rows to insert an extra hline after
    extra_rowsep_pos_list = []  # rows to insert an extra hline after
    if HLINE_SEP:
        rowvalsep = hline + '\n'
    # rowstr list holds blocks of rows
    rowstr_list = [colvalsep.join(row) + endl for row in body]
    #rowstr_list = [row[0] + rowlbl_sep + colvalsep.join(row[1:]) + endl for row in body]
    #rowstr_list = [(
    #    ('' if len(row) == 0 else row[0])
    #    if len(row) <= 1 else
    #    row[0] + rowlblcol_sep + colvalsep.join(row[1:]) + endl)
    #    for row in body]
    rowsep_list = [rowvalsep for row in rowstr_list[0:-1]
                   ]  # should be len 1 less than rowstr_list
    # Insert multicolumn names
    if multicol_lbls is not None:
        # TODO: label of the row labels
        multicol_sep
        multicols = [
            latex_multicolumn(multicol, size, 'c' + multicol_sep)
            for multicol, size in multicol_lbls
        ]
        multicol_str = latex_multirow(
            '', 2) + colvalsep + colvalsep.join(multicols) + endl
        ncols = sum([tup[1] for tup in multicol_lbls])
        mcol_sep = '\\cline{2-%d}\n' % (ncols + 1, )
        rowstr_list = [multicol_str] + rowstr_list
        rowsep_list = [mcol_sep] + rowsep_list
        #extra_rowsep_pos_list += [1]

    # Insert title
    if title is not None and not astable:
        tex_title = latex_multicolumn(title, len(body[0])) + endl
        rowstr_list = [tex_title] + rowstr_list
        rowsep_list = [rowvalsep] + rowsep_list
        #extra_rowsep_pos_list += [2]

    # Apply an extra hline (for label)
    #extra_rowsep_pos_list = []
    for pos in sorted(extra_rowsep_pos_list)[::-1]:
        rowstr_list.insert(pos, '')
        rowsep_list.insert(pos, rowvalsep)
    #tabular_body = rowvalsep.join(rowstr_list)
    from six.moves import zip_longest
    tabular_body = ''.join([
        row if sep is None else row + sep
        for row, sep in zip_longest(rowstr_list, rowsep_list)
    ])

    # Build Column Layout
    col_align_list = [col_align] * len(body[0])
    #extra_collayoutsep_pos_list = [1]
    extra_collayoutsep_pos_list = []
    for pos in sorted(extra_collayoutsep_pos_list)[::-1]:
        col_align_list.insert(pos, '')
    #col_layaout_sep_list = rowlblcol_sep  # TODO

    rowlblcol_sep = '|'
    # Build internal separations between column alignments
    # Defaults to just the normal col_sep
    col_align_sep_list = [col_sep] * (len(col_align_list) - 1)
    # Adjust for the separations between row labels and the actual row data
    if len(col_align_sep_list) > 0:
        col_align_sep_list[0] = rowlblcol_sep
    # Continue multicolumn separation
    if multicol_lbls is not None:
        multicol_offsets = ut.cumsum(ut.get_list_column(multicol_lbls, 1))
        for offset in multicol_offsets:
            if offset < len(col_align_sep_list):
                col_align_sep_list[offset] = multicol_sep

    from six.moves import zip_longest
    _tmp = [
        ut.filter_Nones(tup)
        for tup in zip_longest(col_align_list, col_align_sep_list)
    ]
    col_layout = ''.join(ut.flatten(_tmp))

    #if len(col_align_list) > 1:
    #    col_layout = col_align_list[0] + rowlblcol_sep + col_sep.join(col_align_list[1:])
    #else:
    #    col_layout = col_sep.join(col_align_list)

    tabular_head = (r'\begin{tabular}{|%s|}' % col_layout) + '\n'
    tabular_tail = r'\end{tabular}'

    if centerline:
        tabular_head = r'\centerline{' + '\n' + tabular_head
        tabular_tail = tabular_tail + '}'

    if astable:
        #tabular_head = r'\begin{centering}' + '\n' + tabular_head
        tabular_head = r'\centering' + '\n' + tabular_head
        tabular_head = r'\begin{table}' + table_position + '\n' + tabular_head

        lblstr = latex_sanitize_command_name(kwargs.get('label', title))
        caption = title
        if AUTOFIX_LATEX:
            caption = escape_latex(caption)
        caption = '\n% ---\n' + caption + '\n% ---\n'
        #tabular_head = r'\end{centering}' + '\n' + tabular_head
        tabular_tail = tabular_tail + '\n\caption[%s]{%s}\n\label{tbl:%s}\n\end{table}' % (
            lblstr, caption, lblstr)

    tabular_str = rowvalsep.join([tabular_head, tabular_body, tabular_tail])
    topsep = '\\hline\n' if True else '\\toprule\n'
    botsep = '\\hline\n' if True else '\\bottomrule\n'
    tabular_str = tabular_head + topsep + tabular_body + botsep + tabular_tail

    if common_rowlbl is not None:
        #tabular_str += escape_latex('\n\nThe following parameters were held fixed:\n' + common_rowlbl)
        pass
    return tabular_str
Example #36
 def test_Never(self):
     should_trigger = Never().should_trigger
     self.assertFalse(any(map(should_trigger, self.minutes)))
Example #37
 def test_Always(self):
     should_trigger = Always().should_trigger
     self.assertTrue(all(map(should_trigger, self.minutes)))
Example #38
def build_toco_convert_protos(input_tensors,
                              output_tensors,
                              inference_type=lite_constants.FLOAT,
                              inference_input_type=None,
                              input_format=lite_constants.TENSORFLOW_GRAPHDEF,
                              input_shapes=None,
                              output_format=lite_constants.TFLITE,
                              quantized_input_stats=None,
                              default_ranges_stats=None,
                              drop_control_dependency=True,
                              reorder_across_fake_quant=False,
                              allow_custom_ops=False,
                              change_concat_input_ranges=False,
                              post_training_quantize=False,
                              quantize_to_float16=False,
                              dump_graphviz_dir=None,
                              dump_graphviz_video=False,
                              target_ops=None,
                              allow_nonexistent_arrays=False,
                              debug_info=None,
                              conversion_summary_dir=None):
    """Builds protocol buffers describing a conversion of a model using TOCO.

  Typically this is to convert from TensorFlow GraphDef to TFLite, in which
  case the default `input_format` and `output_format` are sufficient.

  Args:
    input_tensors: List of input tensors. Type and shape are computed using
      `foo.shape` and `foo.dtype`.
    output_tensors: List of output tensors (only .name is used from this).
    inference_type: Target data type of real-number arrays in the output file.
      Must be `{tf.float32, tf.uint8}`. (default tf.float32)
    inference_input_type: Target data type of real-number input arrays. Allows
      for a different type for input arrays in the case of quantization.
      Must be `{tf.float32, tf.uint8}`. (default `inference_type`)
    input_format: Type of data to read. Currently must be
      `{TENSORFLOW_GRAPHDEF}`. (default TENSORFLOW_GRAPHDEF)
    input_shapes: Input array shape. It needs to be a list of the same length
      as `input_tensors`, or None. (default None)
    output_format: Output file format. Currently must be `{TFLITE,
      GRAPHVIZ_DOT}`. (default TFLITE)
    quantized_input_stats: List of tuples of floats representing the mean and
      standard deviation. Each tuple maps to the corresponding input tensor.
      Only need if `inference_input_type` is `QUANTIZED_UINT8`.
      real_input_value = (quantized_input_value - mean_value) / std_dev_value.
      (default None)
    default_ranges_stats: Tuple of integers representing (min, max) range values
      for all arrays without a specified range. Intended for experimenting with
      quantization via "dummy quantization". (default None)
    drop_control_dependency: Boolean indicating whether to drop control
      dependencies silently. This is due to TFLite not supporting control
      dependencies. (default True)
    reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant
      nodes in unexpected locations. Used when the location of the FakeQuant
      nodes is preventing graph transformations necessary to convert the graph.
      Results in a graph that differs from the quantized training graph,
      potentially causing differing arithmetic behavior. (default False)
    allow_custom_ops: Boolean indicating whether to allow custom operations.
      When false any unknown operation is an error. When true, custom ops are
      created for any op that is unknown. The developer will need to provide
      these to the TensorFlow Lite runtime with a custom resolver.
      (default False)
    change_concat_input_ranges: Boolean to change behavior of min/max ranges for
      inputs and outputs of the concat operator for quantized models. Changes
      the ranges of concat operator overlap when true. (default False)
    post_training_quantize: Boolean indicating whether to quantize the weights
      of the converted float model. Model size will be reduced and there will be
      latency improvements (at the cost of accuracy).
      (default False)
    quantize_to_float16: Boolean indicating whether to convert float buffers
        to float16. (default False)
    dump_graphviz_dir: Full filepath of folder to dump the graphs at various
      stages of processing GraphViz .dot files. Preferred over
      --output_format=GRAPHVIZ_DOT in order to keep the requirements of the
      output file. (default None)
    dump_graphviz_video: Boolean indicating whether to dump the graph after
      every graph transformation. (default False)
    target_ops: Experimental flag, subject to change. Set of OpsSet
      options indicating which converter to use.
      (default set([OpsSet.TFLITE_BUILTINS]))
    allow_nonexistent_arrays: Allow specifying array names that don't exist
      or are unused in the final graph. (default False)
    debug_info: `GraphDebugInfo` proto containing the stack traces for the
      original nodes referred by the converted graph.
    conversion_summary_dir: A string, the path to the generated conversion logs.

  Returns:
    model_flags, toco_flags, debug_info: three protocol buffers describing the
      conversion process and debug information.

  Raises:
    ValueError:
      If the input tensor type is unknown.
      If mean_values or std_dev_values are missing.
    RuntimeError: If TOCO fails to convert (in which case the runtime error's
      error text will contain the TOCO error log)
  """
    toco = _toco_flags_pb2.TocoFlags()
    toco.input_format = input_format
    toco.output_format = output_format
    toco.inference_type = util.convert_dtype_to_tflite_type(inference_type)
    if inference_input_type:
        toco.inference_input_type = util.convert_dtype_to_tflite_type(
            inference_input_type)
    else:
        toco.inference_input_type = toco.inference_type
    toco.drop_control_dependency = drop_control_dependency
    toco.reorder_across_fake_quant = reorder_across_fake_quant
    toco.allow_custom_ops = allow_custom_ops
    toco.post_training_quantize = post_training_quantize
    toco.quantize_to_float16 = quantize_to_float16
    if default_ranges_stats:
        toco.default_ranges_min = default_ranges_stats[0]
        toco.default_ranges_max = default_ranges_stats[1]
    if dump_graphviz_dir:
        toco.dump_graphviz_dir = dump_graphviz_dir
    toco.dump_graphviz_include_video = dump_graphviz_video
    if conversion_summary_dir:
        toco.conversion_summary_dir = conversion_summary_dir
    if target_ops:
        if set(target_ops) == set(
            [OpsSet.TFLITE_BUILTINS, OpsSet.SELECT_TF_OPS]):
            toco.enable_select_tf_ops = True
        elif set(target_ops) == set([OpsSet.SELECT_TF_OPS]):
            toco.enable_select_tf_ops = True
            toco.force_select_tf_ops = True

    model = _model_flags_pb2.ModelFlags()
    model.change_concat_input_ranges = change_concat_input_ranges
    for idx, input_tensor in enumerate(input_tensors):
        input_array = model.input_arrays.add()
        input_array.name = util.get_tensor_name(input_tensor)
        input_array.data_type = util.convert_dtype_to_tflite_type(
            input_tensor.dtype)

        if toco.inference_input_type in \
            [_types_pb2.QUANTIZED_UINT8, _types_pb2.INT8]:
            if not quantized_input_stats:
                raise ValueError("std_dev and mean must be defined when "
                                 "inference_input_type is QUANTIZED_UINT8.")
            input_array.mean_value, input_array.std_value = quantized_input_stats[
                idx]
        if input_shapes is None:
            shape = input_tensor.shape
        else:
            shape = input_shapes[idx]
        input_array.shape.dims.extend(list(map(int, shape)))

    for output_tensor in output_tensors:
        model.output_arrays.append(util.get_tensor_name(output_tensor))

    model.allow_nonexistent_arrays = allow_nonexistent_arrays

    return model, toco, debug_info
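
A minimal usage sketch for build_toco_convert_protos above, under the assumption that a TensorFlow 1.x-style API (tf.compat.v1) is available in the same environment as this module; the toy graph, tensor names, and shapes are hypothetical:

import tensorflow.compat.v1 as tf

with tf.Graph().as_default():
    # Hypothetical toy graph: one named input, one named output.
    inp = tf.placeholder(tf.float32, shape=[1, 4], name="input")
    out = tf.identity(inp * 2.0, name="output")
    # Defaults apply: GraphDef in, TFLITE out, float inference.
    model_flags, toco_flags, debug_info = build_toco_convert_protos(
        input_tensors=[inp], output_tensors=[out])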
Example #39
0
def ParseGroups(resource_class, groups):
    return list(
        map(resource_class.AdvertisedGroupsValueListEntryValuesEnum, groups))
Example #40
0
 def __call__(self, value):
     return list(map(self._dtype, value))
Example #41
0
@defer.inlineCallbacks
def filter_events_for_client(store,
                             user_id,
                             events,
                             is_peeking=False,
                             always_include_ids=frozenset()):
    """
    Check which events a user is allowed to see

    Args:
        store (synapse.storage.DataStore): our datastore (can also be a worker
            store)
        user_id(str): user id to be checked
        events(list[synapse.events.EventBase]): sequence of events to be checked
        is_peeking(bool): should be True if:
          * the user is not currently a member of the room, and:
          * the user has not been a member of the room since the given
            events
        always_include_ids (set(event_id)): set of event ids to specifically
            include (unless sender is ignored)

    Returns:
        Deferred[list[synapse.events.EventBase]]
    """
    types = (
        (EventTypes.RoomHistoryVisibility, ""),
        (EventTypes.Member, user_id),
    )
    event_id_to_state = yield store.get_state_for_events(
        frozenset(e.event_id for e in events),
        types=types,
    )

    ignore_dict_content = yield store.get_global_account_data_by_type_for_user(
        "m.ignored_user_list",
        user_id,
    )

    # FIXME: This will explode if people upload something incorrect.
    ignore_list = frozenset(
        ignore_dict_content.get("ignored_users", {}).keys(
        ) if ignore_dict_content else [])

    erased_senders = yield store.are_users_erased((e.sender for e in events))

    def allowed(event):
        """
        Args:
            event (synapse.events.EventBase): event to check

        Returns:
            None|EventBase:
               None if the user cannot see this event at all

               a redacted copy of the event if they can only see a redacted
               version

               the original event if they can see it as normal.
        """
        if not event.is_state() and event.sender in ignore_list:
            return None

        if event.event_id in always_include_ids:
            return event

        state = event_id_to_state[event.event_id]

        # get the room_visibility at the time of the event.
        visibility_event = state.get((EventTypes.RoomHistoryVisibility, ""),
                                     None)
        if visibility_event:
            visibility = visibility_event.content.get("history_visibility",
                                                      "shared")
        else:
            visibility = "shared"

        if visibility not in VISIBILITY_PRIORITY:
            visibility = "shared"

        # Always allow history visibility events on boundaries. This is done
        # by setting the effective visibility to the least restrictive
        # of the old vs new.
        if event.type == EventTypes.RoomHistoryVisibility:
            prev_content = event.unsigned.get("prev_content", {})
            prev_visibility = prev_content.get("history_visibility", None)

            if prev_visibility not in VISIBILITY_PRIORITY:
                prev_visibility = "shared"

            new_priority = VISIBILITY_PRIORITY.index(visibility)
            old_priority = VISIBILITY_PRIORITY.index(prev_visibility)
            if old_priority < new_priority:
                visibility = prev_visibility

        # likewise, if the event is the user's own membership event, use
        # the 'most joined' membership
        membership = None
        if event.type == EventTypes.Member and event.state_key == user_id:
            membership = event.content.get("membership", None)
            if membership not in MEMBERSHIP_PRIORITY:
                membership = "leave"

            prev_content = event.unsigned.get("prev_content", {})
            prev_membership = prev_content.get("membership", None)
            if prev_membership not in MEMBERSHIP_PRIORITY:
                prev_membership = "leave"

            # Always allow the user to see their own leave events, otherwise
            # they won't see the room disappear if they reject the invite
            if membership == "leave" and (prev_membership == "join"
                                          or prev_membership == "invite"):
                return event

            new_priority = MEMBERSHIP_PRIORITY.index(membership)
            old_priority = MEMBERSHIP_PRIORITY.index(prev_membership)
            if old_priority < new_priority:
                membership = prev_membership

        # otherwise, get the user's membership at the time of the event.
        if membership is None:
            membership_event = state.get((EventTypes.Member, user_id), None)
            if membership_event:
                membership = membership_event.membership

        # if the user was a member of the room at the time of the event,
        # they can see it.
        if membership == Membership.JOIN:
            return event

        # otherwise, it depends on the room visibility.

        if visibility == "joined":
            # we weren't a member at the time of the event, so we can't
            # see this event.
            return None

        elif visibility == "invited":
            # user can also see the event if they were *invited* at the time
            # of the event.
            return (event if membership == Membership.INVITE else None)

        elif visibility == "shared" and is_peeking:
            # if the visibility is shared, users cannot see the event unless
            # they have *subequently* joined the room (or were members at the
            # time, of course)
            #
            # XXX: if the user has subsequently joined and then left again,
            # ideally we would share history up to the point they left. But
            # we don't know when they left. We just treat it as though they
            # never joined, and restrict access.
            return None

        # the visibility is either shared or world_readable, and the user was
        # not a member at the time. We allow it, provided the original sender
        # has not requested their data to be erased, in which case, we return
        # a redacted version.
        if erased_senders[event.sender]:
            return prune_event(event)

        return event

    # check each event: gives an iterable[None|EventBase]
    filtered_events = map(allowed, events)

    # remove the None entries
    filtered_events = filter(operator.truth, filtered_events)

    # we turn it into a list before returning it.
    defer.returnValue(list(filtered_events))
Example #42
0
 def __iter__(self):
     return map(lambda i: _wrap(i, self._obj_wrapper), self._l_)
Example #43
0
File: distconst.py Project: rcofre/dit
def insert_rvf(d, func, index=-1):
    """
    Returns a new distribution with an added random variable at index `index`.

    The new random variable must be a function of the other random variables.
    By this, we mean that the entropy of the new random variable conditioned
    on the original random variables should be zero.

    Parameters
    ----------
    d : Distribution
        The distribution used to construct the new distribution.
    func : callable | list of callable
        A function which takes a single argument---the value of the previous
        random variables---and returns a new random variable. Note, the return
        value will be added to the outcome using `__add__`, and so it should be
        a hashable, orderable sequence (as every outcome must be). If a list of
        callables is provided, then multiple random variables are added
        simultaneously and will appear in the same order as the list.
    index : int
        The index at which to insert the random variable. A value of -1 will
        append the random variable to the end.

    Returns
    -------
    d : Distribution
        The new distribution.

    Examples
    --------
    >>> d = dit.Distribution(['00', '01', '10', '11'], [1/4]*4)
    >>> def xor(outcome):
    ...    return str(int(outcome[0] != outcome[1]))
    ...
    >>> d2 = dit.insert_rvf(d, xor)
    >>> d2.outcomes
    ('000', '011', '101', '110')

    """
    try:
        func[0]
    except TypeError:
        funcs = [func]
    else:
        funcs = func

    partial_outcomes = [map(func, d.outcomes) for func in funcs]

    # Now "flatten" the new contributions.
    partial_outcomes = [
        d._outcome_ctor([o for o_list in outcome for o in o_list])
        for outcome in zip(*partial_outcomes)
    ]

    new_outcomes = zip(d.outcomes, partial_outcomes)
    if index == -1:
        outcomes = [old + new for old, new in new_outcomes]
    else:
        outcomes = [
            old[:index] + new + old[index:] for old, new in new_outcomes
        ]

    d2 = Distribution(outcomes, d.pmf.copy(), base=d.get_base())
    return d2
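
The docstring above notes that a list of callables inserts several random variables at once; here is a small sketch of that path (assuming the dit package is installed; the expected output follows from the XOR doctest above, with a hypothetical AND helper added):

import dit

d = dit.Distribution(['00', '01', '10', '11'], [1 / 4] * 4)

def xor(outcome):
    return str(int(outcome[0] != outcome[1]))

def land(outcome):
    # logical AND of the two bits (illustrative helper, not from the source)
    return str(int(outcome[0] == '1' and outcome[1] == '1'))

d2 = dit.insert_rvf(d, [xor, land])
print(d2.outcomes)  # expected: ('0000', '0110', '1010', '1101')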
Example #44
0
    def handle(self, **options):
        runs = []
        all_es_indices = get_all_expected_es_indices()
        es = get_es_new()
        indices_needing_reindex = [
            info for info in all_es_indices
            if not es.indices.exists(info.index)
        ]

        if not indices_needing_reindex:
            print('Nothing needs to be reindexed')
            return

        print("Reindexing:\n\t", end=' ')
        print('\n\t'.join(map(six.text_type, indices_needing_reindex)))

        preindex_message = """
        Heads up!

        %s is going to start preindexing the following indices:\n
        %s

        This may take a while, so don't deploy until all these have reported finishing.
            """ % (settings.EMAIL_SUBJECT_PREFIX, '\n\t'.join(
            map(six.text_type, indices_needing_reindex)))

        mail_admins("Pillow preindexing starting", preindex_message)
        start = datetime.utcnow()
        for index_info in indices_needing_reindex:
            # loop through pillows once before running greenlets
            # to fail hard on misconfigured pillows
            reindex_command = get_reindex_commands(index_info.alias)
            if not reindex_command:
                raise Exception(
                    "Error, pillow [%s] is not configured "
                    "with its own management command reindex command "
                    "- it needs one" % index_info.alias)

        for index_info in indices_needing_reindex:
            print(index_info.alias)
            g = gevent.spawn(do_reindex, index_info.alias, options['reset'])
            runs.append(g)

        if len(indices_needing_reindex) > 0:
            gevent.joinall(runs)
            try:
                for job in runs:
                    job.get()
            except Exception:
                f = StringIO()
                traceback.print_exc(file=f)
                mail_admins("Pillow preindexing failed", f.getvalue())
                raise
            else:
                mail_admins(
                    "Pillow preindexing completed",
                    "Reindexing %s took %s seconds" %
                    (', '.join(map(six.text_type, indices_needing_reindex)),
                     (datetime.utcnow() - start).seconds))

        print("All pillowtop reindexing jobs completed")
Example #45
0
def multi_exec(func, *args, **kwargs):
    pfunc = partial(func, **kwargs) if kwargs else func
    map_results = map(pfunc, *args)
    return list(map_results)
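
A quick usage sketch for multi_exec (assuming `from functools import partial` is in scope, as the body requires whenever keyword arguments are passed):

def scale(x, factor=1):
    return x * factor

print(multi_exec(scale, [1, 2, 3], factor=10))  # [10, 20, 30]
print(multi_exec(pow, [2, 3, 4], [2, 2, 2]))    # [4, 9, 16]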
Example #46
0
File: distconst.py Project: rcofre/dit
    def from_mapping(self, mapping, force=True):
        """
        Returns a callable implementing a random variable via a mapping.

        Parameters
        ----------
        mapping : dict
            A mapping from outcomes to values of the new random variable.

        force : bool
            Ideally, the values of `mapping` should be satisfy the requirements
            of all outcomes (hashable, ordered sequences), but if `force` is
            `True`, we will attempt to use the distribution's outcome
            constructor and make sure that they are. If they are not, then
            the outcomes will be placed into a 1-tuple. This is strictly
            a convenience for users. As an example, suppose the outcomes are
            strings, the values of `mapping` can also be strings without issue.
            However, if the outcomes are tuples of integers, then the values
            *should* also be tuples. When `force` is `True`, then the values
            can be integers and then they will be transformed into 1-tuples.

        Returns
        -------
        func : function
            A callable implementing the desired function. It receives a single
            argument, the outcome, and returns an outcome for the calculation.

        Examples
        --------
        >>> d = dit.Distribution(['00', '01', '10', '11'], [1/4]*4)
        >>> bf = dit.RVFunctions(d)
        >>> mapping = {'00': '0', '01': '1', '10': '1', '11': '0'}
        >>> d = dit.insert_rvf(d, bf.from_mapping(mapping))
        >>> d.outcomes
        ('000', '011', '101', '110')

        Same example as above but now with tuples.

        >>> d = dit.Distribution([(0,0), (0,1), (1,0), (1,1)], [1/4]*4)
        >>> bf = dit.RVFunctions(d)
        >>> mapping = {(0,0): 0, (0,1): 1, (1,0): 1, (1,1): 0}
        >>> d = dit.insert_rvf(d, bf.from_mapping(mapping, force=True))
        >>> d.outcomes
        ((0, 0, 0), (0, 1, 1), (1, 0, 1), (1, 1, 0))

        See Also
        --------
        dit.modify_outcomes

        """
        ctor = self.ctor
        if force:
            try:
                list(map(ctor, mapping.values()))
            except (TypeError, ditException):
                values = [ctor([o]) for o in mapping.values()]
                mapping = dict(zip(mapping.keys(), values))

        def func(outcome):
            return mapping[outcome]

        return func
Example #47
0
    def process_tokens(self, tokens):
        """process tokens and search for :

         _ non strict indentation (i.e. not always using the <indent> parameter as
           indent unit)
         _ too long lines (i.e. longer than <max_chars>)
         _ optionally bad construct (if given, bad_construct must be a compiled
           regular expression).
        """
        self._bracket_stack = [None]
        indents = [0]
        check_equal = False
        line_num = 0
        self._lines = {}
        self._visited_lines = {}
        token_handlers = self._prepare_token_dispatcher()
        self._last_line_ending = None

        self._current_line = ContinuedLineState(tokens, self.config)
        for idx, (tok_type, token, start, _, line) in enumerate(tokens):
            if start[0] != line_num:
                line_num = start[0]
                # A tokenizer oddity: if an indented line contains a multi-line
                # docstring, the line member of the INDENT token does not contain
                # the full line; therefore we check the next token on the line.
                if tok_type == tokenize.INDENT:
                    self.new_line(TokenWrapper(tokens), idx - 1, idx + 1)
                else:
                    self.new_line(TokenWrapper(tokens), idx - 1, idx)

            if tok_type == tokenize.NEWLINE:
                # a program statement, or ENDMARKER, will eventually follow,
                # after some (possibly empty) run of tokens of the form
                #     (NL | COMMENT)* (INDENT | DEDENT+)?
                # If an INDENT appears, setting check_equal is wrong, and will
                # be undone when we see the INDENT.
                check_equal = True
                self._process_retained_warnings(TokenWrapper(tokens), idx)
                self._current_line.next_logical_line()
                self._check_line_ending(token, line_num)
            elif tok_type == tokenize.INDENT:
                check_equal = False
                self.check_indent_level(token, indents[-1] + 1, line_num)
                indents.append(indents[-1] + 1)
            elif tok_type == tokenize.DEDENT:
                # there's nothing we need to check here!  what's important is
                # that when the run of DEDENTs ends, the indentation of the
                # program statement (or ENDMARKER) that triggered the run is
                # equal to what's left at the top of the indents stack
                check_equal = True
                if len(indents) > 1:
                    del indents[-1]
            elif tok_type == tokenize.NL:
                self._check_continued_indentation(TokenWrapper(tokens),
                                                  idx + 1)
                self._current_line.next_physical_line()
            elif tok_type != tokenize.COMMENT:
                self._current_line.handle_line_start(idx)
                # This is the first concrete token following a NEWLINE, so it
                # must be the first token of the next program statement, or an
                # ENDMARKER; the "line" argument exposes the leading whitespace
                # for this statement; in the case of ENDMARKER, line is an empty
                # string, so will properly match the empty string with which the
                # "indents" stack was seeded
                if check_equal:
                    check_equal = False
                    self.check_indent_level(line, indents[-1], line_num)

            if tok_type == tokenize.NUMBER and token.endswith('l'):
                self.add_message('lowercase-l-suffix', line=line_num)

            try:
                handler = token_handlers[token]
            except KeyError:
                pass
            else:
                handler(tokens, idx)

        line_num -= 1  # to be ok with "wc -l"
        if line_num > self.config.max_module_lines:
            # Get the line where the too-many-lines (or its message id)
            # was disabled or default to 1.
            symbol = self.linter.msgs_store.check_message_id('too-many-lines')
            names = (symbol.msgid, 'too-many-lines')
            line = next(
                filter(None, map(self.linter._pragma_lineno.get, names)), 1)
            self.add_message('too-many-lines',
                             args=(line_num, self.config.max_module_lines),
                             line=line)
Example #48
0
File: distconst.py Project: rcofre/dit
def uniform_distribution(outcome_length, alphabet_size, base=None):
    """
    Returns a uniform distribution.

    Parameters
    ----------
    outcome_length : int
        The length of the outcomes.

    alphabet_size : int, list of lists
        The alphabets used to construct the outcomes of the distribution. If an
        integer, then the alphabet for each random variable will be the same,
        consisting of integers from 0 to k-1 where k is the alphabet size.
        If a list, then the elements are used as the alphabet for each random
        variable.  If the list has a single element, then it will be used
        as the alphabet for each random variable.

    base : float, 'linear', 'e'
        The desired base for the distribution probabilities.

    Returns
    -------
    d : Distribution.
        A uniform distribution.

    Examples
    --------
    Each random variable has the same standardized alphabet: [0,1]
    >>> d = dit.uniform_distribution(2, 2)

    Each random variable has its own alphabet.
    >>> d = dit.uniform_distribution(2, [[0,1],[1,2]])

    Both random variables have ['H','T'] as an alphabet.
    >>> d = dit.uniform_distribution(2, [['H','T']])

    """
    try:
        int(alphabet_size)
    except TypeError:
        # Assume it is a list of lists.
        alphabet = alphabet_size

        # Autoextend if only one alphabet is provided.
        if len(alphabet) == 1:
            alphabet = [alphabet[0]] * outcome_length
        elif len(alphabet) != outcome_length:
            raise TypeError("outcome_length does not match number of rvs.")
    else:
        # Build the standard alphabet.
        alphabet = [tuple(range(alphabet_size))] * outcome_length

    try:
        Z = np.prod(list(map(len, alphabet)))
        try:
            # for some reason numpypy.prod returns a list, and pypy can't handle
            #   multiplying a list by a numpy float.
            Z = int(Z[0])
        except:
            pass
    except TypeError:
        raise TypeError("alphabet_size must be an int or list of lists.")

    pmf = [1 / Z] * Z
    outcomes = tuple(product(*alphabet))
    d = Distribution(outcomes, pmf, base='linear')

    # Maybe we should use ditParams['base'] when base is None?
    if base is not None:
        d.set_base(base)

    return d
Example #49
0
File: common.py Project: nickloman/ete
 def info(self, *args):
     if LOG_LEVEL >=3:
         print("INFO  - ", ' '.join(map(str, args)), file=self.out)
Example #50
0
def multi_apply(func, *args, **kwargs):
    pfunc = partial(func, **kwargs) if kwargs else func
    map_results = map(pfunc, *args)
    return tuple(map(list, zip(*map_results)))
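
A usage sketch for multi_apply (same assumption that `functools.partial` is imported): each call returns a tuple, and the per-call tuples are transposed into per-field lists.

def split(x, offset=0):
    return x + offset, x - offset

sums, diffs = multi_apply(split, [1, 2, 3], offset=1)
print(sums)   # [2, 3, 4]
print(diffs)  # [0, 1, 2]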
Example #51
0
File: common.py Project: nickloman/ete
 def error(self, *args):
     if LOG_LEVEL >=1:
         print("ERROR - ", ' '.join(map(str, args)), file=self.out)
Example #52
0
File: common.py Project: nickloman/ete
 def debug(self, *args):
     if LOG_LEVEL >=4:
         print("DEBUG - ", ' '.join(map(str, args)), file=self.out)
Example #53
0
def apply_event(state, event, user_profile, include_subscribers):
    # type: (Dict[str, Any], Dict[str, Any], UserProfile, bool) -> None
    if event['type'] == "message":
        state['max_message_id'] = max(state['max_message_id'],
                                      event['message']['id'])
    elif event['type'] == "hotspots":
        state['hotspots'] = event['hotspots']
    elif event['type'] == "custom_profile_fields":
        state['custom_profile_fields'] = event['fields']
    elif event['type'] == "pointer":
        state['pointer'] = max(state['pointer'], event['pointer'])
    elif event['type'] == "realm_user":
        person = event['person']

        def our_person(p):
            # type: (Dict[str, Any]) -> bool
            return p['user_id'] == person['user_id']

        if event['op'] == "add":
            state['realm_users'].append(person)
        elif event['op'] == "remove":
            state['realm_users'] = [
                user for user in state['realm_users'] if not our_person(user)
            ]
        elif event['op'] == 'update':
            if (person['user_id'] == user_profile.id and 'avatar_url' in person
                    and 'avatar_url' in state):
                state['avatar_source'] = person['avatar_source']
                state['avatar_url'] = person['avatar_url']
                state['avatar_url_medium'] = person['avatar_url_medium']
            if 'avatar_source' in person:
                # Drop these so that they don't modify the
                # `realm_user` structure in the `p.update()` line
                # later; they're only used in the above lines
                del person['avatar_source']
                del person['avatar_url_medium']

            for field in ['is_admin', 'email', 'full_name']:
                if person[
                        'user_id'] == user_profile.id and field in person and field in state:
                    state[field] = person[field]

            for p in state['realm_users']:
                if our_person(p):
                    # In the unlikely event that the current user
                    # just changed to/from being an admin, we need
                    # to add/remove the data on all bots in the
                    # realm.  This is ugly and probably better
                    # solved by removing the all-realm-bots data
                    # given to admin users from this flow.
                    if ('is_admin' in person and 'realm_bots' in state
                            and user_profile.email == person['email']):
                        if p['is_admin'] and not person['is_admin']:
                            state['realm_bots'] = []
                        if not p['is_admin'] and person['is_admin']:
                            state['realm_bots'] = get_owned_bot_dicts(
                                user_profile)

                    # Now update the person
                    p.update(person)
    elif event['type'] == 'realm_bot':
        if event['op'] == 'add':
            state['realm_bots'].append(event['bot'])

        if event['op'] == 'remove':
            email = event['bot']['email']
            for bot in state['realm_bots']:
                if bot['email'] == email:
                    bot['is_active'] = False

        if event['op'] == 'update':
            for bot in state['realm_bots']:
                if bot['email'] == event['bot']['email']:
                    if 'owner_id' in event['bot']:
                        bot['owner'] = get_user_profile_by_id(
                            event['bot']['owner_id']).email
                    else:
                        bot.update(event['bot'])

    elif event['type'] == 'stream':
        if event['op'] == 'create':
            for stream in event['streams']:
                if not stream['invite_only']:
                    stream_data = copy.deepcopy(stream)
                    if include_subscribers:
                        stream_data['subscribers'] = []
                    # Add stream to never_subscribed (if not invite_only)
                    state['never_subscribed'].append(stream_data)
                state['streams'].append(stream)
            state['streams'].sort(key=lambda elt: elt["name"])

        if event['op'] == 'delete':
            deleted_stream_ids = {
                stream['stream_id']
                for stream in event['streams']
            }
            state['streams'] = [
                s for s in state['streams']
                if s['stream_id'] not in deleted_stream_ids
            ]
            state['never_subscribed'] = [
                stream for stream in state['never_subscribed']
                if stream['stream_id'] not in deleted_stream_ids
            ]

        if event['op'] == 'update':
            # For legacy reasons, we call stream data 'subscriptions' in
            # the state var here, for the benefit of the JS code.
            for obj in state['subscriptions']:
                if obj['name'].lower() == event['name'].lower():
                    obj[event['property']] = event['value']
            # Also update the pure streams data
            for stream in state['streams']:
                if stream['name'].lower() == event['name'].lower():
                    prop = event['property']
                    if prop in stream:
                        stream[prop] = event['value']
        elif event['op'] == "occupy":
            state['streams'] += event['streams']
        elif event['op'] == "vacate":
            stream_ids = [s["stream_id"] for s in event['streams']]
            state['streams'] = [
                s for s in state['streams'] if s["stream_id"] not in stream_ids
            ]
    elif event['type'] == 'default_streams':
        state['realm_default_streams'] = event['default_streams']
    elif event['type'] == 'realm':
        if event['op'] == "update":
            field = 'realm_' + event['property']
            state[field] = event['value']

            # Tricky interaction: Whether we can create streams can get changed here.
            if field == 'realm_create_stream_by_admins_only' and 'can_create_streams' in state:
                state['can_create_streams'] = user_profile.can_create_streams()
        elif event['op'] == "update_dict":
            for key, value in event['data'].items():
                state['realm_' + key] = value
                # It's a bit messy, but this is where we need to
                # update the state for whether password authentication
                # is enabled on this server.
                if key == 'authentication_methods':
                    state['realm_password_auth_enabled'] = (value['Email']
                                                            or value['LDAP'])
    elif event['type'] == "subscription":
        if not include_subscribers and event['op'] in [
                'peer_add', 'peer_remove'
        ]:
            return

        if event['op'] in ["add"]:
            if include_subscribers:
                # Convert the emails to user_profile IDs since that's what register() returns
                # TODO: Clean up this situation by making the event also have IDs
                for item in event["subscriptions"]:
                    item["subscribers"] = [
                        get_user_profile_by_email(email).id
                        for email in item["subscribers"]
                    ]
            else:
                # Avoid letting 'subscribers' entries end up in the list
                for i, sub in enumerate(event['subscriptions']):
                    event['subscriptions'][i] = copy.deepcopy(
                        event['subscriptions'][i])
                    del event['subscriptions'][i]['subscribers']

        def name(sub):
            # type: (Dict[str, Any]) -> Text
            return sub['name'].lower()

        if event['op'] == "add":
            added_names = set(map(name, event["subscriptions"]))
            was_added = lambda s: name(s) in added_names

            # add the new subscriptions
            state['subscriptions'] += event['subscriptions']

            # remove them from unsubscribed if they had been there
            state['unsubscribed'] = [
                s for s in state['unsubscribed'] if not was_added(s)
            ]

            # remove them from never_subscribed if they had been there
            state['never_subscribed'] = [
                s for s in state['never_subscribed'] if not was_added(s)
            ]

        elif event['op'] == "remove":
            removed_names = set(map(name, event["subscriptions"]))
            was_removed = lambda s: name(s) in removed_names

            # Find the subs we are affecting.
            removed_subs = list(filter(was_removed, state['subscriptions']))

            # Remove our user from the subscribers of the removed subscriptions.
            if include_subscribers:
                for sub in removed_subs:
                    sub['subscribers'] = [
                        id for id in sub['subscribers']
                        if id != user_profile.id
                    ]

            # We must effectively copy the removed subscriptions from subscriptions to
            # unsubscribe, since we only have the name in our data structure.
            state['unsubscribed'] += removed_subs

            # Now filter out the removed subscriptions from subscriptions.
            state['subscriptions'] = [
                s for s in state['subscriptions'] if not was_removed(s)
            ]

        elif event['op'] == 'update':
            for sub in state['subscriptions']:
                if sub['name'].lower() == event['name'].lower():
                    sub[event['property']] = event['value']
        elif event['op'] == 'peer_add':
            user_id = event['user_id']
            for sub in state['subscriptions']:
                if (sub['name'] in event['subscriptions']
                        and user_id not in sub['subscribers']):
                    sub['subscribers'].append(user_id)
            for sub in state['never_subscribed']:
                if (sub['name'] in event['subscriptions']
                        and user_id not in sub['subscribers']):
                    sub['subscribers'].append(user_id)
        elif event['op'] == 'peer_remove':
            user_id = event['user_id']
            for sub in state['subscriptions']:
                if (sub['name'] in event['subscriptions']
                        and user_id in sub['subscribers']):
                    sub['subscribers'].remove(user_id)
    elif event['type'] == "presence":
        # TODO: Add user_id to presence update events / state format!
        presence_user_profile = get_user_profile_by_email(event['email'])
        state['presences'][
            event['email']] = UserPresence.get_status_dict_by_user(
                presence_user_profile)[event['email']]
    elif event['type'] == "update_message":
        # The client will get the updated message directly
        pass
    elif event['type'] == "reaction":
        # The client will get the message with the reactions directly
        pass
    elif event['type'] == "referral":
        state['referrals'] = event['referrals']
    elif event['type'] == 'typing':
        # Typing notification events are transient and thus ignored
        pass
    elif event['type'] == "update_message_flags":
        # The client will get the message with the updated flags directly
        pass
    elif event['type'] == "realm_domains":
        if event['op'] == 'add':
            state['realm_domains'].append(event['realm_domain'])
        elif event['op'] == 'change':
            for realm_domain in state['realm_domains']:
                if realm_domain['domain'] == event['realm_domain']['domain']:
                    realm_domain['allow_subdomains'] = event['realm_domain'][
                        'allow_subdomains']
        elif event['op'] == 'remove':
            state['realm_domains'] = [
                realm_domain for realm_domain in state['realm_domains']
                if realm_domain['domain'] != event['domain']
            ]
    elif event['type'] == "realm_emoji":
        state['realm_emoji'] = event['realm_emoji']
    elif event['type'] == "alert_words":
        state['alert_words'] = event['alert_words']
    elif event['type'] == "muted_topics":
        state['muted_topics'] = event["muted_topics"]
    elif event['type'] == "realm_filters":
        state['realm_filters'] = event["realm_filters"]
    elif event['type'] == "update_display_settings":
        if event['setting_name'] == "twenty_four_hour_time":
            state['twenty_four_hour_time'] = event["setting"]
        if event['setting_name'] == 'left_side_userlist':
            state['left_side_userlist'] = event["setting"]
        if event['setting_name'] == 'emoji_alt_code':
            state['emoji_alt_code'] = event["setting"]
        if event['setting_name'] == 'emojiset':
            state['emojiset'] = event["setting"]
        if event['setting_name'] == 'default_language':
            state['default_language'] = event["setting"]
        if event['setting_name'] == 'timezone':
            state['timezone'] = event["setting"]
    elif event['type'] == "update_global_notifications":
        if event['notification_name'] == "enable_stream_desktop_notifications":
            state['enable_stream_desktop_notifications'] = event['setting']
        elif event['notification_name'] == "enable_stream_sounds":
            state['enable_stream_sounds'] = event['setting']
        elif event['notification_name'] == "enable_desktop_notifications":
            state['enable_desktop_notifications'] = event['setting']
        elif event['notification_name'] == "enable_sounds":
            state['enable_sounds'] = event['setting']
        elif event[
                'notification_name'] == "enable_offline_email_notifications":
            state['enable_offline_email_notifications'] = event['setting']
        elif event['notification_name'] == "enable_offline_push_notifications":
            state['enable_offline_push_notifications'] = event['setting']
        elif event['notification_name'] == "enable_online_push_notifications":
            state['enable_online_push_notifications'] = event['setting']
        elif event['notification_name'] == "enable_digest_emails":
            state['enable_digest_emails'] = event['setting']
        elif event[
                'notification_name'] == "pm_content_in_desktop_notifications":
            state['pm_content_in_desktop_notifications'] = event['setting']
    else:
        raise AssertionError("Unexpected event type %s" % (event['type'], ))
Example #54
0
File: common.py Project: nickloman/ete
 def warn(self, *args):
     if LOG_LEVEL >=2:
         print("WARN  - ", ' '.join(map(str, args)), file=self.out)
Example #55
0
    def _update_profile_rules(self, profile, oldname=None):
        name = six.text_type(profile['name'])

        parms = {
            'cn': name,
            'ipadeskprofiletarget': name,
            'ipadeskprofilepriority': profile['priority'],
        }

        if oldname is not None:
            # Update profile renaming it
            logging.debug(
                'FreeIPAConnector: Updating rule %s and renaming to %s' %
                (oldname, name))
            parms['cn'] = oldname
            parms['rename'] = name
        else:
            logging.debug('FreeIPAConnector: Updating rule for %s' % name)

        # If not hosts, set hostcategory to all
        if profile['hosts'] == [] and profile['hostgroups'] == []:
            parms['hostcategory'] = u'all'
        else:
            parms['hostcategory'] = None

        try:
            api.Command.deskprofilerule_mod(**parms)
        except errors.EmptyModlist:
            pass
        except Exception as e:
            logging.error('FreeIPAConnector: Error updating rule %s: %s - %s' %
                          (name, e, e.__class__))
            raise e

        # Get current users, groups, hosts and hostgroups for this rule
        rule = self.get_profile_rule(name)
        applies = self._get_profile_applies_from_rule(rule)
        # Get users and groups to add
        udif = set(profile['users']) - set(applies['users'])
        gdif = set(profile['groups']) - set(applies['groups'])
        # Add the users and groups to rule
        api.Command.deskprofilerule_add_user(
            name,
            user=list(map(six.text_type, udif)),
            group=list(map(six.text_type, gdif)))
        # Get users and groups to remove
        udif = set(applies['users']) - set(profile['users'])
        gdif = set(applies['groups']) - set(profile['groups'])
        # Remove users and groups from rule
        api.Command.deskprofilerule_remove_user(
            name,
            user=list(map(six.text_type, udif)),
            group=list(map(six.text_type, gdif)))

        if parms['hostcategory'] == 'all':
            api.Command.deskprofilerule_remove_host(
                name,
                host=list(map(six.text_type, applies['hosts'])),
                hostgroup=list(map(six.text_type, applies['hostgroups'])))
        else:
            # Get hosts and hostgroups to add
            hdif = set(profile['hosts']) - set(applies['hosts'])
            hgdif = set(profile['hostgroups']) - set(applies['hostgroups'])
            # Add the hosts and hostgroups to rule
            api.Command.deskprofilerule_add_host(
                name,
                host=list(map(six.text_type, hdif)),
                hostgroup=list(map(six.text_type, hgdif)))
            # Get hosts and hostgroups to remove
            hdif = set(applies['hosts']) - set(profile['hosts'])
            hgdif = set(applies['hostgroups']) - set(profile['hostgroups'])
            # Remove hosts and hostgroups from rule
            api.Command.deskprofilerule_remove_host(
                name,
                host=list(map(six.text_type, hdif)),
                hostgroup=list(map(six.text_type, hgdif)))

            # Check final hosts and set hostcategory to all if needed
            rule = self.get_profile_rule(name)
            applies = self._get_profile_applies_from_rule(rule)
            logging.debug('FreeIPAConnector: Applies after update: %s' %
                          applies)
            if applies['hosts'] == [] and applies['hostgroups'] == []:
                logging.debug('FreeIPAConnector: Setting hostcategory to all')
                parms['hostcategory'] = u'all'
                try:
                    api.Command.deskprofilerule_mod(**parms)
                except errors.EmptyModlist:
                    pass
                except Exception as e:
                    logging.error(
                        'FreeIPAConnector: Error updating rule %s: %s - %s' %
                        (name, e, e.__class__))
                    raise e
Example #56
0
 def collect_usages(self):
     self._collect_flags()
     return ['[--help]'] + list(map(lambda x: x.usage, self._flags))
Example #57
0
def main():
    if len(sys.argv) < 2:
        print("""
        commands:
        get_all <index>
        get_many <index> <key>
        get <index> <key>
        insert "message body" "message id"
        search "json query"
        indexes
        tmpdb <destination folder>
        """)
        return

    if sys.argv[1] == 'get_all':
        init()
        _test_query({
            'method': 'get_all',
            'index': sys.argv[2],
        })
        shutdown()

    if sys.argv[1] == 'get_many':
        init()
        _test_query({
            'method': 'get_many',
            'index': sys.argv[2],
            'key': sys.argv[3],
        })
        shutdown()

    if sys.argv[1] == 'get':
        init()
        _test_query({
            'method': 'get',
            'index': sys.argv[2],
            'key': sys.argv[3],
        })
        shutdown()

    if sys.argv[1] == 'indexes':
        init()
        print('Indexes in %s are:' % db().path)
        print('  ' + ('\n  '.join(db().indexes_names)))
        shutdown()

    if sys.argv[1] == 'refresh':
        print('ReIndexing')
        init()
        refresh_indexes(db())
        shutdown()

    if sys.argv[1] == 'tmpdb':
        recreate_db(sys.argv[2])

    if sys.argv[1] == 'insert':
        init()
        print(insert(build_json_message(data=sys.argv[2], message_id=sys.argv[3])))
        shutdown()

    if sys.argv[1] == 'search':
        init()
        print('\n'.join(map(str, [m for m in search(json.loads(sys.argv[2]))])))
        shutdown()
Example #58
0
File: run.py Project: shots47s/datalad
 def glob_dirs():
     return list(map(op.dirname, gpaths.expand(refresh=True)))
Example #59
0
 def generic_visit(self, n):
     if isinstance(n, ast.AST):
         # NOTE: the original format string was evidently lost when this snippet was
         # scraped (r'' % ... would raise TypeError); '%s(%s)' is a plausible stand-in.
         return '%s(%s)' % (n.__class__.__name__, ', '.join(map(self.visit, [getattr(n, f) for f in n._fields])))
     else:
         return str(n)
Example #60
0
def _collect_monomials(expression, self):
    """Refactorises an expression into a sum-of-products form, using
    distributivity rules (i.e. a*(b + c) -> a*b + a*c).  Expansion
    proceeds until all "compound" expressions are broken up.

    :arg expression: a GEM expression to refactorise
    :arg self: function for recursive calls

    :returns: :py:class:`MonomialSum`

    :raises FactorisationError: Failed to break up some "compound"
                                expressions with expansion.
    """

    # Phase 1: Collect and categorise product terms
    def stop_at(expr):
        # Break up compounds only
        return self.classifier(expr) != COMPOUND

    common_indices, terms = traverse_product(expression, stop_at=stop_at)
    common_indices = tuple(common_indices)

    common_atomics = []
    common_others = []
    compounds = []
    for term in terms:
        label = self.classifier(term)
        if label == ATOMIC:
            common_atomics.append(term)
        elif label == COMPOUND:
            compounds.append(term)
        elif label == OTHER:
            common_others.append(term)
        else:
            raise ValueError("Classifier returned illegal value.")
    common_atomics = tuple(common_atomics)

    # Phase 2: Attempt to break up compound terms into summands
    sums = []
    for expr in compounds:
        summands = traverse_sum(expr, stop_at=stop_at)
        if len(summands) <= 1:
            # Compound term is not an addition, avoid infinite
            # recursion and fail gracefully raising an exception.
            raise FactorisationError(expr)
        # Recurse into each summand, concatenate their results
        sums.append(MonomialSum.sum(*map(self, summands)))

    # Phase 3: Expansion
    #
    # Each element of ``sums`` is a MonomialSum.  Expansion produces a
    # series (representing a sum) of products of monomials.
    result = MonomialSum()
    for s, a, r in MonomialSum.product(*sums, rename_map=self.rename_map):
        renamer = make_renamer(self.rename_map)
        renamer(common_indices)  # update current_set
        s_, applier = renamer(s)

        all_indices = common_indices + s_
        atomics = common_atomics + tuple(map(applier, a))

        # All free indices that appear in atomic terms
        atomic_indices = set().union(
            *[atomic.free_indices for atomic in atomics])

        # Sum indices that appear in atomic terms
        # (will go to the result :py:class:`Monomial`)
        sum_indices = tuple(index for index in all_indices
                            if index in atomic_indices)

        # Sum indices that do not appear in atomic terms
        # (can factorise them over atomic terms immediately)
        rest_indices = tuple(index for index in all_indices
                             if index not in atomic_indices)

        # Not really sum factorisation, but rather just an optimised
        # way of building a product.
        rest = sum_factorise(rest_indices, common_others + [applier(r)])

        result.add(sum_indices, atomics, rest)
    return result
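
The distributivity rule named in the docstring, a*(b + c) -> a*b + a*c, is the same expansion a computer algebra system performs; as a loose analogy only (assuming SymPy is installed; this is not the GEM-specific machinery above):

import sympy

a, b, c = sympy.symbols('a b c')
print(sympy.expand(a * (b + c)))  # a*b + a*c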