Example #1
    def __new__(cls, name, bases, attrs):
        cls = super(DeviceBase, cls).__new__(cls, name, bases, attrs)
        meta = DeviceMeta()
        parents = get_all_parents(cls)[1:]

        # compute adapter params
        adapter_params_klass = [getattr(klass, "AdapterParams", None)
                                for klass in parents + (cls, )]
        adapter_kwargs = {}
        for klass in adapter_params_klass:
            if klass is None:
                continue
            adapter_kwargs.update(vars(klass))
        meta.adapter_kwargs = dict((k, v)
                                   for (k, v) in iteritems(adapter_kwargs)
                                   if not k.startswith("_"))

        # get parent fields and groups and populate our dictionaries
        meta.fields = all_fields = {}
        meta.groups = all_groups = {}
        meta.lut = all_lut = {}
        for klass in parents:
            all_fields.update(klass.meta.fields)
            all_groups.update(klass.meta.groups)
            all_lut.update(klass.meta.lut)

        # get class fields and groups
        fields = {}
        groups = {}
        prefixes = {}
        for name, obj in iteritems(attrs):
            if name.startswith("__"):
                continue
            if isinstance(obj, Field):
                fields[name] = obj
            elif isinstance(obj, Group):
                prefixes[name] = obj.prefix
                groups[name] = obj.fields

        # create OID-to-field lookup table
        lut = {}
        for group in groups.values():
            for name, field in group.items():
                lut[field.oid] = FieldInfo(name, field)

        all_fields.update(fields)
        all_groups.update(groups)
        all_lut.update(lut)

        # create a Container class for each group
        for group_name in iterkeys(groups):
            klass = type('Container', (AbstractContainer, ), dict(
                prefix=prefixes[group_name],
                group=group_name,
                items_list=set(iterkeys(meta.groups[group_name])),
            ))
            setattr(cls, group_name, klass)

        cls.meta = meta
        return cls
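For orientation, a sketch of the kind of class this metaclass consumes. Device, Field and Group (and their constructor signatures) are assumptions inferred from the attributes the metaclass reads, not the framework's documented API.

class Printer(Device):  # Device: hypothetical base already built by the DeviceBase metaclass

    class AdapterParams:            # merged into meta.adapter_kwargs
        community = "public"

    uptime = Field(oid="1.3.6.1.2.1.1.3.0")            # collected into meta.fields
    toner = Group(prefix="1.3.6.1.2.1.43.11.1.1",      # collected into meta.groups
                  fields={"level": Field(oid="1.3.6.1.2.1.43.11.1.1.9.1.1")})

# Printer.meta.adapter_kwargs == {"community": "public"}
# Printer.meta.lut maps each grouped field's OID to a FieldInfo(name, field) entry,
# and Printer.toner is replaced by a generated Container subclass.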
Example #2
    def _get_serialized_fields(self, item, default_value=None, include_empty=None):
        """Return the fields to export as an iterable of tuples
        (name, serialized_value)
        """
        if include_empty is None:
            include_empty = self.export_empty_fields
        if self.fields_to_export is None:
            if include_empty and not isinstance(item, dict):
                field_iter = six.iterkeys(item.fields)
            else:
                field_iter = six.iterkeys(item)
        else:
            if include_empty:
                field_iter = self.fields_to_export
            else:
                field_iter = (x for x in self.fields_to_export if x in item)

        for field_name in field_iter:
            if field_name in item:
                field = {} if isinstance(item, dict) else item.fields[field_name]
                value = self.serialize_field(field, field_name, item[field_name])
            else:
                value = default_value

            yield field_name, value
Example #3
  def _log_joint(self, z_sample):
    """Utility function to calculate model's log joint density,
    log p(x, z), for inputs z (and fixed data x).

    Args:
      z_sample: dict.
        Latent variable keys to samples.
    """
    self.scope_iter += 1
    scope = 'inference_' + str(id(self)) + '/' + str(self.scope_iter)
    # Form dictionary in order to replace conditioning on prior or
    # observed variable with conditioning on a specific value.
    dict_swap = z_sample.copy()
    for x, qx in six.iteritems(self.data):
      if isinstance(x, RandomVariable):
        if isinstance(qx, RandomVariable):
          qx_copy = copy(qx, scope=scope)
          dict_swap[x] = qx_copy.value()
        else:
          dict_swap[x] = qx

    log_joint = 0.0
    for z in six.iterkeys(self.latent_vars):
      z_copy = copy(z, dict_swap, scope=scope)
      log_joint += tf.reduce_sum(z_copy.log_prob(dict_swap[z]))

    for x in six.iterkeys(self.data):
      if isinstance(x, RandomVariable):
        x_copy = copy(x, dict_swap, scope=scope)
        log_joint += tf.reduce_sum(x_copy.log_prob(dict_swap[x]))

    return log_joint
Example #4
def _get_assignments_in_maxima(assignments, prefix=""):
    my_variable_names = set(six.iterkeys(assignments))
    written_assignments = set()

    prefix_subst_dict = dict(
            (vn, prefix+vn) for vn in my_variable_names)

    from pymbolic.maxima import MaximaStringifyMapper
    mstr = MaximaStringifyMapper()
    s2p = SympyToPymbolicMapper()
    dkill = _DerivativeKiller()

    result = []

    def write_assignment(name):
        symbols = [atm for atm in assignments[name].atoms()
                if isinstance(atm, sym.Symbol)
                and atm.name in my_variable_names]

        for symb in symbols:
            if symb.name not in written_assignments:
                write_assignment(symb.name)

        result.append("%s%s : %s;" % (
            prefix, name, mstr(dkill(s2p(
                assignments[name].subs(prefix_subst_dict))))))
        written_assignments.add(name)

    for name in six.iterkeys(assignments):
        if name not in written_assignments:
            write_assignment(name)

    return "\n".join(result)
Example #5
File: api.py Project: agiza/heat
def format_resource_attributes(resource, with_attr=None):
    resolver = resource.attributes
    if not with_attr:
        with_attr = []

    def resolve(attr, resolver):
        try:
            return resolver._resolver(attr)
        except Exception:
            return None
    # if 'show' is in the attribute schema, resolve all attributes of the
    # resource, including those not represented in the show API response
    # (such as 'console_urls' for a nova server); users can request them
    # via the with_attr parameter
    if 'show' in six.iterkeys(resolver):
        show_attr = resolve('show', resolver)
        # check whether 'show' resolved to a dictionary, i.e. it is not None
        if isinstance(show_attr, collections.Mapping):
            for a in with_attr:
                if a not in show_attr:
                    show_attr[a] = resolve(a, resolver)
            return show_attr
        else:
            # 'show' is None or not a mapping, so remove it and resolve
            # all attributes manually
            del resolver._attributes['show']
    attributes = set(list(six.iterkeys(resolver)) + with_attr)
    return dict((attr, resolve(attr, resolver))
                for attr in attributes)
Example #6
    def put(self, branch_id, branch):
        """Modify this branch.

        :param branch_id: An ID of the branch.
        :param branch: A branch within the request body.
        """

        branch_dict = branch.as_dict(omit_unset=True)

        if "expiration_date" in six.iterkeys(branch_dict):
            abort(400, _("Can't change expiration date."))

        if "expired" in six.iterkeys(branch_dict):
            if branch_dict["expired"]:
                branch_dict["expiration_date"] = datetime.now(tz=pytz.utc)
            else:
                branch_dict["expiration_date"] = None

        if branch.project_id:
            original_branch = branches_api.branch_get(branch_id)

            if not original_branch:
                raise exc.NotFound(_("Branch %s not found") % branch_id)

            if branch.project_id != original_branch.project_id:
                abort(400, _("You can't associate branch %s "
                             "with another project.") % branch_id)

        result = branches_api.branch_update(branch_id, branch_dict)

        if result:
            return wmodels.Branch.from_db_model(result)
        else:
            raise exc.NotFound(_("Branch %s not found") % branch_id)
Example #7
    def test_max_scores_number(self):
        """Test max score is correct when groups are number-defined."""
        s1, s2, s3 = 10.5, 30.5, 59
        parameters = [[s1, 2, 10], [s2, 2, 20], [s3, 2, 30]]
        header = ["Subtask 1 (10.5)", "Subtask 2 (30.5)", "Subtask 3 (59)"]

        # Only group 1_* is public.
        public_testcases = dict(self._public_testcases)
        self.assertEqual(
            GroupThreshold(parameters, public_testcases).max_scores(),
            (s1 + s2 + s3, s1, header))

        # All groups are public
        for testcase in iterkeys(public_testcases):
            public_testcases[testcase] = True
        self.assertEqual(
            GroupThreshold(parameters, public_testcases).max_scores(),
            (s1 + s2 + s3, s1 + s2 + s3, header))

        # No groups are public
        for testcase in iterkeys(public_testcases):
            public_testcases[testcase] = False
        self.assertEqual(
            GroupThreshold(parameters, public_testcases).max_scores(),
            (s1 + s2 + s3, 0, header))
Example #8
    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        connections = None
        if self.CONNECTIONS in prop_diff:
            connections = prop_diff.pop(self.CONNECTIONS)

        if self.DEVICES in prop_diff:
            self.handle_delete()
            self.properties.data.update(prop_diff)
            self.handle_create()
            return

        if prop_diff:
            self.prepare_update_properties(prop_diff)
            self.client().update_network_gateway(
                self.resource_id, {'network_gateway': prop_diff})

        if connections:
            for connection in self.properties[self.CONNECTIONS]:
                with self.client_plugin().ignore_not_found:
                    self.client_plugin().resolve_network(
                        connection, self.NETWORK, 'network_id')
                    if self.NETWORK in six.iterkeys(connection):
                        connection.pop(self.NETWORK)
                    self.client().disconnect_network_gateway(
                        self.resource_id, connection
                    )
            for connection in connections:
                self.client_plugin().resolve_network(
                    connection, self.NETWORK, 'network_id')
                if self.NETWORK in six.iterkeys(connection):
                    connection.pop(self.NETWORK)
                self.client().connect_network_gateway(
                    self.resource_id, connection
                )
Example #9
    def _is_scaling_allowed(self):
        metadata = self.metadata_get()
        if metadata.get('scaling_in_progress'):
            return False
        try:
            # Negative values don't make sense, so they are clamped to zero
            cooldown = max(0, self.properties[self.COOLDOWN])
        except TypeError:
            # If not specified, it will be None, same as cooldown == 0
            cooldown = 0

        if cooldown != 0:
            try:
                if 'cooldown' not in metadata:
                    # Note: this supports the old-style cooldown metadata layout
                    if metadata:
                        last_adjust = next(six.iterkeys(metadata))
                        if not timeutils.is_older_than(last_adjust, cooldown):
                            return False
                else:
                    last_adjust = next(six.iterkeys(metadata['cooldown']))
                    if not timeutils.is_older_than(last_adjust, cooldown):
                        return False
            except ValueError:
                # occurs when metadata has only {scaling_in_progress: False}
                pass

        # Assumes _finished_scaling is called
        # after the scaling operation completes
        metadata['scaling_in_progress'] = True
        self.metadata_set(metadata)
        return True
Example #10
    def test_format_stack_resource(self):
        res = self.stack['generic1']

        resource_keys = set((
            rpc_api.RES_CREATION_TIME,
            rpc_api.RES_UPDATED_TIME,
            rpc_api.RES_NAME,
            rpc_api.RES_PHYSICAL_ID,
            rpc_api.RES_ACTION,
            rpc_api.RES_STATUS,
            rpc_api.RES_STATUS_DATA,
            rpc_api.RES_TYPE,
            rpc_api.RES_ID,
            rpc_api.RES_STACK_ID,
            rpc_api.RES_STACK_NAME,
            rpc_api.RES_REQUIRED_BY,
        ))

        resource_details_keys = resource_keys.union(set((
            rpc_api.RES_DESCRIPTION,
            rpc_api.RES_METADATA,
            rpc_api.RES_SCHEMA_ATTRIBUTES,
        )))

        formatted = api.format_stack_resource(res, True)
        self.assertEqual(resource_details_keys, set(six.iterkeys(formatted)))

        formatted = api.format_stack_resource(res, False)
        self.assertEqual(resource_keys, set(six.iterkeys(formatted)))
Example #11
def test_parametric_function_api():
    """
    Testing :function:`nnabla.parametric_functions.parametric_function_api`.
    """
    import nnabla as nn
    import inspect
    nn.clear_parameters()
    shape = (2, 3, 4)

    # Signature check
    spec = inspect.getargspec(dummy_parametric_function)
    assert spec.args == ['shape', 'f', 'i', 's', 'name']
    assert spec.defaults == (10, 1, 'dummy', None)
    assert dummy_parametric_function.__doc__.splitlines()[0] == 'Doc'

    # Verify that two different ways do the same thing.
    # Using name argument
    v = dummy_parametric_function(shape, name='group1')
    # Using parameter_scope
    with nn.parameter_scope('group1'):
        v = dummy_parametric_function(shape)

    params = nn.get_parameters()
    assert len(params) == 2
    assert list(iterkeys(params)) == ['group1/dummy/p1', 'group1/dummy/p2']

    # No scope
    v = dummy_parametric_function(shape)

    params = nn.get_parameters()
    assert len(params) == 4
    assert list(iterkeys(params)) == ['group1/dummy/p1', 'group1/dummy/p2',
                                      'dummy/p1', 'dummy/p2']
    nn.clear_parameters()
Example #12
 def paths(self):
     """Get a list of all paths containing items of this type,
     without actually constructing all the items"""
     rv = set(iterkeys(self.data))
     if self.json_data:
         rv |= set(to_os_path(item) for item in iterkeys(self.json_data))
     return rv
Example #13
def assigned_res_type(inst, attr, value):
    """
    Assert only one (or none) assigned resource type is defined in the RAML
    Root and correctly represented in the RAML.
    """
    if value:
        if isinstance(value, tuple([dict, list])) and len(value) > 1:
            msg = "Too many resource types applied to '{0}'.".format(
                inst.display_name
            )
            raise InvalidResourceNodeError(msg)

        res_types = inst.root.raw.get("resourceTypes", {})
        res_type_names = [list(iterkeys(i))[0] for i in res_types]
        if isinstance(value, list):
            item = value[0]  # NOCOV
        elif isinstance(value, dict):
            item = list(iterkeys(value))[0]  # NOCOV
        else:
            item = value
        if item not in res_type_names:
            msg = ("Resource Type '{0}' is assigned to '{1}' but is not "
                   "defined in the root of the API.".format(value,
                                                            inst.display_name))
            raise InvalidResourceNodeError(msg)
Example #14
def _recursive_assert_same_structure(nest1, nest2, check_types):
  """Helper function for `assert_same_structure`."""
  is_sequence_nest1 = is_sequence(nest1)
  if is_sequence_nest1 != is_sequence(nest2):
    raise ValueError(
        "The two structures don't have the same nested structure. "
        "First structure: %s, second structure: %s." % (nest1, nest2))

  if not is_sequence_nest1:
    return  # finished checking

  if check_types:
    type_nest1 = type(nest1)
    type_nest2 = type(nest2)
    if type_nest1 != type_nest2:
      raise TypeError(
          "The two structures don't have the same sequence type. First "
          "structure has type %s, while second structure has type %s."
          % (type_nest1, type_nest2))

    if isinstance(nest1, dict):
      keys1 = set(_six.iterkeys(nest1))
      keys2 = set(_six.iterkeys(nest2))
      if keys1 != keys2:
        raise ValueError(
            "The two dictionaries don't have the same set of keys. First "
            "structure has keys {}, while second structure has keys {}."
            .format(keys1, keys2))

  nest1_as_sequence = [n for n in _yield_value(nest1)]
  nest2_as_sequence = [n for n in _yield_value(nest2)]
  for n1, n2 in zip(nest1_as_sequence, nest2_as_sequence):
    _recursive_assert_same_structure(n1, n2, check_types)
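A quick illustration of the behavior, assuming is_sequence and _yield_value from the same nest module are in scope:

# Matching nesting passes silently; leaf values themselves are never compared.
_recursive_assert_same_structure((1, (2, 3)), ('a', ('b', 'c')), check_types=True)

# A tuple/list mismatch trips the sequence-type check.
try:
    _recursive_assert_same_structure((1, 2), [1, 2], check_types=True)
except TypeError as err:
    print(err)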
Example #15
def _sequence_like(instance, args):
  """Converts the sequence `args` to the same type as `instance`.

  Args:
    instance: an instance of `tuple`, `list`, `namedtuple`, `dict`, or
        `collections.NamedDict`.
    args: elements to be converted to a sequence.

  Returns:
    `args` with the type of `instance`.
  """
  if isinstance(instance, dict):
    # For dictionaries with their values extracted, we always order the values
    # by sorting the keys first (see note below). This code allows recreating
    # e.g., `OrderedDict`s with their original key ordering.
    result = dict(zip(sorted(_six.iterkeys(instance)), args))
    return type(instance)((key, result[key]) for key in _six.iterkeys(instance))
  elif (isinstance(instance, tuple) and
        hasattr(instance, "_fields") and
        isinstance(instance._fields, _collections.Sequence) and
        all(isinstance(f, _six.string_types) for f in instance._fields)):
    # This is a namedtuple
    return type(instance)(*args)
  else:
    # Not a namedtuple
    return type(instance)(args)
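Two small examples of the conversion (standard library only; assumes the function is imported from the nest module shown here):

import collections
Point = collections.namedtuple('Point', ['x', 'y'])

_sequence_like(Point(1, 2), [10, 20])            # -> Point(x=10, y=20)
# For dicts, args must arrive in sorted-key order, mirroring how values are flattened.
_sequence_like({'b': None, 'a': None}, [1, 2])   # -> {'a': 1, 'b': 2}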
Example #16
def sum_values(value1, value2):
    # function borrowed from core, keeping original comment intact
    # TODO: Kill this method with fire, it's the only way to be sure
    if value1 is None:
        return value2
    if value2 is None:
        return value1

    number_types = (float, bool) + six.integer_types  # noqa

    if isinstance(value1, number_types) and isinstance(value2, number_types):
        return int(value1 + value2)

    # make sure the entries are of the same type
    if not (isinstance(value1, value2.__class__) or isinstance(value2, value1.__class__)):
        message = ("Entry %s type %s and entry %s type %s are not of the same type"
                   % (str(value1), str(type(value1)), str(value2), str(type(value2))))
        raise TypeError(message)

    if isinstance(value1, list):
        return list(set(value1 + value2))
    elif isinstance(value1, str) or isinstance(value1, datetime.datetime) or isinstance(value1, six.text_type):  # noqa
        return value1
    elif isinstance(value1, dict):
        keys = set(six.iterkeys(value1)) | set(six.iterkeys(value2))
        return dict((key, sum_values(value1.get(key), value2.get(key))) for key in keys)
    else:
        return value1 + value2
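A few illustrative calls showing the merge rules above (plain built-in values only; nothing here is project-specific):

sum_values(None, 5)                       # -> 5
sum_values(2, 3)                          # -> 5
sorted(sum_values([1, 2], [2, 3]))        # -> [1, 2, 3]  (lists are set-merged)
sum_values({'a': 1}, {'a': 2, 'b': 3})    # -> {'a': 3, 'b': 3}  (recursive merge)
sum_values('keep', 'drop')                # -> 'keep'  (strings keep the first value)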
Example #17
def test_rm(tmpdir):
    tmpdir = six.text_type(tmpdir)

    shutil.copytree(
        join(dirname(__file__), 'example_results'),
        join(tmpdir, 'example_results'))

    conf = config.Config.from_json({
        'results_dir': join(tmpdir, 'example_results'),
        'repo': "### IGNORED, BUT REQUIRED ###"
        })

    tools.run_asv_with_conf(conf, 'rm', '-y', 'benchmark=time_quantity*')

    results_a = list(results.iter_results(tmpdir))
    for result in results_a:
        for key in six.iterkeys(result.results):
            assert not key.startswith('time_quantity')
        for key in six.iterkeys(result.started_at):
            assert not key.startswith('time_quantity')
        for key in six.iterkeys(result.ended_at):
            assert not key.startswith('time_quantity')

    tools.run_asv_with_conf(conf, 'rm', '-y', 'commit_hash=05d283b9')

    results_b = list(results.iter_results(tmpdir))
    assert len(results_b) == len(results_a) - 1
Example #18
    def _cooldown_inprogress(self):
        inprogress = False
        try:
            # Negative values don't make sense, so they are clamped to zero
            cooldown = max(0, self.properties[self.COOLDOWN])
        except TypeError:
            # If not specified, it will be None, same as cooldown == 0
            cooldown = 0

        metadata = self.metadata_get()
        if metadata.get('scaling_in_progress'):
            return True

        if 'cooldown' not in metadata:
            # Note: this supports the old-style cooldown metadata layout
            if metadata and cooldown != 0:
                last_adjust = next(six.iterkeys(metadata))
                if not timeutils.is_older_than(last_adjust, cooldown):
                    inprogress = True
        elif cooldown != 0:
            last_adjust = next(six.iterkeys(metadata['cooldown']))
            if not timeutils.is_older_than(last_adjust, cooldown):
                inprogress = True

        if not inprogress:
            metadata['scaling_in_progress'] = True
            self.metadata_set(metadata)

        return inprogress
Example #19
    def to_pretty (self, format='str'):
        """Return a string with a prettified version of this object’s contents.

        The format is a multiline string where each line is of the form ``key
        = value``. If the *format* argument is equal to ``"str"``, each
        ``value`` is the stringification of the value; if it is ``"repr"``, it
        is its :func:`repr`.

        Calling :func:`str` on a :class:`Holder` returns a slightly different
        pretty stringification that uses a textual representation similar to a
        Python :class:`dict` literal.

        """
        if format == 'str':
            template = '%-*s = %s'
        elif format == 'repr':
            template = '%-*s = %r'
        else:
            raise ValueError ('unrecognized value for "format": %r' % format)

        d = self.__dict__
        maxlen = 0

        for k in six.iterkeys (d):
            maxlen = max (maxlen, len (k))

        return '\n'.join (template % (maxlen, k, d[k])
                          for k in sorted (six.iterkeys (d)))
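A quick usage sketch, assuming Holder simply stores its keyword arguments in __dict__ (as pwkit's Holder does):

h = Holder(alpha=1, beta='two')
print(h.to_pretty())
# alpha = 1
# beta  = two
print(h.to_pretty(format='repr'))
# alpha = 1
# beta  = 'two'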
Example #20
def nn_recip_weight(ibs, qaid2_nns, qreq):
    'Filters a nearest neighbor to only reciprocals'
    data_index = qreq.data_index
    K = qreq.cfg.nn_cfg.K
    Krecip = qreq.cfg.filt_cfg.Krecip
    checks = qreq.cfg.nn_cfg.checks
    dx2_data = data_index.dx2_data
    data_flann = data_index.flann
    qaid2_recip_weight = {qaid: None for qaid in six.iterkeys(qaid2_nns)}
    qaid2_metaweight = {qaid: None for qaid in six.iterkeys(qaid2_nns)}
    for qaid in six.iterkeys(qaid2_nns):
        (qfx2_dx, qfx2_dist) = qaid2_nns[qaid]
        nQuery = len(qfx2_dx)
        dim = dx2_data.shape[1]
        # Get the original K nearest features
        qx2_nndx = dx2_data[qfx2_dx[:, 0:K]]
        qx2_nndist = qfx2_dist[:, 0:K]
        qx2_nndx.shape = (nQuery * K, dim)
        # TODO: Have the option for this to be both indexes.
        (_nn2_rdx, _nn2_rdists) = data_flann.nn_index(qx2_nndx, Krecip, checks=checks)
        # Get the maximum distance of the Krecip reciprocal neighbors
        _nn2_rdists.shape = (nQuery, K, Krecip)
        qfx2_recipmaxdist = _nn2_rdists.max(2)
        # Test if nearest neighbor distance is less than reciprocal distance
        qfx2_reciprocalness = qfx2_recipmaxdist - qx2_nndist
        qaid2_recip_weight[qaid] = qfx2_reciprocalness
    return qaid2_recip_weight, qaid2_metaweight
Example #21
def nn_scale_weight(ibs, qaid2_nns, qreq):
    # Filter by scale for funzies
    K = qreq.cfg.nn_cfg.K
    aid2_scale_weight = {qaid: None for qaid in six.iterkeys(qaid2_nns)}
    qaid2_metaweight = {qaid: None for qaid in six.iterkeys(qaid2_nns)}
    data_index = qreq.data_index
    K = qreq.cfg.nn_cfg.K
    dx2_aid = data_index.dx2_aid
    dx2_fx = data_index.dx2_fx
    for qaid in six.iterkeys(qaid2_nns):
        (qfx2_dx, qfx2_dist) = qaid2_nns[qaid]
        qfx2_kpts = ibs.get_annot_kpts(qaid)
        qfx2_nn = qfx2_dx[:, 0:K]
        nQuery = len(qfx2_dx)
        qfx2_aid = dx2_aid[qfx2_nn]
        qfx2_fx = dx2_fx[qfx2_nn]
        qfx2_det1 = array(qfx2_kpts[:, [2, 4]], np.float).prod(1)
        qfx2_det1 = np.sqrt(1.0 / qfx2_det1)
        qfx2_K_det1 = np.rollaxis(np.tile(qfx2_det1, (K, 1)), 1)
        qfx2_det2 = array([ibs.get_annot_kpts(aid)[fx, [2, 4]] for (aid, fx) in
                           izip(qfx2_aid.flat, qfx2_fx.flat)], np.float).prod(1)
        qfx2_det2.shape = (nQuery, K)
        qfx2_det2 = np.sqrt(1.0 / qfx2_det2)
        qfx2_scaledist = qfx2_det2 / qfx2_K_det1
        aid2_scale_weight[qaid] = qfx2_scaledist
    return aid2_scale_weight, qaid2_metaweight
Example #22
 def set_annotation_data(self, data):
     annotation_id, data_id = data['id'].split('|')
     return OrderedDict([
         ('accept_selectors', data['accept_selectors']),
         ('container_id', data['parent']),
         ('data', {
             data_id: OrderedDict([
                 ('attribute', data['attribute']),
                 ('extractors', list(iterkeys(data['extractors'])) or {}),
                 ('field', data['field'] and next(iterkeys(data['field']))),
                 ('required', data['required']),
             ]),
         }),
         ('id', annotation_id),
         ('text-content', data['text_content']),
         ('post_text', data['post_text']),
         ('pre_text', data['pre_text']),
         ('reject_selectors', data['reject_selectors']),
         ('required', []),
         ('repeated', data['repeated']),
         ('selection_mode', data['selection_mode']),
         ('selector', data['selector']),
         ('tagid', None),
         ('xpath', data['xpath']),
     ])
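The expected shape of the data argument can be read off the lookups above; the following dict is purely illustrative (all values are made up):

data = {
    'id': 'annotation-1|data-1',      # split into annotation_id and data_id
    'accept_selectors': ['.price'],
    'reject_selectors': [],
    'parent': 'container-1',
    'attribute': 'content',
    'extractors': {},
    'field': {'price': {}},
    'required': False,
    'text_content': 'content',
    'post_text': None,
    'pre_text': None,
    'repeated': False,
    'selection_mode': 'auto',
    'selector': '.price',
    'xpath': None,
}
# set_annotation_data(data) nests the attribute/extractor/field details under the
# data_id ('data-1') and keeps the annotation id ('annotation-1') at the top level.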
Example #23
    def log_prob(self, xs, zs):
        """
        Parameters
        ----------
        xs : dict of str to tf.Tensor
            Data dictionary. Each key is a data structure used in the
            model (Theano shared variable), and its value is the
            corresponding realization (tf.Tensor).
        zs : dict of str to tf.Tensor
            Latent variable dictionary. Each key names a latent variable
            used in the model (str), and its value is the corresponding
            realization (tf.Tensor).

        Returns
        -------
        tf.Tensor
            Scalar, the log joint density log p(xs, zs).

        Notes
        -----
        It wraps around a Python function. The Python function takes
        inputs of type np.ndarray and outputs a np.ndarray.
        """
        # Store keys so that ``_py_log_prob_args`` knows how each
        # value corresponds to a key.
        self.xs_keys = list(six.iterkeys(xs))
        self.zs_keys = list(six.iterkeys(zs))

        # Pass in all tensors as a flattened list for tf.py_func().
        inputs = [tf.convert_to_tensor(x) for x in six.itervalues(xs)]
        inputs += [tf.convert_to_tensor(z) for z in six.itervalues(zs)]

        return tf.py_func(self._py_log_prob_args, inputs, [tf.float32])[0]
Example #24
    def test_nonall_item_key_value_lists(self):
        for init in self.inits:
            dic = odict(init.items())
            omd = omdict(init.items())

            # Testing items(), keys(), values(), lists(), and listitems().
            assert omd.items() == list(dic.items())
            assert omd.keys() == list(dic.keys())
            assert omd.values() == list(dic.values())
            iterator = zip(omd.keys(), omd.lists(), omd.listitems())
            for key, valuelist, listitem in iterator:
                assert omd.values(key) == omd.getlist(key) == valuelist
                assert omd.items(key) == [i for i in init.items() if i[0] == key]
                assert listitem == (key, valuelist)

            # Testing iteritems(), iterkeys(), itervalues(), and iterlists().
            for key1, key2 in zip(omd.iterkeys(), six.iterkeys(dic)):
                assert key1 == key2
            for val1, val2 in zip(omd.itervalues(), six.itervalues(dic)):
                assert val1 == val2
            for item1, item2 in zip(omd.iteritems(), six.iteritems(dic)):
                assert item1 == item2
            for key, values in zip(six.iterkeys(omd), omd.iterlists()):
                assert omd.getlist(key) == values
            iterator = zip(omd.iterkeys(), omd.iterlists(), omd.iterlistitems())
            for key, valuelist, listitem in iterator:
                assert listitem == (key, valuelist)

            # Test iteritems() and itervalues() with a key.
            for key in omd.iterkeys():
                assert list(omd.iteritems(key)) == list(zip(repeat(key), omd.getlist(key)))
                assert list(omd.iterallitems(key)) == list(zip(repeat(key), omd.getlist(key)))
            for nonkey in self.nonkeys:
                self.assertRaises(KeyError, omd.iteritems, nonkey)
                self.assertRaises(KeyError, omd.itervalues, nonkey)
Example #25
    def _compare_odict_and_omddict(self, d, omd):
        assert len(d) == len(omd)  # __len__().

        # __contains__(), has_key(), get(), and setdefault().
        for dkey, omdkey in zip(d, omd):
            assert dkey == omdkey and dkey in d and omdkey in omd
            assert dkey in d and omdkey in omd
            assert d.get(dkey) == omd.get(omdkey)
            d.setdefault(dkey, _unique)
            omd.setdefault(omdkey, _unique)
            assert d.get(dkey) == omd.get(omdkey) and d.get(dkey) != _unique
        for nonkey in self.nonkeys:
            assert d.get(nonkey) == omd.get(nonkey) is None
            d.setdefault(nonkey, _unique)
            omd.setdefault(nonkey, _unique)
            assert d.get(nonkey) == omd.get(nonkey) == _unique

        # items(), keys(), values(), iteritems(), iterkeys(), and itervalues().
        iterators = [
            zip(d.items(), omd.items(), d.keys(), omd.keys(), d.values(), omd.values()),
            zip(
                six.iteritems(d),
                six.iteritems(omd),
                six.iterkeys(d),
                six.iterkeys(omd),
                six.itervalues(d),
                six.itervalues(omd),
            ),
        ]
        for iterator in iterators:
            for ditem, omditem, dkey, omdkey, dvalue, omdvalue in iterator:
                assert dkey == omdkey
                assert ditem == omditem
                assert dvalue == omdvalue

        # pop().
        dcopy, omdcopy = d.copy(), omd.copy()
        while dcopy and omdcopy:
            dpop = dcopy.pop(list(dcopy.keys())[0])
            omdpop = omdcopy.pop(list(omdcopy.keys())[0])
            assert dpop == omdpop
        # popitem().
        dcopy, omdcopy = d.copy(), omd.copy()
        while dcopy and omdcopy:
            assert dcopy.popitem() == omdcopy.popitem()

        # __getitem__().
        for dkey, omdkey in zip(six.iterkeys(d), six.iterkeys(omd)):
            assert d[dkey] == omd[omdkey]
        # __setitem__().
        for dkey, omdkey in zip(d, omd):
            d[dkey] = _unique
            omd[omdkey] = _unique
            assert dkey == omdkey and d[dkey] == omd[omdkey]
        # __delitem__().
        while d and omd:
            dkey, omdkey = list(d.keys())[0], list(omd.keys())[0]
            del d[dkey]
            del omd[omdkey]
            assert dkey == omdkey and dkey not in d and omdkey not in omd
Example #26
def _update_list_with_key(old_list, new_list, key,
                          preserve_old=False, update_value_fn=None):
    """Update a SQLAlchemy list-relationship, using key for identity

    Make old_list look like new_list, in a similar way to _update_dict, as
    if the list was a dictionary with key computed using the key function.

    If preserve_old is true, elements in old_list with a key not present in
    new_list will be preserved.

    """
    if update_value_fn is None:
        update_value_fn = _update_object

    old_dict = dict((key(v), v) for v in old_list)
    new_dict = dict((key(v), v) for v in new_list)

    for k in set(iterkeys(old_dict)) | set(iterkeys(new_dict)):
        if k in new_dict:
            if k not in old_dict:
                # Add new value to the old dictionary.
                temp = new_dict[k]
                new_list.remove(temp)
                old_list.append(temp)
            else:
                # Update the value in old_dict with the new value.
                update_value_fn(old_dict[k], new_dict[k])
        elif not preserve_old:
            # Remove the old value not anymore present.
            old_list.remove(old_dict[k])
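A minimal sketch with plain dicts standing in for ORM rows (an assumption; real callers pass SQLAlchemy objects and usually rely on the default _update_object):

old = [{'name': 'a', 'score': 1}, {'name': 'b', 'score': 2}]
new = [{'name': 'b', 'score': 20}, {'name': 'c', 'score': 30}]
_update_list_with_key(old, new, key=lambda row: row['name'],
                      update_value_fn=lambda o, n: o.update(n))
# 'a' was removed, 'b' was updated in place, 'c' was moved over from new_list.
assert sorted(row['name'] for row in old) == ['b', 'c']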
Example #27
def compute_edge_map(mesh0, mesh1):
    """
    Compute map from edges of mesh0 to vertices of mesh1.

    *Arguments*
        mesh0
            a :py:class:`Mesh <dolfin.cpp.Mesh>`.
        mesh1
            a :py:class:`Mesh <dolfin.cpp.Mesh>`.

    It is assumed that both meshes have a :py:class:`MeshFunction
    <dolfin.cpp.MeshFunction>` over the vertices named
    "parent_vertex_indices" which contain a mapping from the
    local vertices to a common parent vertex numbering.

    """

    # Check arguments
    if not isinstance(mesh0, Mesh):
        raise TypeError("expected 'Mesh' as argument")
    if not isinstance(mesh1, Mesh):
        raise TypeError("expected 'Mesh' as argument")

    # Get parent vertex numbers
    vertices0 = mesh0.data().array("parent_vertex_indices", 0)
    vertices1 = mesh1.data().array("parent_vertex_indices", 0)

    # Check mappings
    if len(vertices0) == 0  or len(vertices1) == 0:
        cpp.dolfin_error("ale.py",
                         "compute edge map",
                         "Parent vertex indices are missing")

    # Initialize edges
    mesh0.init(1)
    mesh1.init(1)

    # Build parent to local map from vertex pair to local edge for mesh0
    parent_to_local_mesh0 = {}
    for edge in edges(mesh0):
        v = [vertices0[int(i)] for i in edge.entities(0)]
        v.sort()
        parent_to_local_mesh0[tuple(v)] = edge.index()

    # Build parent to local map from vertex pair to local edge for mesh1
    parent_to_local_mesh1 = {}
    for edge in edges(mesh1):
        v = [vertices1[int(i)] for i in edge.entities(0)]
        v.sort()
        parent_to_local_mesh1[tuple(v)] = edge.index()

    # Get common edges
    common_edges = set(six.iterkeys(parent_to_local_mesh0)).intersection(set(six.iterkeys(parent_to_local_mesh1)))

    # Compute map
    edge_map = {}
    for edge in common_edges:
        edge_map[parent_to_local_mesh0[edge]] = parent_to_local_mesh1[edge]

    return edge_map
Example #28
    def setUp(self):
        super(TestOVNGatewayScheduler, self).setUp()

        # Overwritten by derived classes
        self.l3_scheduler = None

        # Used for unit tests
        self.new_router_name = 'router_new'
        self.fake_chassis_router_mappings = {
            'None': {'Chassis': [],
                     'Routers': {'r1': ovn_const.OVN_GATEWAY_INVALID_CHASSIS}},
            'Multiple1': {'Chassis': ['hv1', 'hv2'],
                          'Routers': {'r1': 'hv1', 'r2': 'hv2', 'r3': 'hv1'}},
            'Multiple2': {'Chassis': ['hv1', 'hv2', 'hv3'],
                          'Routers': {'r1': 'hv1', 'r2': 'hv1', 'r3': 'hv1'}},
            'Multiple3': {'Chassis': ['hv1', 'hv2', 'hv3'],
                          'Routers': {'r1': 'hv3', 'r2': 'hv2', 'r3': 'hv2'}}
            }

        # Determine the chassis to router list bindings
        for details in six.itervalues(self.fake_chassis_router_mappings):
            self.assertNotIn(self.new_router_name,
                             six.iterkeys(details['Routers']))
            details.setdefault('Chassis_Bindings', {})
            for chassis in details['Chassis']:
                details['Chassis_Bindings'].setdefault(chassis, [])
            for router, chassis in six.iteritems(details['Routers']):
                if chassis in six.iterkeys(details['Chassis_Bindings']):
                    details['Chassis_Bindings'][chassis].append(router)
Example #29
def _update_dict(old_dict, new_dict, update_value_fn=None):
    """Update a SQLAlchemy relationship with type dict

    Make old_dict look like new_dict, by:
    - calling update_value_fn to overwrite the values of old_dict with a
      corresponding value in new_dict;
    - deleting all entries in old_dict whose key is not in new_dict;
    - moving all entries in new_dict whose key is not in old_dict.

    """
    if update_value_fn is None:
        update_value_fn = _update_object
    for key in set(iterkeys(old_dict)) | set(iterkeys(new_dict)):
        if key in new_dict:
            if key not in old_dict:
                # Move the object from new_dict to old_dict; copying the
                # value directly triggers odd behavior in SQLAlchemy-
                # instrumented collections that ends up adding new objects
                # to the session.
                temp = new_dict[key]
                del new_dict[key]
                old_dict[key] = temp
            else:
                # Update the old value with the new value.
                update_value_fn(old_dict[key], new_dict[key])
        else:
            # Delete the old value if no new value for that key.
            del old_dict[key]
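A minimal sketch of the key handling, using plain dicts as stand-ins (an assumption; the real arguments are SQLAlchemy-instrumented dict relationships and the default _update_object copies attribute values):

old = {'a': 1, 'b': 2}
new = {'b': 20, 'c': 30}
_update_dict(old, new, update_value_fn=lambda o, n: None)  # no-op update
assert set(old) == {'b', 'c'}   # 'a' deleted, 'c' moved over from new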
Example #30
def score_chipmatch_csum(qaid, chipmatch, qreq_):
    """
    score_chipmatch_csum

    Args:
        chipmatch (tuple):

    Returns:
        tuple: aid_list, score_list

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.model.hots.voting_rules2 import *  # NOQA
        >>> ibs, qreq_, qaid, chipmatch = get_chipmatch_testdata()
        >>> (aid_list, score_list) = score_chipmatch_csum(qaid, chipmatch, qreq_)
        >>> print(aid_list, score_list)
    """
    #(aid2_fm, aid2_fsv, aid2_fk, aid2_score, aid2_H) = chipmatch
    aid2_fsv = chipmatch.aid2_fsv
    if False:
        aid2_fs = {aid: fsv.prod(axis=1) for aid, fsv in six.iteritems(aid2_fsv)}
        aid_list = list(six.iterkeys(aid2_fs))
        fs_list = ut.dict_take(aid2_fs, aid_list)
        #fs_list  = list(six.itervalues(aid2_fs))
        score_list = [np.sum(fs) for fs in fs_list]
    else:
        aid_list = list(six.iterkeys(aid2_fsv))
        fsv_list = ut.dict_take(aid2_fsv, aid_list)
        fs_list = [fsv.prod(axis=1) for fsv in fsv_list]
        score_list = [np.sum(fs) for fs in fs_list]
    return (aid_list, score_list)
Example #31
 def iterkeys(self):
     return six.iterkeys(self._table)
Example #32
 def __iter__(self):
     return six.iterkeys(self.weak_key_dict)
Example #33
    def build_from_token_counts(
        self,
        token_counts,
        min_count,
        num_iterations=4,
        reserved_tokens=None,
        max_subtoken_length=None,
    ):
        """Train a SubwordTextEncoder based on a dictionary of word counts.

    Args:
      token_counts: a dictionary of Unicode strings to int.
      min_count: an integer - discard subtokens with lower counts.
      num_iterations: an integer.  how many iterations of refinement.
      reserved_tokens: List of reserved tokens. The global variable
        `RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
        argument is `None`, it will use `RESERVED_TOKENS`.
      max_subtoken_length: Maximum length of a subtoken. If this is not set,
        then the runtime and memory use of creating the vocab is quadratic in
        the length of the longest token. If this is set, then it is instead
        O(max_subtoken_length * length of longest token).

    Raises:
      ValueError: if reserved is not 0 or len(RESERVED_TOKENS). In this case, it
        is not clear what the space is being reserved for, or when it will be
        filled in.
    """
        if reserved_tokens is None:
            reserved_tokens = RESERVED_TOKENS
        else:
            # There is not complete freedom in replacing RESERVED_TOKENS.
            for default, proposed in zip(RESERVED_TOKENS, reserved_tokens):
                if default != proposed:
                    raise ValueError('RESERVED_TOKENS must be a prefix of '
                                     'reserved_tokens.')

        # Initialize the alphabet. Note, this must include reserved tokens or it can
        # result in encoding failures.
        alphabet_tokens = chain(
            six.iterkeys(token_counts),
            [native_to_unicode(t) for t in reserved_tokens],
        )

        self._init_alphabet_from_tokens(alphabet_tokens)

        # Bootstrap the initial list of subtokens with the characters from the
        # alphabet plus the escaping characters.
        self._init_subtokens_from_list(list(self._alphabet),
                                       reserved_tokens=reserved_tokens)

        # We build iteratively.  On each iteration, we segment all the words,
        # then count the resulting potential subtokens, keeping the ones
        # with high enough counts for our new vocabulary.
        if min_count < 1:
            min_count = 1
        for i in range(num_iterations):
            tf.logging.info('Iteration {0}'.format(i))

            # Collect all substrings of the encoded token that break along current
            # subtoken boundaries.
            subtoken_counts = collections.defaultdict(int)
            for token, count in six.iteritems(token_counts):
                iter_start_time = time.time()
                escaped_token = _escape_token(token, self._alphabet)
                subtokens = self._escaped_token_to_subtoken_strings(
                    escaped_token)
                start = 0
                for subtoken in subtokens:
                    last_position = len(escaped_token) + 1
                    if max_subtoken_length is not None:
                        last_position = min(last_position,
                                            start + max_subtoken_length)

                    for end in range(start + 1, last_position):
                        new_subtoken = escaped_token[start:end]
                        subtoken_counts[new_subtoken] += count
                    start += len(subtoken)
                iter_time_secs = time.time() - iter_start_time
                if iter_time_secs > 0.1:
                    tf.logging.info(
                        u'Processing token [{0}] took {1} seconds, consider '
                        'setting Text2TextProblem.max_subtoken_length to a '
                        'smaller value.'.format(token, iter_time_secs))

            # Array of sets of candidate subtoken strings, by length.
            len_to_subtoken_strings = []
            for subtoken_string, count in six.iteritems(subtoken_counts):
                lsub = len(subtoken_string)
                if count >= min_count:
                    while len(len_to_subtoken_strings) <= lsub:
                        len_to_subtoken_strings.append(set())
                    len_to_subtoken_strings[lsub].add(subtoken_string)

            # Consider the candidates longest to shortest, so that if we accept
            # a longer subtoken string, we can decrement the counts of its prefixes.
            new_subtoken_strings = []
            for lsub in range(len(len_to_subtoken_strings) - 1, 0, -1):
                subtoken_strings = len_to_subtoken_strings[lsub]
                for subtoken_string in subtoken_strings:
                    count = subtoken_counts[subtoken_string]
                    if count >= min_count:
                        # Exclude alphabet tokens here, as they must be included later,
                        # explicitly, regardless of count.
                        if subtoken_string not in self._alphabet:
                            new_subtoken_strings.append(
                                (count, subtoken_string))
                        for l in range(1, lsub):
                            subtoken_counts[subtoken_string[:l]] -= count

            # Include the alphabet explicitly to guarantee all strings are encodable.
            new_subtoken_strings.extend(
                (subtoken_counts.get(a, 0), a) for a in self._alphabet)
            new_subtoken_strings.sort(reverse=True)

            # Reinitialize to the candidate vocabulary.
            new_subtoken_strings = [
                subtoken for _, subtoken in new_subtoken_strings
            ]
            if reserved_tokens:
                escaped_reserved_tokens = [
                    _escape_token(native_to_unicode(t), self._alphabet)
                    for t in reserved_tokens
                ]
                new_subtoken_strings = (escaped_reserved_tokens +
                                        new_subtoken_strings)

            self._init_subtokens_from_list(new_subtoken_strings)
            tf.logging.info('vocab_size = %d' % self.vocab_size)
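A hedged usage sketch (tensor2tensor-style; assumes the enclosing class is SubwordTextEncoder and that it can be constructed without a vocab file):

token_counts = {u'low': 5, u'lower': 2, u'newest': 6, u'widest': 3}
encoder = SubwordTextEncoder()
encoder.build_from_token_counts(token_counts, min_count=2, num_iterations=4)
print(encoder.vocab_size)          # size of the learned subword vocabulary
print(encoder.encode('lowest'))    # list of subtoken ids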
Example #34
 def values(self):
     return [self.get(k) for k in iterkeys(self._fields)]
Example #35
 def items(self):
     return [(k, self.get(k)) for k in iterkeys(self._fields)]
Example #36
    def from_bdf(cls, cards):
        _ft = {
            None: DataHelper.default_int,
            '': DataHelper.default_int,
            'HILL': 1,
            'HOFF': 2,
            'TSAI': 3,
            'STRN': 4
        }

        # TODO: check that sout is correct
        _convert_sout = {'YES': 1, 'NO': 0}

        ply = {
            'IDENTITY': {
                'GPLYID': [],
                'MID': [],
                'THICK': [],
                'THETA': [],
                'SOUT': [],
                'MIDMTX': [],
                'VF': [],
                'VV': [],
                'CTEMP': [],
                'MOIST': [],
                'CRIT': [],
                'NFTI': [],
                'FTI': []
            }
        }

        result = {
            'IDENTITY': {
                'PID': [],
                'NPLIES': [],
                'Z0': [],
                'NSM': [],
                'SB': [],
                'FT': [],
                'TREF': [],
                'GE': [],
                'MICRO': [],
                'PLY_POS': [],
                'PLY_LEN': [],
                'DOMAIN_ID': []
            },
            'PLY': ply,
            '_subtables': ['PLY']
        }

        identity = result['IDENTITY']
        pid = identity['PID']
        nplies = identity['NPLIES']
        z0 = identity['Z0']
        nsm = identity['NSM']
        sb = identity['SB']
        ft = identity['FT']
        tref = identity['TREF']
        ge = identity['GE']
        micro = identity['MICRO']
        ply_pos = identity['PLY_POS']
        ply_len = identity['PLY_LEN']

        ply = ply['IDENTITY']
        gplyid = ply['GPLYID']
        mid = ply['MID']
        thick = ply['THICK']
        theta = ply['THETA']
        sout = ply['SOUT']
        midmtx = ply['MIDMTX']
        vf = ply['VF']
        vv = ply['VV']
        ctemp = ply['CTEMP']
        moist = ply['MOIST']
        crit = ply['CRIT']
        nfti = ply['NFTI']
        fti = ply['FTI']

        card_ids = sorted(iterkeys(cards))

        _pos = 0
        for card_id in card_ids:
            card = cards[card_id]

            pid.append(card.pid)
            n = len(card.thicknesses)
            nplies.append(n)
            z0.append(card.z0)
            nsm.append(card.nsm)
            sb.append(card.sb)
            ft.append(_ft[card.ft])
            tref.append(card.tref)
            ge.append(card.ge)
            micro.append(DataHelper.unknown_str)
            ply_pos.append(_pos)
            ply_len.append(n)
            _pos += n

            gplyid += list(card.global_ply_ids)
            mid += list(card.mids)
            thick += list(card.thicknesses)
            theta += list(card.thetas)
            sout += [_convert_sout[_] for _ in card.souts]
            midmtx += [DataHelper.unknown_int] * n
            vf += [DataHelper.unknown_double] * n
            vv += [DataHelper.unknown_double] * n
            ctemp += [DataHelper.unknown_double] * n
            moist += [DataHelper.unknown_double] * n
            crit += [DataHelper.unknown_str] * n
            nfti += [DataHelper.unknown_int] * n
            fti += [DataHelper.unknown_str] * n

        return result
Example #37
    def from_bdf(cls, cards):
        _ft = {
            None: DataHelper.default_int,
            '': DataHelper.default_int,
            'HILL': 1,
            'HOFF': 2,
            'TSAI': 3,
            'STRN': 4
        }

        # TODO: check that sout is correct
        _convert_sout = {'YES': 1, 'NO': 0}

        ply = {'IDENTITY': {'MID': [], 'T': [], 'THETA': [], 'SOUT': []}}

        data = {
            'IDENTITY': {
                'PID': [],
                'NPLIES': [],
                'Z0': [],
                'NSM': [],
                'SB': [],
                'FT': [],
                'TREF': [],
                'GE': [],
                'PLY_POS': [],
                'PLY_LEN': []
            },
            'PLY': ply,
            '_subtables': ['PLY']
        }

        identity = data['IDENTITY']
        pid = identity['PID']
        nplies = identity['NPLIES']
        z0 = identity['Z0']
        nsm = identity['NSM']
        sb = identity['SB']
        ft = identity['FT']
        tref = identity['TREF']
        ge = identity['GE']
        ply_pos = identity['PLY_POS']
        ply_len = identity['PLY_LEN']

        ply = ply['IDENTITY']
        mid = ply['MID']
        t = ply['T']
        theta = ply['THETA']
        sout = ply['SOUT']

        card_ids = sorted(iterkeys(cards))

        _ply_pos = 0
        for card_id in card_ids:
            card = cards[card_id]

            _plies = len(card.material_ids)

            pid.append(card.pid)
            nplies.append(_plies)
            z0.append(round(card.z0, 15))
            nsm.append(card.nsm)
            sb.append(card.sb)
            ft.append(_ft[card.ft])
            tref.append(card.tref)
            ge.append(card.ge)
            ply_pos.append(_ply_pos)
            ply_len.append(_plies)
            _ply_pos += _plies

            mid.extend(list(card.material_ids))
            t.extend(list(card.thicknesses))
            theta.extend(card.thetas)
            sout.extend([_convert_sout.get(_, 0) for _ in card.souts])

        return data
Example #38
from livvkit.util import colormaps
from livvkit.util.LIVVDict import LIVVDict
from livvkit.util import elements


def _run_suite(case, config, summary):
    """ Run the full suite of verification tests """
    config["name"] = case
    model_dir = os.path.join(livvkit.model_dir, config['data_dir'], case)
    bench_dir = os.path.join(livvkit.bench_dir, config['data_dir'], case)
    tabs = []
    case_summary = LIVVDict()
    model_cases = functions.collect_cases(model_dir)
    bench_cases = functions.collect_cases(bench_dir)

    for subcase in sorted(six.iterkeys(model_cases)):
        bench_subcases = bench_cases[subcase] if subcase in bench_cases else []
        case_sections = []
        for mcase in sorted(model_cases[subcase],
                            key=functions.sort_processor_counts):
            bpath = (os.path.join(bench_dir, subcase, mcase.replace(
                "-", os.sep)) if mcase in bench_subcases else "")
            mpath = os.path.join(model_dir, subcase,
                                 mcase.replace("-", os.sep))
            case_result = _analyze_case(mpath, bpath, config)
            case_sections.append(elements.section(mcase, case_result))
            case_summary[subcase] = _summarize_result(case_result,
                                                      case_summary[subcase])
        tabs.append(elements.tab(subcase, section_list=case_sections))

    result = elements.page(case, config["description"], tab_list=tabs)
Example #39
    def build_image_list(self):
        def process_source_installation(image, section):
            installation = dict()
            # NOTE(jeffrey4l): source is not needed when the type is None
            if self.conf._get('type', self.conf._get_group(section)) is None:
                if image.parent_name is None:
                    LOG.debug('No source location found in section %s',
                              section)
            else:
                installation['type'] = self.conf[section]['type']
                installation['source'] = self.conf[section]['location']
                installation['name'] = section
                if installation['type'] == 'git':
                    installation['reference'] = self.conf[section]['reference']
            return installation

        all_sections = (set(six.iterkeys(self.conf._groups))
                        | set(self.conf.list_all_sections()))

        for path in self.docker_build_paths:
            # Reading parent image name
            with open(os.path.join(path, 'Dockerfile')) as f:
                content = f.read()

            image_name = os.path.basename(path)
            canonical_name = (self.namespace + '/' + self.image_prefix +
                              image_name + ':' + self.tag)
            parent_search_pattern = re.compile(r'^FROM.*$', re.MULTILINE)
            match = re.search(parent_search_pattern, content)
            if match:
                parent_name = match.group(0).split(' ')[1]
            else:
                parent_name = ''
            del match
            image = Image(image_name,
                          canonical_name,
                          path,
                          parent_name=parent_name,
                          logger=utils.make_a_logger(self.conf, image_name),
                          docker_client=self.dc)

            if self.install_type == 'source':
                # NOTE(jeffrey4l): register the opts if the section didn't
                # register in the kolla/common/config.py file
                if image.name not in self.conf._groups:
                    self.conf.register_opts(common_config.get_source_opts(),
                                            image.name)
                image.source = process_source_installation(image, image.name)
                for plugin in [
                        match.group(0)
                        for match in (re.search(
                            '^{}-plugin-.+'.format(image.name), section)
                                      for section in all_sections) if match
                ]:
                    try:
                        self.conf.register_opts(
                            common_config.get_source_opts(), plugin)
                    except cfg.DuplicateOptError:
                        LOG.debug('Plugin %s already registered in config',
                                  plugin)
                    image.plugins.append(
                        process_source_installation(image, plugin))
                for addition in [
                        match.group(0)
                        for match in (re.search(
                            '^{}-additions-.+'.format(image.name), section)
                                      for section in all_sections) if match
                ]:
                    try:
                        self.conf.register_opts(
                            common_config.get_source_opts(), addition)
                    except cfg.DuplicateOptError:
                        LOG.debug('Addition %s already registered in config',
                                  addition)
                    image.additions.append(
                        process_source_installation(image, addition))

            self.images.append(image)
Example #40
    def _post(self, request):
        relay = request.relay
        assert relay is not None  # should be provided during Authentication

        full_config_requested = request.relay_request_data.get("fullConfig")

        if full_config_requested and not relay.is_internal:
            return Response("Relay unauthorized for full config information", 403)

        with Hub.current.start_span(op="relay_fetch_projects"):
            project_ids = set(request.relay_request_data.get("projects") or ())
            if project_ids:
                with metrics.timer("relay_project_configs.fetching_projects.duration"):
                    projects = {p.id: p for p in Project.objects.get_many_from_cache(project_ids)}
            else:
                projects = {}

        with Hub.current.start_span(op="relay_fetch_orgs"):
            # Preload all organizations and their options to prevent repeated
            # database access when computing the project configuration.
            org_ids = set(project.organization_id for project in six.itervalues(projects))
            if org_ids:
                with metrics.timer("relay_project_configs.fetching_orgs.duration"):
                    orgs = Organization.objects.get_many_from_cache(org_ids)
                    orgs = {o.id: o for o in orgs if request.relay.has_org_access(o)}
            else:
                orgs = {}

            with metrics.timer("relay_project_configs.fetching_org_options.duration"):
                for org_id in six.iterkeys(orgs):
                    OrganizationOption.objects.get_all_values(org_id)

        with Hub.current.start_span(op="relay_fetch_keys"):
            project_keys = {}
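            # Group keys by project_id so each project's keys can be handed to
            # get_project_config below.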
            for key in ProjectKey.objects.filter(project_id__in=project_ids):
                project_keys.setdefault(key.project_id, []).append(key)

        metrics.timing("relay_project_configs.projects_requested", len(project_ids))
        metrics.timing("relay_project_configs.projects_fetched", len(projects))
        metrics.timing("relay_project_configs.orgs_fetched", len(orgs))

        configs = {}
        for project_id in project_ids:
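            # Start from a disabled config; it is replaced below once the
            # project and its organization resolve.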
            configs[six.text_type(project_id)] = {"disabled": True}

            project = projects.get(int(project_id))
            if project is None:
                continue

            organization = orgs.get(project.organization_id)
            if organization is None:
                continue

            # Try to prevent organization from being fetched again in quotas.
            project.organization = organization
            project._organization_cache = organization

            with Hub.current.start_span(op="get_config"):
                with metrics.timer("relay_project_configs.get_config.duration"):
                    project_config = config.get_project_config(
                        project,
                        full_config=full_config_requested,
                        project_keys=project_keys.get(project.id) or [],
                    )

            configs[six.text_type(project_id)] = project_config.to_dict()

        if full_config_requested:
            projectconfig_cache.set_many(configs)

        return Response({"configs": configs}, status=200)
Example #41
0
    def _doLayout(self):

        dialogSizer = wx.BoxSizer(wx.VERTICAL)

        dialogSizer.Add(self.settsManager,
                        proportion=0,
                        flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT,
                        border=5)

        # connection settings
        settingsSizer = wx.StaticBoxSizer(self.settingsBox, wx.VERTICAL)

        serverSizer = wx.FlexGridSizer(cols=3, vgap=5, hgap=5)

        serverSizer.Add(self.serverText, flag=wx.ALIGN_CENTER_VERTICAL)
        serverSizer.AddGrowableCol(1)
        serverSizer.Add(self.server, flag=wx.EXPAND | wx.ALL)

        serverSizer.Add(self.btn_connect)

        settingsSizer.Add(serverSizer,
                          proportion=0,
                          flag=wx.EXPAND | wx.LEFT | wx.RIGHT,
                          border=5)

        settingsSizer.Add(self.adv_conn, flag=wx.ALL | wx.EXPAND, border=5)

        dialogSizer.Add(settingsSizer,
                        proportion=0,
                        flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT,
                        border=5)

        # layer name, parsed capabilities

        reqDataSizer = wx.BoxSizer(wx.VERTICAL)

        layerNameSizer = wx.StaticBoxSizer(self.layerNameBox, wx.HORIZONTAL)

        layerNameSizer.Add(self.layerNameText,
                           flag=wx.ALIGN_CENTER_VERTICAL | wx.ALL,
                           border=5)

        layerNameSizer.Add(self.layerName, flag=wx.EXPAND, proportion=1)

        reqDataSizer.Add(layerNameSizer,
                         flag=wx.TOP | wx.LEFT | wx.RIGHT | wx.EXPAND,
                         border=5)

        self.ch_ws_sizer = wx.BoxSizer(wx.VERTICAL)

        reqDataSizer.Add(self.ch_ws_sizer,
                         proportion=0,
                         flag=wx.TOP | wx.EXPAND,
                         border=5)

        for ws in six.iterkeys(self.ws_panels):
            reqDataSizer.Add(self.ws_panels[ws]['panel'],
                             proportion=1,
                             flag=wx.TOP | wx.LEFT | wx.RIGHT | wx.EXPAND,
                             border=5)
            self.ws_panels[ws]['panel'].Hide()

        dialogSizer.Add(self.reqDataPanel, proportion=1, flag=wx.EXPAND)

        self.reqDataPanel.SetSizer(reqDataSizer)
        self.reqDataPanel.Hide()

        # buttons
        self.btnsizer = wx.BoxSizer(orient=wx.HORIZONTAL)

        self.btnsizer.Add(self.btn_close,
                          proportion=0,
                          flag=wx.ALL | wx.ALIGN_CENTER,
                          border=10)

        dialogSizer.Add(self.btnsizer, proportion=0, flag=wx.ALIGN_CENTER)

        # expand wxWidget wx.StatusBar
        statusbarSizer = wx.BoxSizer(wx.HORIZONTAL)
        statusbarSizer.Add(self.statusbar, proportion=1, flag=wx.EXPAND)
        dialogSizer.Add(statusbarSizer, proportion=0, flag=wx.EXPAND)

        self.SetSizer(dialogSizer)
        self.Layout()

        self.SetMinSize((550, -1))
        self.SetMaxSize((-1, self.GetBestSize()[1]))
        self.Fit()
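# A self-contained sketch (assuming wxPython is installed) of the sizer-nesting
# pattern used in _doLayout above: a horizontal row sizer placed inside a
# vertical dialog sizer. The widget names here are illustrative only.
import wx

app = wx.App(False)
frame = wx.Frame(None, title="Sizer nesting sketch")
dialogSizer = wx.BoxSizer(wx.VERTICAL)

serverSizer = wx.BoxSizer(wx.HORIZONTAL)
serverSizer.Add(wx.StaticText(frame, label="Server:"), flag=wx.ALIGN_CENTER_VERTICAL)
serverSizer.Add(wx.TextCtrl(frame), proportion=1, flag=wx.EXPAND)

dialogSizer.Add(serverSizer, proportion=0, flag=wx.EXPAND | wx.ALL, border=5)
frame.SetSizer(dialogSizer)
frame.Layout()
frame.Show()
app.MainLoop()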
Example #42
0
            error_string = plugin_class.IsValidEnvironment()
            if error_string is not None:
                sys.stdout.write("INFO: {}\n".format(error_string))
                continue

            yield plugin_class


PLUGINS                                     = OrderedDict([(plugin.Name, plugin) for plugin in EnumeratePlugins()])
del EnumeratePlugins

if not PLUGINS:
    sys.stdout.write("\nWARNING: No plugins were found.\n")
    sys.exit(1)

_PluginTypeInfo                             = CommandLine.EnumTypeInfo(list(six.iterkeys(PLUGINS)))

# ----------------------------------------------------------------------
SUPPORTED_TYPES                             = set(
    [
        "int8",
        "int16",
        "int32",
        "int64",
        "uint8",
        "uint16",
        "uint32",
        "uint64",
        "float",
        "double",
        "string",
Example #43
0
def transitVisibilityPlot(allData,
                          markTransit=False,
                          plotLegend=True,
                          showMoonDist=True,
                          print2file=False):
    """
    Plot the visibility of transits.
    
    This function can conveniently be used with the output of
    the transitTimes function.
    
    Parameters
    ----------
    allData : dictionary
        Essentially the output of `transitTimes`.
        A dictionary mapping consecutive numbers (one per transit) to
        another dictionary providing the following keys:
        
          ============    ====================================================
          Key             Description
          ------------    ----------------------------------------------------
          Planet name     Name of the planet
          Transit jd      (Only if `markTransit` is True)
                          Array giving JD of start, mid-time, and end of
                          transit.
          Obs jd          Array specifying the HJD of the start, center and
                          end of the observation.
          Obs cal         Equivalent to 'Obs jd', but in the form of the
                          calendar date. In particular, for each date, a list
                          containing [Year, month, day, fractional hours]
                          is given.
          Obs coord       East longitude [deg], latitude [deg], and
                          altitude [m] of the observatory.
          Star ra         Right ascension of the star [deg].
          Star dec        Declination of the star [deg].
          ============    ====================================================

        .. note:: To use the dictionary created by `transitTimes`, the LONGITUDE
                  and LATITUDE of the observatory location must have been specified.
    markTransit : boolean, optional
        If True (default is False), the in-transit times are clearly
        highlighted in the plot. This is particularly useful if extra
        off-transit time before and after the transit was requested,
        because the observation window then extends beyond the transit itself.
    plotLegend : boolean, optional
        If True (default), a legend distinguishing day, twilight, night (and,
        if marked, transit) is added to the plot.
    showMoonDist : boolean, optional
        If True (default), the angular distance to the Moon is indicated
        along the target's track.
    print2file : boolean, optional
        If True, the plot is saved to a png file named
        "transVis-<planet name>.png" (spaces removed from the planet name).
        The default is False.
    """
    from PyAstronomy.pyasl import _ic
    if not _ic.check["matplotlib"]:
        raise(PE.PyARequiredImport("matplotlib is not installed.", \
              where="transitVisibilityPlot", \
              solution="Install matplotlib (http://matplotlib.org/)"))

    import matplotlib
    import matplotlib.pylab as plt
    from mpl_toolkits.axes_grid1 import host_subplot
    from matplotlib.ticker import MultipleLocator
    from matplotlib.font_manager import FontProperties
    from matplotlib import rcParams

    rcParams['xtick.major.pad'] = 12

    if len(allData) == 0:
        raise(PE.PyAValError("Input dictionary is empty", \
              where="transitVisibilityPlot", \
              solution=["Use `transitTimes` to generate input dictionary",
                        "Did you forget to supply observer's location?", \
                        "If you used `transitTime`, you might need to change the call argument (e.g., times)"]))

    # Check whether all relevant data have been specified
    reqK = [
        "Obs jd", "Obs coord", "Star ra", "Star dec", "Obs cal", "Planet name"
    ]
    if markTransit:
        reqK.append("Transit jd")
    missingK = []
    for k in reqK:
        if k not in allData[1]:
            missingK.append(k)
    if len(missingK) > 0:
        raise(PE.PyAValError("The following keys are missing in the input dictionary: " + ', '.join(missingK), \
                             where="transitVisibilityPlot", \
                             solution="Did you specify observer's location in `transitTimes`?"))

    fig = plt.figure(figsize=(15, 10))
    fig.subplots_adjust(left=0.07, right=0.8, bottom=0.15, top=0.88)
    ax = host_subplot(111)

    font0 = FontProperties()
    font1 = font0.copy()
    font0.set_family('sans-serif')
    font0.set_weight('light')
    font1.set_family('sans-serif')
    font1.set_weight('medium')

    for n in six.iterkeys(allData):
        # JD array
        jdbinsize = 1.0 / 24. / 10.
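        # jdbinsize corresponds to 1/(24*10) day, i.e. a 6-minute sampling step.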
        jds = np.arange(allData[n]["Obs jd"][0], allData[n]["Obs jd"][2],
                        jdbinsize)
        # Get JD floating point
        jdsub = jds - np.floor(jds[0])
        # Get alt/az of object
        altaz = eq2hor.eq2hor(jds, np.ones(jds.size)*allData[n]["Star ra"], np.ones(jds.size)*allData[n]["Star dec"], \
                            lon=allData[n]["Obs coord"][0], lat=allData[n]["Obs coord"][1], \
                            alt=allData[n]["Obs coord"][2])
        # Get alt/az of Sun
        sunpos_altaz = eq2hor.eq2hor(jds, np.ones(jds.size)*allData[n]["Sun ra"], np.ones(jds.size)*allData[n]["Sun dec"], \
                                    lon=allData[n]["Obs coord"][0], lat=allData[n]["Obs coord"][1], \
                                    alt=allData[n]["Obs coord"][2])

        # Define plot label
        plabel = "[%02d]  %02d.%02d.%4d" % (n, allData[n]["Obs cal"][0][2], \
                                            allData[n]["Obs cal"][0][1], allData[n]["Obs cal"][0][0])

        # Find periods of: day, twilight, and night
        day = np.where(sunpos_altaz[0] >= 0.)[0]
        twi = np.where(
            np.logical_and(sunpos_altaz[0] > -18., sunpos_altaz[0] < 0.))[0]
        night = np.where(sunpos_altaz[0] <= -18.)[0]

        if (len(day) == 0) and (len(twi) == 0) and (len(night) == 0):
            print()
            print("transitVisibilityPlot - no points to draw for date %2d.%2d.%4d" \
                  % (allData[n]["Obs cal"][0][2], allData[n]["Obs cal"][0][1], allData[n]["Obs cal"][0][0]))
            print("Skip transit and continue with next")
            print()
            continue

        mpos = moonpos(jds)
        mpha = moonphase(jds)
        mpos_altaz = eq2hor.eq2hor(jds, mpos[0], mpos[1], lon=allData[n]["Obs coord"][0], \
                                   lat=allData[n]["Obs coord"][1], alt=allData[n]["Obs coord"][2])
        moonind = np.where(mpos_altaz[0] > 0.)[0]

        if showMoonDist:
            mdist = getAngDist(mpos[0], mpos[1], np.ones(jds.size)*allData[n]["Star ra"], \
                               np.ones(jds.size)*allData[n]["Star dec"])
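            # Place a Moon-distance label roughly every two hours along the track.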
            bindist = int((2.0 / 24.) / jdbinsize)
            firstbin = np.random.randint(0, bindist)
            for mp in range(0, int(len(jds) / bindist)):
                bind = int(firstbin + float(mp) * bindist)
                ax.text(jdsub[bind], altaz[0][bind]-1., str(int(mdist[bind]))+r"$^\circ$", ha="center", va="top", \
                        fontsize=8, stretch='ultra-condensed', fontproperties=font0, alpha=1.)

        if markTransit:
            # Mark points within transit. These may differ from that pertaining to the
            # observation if an extra offset was given to provide off-transit time.
            transit_only_ind = np.where( np.logical_and(jds >= allData[n]["Transit jd"][0], \
                                                        jds <= allData[n]["Transit jd"][2]) )[0]
            ax.plot(jdsub[transit_only_ind],
                    altaz[0][transit_only_ind],
                    'g',
                    linewidth=6,
                    alpha=.3)

        if len(twi) > 1:
            # There are points in twilight
            linebreak = np.where(
                (jdsub[twi][1:] - jdsub[twi][:-1]) > 2.0 * jdbinsize)[0]
            if len(linebreak) > 0:
                plotrjd = np.insert(jdsub[twi], linebreak + 1, np.nan)
                plotdat = np.insert(altaz[0][twi], linebreak + 1, np.nan)
                ax.plot(plotrjd, plotdat, "-", color='#BEBEBE', linewidth=1.5)
            else:
                ax.plot(jdsub[twi],
                        altaz[0][twi],
                        "-",
                        color='#BEBEBE',
                        linewidth=1.5)

        ax.plot(jdsub[night],
                altaz[0][night],
                'k',
                linewidth=1.5,
                label=plabel)
        ax.plot(jdsub[day], altaz[0][day], color='#FDB813', linewidth=1.5)

        altmax = np.argmax(altaz[0])
        ax.text( jdsub[altmax], altaz[0][altmax], str(n), color="b", fontsize=14, \
                 fontproperties=font1, va="bottom", ha="center")

        if n == 29:
            ax.text( 1.1, 1.0-float(n)*0.04, "too many transits", ha="left", va="top", transform=ax.transAxes, \
                    fontsize=10, fontproperties=font0, color="r")
        else:
            ax.text( 1.1, 1.0-float(n)*0.04, plabel, ha="left", va="top", transform=ax.transAxes, \
                    fontsize=12, fontproperties=font0, color="b")

    ax.text( 1.1, 1.03, "Start of observation", ha="left", va="top", transform=ax.transAxes, \
            fontsize=12, fontproperties=font0, color="b")
    ax.text( 1.1, 1.0, "[No.]  Date", ha="left", va="top", transform=ax.transAxes, \
            fontsize=12, fontproperties=font0, color="b")

    axrange = ax.get_xlim()
    ax.set_xlabel("UT [hours]")

    if axrange[1] - axrange[0] <= 1.0:
        jdhours = np.arange(0, 3, 1.0 / 24.)
        utchours = (np.arange(0, 72, dtype=int) + 12) % 24
    else:
        jdhours = np.arange(0, 3, 1.0 / 12.)
        utchours = (np.arange(0, 72, 2, dtype=int) + 12) % 24
    ax.set_xticks(jdhours)
    ax.set_xlim(axrange)
    ax.set_xticklabels(utchours, fontsize=18)

    # Make ax2 responsible for "top" axis and "right" axis
    ax2 = ax.twin()
    # Set upper x ticks
    ax2.set_xticks(jdhours)
    ax2.set_xticklabels(utchours, fontsize=18)
    ax2.set_xlabel("UT [hours]")

    # Horizon angle for airmass
    airmass_ang = np.arange(5., 90., 5.)
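    # Plane-parallel airmass, i.e. sec(z) at zenith angle z = 90 deg - altitude.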
    geo_airmass = airmass.airmassPP(90. - airmass_ang)
    ax2.set_yticks(airmass_ang)
    airmassformat = []
    for t in range(geo_airmass.size):
        airmassformat.append("%2.2f" % geo_airmass[t])
    ax2.set_yticklabels(airmassformat, rotation=90)
    ax2.set_ylabel("Relative airmass", labelpad=32)
    ax2.tick_params(axis="y", pad=10, labelsize=10)
    plt.text(1.015,-0.04, "Plane-parallel", transform=ax.transAxes, ha='left', \
             va='top', fontsize=10, rotation=90)

    ax22 = ax.twin()
    ax22.set_xticklabels([])
    ax22.set_frame_on(True)
    ax22.patch.set_visible(False)
    ax22.yaxis.set_ticks_position('right')
    ax22.yaxis.set_label_position('right')
    ax22.spines['right'].set_position(('outward', 25))
    ax22.spines['right'].set_color('k')
    ax22.spines['right'].set_visible(True)
    airmass2 = np.array([
        airmass.airmassSpherical(90. - ang, allData[n]["Obs coord"][2])
        for ang in airmass_ang
    ])
    ax22.set_yticks(airmass_ang)
    airmassformat = []
    for t in range(airmass2.size):
        airmassformat.append("%2.2f" % airmass2[t])
    ax22.set_yticklabels(airmassformat, rotation=90)
    ax22.tick_params(axis="y", pad=10, labelsize=10)
    plt.text(1.045,-0.04, "Spherical+Alt", transform=ax.transAxes, ha='left', va='top', \
             fontsize=10, rotation=90)

    ax3 = ax.twiny()
    ax3.set_frame_on(True)
    ax3.patch.set_visible(False)
    ax3.xaxis.set_ticks_position('bottom')
    ax3.xaxis.set_label_position('bottom')
    ax3.spines['bottom'].set_position(('outward', 50))
    ax3.spines['bottom'].set_color('k')
    ax3.spines['bottom'].set_visible(True)

    ltime, ldiff = localtime.localTime(
        utchours, np.repeat(allData[n]["Obs coord"][0], len(utchours)))
    jdltime = jdhours - ldiff / 24.
    ax3.set_xticks(jdltime)
    ax3.set_xticklabels(utchours)
    ax3.set_xlim([axrange[0], axrange[1]])
    ax3.set_xlabel("Local time [hours]")

    ax.yaxis.set_major_locator(MultipleLocator(15))
    ax.yaxis.set_minor_locator(MultipleLocator(5))
    yticks = ax.get_yticks()
    ytickformat = []
    for t in range(yticks.size):
        ytickformat.append(str(int(yticks[t])) + r"$^\circ$")
    ax.set_yticklabels(ytickformat, fontsize=20)
    ax.set_ylabel("Altitude", fontsize=18)
    yticksminor = ax.get_yticks(minor=True)
    ymind = np.where(yticksminor % 15. != 0.)[0]
    yticksminor = yticksminor[ymind]
    ax.set_yticks(yticksminor, minor=True)
    m_ytickformat = []
    for t in range(yticksminor.size):
        m_ytickformat.append(str(int(yticksminor[t])) + r"$^\circ$")
    ax.set_yticklabels(m_ytickformat, minor=True)

    ax.yaxis.grid(color='gray', linestyle='dashed')
    ax.yaxis.grid(color='gray', which="minor", linestyle='dotted')
    ax2.xaxis.grid(color='gray', linestyle='dotted')

    plt.text(0.5,0.95,"Transit visibility of "+allData[n]["Planet name"].decode("utf8"), \
             transform=fig.transFigure, ha='center', va='bottom', fontsize=20)

    if plotLegend:
        line1 = matplotlib.lines.Line2D((0, 0), (1, 1),
                                        color='#FDB813',
                                        linestyle="-",
                                        linewidth=2)
        line2 = matplotlib.lines.Line2D((0, 0), (1, 1),
                                        color='#BEBEBE',
                                        linestyle="-",
                                        linewidth=2)
        line3 = matplotlib.lines.Line2D((0, 0), (1, 1),
                                        color='k',
                                        linestyle="-",
                                        linewidth=2)
        line4 = matplotlib.lines.Line2D((0, 0), (1, 1),
                                        color='g',
                                        linestyle="-",
                                        linewidth=6,
                                        alpha=.3)

        if markTransit:
            lgd2 = plt.legend((line1,line2,line3, line4),("day","twilight","night","transit",), \
                              bbox_to_anchor=(0.88, 0.15), loc=2, borderaxespad=0.,prop={'size':12}, fancybox=True)
        else:
            lgd2 = plt.legend((line1,line2,line3),("day","twilight","night",), \
                              bbox_to_anchor=(0.88, 0.13), loc=2, borderaxespad=0.,prop={'size':12}, fancybox=True)
        lgd2.get_frame().set_alpha(.5)

    targetco = r"Target coordinates: (%8.4f$^\circ$, %8.4f$^\circ$)" % \
              (allData[n]["Star ra"], allData[n]["Star dec"])
    obsco = "Obs coord.: (%8.4f$^\circ$, %8.4f$^\circ$, %4d m)" % \
            (allData[n]["Obs coord"][0], allData[n]["Obs coord"][1], allData[n]["Obs coord"][2])
    plt.text(0.01,
             0.97,
             targetco,
             transform=fig.transFigure,
             ha='left',
             va='center',
             fontsize=10)
    plt.text(0.01,
             0.95,
             obsco,
             transform=fig.transFigure,
             ha='left',
             va='center',
             fontsize=10)

    if print2file:
        outfile = "transVis-" + allData[n]["Planet name"].replace(" ",
                                                                  "") + ".png"
        plt.savefig(outfile, format="png", dpi=300)
    else:
        plt.show()
Example #44
0
class Silicone(datasets.GeneratorBasedBuilder):
    """The Sequence labellIng evaLuatIon benChmark fOr spoken laNguagE (SILICONE) benchmark."""

    BUILDER_CONFIGS = [
        SiliconeConfig(
            name="dyda_da",
            description=textwrap.dedent(
                """\
            The DailyDialog Act Corpus contains multi-turn dialogues and is supposed to reflect daily
            communication by covering topics about daily life. The dataset is manually labelled with
             dialog act and emotions. It is the third biggest corpus of SILICONE with 102k utterances."""
            ),
            text_features={
                "Utterance": "Utterance",
                "Dialogue_Act": "Dialogue_Act",
                "Dialogue_ID": "Dialogue_ID",
            },
            label_classes=["commissive", "directive", "inform", "question"],
            label_column="Dialogue_Act",
            data_url={
                "train": _URL + "/dyda/train.csv",
                "dev": _URL + "/dyda/dev.csv",
                "test": _URL + "/dyda/test.csv",
            },
            citation=textwrap.dedent(
                """\
            @InProceedings{li2017dailydialog,
            author = {Li, Yanran and Su, Hui and Shen, Xiaoyu and Li, Wenjie and Cao, Ziqiang and Niu, Shuzi},
            title = {DailyDialog: A Manually Labelled Multi-turn Dialogue Dataset},
            booktitle = {Proceedings of The 8th International Joint Conference on Natural Language Processing (IJCNLP 2017)},
            year = {2017}
            }"""
            ),
            url="http://yanran.li/dailydialog.html",
        ),
        SiliconeConfig(
            name="dyda_e",
            description=textwrap.dedent(
                """\
            The DailyDialog Act Corpus contains multi-turn dialogues and is supposed to reflect daily
            communication by covering topics about daily life. The dataset is manually labelled with
             dialog act and emotions. It is the third biggest corpus of SILICONE with 102k utterances."""
            ),
            text_features={
                "Utterance": "Utterance",
                "Emotion": "Emotion",
                "Dialogue_ID": "Dialogue_ID",
            },
            label_classes=["anger", "disgust", "fear", "happiness", "no emotion", "sadness", "surprise"],
            label_column="Emotion",
            data_url={
                "train": _URL + "/dyda/train.csv",
                "dev": _URL + "/dyda/dev.csv",
                "test": _URL + "/dyda/test.csv",
            },
            citation=textwrap.dedent(
                """\
            @InProceedings{li2017dailydialog,
            author = {Li, Yanran and Su, Hui and Shen, Xiaoyu and Li, Wenjie and Cao, Ziqiang and Niu, Shuzi},
            title = {DailyDialog: A Manually Labelled Multi-turn Dialogue Dataset},
            booktitle = {Proceedings of The 8th International Joint Conference on Natural Language Processing (IJCNLP 2017)},
            year = {2017}
            }"""
            ),
            url="http://yanran.li/dailydialog.html",
        ),
        SiliconeConfig(
            name="iemocap",
            description=textwrap.dedent(
                """\
            The IEMOCAP database is a multi-modal database of ten speakers. It consists of dyadic
            sessions where actors perform improvisations or scripted scenarios. Emotion categories
            are: anger, happiness, sadness, neutral, excitement, frustration, fear, surprise, and other.
            There is no official split of this dataset."""
            ),
            text_features={
                "Dialogue_ID": "Dialogue_ID",
                "Utterance_ID": "Utterance_ID",
                "Utterance": "Utterance",
                "Emotion": "Emotion",
            },
            label_classes=list(six.iterkeys(IEMOCAP_E_DESCRIPTION)),
            label_column="Emotion",
            data_url={
                "train": _URL + "/iemocap/train.csv",
                "dev": _URL + "/iemocap/dev.csv",
                "test": _URL + "/iemocap/test.csv",
            },
            citation=textwrap.dedent(
                """\
            @article{busso2008iemocap,
            title={IEMOCAP: Interactive emotional dyadic motion capture database},
            author={Busso, Carlos and Bulut, Murtaza and Lee, Chi-Chun and Kazemzadeh, Abe and Mower,
            Emily and Kim, Samuel and Chang, Jeannette N and Lee, Sungbok and Narayanan, Shrikanth S},
            journal={Language resources and evaluation},
            volume={42},
            number={4},
            pages={335},
            year={2008},
            publisher={Springer}
            }"""
            ),
            url="https://sail.usc.edu/iemocap/",
        ),
        SiliconeConfig(
            name="maptask",
            description=textwrap.dedent(
                """\
            The HCRC MapTask Corpus was constructed through the verbal collaboration of participants
             in order to construct a map route. This corpus is small (27k utterances). As there is
             no standard train/dev/test split performance depends on the split."""
            ),
            text_features={
                "Speaker": "Speaker",
                "Utterance": "Utterance",
                "Dialogue_Act": "Dialogue_Act",
            },
            label_classes=[
                "acknowledge",
                "align",
                "check",
                "clarify",
                "explain",
                "instruct",
                "query_w",
                "query_yn",
                "ready",
                "reply_n",
                "reply_w",
                "reply_y",
            ],
            label_column="Dialogue_Act",
            data_url={
                "train": _URL + "/maptask/train.txt",
                "dev": _URL + "/maptask/dev.txt",
                "test": _URL + "/maptask/test.txt",
            },
            citation=textwrap.dedent(
                """\
            @inproceedings{thompson1993hcrc,
            title={The HCRC map task corpus: natural dialogue for speech recognition},
            author={Thompson, Henry S and Anderson, Anne H and Bard, Ellen Gurman and Doherty-Sneddon,
            Gwyneth and Newlands, Alison and Sotillo, Cathy},
            booktitle={HUMAN LANGUAGE TECHNOLOGY: Proceedings of a Workshop Held at Plainsboro, New Jersey, March 21-24, 1993},
            year={1993}
            }"""
            ),
            url="http://groups.inf.ed.ac.uk/maptask/",
        ),
        SiliconeConfig(
            name="meld_e",
            description=textwrap.dedent(
                """\
            The Multimodal EmotionLines Dataset enhances and extends the EmotionLines dataset where
            multiple speakers participate in the dialogue."""
            ),
            text_features={
                "Utterance": "Utterance",
                "Speaker": "Speaker",
                "Emotion": "Emotion",
                "Dialogue_ID": "Dialogue_ID",
                "Utterance_ID": "Utterance_ID",
            },
            label_classes=["anger", "disgust", "fear", "joy", "neutral", "sadness", "surprise"],
            label_column="Emotion",
            data_url={
                "train": _URL + "/meld/train.csv",
                "dev": _URL + "/meld/dev.csv",
                "test": _URL + "/meld/test.csv",
            },
            citation=textwrap.dedent(
                """\
            @article{chen2018emotionlines,
            title={Emotionlines: An emotion corpus of multi-party conversations},
            author={Chen, Sheng-Yeh and Hsu, Chao-Chun and Kuo, Chuan-Chun and Ku, Lun-Wei and others},
            journal={arXiv preprint arXiv:1802.08379},
            year={2018}
            }"""
            ),
            url="https://affective-meld.github.io/",
        ),
        SiliconeConfig(
            name="meld_s",
            description=textwrap.dedent(
                """\
            The Multimodal EmotionLines Dataset enhances and extends the EmotionLines dataset where
            multiple speakers participate in the dialogue."""
            ),
            text_features={
                "Utterance": "Utterance",
                "Speaker": "Speaker",
                "Sentiment": "Sentiment",
                "Dialogue_ID": "Dialogue_ID",
                "Utterance_ID": "Utterance_ID",
            },
            label_classes=["negative", "neutral", "positive"],
            label_column="Sentiment",
            data_url={
                "train": _URL + "/meld/train.csv",
                "dev": _URL + "/meld/dev.csv",
                "test": _URL + "/meld/test.csv",
            },
            citation=textwrap.dedent(
                """\
            @article{chen2018emotionlines,
            title={Emotionlines: An emotion corpus of multi-party conversations},
            author={Chen, Sheng-Yeh and Hsu, Chao-Chun and Kuo, Chuan-Chun and Ku, Lun-Wei and others},
            journal={arXiv preprint arXiv:1802.08379},
            year={2018}
            }"""
            ),
            url="https://affective-meld.github.io/",
        ),
        SiliconeConfig(
            name="mrda",
            description=textwrap.dedent(
                """\
            ICSI MRDA Corpus consist of transcripts of multi-party meetings hand-annotated with dialog
            acts. It is the second biggest dataset with around 110k utterances."""
            ),
            text_features={
                "Utterance_ID": "Utterance_ID",
                "Dialogue_Act": "Dialogue_Act",
                "Channel_ID": "Channel_ID",
                "Speaker": "Speaker",
                "Dialogue_ID": "Dialogue_ID",
                "Utterance": "Utterance",
            },
            label_classes=list(six.iterkeys(MRDA_DA_DESCRIPTION)),
            label_column="Dialogue_Act",
            data_url={
                "train": _URL + "/mrda/train.csv",
                "dev": _URL + "/mrda/dev.csv",
                "test": _URL + "/mrda/test.csv",
            },
            citation=textwrap.dedent(
                """\
            @techreport{shriberg2004icsi,
            title={The ICSI meeting recorder dialog act (MRDA) corpus},
            author={Shriberg, Elizabeth and Dhillon, Raj and Bhagat, Sonali and Ang, Jeremy and Carvey, Hannah},
            year={2004},
            institution={INTERNATIONAL COMPUTER SCIENCE INST BERKELEY CA}
            }"""
            ),
            url="https://www.aclweb.org/anthology/W04-2319",
        ),
        SiliconeConfig(
            name="oasis",
            description=textwrap.dedent(
                """\
            The Bt Oasis Corpus (Oasis) contains the transcripts of live calls made to the BT and
            operator services. This corpus is rather small (15k utterances). There is no standard
            train/dev/test split."""
            ),
            text_features={
                "Speaker": "Speaker",
                "Utterance": "Utterance",
                "Dialogue_Act": "Dialogue_Act",
            },
            label_classes=[
                "accept",
                "ackn",
                "answ",
                "answElab",
                "appreciate",
                "backch",
                "bye",
                "complete",
                "confirm",
                "correct",
                "direct",
                "directElab",
                "echo",
                "exclaim",
                "expressOpinion",
                "expressPossibility",
                "expressRegret",
                "expressWish",
                "greet",
                "hold",
                "identifySelf",
                "inform",
                "informCont",
                "informDisc",
                "informIntent",
                "init",
                "negate",
                "offer",
                "pardon",
                "raiseIssue",
                "refer",
                "refuse",
                "reqDirect",
                "reqInfo",
                "reqModal",
                "selfTalk",
                "suggest",
                "thank",
                "informIntent-hold",
                "correctSelf",
                "expressRegret-inform",
                "thank-identifySelf",
            ],
            label_column="Dialogue_Act",
            data_url={
                "train": _URL + "/oasis/train.txt",
                "dev": _URL + "/oasis/dev.txt",
                "test": _URL + "/oasis/test.txt",
            },
            citation=textwrap.dedent(
                """\
            @inproceedings{leech2003generic,
            title={Generic speech act annotation for task-oriented dialogues},
            author={Leech, Geoffrey and Weisser, Martin},
            booktitle={Proceedings of the corpus linguistics 2003 conference},
            volume={16},
            pages={441--446},
            year={2003},
            organization={Lancaster: Lancaster University}
            }"""
            ),
            url="http://groups.inf.ed.ac.uk/oasis/",
        ),
        SiliconeConfig(
            name="sem",
            description=textwrap.dedent(
                """\
            The SEMAINE database comes from the Sustained Emotionally coloured Human-Machine Interaction
            using Nonverbal Expression project. This dataset has been annotated on three sentiments
            labels: positive, negative and neutral. It is built on Multimodal Wizard of Oz experiment
            where participants held conversations with an operator who adopted various roles designed
            to evoke emotional reactions. There is no official split on this dataset."""
            ),
            text_features={
                "Utterance": "Utterance",
                "NbPairInSession": "NbPairInSession",
                "Dialogue_ID": "Dialogue_ID",
                "SpeechTurn": "SpeechTurn",
                "Speaker": "Speaker",
                "Sentiment": "Sentiment",
            },
            label_classes=["Negative", "Neutral", "Positive"],
            label_column="Sentiment",
            data_url={
                "train": _URL + "/sem/train.csv",
                "dev": _URL + "/sem/dev.csv",
                "test": _URL + "/sem/test.csv",
            },
            citation=textwrap.dedent(
                """\
            @article{mckeown2011semaine,
            title={The semaine database: Annotated multimodal records of emotionally colored conversations
            between a person and a limited agent},
            author={McKeown, Gary and Valstar, Michel and Cowie, Roddy and Pantic, Maja and Schroder, Marc},
            journal={IEEE transactions on affective computing},
            volume={3},
            number={1},
            pages={5--17},
            year={2011},
            publisher={IEEE}
            }"""
            ),
            url="https://ieeexplore.ieee.org/document/5959155",
        ),
        SiliconeConfig(
            name="swda",
            description=textwrap.dedent(
                """\
            Switchboard Dialog Act Corpus (SwDA) is a telephone speech corpus consisting of two-sided
            telephone conversations with provided topics. This dataset includes additional features
            such as speaker id and topic information."""
            ),
            text_features={
                "Utterance": "Utterance",
                "Dialogue_Act": "Dialogue_Act",
                "From_Caller": "From_Caller",
                "To_Caller": "To_Caller",
                "Topic": "Topic",
                "Dialogue_ID": "Dialogue_ID",
                "Conv_ID": "Conv_ID",
            },
            label_classes=list(six.iterkeys(SWDA_DA_DESCRIPTION)),
            label_column="Dialogue_Act",
            data_url={
                "train": _URL + "/swda/train.csv",
                "dev": _URL + "/swda/dev.csv",
                "test": _URL + "/swda/test.csv",
            },
            citation=textwrap.dedent(
                """\
            @article{stolcke2000dialogue,
            title={Dialogue act modeling for automatic tagging and recognition of conversational speech},
            author={Stolcke, Andreas and Ries, Klaus and Coccaro, Noah and Shriberg, Elizabeth and
            Bates, Rebecca and Jurafsky, Daniel and Taylor, Paul and Martin, Rachel and Ess-Dykema,
            Carol Van and Meteer, Marie},
            journal={Computational linguistics},
            volume={26},
            number={3},
            pages={339--373},
            year={2000},
            publisher={MIT Press}
            }"""
            ),
            url="https://web.stanford.edu/~jurafsky/ws97/",
        ),
    ]

    def _info(self):
        features = {text_feature: datasets.Value("string") for text_feature in six.iterkeys(self.config.text_features)}
        if self.config.label_classes:
            features["Label"] = datasets.features.ClassLabel(names=self.config.label_classes)
        features["Idx"] = datasets.Value("int32")
        return datasets.DatasetInfo(
            description=_SILICONE_DESCRIPTION,
            features=datasets.Features(features),
            homepage=self.config.url,
            citation=self.config.citation + "\n" + _SILICONE_CITATION,
        )

    def _split_generators(self, dl_manager):
        data_files = dl_manager.download(self.config.data_url)
        splits = []
        splits.append(
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_file": data_files["train"],
                    "split": "train",
                },
            )
        )
        splits.append(
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "data_file": data_files["dev"],
                    "split": "dev",
                },
            )
        )
        splits.append(
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "data_file": data_files["test"],
                    "split": "test",
                },
            )
        )
        return splits

    def _generate_examples(self, data_file, split):
        if self.config.name not in ("maptask", "iemocap", "oasis"):
            df = pd.read_csv(data_file, delimiter=",", header=0, quotechar='"', dtype=str)[
                six.iterkeys(self.config.text_features)
            ]

        if self.config.name == "iemocap":
            df = pd.read_csv(
                data_file,
                delimiter=",",
                header=0,
                quotechar='"',
                names=["Dialogue_ID", "Utterance_ID", "Utterance", "Emotion", "Valence", "Activation", "Dominance"],
                dtype=str,
            )[six.iterkeys(self.config.text_features)]

        if self.config.name in ("maptask", "oasis"):
            df = pd.read_csv(data_file, delimiter="|", names=["Speaker", "Utterance", "Dialogue_Act"], dtype=str)[
                six.iterkeys(self.config.text_features)
            ]

        rows = df.to_dict(orient="records")

        for n, row in enumerate(rows):
            example = row
            example["Idx"] = n

            if self.config.label_column in example:
                label = example[self.config.label_column]
                example["Label"] = label

            yield example["Idx"], example
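# A hedged usage sketch: assuming the builder above is published on the Hugging
# Face Hub under the dataset name "silicone", one of its configurations (e.g.
# "dyda_da") could be loaded with the standard datasets API.
from datasets import load_dataset

dyda_da = load_dataset("silicone", "dyda_da")
print(dyda_da["train"][0])  # e.g. {'Utterance': ..., 'Dialogue_Act': ..., 'Dialogue_ID': ..., 'Label': ..., 'Idx': 0}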
Example #45
0
    def put(self, request, project):
        """
        Update a Project
        ````````````````

        Update various attributes and configurable settings for the given
        project.  Only supplied values are updated.

        :pparam string organization_slug: the slug of the organization the
                                          project belongs to.
        :pparam string project_slug: the slug of the project to delete.
        :param string name: the new name for the project.
        :param string slug: the new slug for the project.
        :param string team: the slug of new team for the project.
        :param string platform: the new platform for the project.
        :param boolean isBookmarked: in case this API call is invoked with a
                                     user context this allows changing of
                                     the bookmark flag.
        :param int digestsMinDelay:
        :param int digestsMaxDelay:
        :auth: required
        """
        has_project_write = (
            (request.auth and request.auth.has_scope('project:write'))
            or (request.access and request.access.has_scope('project:write')))

        if has_project_write:
            serializer_cls = ProjectAdminSerializer
        else:
            serializer_cls = ProjectMemberSerializer

        serializer = serializer_cls(
            data=request.DATA,
            partial=True,
            context={
                'project': project,
                'request': request,
            },
        )
        if not serializer.is_valid():
            return Response(serializer.errors, status=400)

        result = serializer.object

        if not has_project_write:
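            # Reject admin-only fields that were submitted but dropped by the
            # member serializer.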
            for key in six.iterkeys(ProjectAdminSerializer.base_fields):
                if request.DATA.get(key) and not result.get(key):
                    return Response(
                        {
                            'detail': [
                                'You do not have permission to perform this action.'
                            ]
                        },
                        status=403)

        changed = False
        if result.get('slug'):
            project.slug = result['slug']
            changed = True

        if result.get('name'):
            project.name = result['name']
            changed = True

        old_team_id = None
        if result.get('team'):
            team_list = [
                t for t in Team.objects.get_for_user(
                    organization=project.organization,
                    user=request.user,
                ) if request.access.has_team_scope(t, 'project:write')
                if t.slug == result['team']
            ]
            if not team_list:
                return Response({'detail': ['The new team is not found.']},
                                status=400)
            # TODO(jess): update / deprecate this functionality
            try:
                old_team_id = project.teams.values_list('id', flat=True)[0]
            except IndexError:
                pass

            project.team = team_list[0]
            changed = True

        if result.get('platform'):
            project.platform = result['platform']
            changed = True

        if changed:
            project.save()
            if old_team_id is not None:
                ProjectTeam.objects.filter(
                    project=project,
                    team_id=old_team_id,
                ).update(team=project.team)

        if result.get('isBookmarked'):
            try:
                with transaction.atomic():
                    ProjectBookmark.objects.create(
                        project_id=project.id,
                        user=request.user,
                    )
            except IntegrityError:
                pass
        elif result.get('isBookmarked') is False:
            ProjectBookmark.objects.filter(
                project_id=project.id,
                user=request.user,
            ).delete()

        if result.get('digestsMinDelay'):
            project.update_option('digests:mail:minimum_delay',
                                  result['digestsMinDelay'])
        if result.get('digestsMaxDelay'):
            project.update_option('digests:mail:maximum_delay',
                                  result['digestsMaxDelay'])
        if result.get('subjectPrefix') is not None:
            project.update_option('mail:subject_prefix',
                                  result['subjectPrefix'])
        if result.get('subjectTemplate'):
            project.update_option('mail:subject_template',
                                  result['subjectTemplate'])
        if result.get('defaultEnvironment') is not None:
            project.update_option('sentry:default_environment',
                                  result['defaultEnvironment'])
        if result.get('scrubIPAddresses') is not None:
            project.update_option('sentry:scrub_ip_address',
                                  result['scrubIPAddresses'])
        if result.get('securityToken') is not None:
            project.update_option('sentry:token', result['securityToken'])
        if result.get('securityTokenHeader') is not None:
            project.update_option('sentry:token_header',
                                  result['securityTokenHeader'])
        if result.get('verifySSL') is not None:
            project.update_option('sentry:verify_ssl', result['verifySSL'])
        if result.get('dataScrubber') is not None:
            project.update_option('sentry:scrub_data', result['dataScrubber'])
        if result.get('dataScrubberDefaults') is not None:
            project.update_option('sentry:scrub_defaults',
                                  result['dataScrubberDefaults'])
        if result.get('sensitiveFields') is not None:
            project.update_option('sentry:sensitive_fields',
                                  result['sensitiveFields'])
        if result.get('safeFields') is not None:
            project.update_option('sentry:safe_fields', result['safeFields'])
        # resolveAge can be None
        if 'resolveAge' in result:
            project.update_option(
                'sentry:resolve_age', 0 if result.get('resolveAge') is None
                else int(result['resolveAge']))
        if result.get('scrapeJavaScript') is not None:
            project.update_option('sentry:scrape_javascript',
                                  result['scrapeJavaScript'])
        if result.get('allowedDomains'):
            project.update_option('sentry:origins', result['allowedDomains'])

        if result.get('isSubscribed'):
            UserOption.objects.set_value(user=request.user,
                                         key='mail:alert',
                                         value=1,
                                         project=project)
        elif result.get('isSubscribed') is False:
            UserOption.objects.set_value(user=request.user,
                                         key='mail:alert',
                                         value=0,
                                         project=project)

        # TODO(dcramer): rewrite options to use standard API config
        if has_project_write:
            options = request.DATA.get('options', {})
            if 'sentry:origins' in options:
                project.update_option(
                    'sentry:origins',
                    clean_newline_inputs(options['sentry:origins']))
            if 'sentry:resolve_age' in options:
                project.update_option('sentry:resolve_age',
                                      int(options['sentry:resolve_age']))
            if 'sentry:scrub_data' in options:
                project.update_option('sentry:scrub_data',
                                      bool(options['sentry:scrub_data']))
            if 'sentry:scrub_defaults' in options:
                project.update_option('sentry:scrub_defaults',
                                      bool(options['sentry:scrub_defaults']))
            if 'sentry:safe_fields' in options:
                project.update_option(
                    'sentry:safe_fields',
                    [s.strip().lower() for s in options['sentry:safe_fields']])
            if 'sentry:sensitive_fields' in options:
                project.update_option('sentry:sensitive_fields', [
                    s.strip().lower()
                    for s in options['sentry:sensitive_fields']
                ])
            if 'sentry:scrub_ip_address' in options:
                project.update_option(
                    'sentry:scrub_ip_address',
                    bool(options['sentry:scrub_ip_address']),
                )
            if 'mail:subject_prefix' in options:
                project.update_option(
                    'mail:subject_prefix',
                    options['mail:subject_prefix'],
                )
            if 'sentry:default_environment' in options:
                project.update_option(
                    'sentry:default_environment',
                    options['sentry:default_environment'],
                )
            if 'sentry:csp_ignored_sources_defaults' in options:
                project.update_option(
                    'sentry:csp_ignored_sources_defaults',
                    bool(options['sentry:csp_ignored_sources_defaults']))
            if 'sentry:csp_ignored_sources' in options:
                project.update_option(
                    'sentry:csp_ignored_sources',
                    clean_newline_inputs(
                        options['sentry:csp_ignored_sources']))
            if 'sentry:blacklisted_ips' in options:
                project.update_option(
                    'sentry:blacklisted_ips',
                    clean_newline_inputs(options['sentry:blacklisted_ips']),
                )
            if 'feedback:branding' in options:
                project.update_option(
                    'feedback:branding',
                    '1' if options['feedback:branding'] else '0')
            if 'sentry:reprocessing_active' in options:
                project.update_option(
                    'sentry:reprocessing_active',
                    bool(options['sentry:reprocessing_active']))
            if 'filters:blacklisted_ips' in options:
                project.update_option(
                    'sentry:blacklisted_ips',
                    clean_newline_inputs(options['filters:blacklisted_ips']))
            if 'filters:{}'.format(FilterTypes.RELEASES) in options:
                if features.has('projects:custom-inbound-filters',
                                project,
                                actor=request.user):
                    project.update_option(
                        'sentry:{}'.format(FilterTypes.RELEASES),
                        clean_newline_inputs(options['filters:{}'.format(
                            FilterTypes.RELEASES)]))
                else:
                    return Response(
                        {'detail': ['You do not have that feature enabled']},
                        status=400)
            if 'filters:{}'.format(FilterTypes.ERROR_MESSAGES) in options:
                if features.has('projects:custom-inbound-filters',
                                project,
                                actor=request.user):
                    project.update_option(
                        'sentry:{}'.format(FilterTypes.ERROR_MESSAGES),
                        clean_newline_inputs(options['filters:{}'.format(
                            FilterTypes.ERROR_MESSAGES)],
                                             case_insensitive=False))
                else:
                    return Response(
                        {'detail': ['You do not have that feature enabled']},
                        status=400)

            self.create_audit_entry(
                request=request,
                organization=project.organization,
                target_object=project.id,
                event=AuditLogEntryEvent.PROJECT_EDIT,
                data=project.get_audit_log_data(),
            )

        data = serialize(project, request.user, DetailedProjectSerializer())
        return Response(data)
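# A minimal sketch of the update pattern used above (illustrative values only):
# options that may legitimately be empty or False are checked with `is not None`,
# while options that only matter when non-empty use a plain truthiness test.
result = {'name': 'web', 'subjectPrefix': '', 'verifySSL': False}
if result.get('name'):                        # only acts on a non-empty value
    print('update name')
if result.get('subjectPrefix') is not None:   # an empty string still clears the prefix
    print('update subject prefix')
if result.get('verifySSL') is not None:       # False is a meaningful setting here
    print('update verify_ssl')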
Example #46
0
    def _createWidgets(self):

        settingsFile = os.path.join(GetSettingsPath(), 'wxWS')

        self.settsManager = WSManageSettingsWidget(
            parent=self,
            settingsFile=settingsFile,
            default_servers=self.default_servers)

        self.settingsBox = StaticBox(parent=self,
                                     id=wx.ID_ANY,
                                     label=_(" Server settings "))

        self.serverText = StaticText(parent=self,
                                     id=wx.ID_ANY,
                                     label=_("Server:"))
        self.server = TextCtrl(parent=self, id=wx.ID_ANY)

        self.btn_connect = Button(parent=self,
                                  id=wx.ID_ANY,
                                  label=_("&Connect"))
        self.btn_connect.SetToolTip(_("Connect to the server"))
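        # Keep the Connect button disabled until a server address has been entered.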
        if not self.server.GetValue():
            self.btn_connect.Enable(False)

        self.infoCollapseLabelExp = _('Show advanced connection settings')
        self.infoCollapseLabelCol = _('Hide advanced connection settings')

        self.adv_conn = wx.CollapsiblePane(parent=self,
                                           label=self.infoCollapseLabelExp,
                                           style=wx.CP_DEFAULT_STYLE
                                           | wx.CP_NO_TLW_RESIZE | wx.EXPAND)

        self.MakeAdvConnPane(pane=self.adv_conn.GetPane())
        self.adv_conn.Collapse(True)
        self.Bind(wx.EVT_COLLAPSIBLEPANE_CHANGED, self.OnAdvConnPaneChanged,
                  self.adv_conn)

        self.reqDataPanel = wx.Panel(parent=self, id=wx.ID_ANY)

        self.layerNameBox = StaticBox(parent=self.reqDataPanel,
                                      id=wx.ID_ANY,
                                      label=_(" Layer Manager Settings "))

        self.layerNameText = StaticText(parent=self.reqDataPanel,
                                        id=wx.ID_ANY,
                                        label=_("Output layer name:"))
        self.layerName = TextCtrl(parent=self.reqDataPanel, id=wx.ID_ANY)

        for ws in six.iterkeys(self.ws_panels):
            # set class WSPanel argument layerNameTxtCtrl
            self.ws_panels[ws]['panel'] = WSPanel(parent=self.reqDataPanel,
                                                  web_service=ws)
            self.ws_panels[ws]['panel'].capParsed.connect(
                self.OnPanelCapParsed)
            self.ws_panels[ws]['panel'].layerSelected.connect(
                self.OnLayerSelected)

        # buttons
        self.btn_close = Button(parent=self, id=wx.ID_CLOSE)
        self.btn_close.SetToolTip(_("Close dialog"))

        # statusbar
        self.statusbar = wx.StatusBar(parent=self, id=wx.ID_ANY)

        # bindings
        self.btn_close.Bind(wx.EVT_BUTTON, self.OnClose)
        self.Bind(wx.EVT_CLOSE, self.OnClose)
        self.btn_connect.Bind(wx.EVT_BUTTON, self.OnConnect)

        self.server.Bind(wx.EVT_TEXT, self.OnServer)
        self.layerName.Bind(wx.EVT_TEXT, self.OnOutputLayerName)

        self.settsManager.settingsChanged.connect(self.OnSettingsChanged)
        self.settsManager.settingsSaving.connect(self.OnSettingsSaving)
Example #47
0
    def update(self, data):
        for field in set(six.iterkeys(data)) - self._non_updatable_fields():
            if self._lazyhasattr(field):
                setattr(self, field, data[field])
Example #48
0
def _partially_apply_saved_transform_impl(saved_model_dir,
                                          logical_input_map,
                                          tensor_replacement_map=None,
                                          fetch_tensor_names=None):
    """Shared code for partially_apply_saved_transform and fetch_tensor_values.

  This adds nodes to a graph that already contains Tensors representing the
  inputs.  These input Tensors may be placeholders that will be fed when the
  graph is executed, or may be the outputs of some Ops.  Most typically, the
  input Tensors are the outputs of reading and/or parsing Ops, but they could
  be anything, including the outputs of a prior application of this function
  using another transform graph.

  This function operates on the default Graph in the default Session, and so
  must be called within a context where these are provided.

  Args:
    saved_model_dir: A SavedModel directory providing a transform
      graph.  The MetaGraphDef and signature are selected from the SavedModel
      using keys defined in `../constants.py` ('transform' and
      'transform_signature', respectively).
    logical_input_map: a dict of logical name to Tensor.  The logical names must
      be a subset of those in the input signature of the transform graph, and
      the corresponding Tensors must have the expected types and shapes.
    tensor_replacement_map: a dict of tensor names to `Tensors`.
    fetch_tensor_names: a list of tensor names.

  Returns:
    A tuple of (unbound_inputs, outputs, fetched_tensors) where unbound_inputs
    is a dict of logical name to Tensors that are yet to be mapped or fed,
    outputs is a dict of logical name to Tensor, as provided by the output
    signature of the transform graph, and fetched_tensors is a dict of tensor
    names to `Tensor`s where the tensor names are the names given by
    `fetch_tensor_names`.

  Raises:
    ValueError: if the provided `logical_input_map` dict has keys that are not
      part of the input signature, or any of the provided inputs have the wrong
      type or shape.
    RuntimeError: if there is no default graph available to which to apply the
      transform.
  """
    graph = tf.get_default_graph()
    if graph is None:
        raise RuntimeError('apply_saved_transform() requires a default graph.')

    meta_graph_def, input_signature, output_signature, asset_path_dict = (
        _load_transform_saved_model(saved_model_dir))
    if any(
            name.endswith('$dense_tensor') or name.endswith('$values')
            for name in input_signature.keys()):
        return legacy_saved_transform_io._partially_apply_saved_transform_impl(  # pylint: disable=protected-access
            saved_model_dir, logical_input_map, tensor_replacement_map,
            fetch_tensor_names)
    asset_tensor_dict = {
        k: ops.convert_to_tensor(v)
        for k, v in asset_path_dict.items()
    }

    # Check for inputs that were not part of the input signature.
    unexpected_inputs = (set(six.iterkeys(logical_input_map)) -
                         set(six.iterkeys(input_signature)))
    if unexpected_inputs:
        raise ValueError('Unexpected inputs '
                         'to transform: {}'.format(unexpected_inputs))

    # Create a map from tensor names in the graph to be imported, to the tensors
    # specified in `logical_input_map`.
    input_map = {}
    for logical_name, replacement in six.iteritems(logical_input_map):
        tensor_info = input_signature[logical_name]
        if tensor_info.WhichOneof('encoding') == 'coo_sparse':
            input_map[tensor_info.coo_sparse.indices_tensor_name] = (
                replacement.indices)
            input_map[
                tensor_info.coo_sparse.values_tensor_name] = replacement.values
            input_map[tensor_info.coo_sparse.dense_shape_tensor_name] = (
                replacement.dense_shape)
        else:
            input_map[tensor_info.name] = replacement

    input_map.update(asset_tensor_dict)
    if tensor_replacement_map:
        input_map.update(tensor_replacement_map)

    # unique_name may produce e.g. transform_5.  The result has no trailing slash.
    scope = graph.unique_name('transform', mark_as_used=False)

    # unique_name returns an "absolute" name while we want a name relative to the
    # current scope.  Therefore, we check if the current name stack is non-empty,
    # and if so, strip out the existing name scope.
    if graph.get_name_scope():
        current_name_scope = graph.get_name_scope() + '/'
        assert scope.startswith(current_name_scope)
        import_scope = scope[len(current_name_scope):]
    else:
        import_scope = scope

    # Save the ASSET_FILEPATHS before importing the MetaGraphDef
    current_assets = graph.get_collection(tf.GraphKeys.ASSET_FILEPATHS)

    # Warn user if meta_graph_def has saved variables
    if tf.GraphKeys.TRAINABLE_VARIABLES in meta_graph_def.collection_def:
        trainable_vars = meta_graph_def.collection_def[
            tf.GraphKeys.TRAINABLE_VARIABLES].bytes_list.value
        if trainable_vars:
            raise ValueError(
                'The SavedModel contained trainable variables {}.  Because this '
                'function is typically called in the input_fn, trainable variables '
                'are disallowed'.format(trainable_vars))

    # Load the transform graph, applying it to existing Tensors via input_map.
    # Throws ValueError if the input_map gives mismatched types or shapes.
    saver = tf_saver.import_meta_graph(meta_graph_def,
                                       import_scope=import_scope,
                                       input_map=input_map)

    # Wipe out AssetFileDef collection; it is obsolete after loading
    graph.clear_collection(tf.saved_model.constants.ASSETS_KEY)

    # The import may have added Tensors to the ASSET_FILEPATHS collection that
    # were substituted via input_map.  To account for this, wipe out the
    # collection, restore the preexisting collection values, and then write in
    # the new substituted Tensors.
    graph.clear_collection(tf.GraphKeys.ASSET_FILEPATHS)
    for asset_path_tensor in current_assets:
        graph.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS,
                                asset_path_tensor)
    for asset_path_tensor in asset_tensor_dict.values():
        graph.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS,
                                asset_path_tensor)

    if saver:
        checkpoint_path = os.path.join(
            tf.compat.as_bytes(saved_model_dir),
            tf.compat.as_bytes(tf.saved_model.constants.VARIABLES_DIRECTORY),
            tf.compat.as_bytes(tf.saved_model.constants.VARIABLES_FILENAME))

        # We can't use the scope rename from init_from_checkpoint because it relies
        # on var scopes not rebuilt by import_meta_graph. So we need to construct it
        # explicitly by iterating over the variables.
        var_map = {}
        for var in tf.global_variables():
            var_name = var.op.name
            if not var_name.startswith(scope):
                continue

            # Generate original name before importing into scope.
            original_var_name = var_name[len(scope) + 1:]

            match = _PARTITIONED_VARIABLE_NAME_RE.match(original_var_name)
            if match:
                # If the variable is partitioned, extract the base variable name and
                # the index in the partition, then update var_map[base_name] to have
                # var_map[base_name][partition_index] = var.
                base_name = match.group(1)
                partition_index = int(match.group(2))
                if base_name not in var_map:
                    var_map[base_name] = []
                while not partition_index < len(var_map[base_name]):
                    var_map[base_name].append(None)
                assert var_map[base_name][partition_index] is None
                var_map[base_name][partition_index] = var
            else:
                var_map[original_var_name] = var

        if var_map:
            tf.train.init_from_checkpoint(checkpoint_path, var_map)

    # Add computed output tensors to the output.  There are two cases.  When the
    # output is not in the input_map, then we look up the tensor in the imported
    # graph by prepending the import scope and looking up the tensor by name.
    # This will fail if the expected output tensor is not now in the graph
    # under the expected name scope.  When the output is in the input map, then
    # that tensor will have been re-mapped so we use the tensor given in the
    # input_map.
    def lookup_remapped_tensor(tensor_name):
        if tensor_name in input_map:
            return input_map[tensor_name]
        else:
            return graph.get_tensor_by_name(
                ops.prepend_name_scope(tensor_name, scope))

    def lookup_tensor_or_sparse_tensor(tensor_info):
        if tensor_info.WhichOneof('encoding') == 'coo_sparse':
            return tf.SparseTensor(
                lookup_remapped_tensor(
                    tensor_info.coo_sparse.indices_tensor_name),
                lookup_remapped_tensor(
                    tensor_info.coo_sparse.values_tensor_name),
                lookup_remapped_tensor(
                    tensor_info.coo_sparse.dense_shape_tensor_name))
        else:
            return lookup_remapped_tensor(tensor_info.name)

    outputs = {
        logical_name: lookup_tensor_or_sparse_tensor(tensor_info)
        for logical_name, tensor_info in six.iteritems(output_signature)
    }
    # Do the same for input tensors, although such tensors should never be in the
    # input_map since identical tensors in an input_map would be an error.
    unbound_inputs = {
        logical_name: lookup_tensor_or_sparse_tensor(tensor_info)
        for logical_name, tensor_info in six.iteritems(input_signature)
        if logical_name not in logical_input_map
    }
    if fetch_tensor_names is None:
        fetch_tensor_names = []
    fetched_tensors = {
        name: lookup_remapped_tensor(name)
        for name in fetch_tensor_names
    }

    return unbound_inputs, outputs, fetched_tensors
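
The partitioned-variable branch above relies on _PARTITIONED_VARIABLE_NAME_RE, which is defined outside this snippet. The pure-Python sketch below assumes TensorFlow's conventional "<name>/part_<N>" naming (an assumption, not the original regex) and shows how var_map grows one list slot per partition index.

import re

_PART_RE = re.compile(r"^(.*)/part_(\d+)$")  # assumed naming, not the original regex

def group_partitioned(var_names):
    var_map = {}
    for name in var_names:
        match = _PART_RE.match(name)
        if match:
            base_name = match.group(1)
            partition_index = int(match.group(2))
            parts = var_map.setdefault(base_name, [])
            # Grow the list until parts[partition_index] exists, then fill it.
            while len(parts) <= partition_index:
                parts.append(None)
            assert parts[partition_index] is None
            parts[partition_index] = name
        else:
            var_map[name] = name
    return var_map

print(group_partitioned(["w/part_1", "w/part_0", "bias"]))
# -> {'w': ['w/part_0', 'w/part_1'], 'bias': 'bias'}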
Example #49
0
 def commit_metadata(self):
     for arch, packages in six.iteritems(self.dirty_packages):
         self.commit_package_metadata(arch, packages)
     self.commit_sources_metadata()
     self.commit_release_metadata(six.iterkeys(self.dirty_packages))
     self.dirty_packages = {}
Example #50
0
 def write_all(self):
     """
     Write out all registered config files.
     """
     for k in six.iterkeys(self.templates):
         self.write(k)
Example #51
0
def _sorted(dict_):
    try:
        return sorted(six.iterkeys(dict_))
    except TypeError:
        raise TypeError("nest only supports dicts with sortable keys.")
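
A short usage sketch of this helper (the dicts are illustrative): under Python 3, mixed key types are not sortable, so the except branch re-raises with the nest-specific message.

import six

def _sorted(dict_):
    try:
        return sorted(six.iterkeys(dict_))
    except TypeError:
        raise TypeError("nest only supports dicts with sortable keys.")

print(_sorted({"b": 2, "a": 1}))   # -> ['a', 'b']
try:
    _sorted({1: "x", "y": 2})      # mixed key types cannot be sorted...
except TypeError as exc:
    print(exc)                     # ...so the helper raises TypeError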
Example #52
0
def _offline_counter(args):
    """ Offline counting from SAM/BAM file. """
    # Offline counting from SAM/BAM file:
    counts, gc_means = read_counter.count_reads(args.bam.name,
                                                in_format=args.f,
                                                min_aln_qual=args.a,
                                                verbose=not args.Q,
                                                reads_gc=args.g)
    counts = OrderedDict(six.iteritems(counts))

    if args.k is not None:
        calc_words = [int(k) for k in args.k.split(",")]

    data = OrderedDict()

    # Calculate sequence properties:
    if args.z is not None:
        lengths, gc_contents, word_freqs = {}, {}, defaultdict(
            lambda: defaultdict(dict))
        ref_iter = seq_util.read_seq_records(args.z)
        if not args.Q:
            sys.stderr.write("Calculating sequence features:\n")
            ref_iter = tqdm.tqdm(ref_iter)

        for ref in ref_iter:
            # Augment counts dictionary with missing reference entries:
            if ref.id not in counts:
                counts[ref.id] = 0
            lengths[ref.id] = len(ref)
            gc_contents[ref.id] = seq_util.gc_content(str(ref.seq))
            if args.k is not None:
                for word_size in calc_words:
                    bf = seq_util.word_composition(ref.seq, word_size)
                    for word, count in six.iteritems(bf):
                        word_freqs[word_size][
                            ref.id][word] = float(count) / len(ref)

        data['Length'] = [lengths[tr] for tr in six.iterkeys(counts)]
        data['GC_content'] = [gc_contents[tr] for tr in six.iterkeys(counts)]

    data['Reference'] = list(counts.keys())
    data['Count'] = list(counts.values())

    # Calculate word frequencies:
    if args.k is not None and args.z:
        for ks in calc_words:
            for word in next(iter((word_freqs[ks].values()))).keys():
                tmp = []
                for ref in counts.keys():
                    tmp.append(word_freqs[ks][ref][word])
                data[word] = tmp

    data_frame = pd.DataFrame(data)

    if args.g:
        gc_frame = pd.DataFrame({
            'Reference': list(gc_means.keys()),
            'ReadGC': list(gc_means.values())
        })
        data_frame = pd.merge(data_frame,
                              gc_frame,
                              how='inner',
                              on='Reference')

    data_frame = data_frame.sort_values(['Count', 'Reference'],
                                        ascending=False)
    data_frame = data_frame[data_frame.Count > 0]

    if args.t is not None:
        data_frame.to_csv(args.t, sep='\t', index=False)

    if args.p is not None:
        misc.pickle_dump(data, args.p)
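
The word-frequency block above turns the nested word_freqs[word_size][reference][word] mapping into one DataFrame column per word, aligned with the order of counts. A toy, self-contained pandas sketch of that pivot follows; the references and words are made up.

from collections import OrderedDict
import pandas as pd

counts = OrderedDict([("ref1", 10), ("ref2", 3)])
word_freqs = {2: {"ref1": {"AT": 0.25, "GC": 0.75},
                  "ref2": {"AT": 0.60, "GC": 0.40}}}

data = OrderedDict()
data["Reference"] = list(counts.keys())
data["Count"] = list(counts.values())

# One column per word, rows ordered like `counts`.
for ks in word_freqs:
    for word in next(iter(word_freqs[ks].values())).keys():
        data[word] = [word_freqs[ks][ref][word] for ref in counts.keys()]

print(pd.DataFrame(data))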
Example #53
0
  def build_update(self):
    """Draw sample from proposal conditional on last sample. Then
    accept or reject the sample based on the ratio,

    $\\text{ratio} =
          \log p(x, z^{\\text{new}}) - \log p(x, z^{\\text{old}}) +
          \log g(z^{\\text{old}} \mid z^{\\text{new}}) -
          \log g(z^{\\text{new}} \mid z^{\\text{old}})$

    #### Notes

    The updates assume each Empirical random variable is directly
    parameterized by `tf.Variable`s.
    """
    old_sample = {z: tf.gather(qz.params, tf.maximum(self.t - 1, 0))
                  for z, qz in six.iteritems(self.latent_vars)}
    old_sample = OrderedDict(old_sample)

    # Form dictionary in order to replace conditioning on prior or
    # observed variable with conditioning on a specific value.
    dict_swap = {}
    for x, qx in six.iteritems(self.data):
      if isinstance(x, RandomVariable):
        if isinstance(qx, RandomVariable):
          qx_copy = copy(qx, scope='conditional')
          dict_swap[x] = qx_copy.value()
        else:
          dict_swap[x] = qx

    dict_swap_old = dict_swap.copy()
    dict_swap_old.update(old_sample)
    base_scope = tf.get_default_graph().unique_name("inference") + '/'
    scope_old = base_scope + 'old'
    scope_new = base_scope + 'new'

    # Draw proposed sample and calculate acceptance ratio.
    new_sample = old_sample.copy()  # copy to ensure same order
    ratio = 0.0
    for z, proposal_z in six.iteritems(self.proposal_vars):
      # Build proposal g(znew | zold).
      proposal_znew = copy(proposal_z, dict_swap_old, scope=scope_old)
      # Sample znew ~ g(znew | zold).
      new_sample[z] = proposal_znew.value()
      # Subtract the forward proposal term, log g(znew | zold).
      ratio -= tf.reduce_sum(proposal_znew.log_prob(new_sample[z]))

    dict_swap_new = dict_swap.copy()
    dict_swap_new.update(new_sample)

    for z, proposal_z in six.iteritems(self.proposal_vars):
      # Build proposal g(zold | znew).
      proposal_zold = copy(proposal_z, dict_swap_new, scope=scope_new)
      # Add the reverse proposal term, log g(zold | znew).
      ratio += tf.reduce_sum(proposal_zold.log_prob(dict_swap_old[z]))

    for z in six.iterkeys(self.latent_vars):
      # Build priors p(znew) and p(zold).
      znew = copy(z, dict_swap_new, scope=scope_new)
      zold = copy(z, dict_swap_old, scope=scope_old)
      # Increment ratio.
      ratio += tf.reduce_sum(znew.log_prob(dict_swap_new[z]))
      ratio -= tf.reduce_sum(zold.log_prob(dict_swap_old[z]))

    for x in six.iterkeys(self.data):
      if isinstance(x, RandomVariable):
        # Build likelihoods p(x | znew) and p(x | zold).
        x_znew = copy(x, dict_swap_new, scope=scope_new)
        x_zold = copy(x, dict_swap_old, scope=scope_old)
        # Increment ratio.
        ratio += tf.reduce_sum(x_znew.log_prob(dict_swap[x]))
        ratio -= tf.reduce_sum(x_zold.log_prob(dict_swap[x]))

    # Accept or reject sample.
    u = Uniform().sample()
    accept = tf.log(u) < ratio
    sample_values = tf.cond(accept, lambda: list(six.itervalues(new_sample)),
                            lambda: list(six.itervalues(old_sample)))
    if not isinstance(sample_values, list):
      # `tf.cond` returns tf.Tensor if output is a list of size 1.
      sample_values = [sample_values]

    sample = {z: sample_value for z, sample_value in
              zip(six.iterkeys(new_sample), sample_values)}

    # Update Empirical random variables.
    assign_ops = []
    for z, qz in six.iteritems(self.latent_vars):
      variable = qz.get_variables()[0]
      assign_ops.append(tf.scatter_update(variable, self.t, sample[z]))

    # Increment n_accept (if accepted).
    assign_ops.append(self.n_accept.assign_add(tf.where(accept, 1, 0)))
    return tf.group(*assign_ops)
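
For comparison, a minimal NumPy sketch (not part of the library) of the same accept/reject rule, for a 1-D standard-normal target with a Gaussian random-walk proposal; log_joint and the step size are illustrative assumptions. The two proposal terms cancel here because the proposal is symmetric, but they are kept to mirror the structure of build_update.

import numpy as np

def log_joint(z):
    # Standard-normal "model": log p(x, z) up to an additive constant.
    return -0.5 * z ** 2

def mh_step(z_old, rng, step=0.5):
    # Propose znew ~ g(znew | zold) = Normal(zold, step**2).
    z_new = rng.normal(z_old, step)
    # log-ratio: log p(x, znew) - log p(x, zold)
    #            + log g(zold | znew) - log g(znew | zold).
    log_g_new_given_old = -0.5 * ((z_new - z_old) / step) ** 2
    log_g_old_given_new = -0.5 * ((z_old - z_new) / step) ** 2
    ratio = (log_joint(z_new) - log_joint(z_old)
             + log_g_old_given_new - log_g_new_given_old)
    accept = np.log(rng.uniform()) < ratio
    return (z_new if accept else z_old), accept

rng = np.random.default_rng(0)
z, n_accept = 0.0, 0
for _ in range(1000):
    z, accepted = mh_step(z, rng)
    n_accept += int(accepted)
print("acceptance rate:", n_accept / 1000.0)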
Example #54
0
 def update_hash(self, path):
     hashes = self.storage.hashes('dists/{0}/{1}'.format(self.codename, path))
     for hash_type in list(six.iterkeys(self.hashes)):
         self.hashes[hash_type][path] = (hashes[hash_type].hexdigest(), str(hashes['size'].size))
Example #55
0
# Variable names, types, literals etc.
# ------------------------------------------------------------------------------
NAME = pp.Word(pp.alphanums + "_")
INT = pp.Word(pp.nums + "UuLl")
FLOAT = pp.Word(pp.nums + ".+-EeFf")
NUMBER = FLOAT | INT

# Dimensions can be of the form `[3]`, `[constant_name]` or `[2*constant_name]`
ARRAY_DIM = pp.Combine(LBRACK + (INT | NAME) + pp.Optional(pp.Literal("*")) +
                       pp.Optional(INT | NAME) + RBRACK)

PTR = pp.Literal("*")
EXTERN = pp.Keyword("extern")
NATIVE_TYPENAME = pp.MatchFirst(
    [pp.Keyword(n) for n in six.iterkeys(C_TO_CTYPES)])

# Macros.
# ------------------------------------------------------------------------------

HDR_GUARD = DEFINE + "THIRD_PARTY_MUJOCO_HDRS_"

# e.g. "#define mjUSEDOUBLE"
DEF_FLAG = pp.Group(DEFINE + NAME("name") +
                    (COMMENT("comment") | EOL)).ignore(HDR_GUARD)

# e.g. "#define mjMINVAL    1E-14       // minimum value in any denominator"
DEF_CONST = pp.Group(DEFINE + NAME("name") + (NUMBER | NAME)("value") +
                     (COMMENT("comment") | EOL))

# e.g. "X( mjtNum*, name_textadr, ntext, 1 )"
Example #56
0
 def iterflatkeys(self):
     ''' Return iterator of flattened keys '''
     return six.iterkeys(self.flattened())
Example #57
0
 def __repr__(self):
     class_name = self.__class__.__name__
     attrs = six.iterkeys(vars(self))
     result = ', '.join(
         ['%s=%s' % (attr, repr(getattr(self, attr))) for attr in attrs])
     return '%s(%s)' % (class_name, result)
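
A tiny, self-contained demonstration of the vars()/iterkeys repr pattern above; the Point class is hypothetical.

import six

class Point(object):
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __repr__(self):
        class_name = self.__class__.__name__
        attrs = six.iterkeys(vars(self))
        result = ', '.join(
            ['%s=%s' % (attr, repr(getattr(self, attr))) for attr in attrs])
        return '%s(%s)' % (class_name, result)

print(repr(Point(1, 2)))   # -> Point(x=1, y=2)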
Example #58
0
def lower_backtrack_blocks(match_query, query_metadata_table):
    """Lower Backtrack blocks into (QueryRoot, MarkLocation) pairs of blocks."""
    # The lowering works as follows:
    #   1. Upon seeing a Backtrack block, end the current traversal (if non-empty).
    #   2. Start new traversal from the type and location to which the Backtrack pointed.
    #   3. If the Backtrack block had an associated MarkLocation, ensure that location is marked
    #      as equivalent to the location where the Backtrack pointed.
    #   4. Rewrite all expressions that reference such revisit locations, making them refer to
    #      the revisit origin location instead.
    new_match_traversals = []

    locations_needing_translation = set()

    for current_match_traversal in match_query.match_traversals:
        new_traversal = []
        for step in current_match_traversal:
            if not isinstance(step.root_block, Backtrack):
                new_traversal.append(step)
            else:
                # 1. Upon seeing a Backtrack block, end the current traversal (if non-empty).
                if new_traversal:
                    new_match_traversals.append(new_traversal)
                    new_traversal = []

                backtrack_location = step.root_block.location
                backtrack_location_info = query_metadata_table.get_location_info(
                    backtrack_location)

                # 2. Start new traversal from the type and location to which the Backtrack pointed.
                new_root_block = QueryRoot({backtrack_location_info.type.name})
                new_as_block = MarkLocation(backtrack_location)

                # 3. If the Backtrack block had an associated MarkLocation, mark that location
                #    as equivalent to the location where the Backtrack pointed.
                if step.as_block is not None:
                    locations_needing_translation.add(step.as_block.location)

                if step.coerce_type_block is not None:
                    raise AssertionError(
                        u"Encountered type coercion in a MatchStep with "
                        u"a Backtrack root block, this is unexpected: {} {}".
                        format(step, match_query))

                new_step = step._replace(root_block=new_root_block,
                                         as_block=new_as_block)
                new_traversal.append(new_step)

        new_match_traversals.append(new_traversal)

    new_match_query = match_query._replace(
        match_traversals=new_match_traversals)

    location_translations = make_revisit_location_translations(
        query_metadata_table)

    if locations_needing_translation != set(
            six.iterkeys(location_translations)):
        raise AssertionError(
            u"Unexpectedly, the revisit location translations table computed from "
            u"the query metadata table did not match the locations needing "
            u"translation. This is a bug. {} {}".format(
                location_translations, locations_needing_translation))

    return _translate_equivalent_locations(new_match_query,
                                           location_translations)
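
Steps 1 and 2 of the numbered plan amount to splitting each traversal at marker blocks and starting a new traversal from the marked location. Below is a generic pure-Python sketch of that splitting pattern; the step/marker representation is a stand-in, not the compiler's IR.

def split_traversal(steps, is_marker):
    traversals, current = [], []
    for step in steps:
        if not is_marker(step):
            current.append(step)
        else:
            # 1. End the current traversal (if non-empty).
            if current:
                traversals.append(current)
                current = []
            # 2. Start a new traversal from the location the marker points to.
            current.append(("root", step[1]))
    traversals.append(current)
    return traversals

steps = [("step", "a"), ("step", "b"), ("backtrack", "a"), ("step", "c")]
print(split_traversal(steps, lambda s: s[0] == "backtrack"))
# -> [[('step', 'a'), ('step', 'b')], [('root', 'a'), ('step', 'c')]]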
Example #59
0
 def get_objids(self):
     return six.iterkeys(self.offsets)
 def __str__(self):
     attrs = six.iterkeys(vars(self))
     result = ', '.join(
         ['%s : %s' % (attr, str(getattr(self, attr))) for attr in attrs])
     return '{%s}' % result