Example #1
    def _cross_reference_elements(self):
        """
        Links the elements to nodes, properties (and materials depending on
        the card).
        """
        for elem in itervalues(self.elements):
            try:
                elem.cross_reference(self)
            except (SyntaxError, RuntimeError, AssertionError, KeyError, ValueError) as e:
                self._ixref_errors += 1
                var = traceback.format_exception_only(type(e), e)
                self._stored_xref_errors.append((elem, var))
                if self._ixref_errors > self._nxref_errors:
                    self.pop_xref_errors()

        for elem in itervalues(self.rigid_elements):
            try:
                elem.cross_reference(self)
            except (SyntaxError, RuntimeError, AssertionError, KeyError, ValueError) as e:
                self._ixref_errors += 1
                var = traceback.format_exception_only(type(e), e)
                self._stored_xref_errors.append((elem, var))
                if self._ixref_errors > self._nxref_errors:
                    self.pop_xref_errors()

        for elem in itervalues(self.plotels):
            try:
                elem.cross_reference(self)
            except (SyntaxError, RuntimeError, AssertionError, KeyError, ValueError) as e:
                self._ixref_errors += 1
                var = traceback.format_exception_only(type(e), e)
                self._stored_xref_errors.append((elem, var))
                if self._ixref_errors > self._nxref_errors:
                    self.pop_xref_errors()
Example #2
 def names_to_rpm_deps(self, python_names):
     # Given a set of packages in Python namespace, return the equivalent
     # Requires and Conflicts in RPM namespace.
     requires, conflicts = self._convert_names_to_rpm(python_names, only_name=False)
     requires_list = [req for value in six.itervalues(requires) for req in value]
     conflicts_list = [req for value in six.itervalues(conflicts) for req in value]
     return requires_list, conflicts_list
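The interesting bit is the nested comprehension that flattens a dict of lists into a single flat list. A minimal sketch of that pattern with made-up package names (plain Python 3, so .values() stands in for six.itervalues()):

# Flattening a dict of lists into one list, as in names_to_rpm_deps() above.
# The package names below are hypothetical.
requires = {
    'six': ['python-six >= 1.9'],
    'requests': ['python-requests', 'python-chardet'],
}
requires_list = [req for value in requires.values() for req in value]
print(requires_list)  # ['python-six >= 1.9', 'python-requests', 'python-chardet']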
Example #3
def cossim(vec1, vec2):
    """Get cosine similarity between two sparse vectors.
    The similarity is a number between <-1.0, 1.0>, higher is more similar.

    Parameters
    ----------
    vec1 : list of (int, float)
        Vector in BoW format
    vec2 : list of (int, float)
        Vector in BoW format

    Returns
    -------
    float
        Cosine similarity between `vec1` and `vec2`.

    """
    vec1, vec2 = dict(vec1), dict(vec2)
    if not vec1 or not vec2:
        return 0.0
    vec1len = 1.0 * math.sqrt(sum(val * val for val in itervalues(vec1)))
    vec2len = 1.0 * math.sqrt(sum(val * val for val in itervalues(vec2)))
    assert vec1len > 0.0 and vec2len > 0.0, "sparse documents must not contain any explicit zero entries"
    if len(vec2) < len(vec1):
        vec1, vec2 = vec2, vec1  # swap references so that we iterate over the shorter vector
    result = sum(value * vec2.get(index, 0.0) for index, value in iteritems(vec1))
    result /= vec1len * vec2len  # rescale by vector lengths
    return result
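A minimal usage sketch with made-up bag-of-words vectors; it assumes the cossim() above is in scope (i.e. math and six's itervalues/iteritems are imported):

# Two sparse vectors as (term_id, weight) pairs; the values are hypothetical.
vec1 = [(0, 1.0), (2, 3.0)]
vec2 = [(0, 2.0), (1, 1.0), (2, 1.0)]
# dot product over shared terms: 1.0*2.0 + 3.0*1.0 = 5.0
# norms: sqrt(1 + 9) and sqrt(4 + 1 + 1)
print(cossim(vec1, vec2))  # ~0.6455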
Example #4
    def _cross_reference_materials(self):
        """
        Links the materials to materials (e.g. MAT1, CREEP);
        often this is a pass statement.
        """
        for mat in itervalues(self.materials):  # MAT1
            try:
                mat.cross_reference(self)
            except (SyntaxError, RuntimeError, AssertionError, KeyError, ValueError) as e:
                self._ixref_errors += 1
                var = traceback.format_exception_only(type(e), e)
                self._stored_xref_errors.append((mat, var))
                if self._ixref_errors > self._nxref_errors:
                    self.pop_xref_errors()

        # CREEP - depends on MAT1
        data = [self.MATS1, self.MATS3, self.MATS8,
                self.MATT1, self.MATT2, self.MATT3, self.MATT4, self.MATT5,
                self.MATT8, self.MATT9]
        for material_deps in data:
            for mat in itervalues(material_deps):
                try:
                    mat.cross_reference(self)
                except (SyntaxError, RuntimeError, AssertionError, KeyError, ValueError) as e:
                    self._ixref_errors += 1
                    var = traceback.format_exception_only(type(e), e)
                    self._stored_xref_errors.append((mat, var))
                    if self._ixref_errors > self._nxref_errors:
                        self.pop_xref_errors()
Example #5
    def _setup_communicators(self, comm, parent_dir):
        """
        Assign communicator to this `Component`.

        Args
        ----
        comm : an MPI communicator (real or fake)
            The communicator being offered by the parent system.

        parent_dir : str
            The absolute directory of the parent, or '' if unspecified. Used to
            determine the absolute directory of all FileRefs.

        """
        super(Component, self)._setup_communicators(comm, parent_dir)

        # set absolute directories of any FileRefs
        for meta in chain(itervalues(self._init_unknowns_dict),
                          itervalues(self._init_params_dict)):
            val = meta['val']
            #if var is a FileRef, set its absolute directory
            if isinstance(val, FileRef):
                self._fileref_setup(val)

        if not self.is_active():
            for meta in itervalues(self._init_params_dict):
                meta['remote'] = True
            for meta in itervalues(self._init_unknowns_dict):
                meta['remote'] = True
Example #6
 def get_cell_info_for_neighbors(self):
     """Return cell information for all neighbor cells."""
     cell_list = [cell.get_cell_info()
             for cell in six.itervalues(self.child_cells)]
     cell_list.extend([cell.get_cell_info()
             for cell in six.itervalues(self.parent_cells)])
     return cell_list
Example #7
def _convert_aero(model, xyz_scale, time_scale, weight_scale):
    """
    Converts the aero cards
      - CAEROx, PAEROx, SPLINEx, AECOMP, AELIST, AEPARAM, AESTAT, AESURF, AESURFS
    """
    area_scale = xyz_scale ** 2
    velocity_scale = xyz_scale / time_scale
    pressure_scale = weight_scale / xyz_scale ** 2
    density_scale = weight_scale / xyz_scale ** 3
    for aero in itervalues(model.aero):
    #if hasattr(model, 'aero'):
        #aero = model.aero
        print(aero.object_attributes())
        aero.refc *= xyz_scale
        aero.refb *= xyz_scale
        aero.sref *= area_scale
        aero.velocity *= velocity_scale
        assert np.allclose(aero.density, 1.0), aero
    for aeros in itervalues(model.aeros):
        #print(aeros)
        #print(aeros.object_attributes())
        aeros.cref *= xyz_scale
        aeros.bref *= xyz_scale
        aeros.sref *= area_scale

    for caero in itervalues(model.caeros):
        if caero.type in ['CAERO1']:
            caero.p1 *= xyz_scale
            caero.p4 *= xyz_scale
            caero.x12 *= xyz_scale
            caero.x43 *= xyz_scale
        else:
            raise NotImplementedError(caero)
    #for paero in itervalues(model.paeros):
        #paero.cross_reference(model)
    for trim in itervalues(model.trims):
        trim.q *= pressure_scale
    #for spline in itervalues(model.splines):
        #spline.convert(model)
    #for aecomp in itervalues(model.aecomps):
        #aecomp.cross_reference(model)
    #for aelist in itervalues(model.aelists):
        #aelist.cross_reference(model)
    #for aeparam in itervalues(model.aeparams):
        #aeparam.cross_reference(model)
    #for aestat in itervalues(model.aestats):
        #aestat.cross_reference(model)
    #for aesurf in itervalues(model.aesurf):
        #aesurf.cross_reference(model)
    #for aesurfs in itervalues(model.aesurfs):
        #aesurfs.cross_reference(model)

    # update only the FLFACTs corresponding to density
    flfact_ids = set([])
    for flutter in itervalues(model.flutters):
        flfact = flutter.density
        flfact_ids.add(flfact.sid)
    for flfact_id in flfact_ids: # density
        flfact = model.flfacts[flfact_id]
        flfact.factors *= density_scale
Example #8
    def _cross_reference_materials(self):
        """
        Links the materials to materials (e.g. MAT1, CREEP);
        often this is a pass statement.
        """
        for mat in itervalues(self.materials):  # MAT1
            try:
                mat.cross_reference(self)
            except:
                msg = "Couldn't cross reference Material\n%s" % (str(mat))
                self.log.error(msg)
                raise

        # CREEP - depends on MAT1
        data = [self.MATS1, self.MATS3, self.MATS8,
                self.MATT1, self.MATT2, self.MATT3, self.MATT4, self.MATT5,
                self.MATT8, self.MATT9]
        for material_deps in data:
            for mat in itervalues(material_deps):
                try:
                    mat.cross_reference(self)
                except:
                    msg = "Couldn't cross reference Material\n%s" % (str(mat))
                    self.log.error(msg)
                    raise
Example #9
    def test_multiple_fonts(self):
        vera = os.path.join(os.path.dirname(__file__), "..", "fonts", "Vera.ttf")
        __current_test_mode_setting = settings.CAPTCHA_FONT_PATH
        settings.CAPTCHA_FONT_PATH = vera

        for key in [store.hashkey for store in six.itervalues(self.stores)]:
            response = self.client.get(reverse("captcha-image", kwargs=dict(key=key)))
            self.assertEqual(response.status_code, 200)
            self.assertEqual(response._headers.get("content-type"), ("Content-Type", "image/png"))

        settings.CAPTCHA_FONT_PATH = [vera, vera, vera]
        for key in [store.hashkey for store in six.itervalues(self.stores)]:
            response = self.client.get(reverse("captcha-image", kwargs=dict(key=key)))
            self.assertEqual(response.status_code, 200)
            self.assertEqual(response._headers.get("content-type"), ("Content-Type", "image/png"))

        settings.CAPTCHA_FONT_PATH = False
        for key in [store.hashkey for store in six.itervalues(self.stores)]:
            try:
                response = self.client.get(reverse("captcha-image", kwargs=dict(key=key)))
                self.fail()
            except ImproperlyConfigured:
                pass

        settings.CAPTCHA_FONT_PATH = __current_test_mode_setting
Example #10
    def _setup_vectors(self, param_owners, parent,
                       top_unknowns=None, impl=None):
        """
        Set up local `VecWrappers` to store this component's variables.

        Args
        ----
        param_owners : dict
            a dictionary mapping `System` pathnames to the pathnames of
            parameters they are responsible for propagating. (ignored)

        parent : `Group`
            The parent `Group`.

        top_unknowns : `VecWrapper`, optional
            the `Problem` level unknowns `VecWrapper`

        impl : an implementation factory, optional
            Specifies the factory object used to create `VecWrapper` objects.
        """
        self.params = self.unknowns = self.resids = None
        self.dumat, self.dpmat, self.drmat = {}, {}, {}
        relevance = self._relevance

        if not self.is_active():
            return

        self._impl = impl

        # create map of relative name in parent to relative name in child
        self._relname_map = self._get_relname_map(parent.unknowns)

        # create storage for the relevant vecwrappers, keyed by
        # variable_of_interest
        all_vois = set([None])
        for group, vois in iteritems(relevance.groups):
            if group is not None:
                all_vois.update(vois)
                for voi in vois:
                    self._create_views(top_unknowns, parent, [],
                                       voi)

        # we don't get non-deriv vecs (u, p, r) unless we have a None group,
        # so force their creation here
        self._create_views(top_unknowns, parent, [], None)

        # create params vec entries for any unconnected params
        for meta in itervalues(self._params_dict):
            pathname = meta['pathname']
            name = self.params._scoped_abs_name(pathname)
            if name not in self.params:
                self.params._add_unconnected_var(pathname, meta)

        # cache this to speed up apply linear
        self._abs_inputs = {}
        for voi, vec in iteritems(self.dpmat):
            self._abs_inputs[voi] = {meta['pathname'] for meta in itervalues(vec)
                                         if not meta.get('pass_by_obj')}

        self._setup_gs_outputs(all_vois)
Example #11
 def __init__(self, wf_dict, gi=None):
     super(Workflow, self).__init__(wf_dict, gi=gi)
     missing_ids = []
     if gi:
         tools_list_by_id = [t.id for t in gi.tools.get_previews()]
     else:
         tools_list_by_id = []
     for k, v in six.iteritems(self.steps):
         # convert step ids to str for consistency with outer keys
         v['id'] = str(v['id'])
         for i in six.itervalues(v['input_steps']):
             i['source_step'] = str(i['source_step'])
         step = self._build_step(v, self)
         self.steps[k] = step
         if step.type == 'tool':
             if not step.tool_inputs or step.tool_id not in tools_list_by_id:
                 missing_ids.append(k)
     input_labels_to_ids = {}
     for id_, d in six.iteritems(self.inputs):
         input_labels_to_ids.setdefault(d['label'], set()).add(id_)
     tool_labels_to_ids = {}
     for s in six.itervalues(self.steps):
         if s.type == 'tool':
             tool_labels_to_ids.setdefault(s.tool_id, set()).add(s.id)
     object.__setattr__(self, 'input_labels_to_ids', input_labels_to_ids)
     object.__setattr__(self, 'tool_labels_to_ids', tool_labels_to_ids)
     dag, inv_dag = self._get_dag()
     heads, tails = set(dag), set(inv_dag)
     object.__setattr__(self, 'dag', dag)
     object.__setattr__(self, 'inv_dag', inv_dag)
     object.__setattr__(self, 'source_ids', heads - tails)
     assert self.data_input_ids == set(self.inputs)
     object.__setattr__(self, 'sink_ids', tails - heads)
     object.__setattr__(self, 'missing_ids', missing_ids)
Example #12
 def metric_variables(self):
   v = []
   for metric in six.itervalues(self._metrics):
     v += metric.variables
   for evaluator in six.itervalues(self._evaluators):
     v += evaluator.metric_variables
   return v
Example #13
    def start(self, register=True):
        self.running = True
        logger.info('starting %s at %s (pid=%s)', ', '.join(self.service_types), self.endpoint, os.getpid())
        self.recv_loop_greenlet = self.spawn(self.recv_loop)
        self.monitor.start()
        self.service_registry.on_start()
        self.event_system.on_start()

        for service in six.itervalues(self.installed_services):
            service.on_start()
            service.configure({})

        if register:
            for service_type, service in six.iteritems(self.installed_services):
                if not service.register_with_coordinator:
                    continue
                try:
                    self.service_registry.register(self, service_type)
                except RegistrationFailure:
                    logger.info("registration failed %s, %s", service_type, service)
                    self.stop()

        for interface in six.itervalues(self.installed_services):
            for pattern, handler in type(interface).event_dispatcher:
                self.subscribe(pattern)
Example #14
    def open(self, filename=None):
        """ Open config file and read it.
        """
        logging.debug(__name__ + ": open")
        if filename != None:
            self._filename = str(filename)
        global imported_configs
        self._isReplaceConfig = False
        self._history=None

# import input-config and make list of all imported configs
        for i in imported_configs.iterkeys():
            if i in sys.modules.keys():
                del sys.modules[i]
        sys.path.insert(0, os.path.dirname(self._filename))
        common_imports = sys.modules.copy()

        import imp
        theFile = open(self._filename)
        self._file = imp.load_module(os.path.splitext(os.path.basename(self._filename))[0].replace(".", "_"), theFile, self._filename, ("py", "r", 1))
        theFile.close()
        
        imported_configs = sys.modules.copy()
        for i in common_imports.iterkeys():
            del imported_configs[i]
        
# make dictionary that connects every cms-object with the file in which it is defined
        for j in six.itervalues(imported_configs):
          setj = set(dir(j))
          for entry in setj:
              if entry[0] != "_" and entry != "cms":
                source = 1
                for k in six.itervalues(imported_configs):
                    if hasattr(k, entry):
                      setk = set(dir(k))
                      if len(setk) < len(setj) and setk < setj:
                        source = 0
                if source == 1:
                    filen = self._filename
                    if hasattr(j, "__file__"):
                        filen = j.__file__
                    file_dict[entry] = filen

# collect all path/sequences/modules of the input-config in a list
        if self.process():
            self.setProcess(self.process())
            self._readHeaderInfo()
            self._history=self.process().dumpHistory()
            if not self._isReplaceConfig and hasattr(self.process(),"resetHistory"):
                self.process().resetHistory()
        else:
            self._initLists()
            for entry in dir(self._file):
                o=getattr(self._file, entry)
                if entry[0] != "_" and entry != "cms" and hasattr(o, "label_"):
                    getattr(self._file, entry).setLabel(entry)
                    text = os.path.splitext(os.path.basename(file_dict[o.label_()]))[0]
                    if text == os.path.splitext(os.path.basename(self._filename))[0] and not o in self._allObjects:
                        self._readRecursive(None, o)
        return True
Example #15
def mergedirs(listing):
    # type: (List[Dict[Text, Any]]) -> List[Dict[Text, Any]]
    r = []  # type: List[Dict[Text, Any]]
    ents = {}  # type: Dict[Text, Any]
    collided = set()  # type: Set[Text]
    for e in listing:
        if e["basename"] not in ents:
            ents[e["basename"]] = e
        elif e["class"] == "Directory":
            if e.get("listing"):
                ents[e["basename"]].setdefault("listing", []).extend(e["listing"])
            if ents[e["basename"]]["location"].startswith("_:"):
                ents[e["basename"]]["location"] = e["location"]
        elif e["location"] != ents[e["basename"]]["location"]:
            # same basename, different location, collision,
            # rename both.
            collided.add(e["basename"])
            e2 = ents[e["basename"]]

            e["basename"] = urllib.parse.quote(e["location"], safe="")
            e2["basename"] = urllib.parse.quote(e2["location"], safe="")

            e["nameroot"], e["nameext"] = os.path.splitext(e["basename"])
            e2["nameroot"], e2["nameext"] = os.path.splitext(e2["basename"])

            ents[e["basename"]] = e
            ents[e2["basename"]] = e2
    for c in collided:
        del ents[c]
    for e in itervalues(ents):
        if e["class"] == "Directory" and "listing" in e:
            e["listing"] = mergedirs(e["listing"])
    r.extend(itervalues(ents))
    return r
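A minimal usage sketch of the collision handling; the listing below is hypothetical and assumes mergedirs() above is in scope (with urllib, os and itervalues imported):

# Two files share a basename but live at different locations.
listing = [
    {"class": "File", "basename": "a.txt", "location": "file:///tmp/x/a.txt"},
    {"class": "File", "basename": "a.txt", "location": "file:///tmp/y/a.txt"},
]
for e in mergedirs(listing):
    # Both entries survive, renamed to their URL-quoted locations.
    print(e["basename"])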
Example #16
    def update(self, param):
        """Invokes hook functions and updates the parameter.

        Args:
            param (~chainer.Variable): Variable to be updated.

        """
        if not self.enabled:
            return

        self.t += 1

        if self._use_fp32_update and param.dtype == numpy.float16:
            if self._fp32_param is None:
                self._fp32_param = variable.Variable(
                    param.array.astype(numpy.float32),
                    name=param.name)
            fp32_param = self._fp32_param
            fp32_param.grad = param.grad.astype(numpy.float32)

            if fp32_param.data is not None:
                self._prepare(fp32_param)
            for hook in six.itervalues(self._hooks):
                hook(self, fp32_param)
            self.update_core(fp32_param)

            param.data = fp32_param.data.astype(param.dtype)
            fp32_param.grad = None
        else:
            if param.data is not None:
                self._prepare(param)
            for hook in six.itervalues(self._hooks):
                hook(self, param)
            self.update_core(param)
Example #17
 def _iter_vars(self):
     for var in itervalues(self.vars):
         if var.is_expression() or not var.is_indexed():
             yield var
         else:
             for v in itervalues(var):
                 yield v
Example #18
    def test_load_directory_caching_with_files_updated(self):
        self.create_config_file('policy.d/a.conf', POLICY_A_CONTENTS)

        self.enforcer.load_rules(False)
        self.assertIsNotNone(self.enforcer.rules)

        old = six.next(six.itervalues(
            self.enforcer._policy_dir_mtimes))
        self.assertEqual(1, len(self.enforcer._policy_dir_mtimes))

        # Touch the file
        conf_path = os.path.join(self.config_dir, 'policy.d/a.conf')
        stinfo = os.stat(conf_path)
        os.utime(conf_path, (stinfo.st_atime + 10, stinfo.st_mtime + 10))

        self.enforcer.load_rules(False)
        self.assertEqual(1, len(self.enforcer._policy_dir_mtimes))
        self.assertEqual(old, six.next(six.itervalues(
            self.enforcer._policy_dir_mtimes)))

        loaded_rules = jsonutils.loads(str(self.enforcer.rules))
        self.assertEqual('is_admin:True', loaded_rules['admin'])
        self.check_loaded_files([
            'policy.json',
            'policy.d/a.conf',
            'policy.d/a.conf',
        ])
Example #19
    def shutdown(self, wait=True):
        """
        Shuts down the scheduler, along with its executors and job stores.

        Does not interrupt any currently running jobs.

        :param bool wait: ``True`` to wait until all currently executing jobs have finished
        :raises SchedulerNotRunningError: if the scheduler has not been started yet

        """
        if self.state == STATE_STOPPED:
            raise SchedulerNotRunningError

        self.state = STATE_STOPPED

        with self._jobstores_lock, self._executors_lock:
            # Shut down all executors
            for executor in six.itervalues(self._executors):
                executor.shutdown(wait)

            # Shut down all job stores
            for jobstore in six.itervalues(self._jobstores):
                jobstore.shutdown()

        self._logger.info('Scheduler has been shut down')
        self._dispatch_event(SchedulerEvent(EVENT_SCHEDULER_SHUTDOWN))
Example #20
    def record_iteration(self, params, unknowns, resids, metadata):
        """Record the current iteration.

        Args
        ----
        params : `VecWrapper`
            `VecWrapper` containing parameters. (p)

        unknowns : `VecWrapper`
            `VecWrapper` containing outputs and states. (u)

        resids : `VecWrapper`
            `VecWrapper` containing residuals. (r)

        metadata : dict
            Dictionary containing execution metadata (e.g. iteration coordinate).
        """

        iteration_coordinate = metadata['coord']

        if self.options['record_params']:
            params = self._filter_vector(params, 'p', iteration_coordinate)
        else:
            params = None
        if self.options['record_unknowns']:
            unknowns = self._filter_vector(unknowns, 'u', iteration_coordinate)
        else:
            unknowns = None
        if self.options['record_resids']:
            resids = self._filter_vector(resids, 'r', iteration_coordinate)
        else:
            resids = None

        if self._wrote_header is False:
            header = []
            if params is not None:
                header.extend(params)
            if unknowns is not None:
                header.extend(unknowns)
            if resids is not None:
                header.extend(resids)
            if self.options['record_derivs']:
                header.append('Derivatives')
            self.ncol = len(header)
            self.writer.writerow(header)
            self._wrote_header = True

        row = []
        if params is not None:
            row.extend(serialize(value) for value in itervalues(params))
        if unknowns is not None:
            row.extend(serialize(value) for value in itervalues(unknowns))
        if resids is not None:
            row.extend(serialize(value) for value in itervalues(resids))
        if self.options['record_derivs']:
            row.append(None)
        self.writer.writerow(row)

        if self.out:
            self.out.flush()
Example #21
def test_basic_request(graph, groups, permissions, session, standard_graph, users):  # noqa: F811
    group_sre = groups["team-sre"]
    group_not_sre = [g for name, g in iteritems(groups) if name != "team-sre"]

    assert not any(
        [
            get_requests_by_group(session, group, status="pending").all()
            for group in itervalues(groups)
        ]
    ), "no group should start with pending requests"

    group_sre.add_member(users["*****@*****.**"], users["*****@*****.**"], reason="for the lulz")
    session.commit()

    request_not_sre = [
        get_requests_by_group(session, group, status="pending").all() for group in group_not_sre
    ]
    assert not any(request_not_sre), "only affected group should show pending requests"
    request_sre = get_requests_by_group(session, group_sre, status="pending").all()
    assert len(request_sre) == 1, "affected group should have request"

    request = session.query(Request).filter_by(id=request_sre[0].id).scalar()
    request.update_status(users["*****@*****.**"], "actioned", "for being a good person")
    session.commit()

    assert not any(
        [
            get_requests_by_group(session, group, status="pending").all()
            for group in itervalues(groups)
        ]
    ), "no group should have requests after being actioned"
Example #22
    def _promoted(self, name):
        """Determine if the given variable name is being promoted from this
        `System`.

        Args
        ----
        name : str
            The name of a variable, relative to this `System`.

        Returns
        -------
        bool
            True if the named variable is being promoted from this `System`.

        Raises
        ------
        TypeError
            if the promoted variable specifications are not in a valid format
        """
        if isinstance(self._promotes, string_types):
            raise TypeError("'%s' promotes must be specified as a list, "
                            "tuple or other iterator of strings, but '%s' was specified" %
                            (self.name, self._promotes))

        for prom in self._promotes:
            if fnmatch(name, prom):
                for meta in chain(itervalues(self._params_dict),
                                  itervalues(self._unknowns_dict)):
                    if name == meta.get('promoted_name'):
                        return True

        return False
Example #23
    def get_signals_to_object(self, sink_object):
        """Get the signals received by a sink object.

        Returns
        -------
        {port : [ReceptionSpec, ...], ...}
            Dictionary mapping ports to the lists of objects specifying
            incoming signals.
        """
        signals = collections.defaultdict(list)

        # For all connections we have a reference to, identify those which
        # terminate at the given object. For those that do, add a new entry
        # to the signal dictionary.
        params_and_sinks = chain(*chain(*(itervalues(x) for x in
                                          itervalues(self._connections))))
        for param_and_sinks in params_and_sinks:
            # tp_sinks are pairs of transmission parameters and sinks
            # Extract the transmission parameters
            sig_params, _ = param_and_sinks.parameters

            # For each sink, if the sink object is the specified object
            # then add signal to the list.
            for sink in param_and_sinks.sinks:
                if sink.sink_object is sink_object:
                    # This is the desired sink object, so remember the
                    # signal. First construct the reception
                    # specification.
                    signals[sink.port].append(
                        ReceptionSpec(sig_params, sink.reception_parameters)
                    )

        return signals
Example #24
def _Net_batch(self, blobs):
    """
    Batch blob lists according to net's batch size.

    Take
    blobs: Keys are blob names and values are lists of blobs (of any length).
           Naturally, all the lists should have the same length.

    Give (yield)
    batch: {blob name: list of blobs} dict for a single batch.
    """
    num = len(six.next(six.itervalues(blobs)))
    batch_size = six.next(six.itervalues(self.blobs)).num
    remainder = num % batch_size
    num_batches = num // batch_size

    # Yield full batches.
    for b in range(num_batches):
        i = b * batch_size
        yield {name: blobs[name][i:i + batch_size] for name in blobs}

    # Yield last padded batch, if any.
    if remainder > 0:
        padded_batch = {}
        for name in blobs:
            padding = np.zeros((batch_size - remainder,)
                               + blobs[name].shape[1:])
            padded_batch[name] = np.concatenate([blobs[name][-remainder:],
                                                 padding])
        yield padded_batch
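A side note on the six.next(six.itervalues(...)) idiom used above to peek at an arbitrary value of a dict; a minimal sketch of the plain Python 3 equivalent with hypothetical blob data:

# Peek at one value of a dict without materializing a list of all values.
blobs = {'data': [1, 2, 3, 4, 5], 'label': [0, 1, 0, 1, 1]}  # hypothetical

# six (Python 2/3):  num = len(six.next(six.itervalues(blobs)))
num = len(next(iter(blobs.values())))  # plain Python 3
print(num)  # 5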
Example #25
def test_pre_fit():
    y0 = synthetic_spectrum()
    x0 = np.arange(len(y0))
    # the following items should appear
    item_list = ['Ar_K', 'Fe_K', 'compton', 'elastic']

    param = get_para()

    # fit without weights
    x, y_total, area_v = linear_spectrum_fitting(x0, y0, param, weights=None)
    for v in item_list:
        assert_true(v in y_total)
    sum1 = np.sum(six.itervalues(y_total))
    # r squares as a measurement
    r1 = 1- np.sum((sum1-y0)**2)/np.sum((y0-np.mean(y0))**2)
    assert_true(r1 > 0.85)

    # fit with weights
    w = 1/np.sqrt(y0)
    x, y_total, area_v = linear_spectrum_fitting(x0, y0, param, weights=1/np.sqrt(y0))
    for v in item_list:
        assert_true(v in y_total)
    sum2 = np.sum(six.itervalues(y_total))
    # r squares as a measurement
    r2 = 1- np.sum((sum2-y0)**2)/np.sum((y0-np.mean(y0))**2)
    assert_true(r2 > 0.85)
Example #26
    def __init__(self, history_specs, initial_sids, initial_dt):

        # History specs to be served by this container.
        self.history_specs = history_specs
        self.frequency_groups = \
            group_by_frequency(itervalues(self.history_specs))

        # The set of fields specified by all history specs
        self.fields = set(spec.field for spec in itervalues(history_specs))

        # This panel contains raw minutes for periods that haven't been fully
        # completed.  When a frequency period rolls over, these minutes are
        # digested using some sort of aggregation call on the panel (e.g. `sum`
        # for volume, `max` for high, `min` for low, etc.).
        self.buffer_panel = self.create_buffer_panel(
            initial_sids,
            initial_dt,
        )

        # Dictionaries with Frequency objects as keys.
        self.digest_panels, self.cur_window_starts, self.cur_window_closes = \
            self.create_digest_panels(initial_sids, initial_dt)

        # Populating initial frames here, so that the cost of creating the
        # initial frames does not show up when profiling.  These frames are
        # cached since mid-stream creation of containing data frames on every
        # bar is expensive.
        self.create_return_frames(initial_dt)

        # Helps prop up the prior day panel against having a nan, when the data
        # has been seen.
        self.last_known_prior_values = {field: {} for field in self.fields}
Example #27
def _Net_forward_all(self, blobs=None, **kwargs):
    """
    Run net forward in batches.

    Take
    blobs: list of blobs to extract as in forward()
    kwargs: Keys are input blob names and values are blob ndarrays.
            Refer to forward().

    Give
    all_outs: {blob name: list of blobs} dict.
    """
    # Collect outputs from batches
    all_outs = {out: [] for out in set(self.outputs + (blobs or []))}
    for batch in self._batch(kwargs):
        outs = self.forward(blobs=blobs, **batch)
        for out, out_blob in six.iteritems(outs):
            all_outs[out].extend(out_blob.copy())
    # Package in ndarray.
    for out in all_outs:
        all_outs[out] = np.asarray(all_outs[out])
    # Discard padding.
    pad = len(six.next(six.itervalues(all_outs))) - len(six.next(six.itervalues(kwargs)))
    if pad:
        for out in all_outs:
            all_outs[out] = all_outs[out][:-pad]
    return all_outs
Example #28
  def build_update(self):
    """
    Simulate Langevin dynamics using a discretized integrator. Its
    discretization error goes to zero as the learning rate decreases.
    """
    old_sample = {z: tf.gather(qz.params, tf.maximum(self.t - 1, 0))
                  for z, qz in six.iteritems(self.latent_vars)}

    # Simulate Langevin dynamics.
    learning_rate = self.step_size / tf.cast(self.t + 1, tf.float32)
    grad_log_joint = tf.gradients(self._log_joint(old_sample),
                                  list(six.itervalues(old_sample)))
    sample = {}
    for z, qz, grad_log_p in \
        zip(six.iterkeys(self.latent_vars),
            six.itervalues(self.latent_vars),
            grad_log_joint):
      event_shape = qz.get_event_shape()
      normal = Normal(mu=tf.zeros(event_shape),
                      sigma=learning_rate * tf.ones(event_shape))
      sample[z] = old_sample[z] + 0.5 * learning_rate * grad_log_p + \
          normal.sample()

    # Update Empirical random variables.
    assign_ops = []
    variables = {x.name: x for x in
                 tf.get_default_graph().get_collection(tf.GraphKeys.VARIABLES)}
    for z, qz in six.iteritems(self.latent_vars):
      variable = variables[qz.params.op.inputs[0].op.inputs[0].name]
      assign_ops.append(tf.scatter_update(variable, self.t, sample[z]))

    # Increment n_accept.
    assign_ops.append(self.n_accept.assign_add(1))
    return tf.group(*assign_ops)
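Schematically, the sample[z] line above is the Langevin step z + (lr/2) * grad log p(z) + noise, with the noise scale taken directly from the code (sigma = learning_rate). A NumPy sketch of that single step, with hypothetical names and shapes:

# NumPy sketch of the Langevin update implemented above (not the Edward API).
import numpy as np

def langevin_step(z, grad_log_p, learning_rate, rng=np.random):
    # z_new = z + 0.5 * lr * grad log p(z) + Normal(0, lr), mirroring sample[z] above
    noise = rng.normal(loc=0.0, scale=learning_rate, size=np.shape(z))
    return z + 0.5 * learning_rate * grad_log_p + noise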
Example #29
 def stop_all(self):
     for p in itervalues(self._programs):
         p.log_cpu_times()
     for p in itervalues(self._programs):
         p.stop()
     for p in itervalues(self._programs):
         p.wait_or_kill()
Example #30
    def __init__(self, iterator, optimizer, converter=convert.concat_examples,
                 device=None, loss_func=None, loss_scale=None,
                 auto_new_epoch=True):
        if device is not None:
            device = backend._get_device_compat(device)

        if isinstance(iterator, iterator_module.Iterator):
            iterator = {'main': iterator}
        self._iterators = iterator

        if not isinstance(optimizer, dict):
            optimizer = {'main': optimizer}
        self._optimizers = optimizer

        if device is not None:
            for optimizer in six.itervalues(self._optimizers):
                optimizer.target._to_device(
                    device, skip_between_cupy_devices=True)

        self.converter = converter
        self.loss_func = loss_func
        self.device = device
        self.iteration = 0

        self.loss_scale = loss_scale
        if loss_scale is not None:
            for optimizer in six.itervalues(self._optimizers):
                optimizer.set_loss_scale(loss_scale)

        self.auto_new_epoch = auto_new_epoch
        if auto_new_epoch:
            for o in six.itervalues(self._optimizers):
                o.use_auto_new_epoch = True
Example #31
    def _squash_layers(self, layers_to_squash, layers_to_move):
        self.log.info("Starting squashing...")

        # Reverse the layers to squash - we begin with the newest one
        # to make the tar lighter
        layers_to_squash.reverse()

        # Find all files in layers that we don't squash
        files_in_layers_to_move = self._files_in_layers(
            layers_to_move, self.old_image_dir)

        with tarfile.open(self.squashed_tar, 'w', format=tarfile.PAX_FORMAT) as squashed_tar:
            to_skip = []
            skipped_markers = {}
            skipped_hard_links = []
            skipped_sym_links = []
            skipped_files = []
            # List of filenames in the squashed archive
            squashed_files = []

            for layer_id in layers_to_squash:
                layer_tar_file = os.path.join(
                    self.old_image_dir, layer_id, "layer.tar")

                self.log.info("Squashing file '%s'..." % layer_tar_file)

                # Open the existing layer to squash
                with tarfile.open(layer_tar_file, 'r', format=tarfile.PAX_FORMAT) as layer_tar:
                    # Find all marker files for all layers
                    # We need the list of marker files upfront, so we can
                    # skip unnecessary files
                    members = layer_tar.getmembers()
                    markers = self._marker_files(layer_tar, members)

                    skipped_sym_link_files = {}
                    skipped_hard_link_files = {}
                    files_to_skip = []

                    # Iterate over the marker files found for this particular
                    # layer and if in the squashed layers file corresponding
                    # to the marker file is found, then skip both files
                    for marker, marker_file in six.iteritems(markers):
                        actual_file = marker.name.replace('.wh.', '')
                        files_to_skip.append(self._normalize_path(actual_file))
                        skipped_markers[marker] = marker_file

                    self.log.debug(
                        "Searching for symbolic links in '%s' archive..." % layer_tar_file)

                    # Scan for all symlinks in the layer and save them
                    # for later processing.
                    for member in members:
                        if member.issym():
                            normalized_name = self._normalize_path(member.name)
                            skipped_sym_link_files[normalized_name] = member
                            continue

                    to_skip.append(files_to_skip)
                    skipped_sym_links.append(skipped_sym_link_files)

                    self.log.debug("Done, found %s files" %
                                   len(skipped_sym_link_files))
                    skipped_files_in_layer = {}

                    # Copy all the files to the new tar
                    for member in members:
                        # Skip all symlinks, we'll investigate them later
                        if member.issym():
                            continue

                        normalized_name = self._normalize_path(member.name)

                        if member in six.iterkeys(skipped_markers):
                            self.log.debug(
                                "Skipping '%s' marker file, at the end of squashing we'll see if it's necessary to add it back" % normalized_name)
                            continue

                        if self._file_should_be_skipped(normalized_name, skipped_sym_links):
                            self.log.debug(
                                "Skipping '%s' file because it's on a symlink path, at the end of squashing we'll see if it's necessary to add it back" % normalized_name)

                            if member.isfile():
                                f = (member, layer_tar.extractfile(member))
                            else:
                                f = (member, None)

                            skipped_files_in_layer[normalized_name] = f
                            continue

                        # Skip files that are marked to be skipped
                        if self._file_should_be_skipped(normalized_name, to_skip):
                            self.log.debug(
                                "Skipping '%s' file because it's on the list to skip files" % normalized_name)
                            continue

                        # Check if file is already added to the archive
                        if normalized_name in squashed_files:
                            # The file already exists in the squashed archive; skip it because
                            # the file we want to add is older than the one already there.
                            # This holds because we squash in reverse order, from the newest
                            # layer to the oldest.
                            self.log.debug(
                                "Skipping '%s' file because it's older than file already added to the archive" % normalized_name)
                            continue

                        # Hard links are processed after everything else
                        if member.islnk():
                            skipped_hard_link_files[normalized_name] = member
                            continue

                        content = None

                        if member.isfile():
                            content = layer_tar.extractfile(member)

                        self._add_file(member, content,
                                       squashed_tar, squashed_files, to_skip)

                    skipped_hard_links.append(skipped_hard_link_files)
                    skipped_files.append(skipped_files_in_layer)

            self._add_hardlinks(squashed_tar, squashed_files,
                                to_skip, skipped_hard_links)
            added_symlinks = self._add_symlinks(
                squashed_tar, squashed_files, to_skip, skipped_sym_links)

            for layer in skipped_files:
                for member, content in six.itervalues(layer):
                    self._add_file(member, content, squashed_tar,
                                   squashed_files, added_symlinks)

            if files_in_layers_to_move:
                self._add_markers(skipped_markers, squashed_tar,
                                  files_in_layers_to_move, added_symlinks)

        self.log.info("Squashing finished!")
Example #32
 def get_section_name(self, section):
     cfn_to_hot_attrs = dict(
         zip(six.itervalues(self._HOT_TO_CFN_ATTRS),
             six.iterkeys(self._HOT_TO_CFN_ATTRS)))
     return cfn_to_hot_attrs.get(section, section)
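The zip(itervalues(d), iterkeys(d)) pairing inverts the mapping, and it is safe only because both iterators walk the same dict in the same order. A sketch of the same inversion in plain Python 3, with a hypothetical attribute mapping:

# Invert a key->value mapping (hypothetical HOT -> CFN attribute names).
_HOT_TO_CFN_ATTRS = {'parameters': 'Parameters', 'outputs': 'Outputs'}
cfn_to_hot_attrs = {v: k for k, v in _HOT_TO_CFN_ATTRS.items()}
print(cfn_to_hot_attrs.get('Parameters', 'Parameters'))  # 'parameters'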
Example #33
def process_message_event(event_template, users):
    # type: (Mapping[str, Any], Iterable[Mapping[str, Any]]) -> None
    realm_presences = {
        int(k): v
        for k, v in event_template['presences'].items()
    }  # type: Dict[int, Dict[Text, Dict[str, Any]]]
    sender_queue_id = event_template.get('sender_queue_id',
                                         None)  # type: Optional[str]
    message_dict_markdown = event_template[
        'message_dict_markdown']  # type: Dict[str, Any]
    message_dict_no_markdown = event_template[
        'message_dict_no_markdown']  # type: Dict[str, Any]
    sender_id = message_dict_markdown['sender_id']  # type: int
    message_id = message_dict_markdown['id']  # type: int
    message_type = message_dict_markdown['type']  # type: str
    sending_client = message_dict_markdown['client']  # type: Text

    # To remove duplicate clients: Maps queue ID to {'client': Client, 'flags': flags}
    send_to_clients = {}  # type: Dict[str, Dict[str, Any]]

    # Extra user-specific data to include
    extra_user_data = {}  # type: Dict[int, Any]

    if 'stream_name' in event_template and not event_template.get(
            "invite_only"):
        for client in get_client_descriptors_for_realm_all_streams(
                event_template['realm_id']):
            send_to_clients[client.event_queue.id] = {
                'client': client,
                'flags': None
            }
            if sender_queue_id is not None and client.event_queue.id == sender_queue_id:
                send_to_clients[client.event_queue.id]['is_sender'] = True

    for user_data in users:
        user_profile_id = user_data['id']  # type: int
        flags = user_data.get('flags', [])  # type: Iterable[str]

        for client in get_client_descriptors_for_user(user_profile_id):
            send_to_clients[client.event_queue.id] = {
                'client': client,
                'flags': flags
            }
            if sender_queue_id is not None and client.event_queue.id == sender_queue_id:
                send_to_clients[client.event_queue.id]['is_sender'] = True

        # If the recipient was offline and the message was a single or group PM to them,
        # or they were @-notified, potentially notify more immediately
        received_pm = message_type == "private" and user_profile_id != sender_id
        mentioned = 'mentioned' in flags
        idle = receiver_is_idle(user_profile_id, realm_presences)
        always_push_notify = user_data.get('always_push_notify', False)
        if (received_pm or mentioned) and (idle or always_push_notify):
            notice = build_offline_notification(user_profile_id, message_id)
            queue_json_publish("missedmessage_mobile_notifications", notice,
                               lambda notice: None)
            notified = dict(push_notified=True)  # type: Dict[str, bool]
            # Don't send missed message emails if always_push_notify is True
            if idle:
                # We require RabbitMQ to do this, as we can't call the email handler
                # from the Tornado process. So if there's no RabbitMQ support, do nothing.
                queue_json_publish("missedmessage_emails", notice,
                                   lambda notice: None)
                notified['email_notified'] = True

            extra_user_data[user_profile_id] = notified

    for client_data in six.itervalues(send_to_clients):
        client = client_data['client']
        flags = client_data['flags']
        is_sender = client_data.get('is_sender', False)  # type: bool
        extra_data = extra_user_data.get(
            client.user_profile_id, None)  # type: Optional[Mapping[str, bool]]

        if not client.accepts_messages():
            # The actual check is the accepts_event() check below;
            # this line is just an optimization to avoid copying
            # message data unnecessarily
            continue

        if client.apply_markdown:
            message_dict = message_dict_markdown
        else:
            message_dict = message_dict_no_markdown

        # Make sure Zephyr mirroring bots know whether stream is invite-only
        if "mirror" in client.client_type_name and event_template.get(
                "invite_only"):
            message_dict = message_dict.copy()
            message_dict["invite_only_stream"] = True

        if flags is not None:
            message_dict['is_mentioned'] = 'mentioned' in flags
        user_event = dict(type='message', message=message_dict,
                          flags=flags)  # type: Dict[str, Any]
        if extra_data is not None:
            user_event.update(extra_data)

        if is_sender:
            local_message_id = event_template.get('local_id', None)
            if local_message_id is not None:
                user_event["local_message_id"] = local_message_id

        if not client.accepts_event(user_event):
            continue

        # The below prevents (Zephyr) mirroring loops.
        if ('mirror' in sending_client
                and sending_client.lower() == client.client_type_name.lower()):
            continue
        client.add_event(user_event)
Example #34
    def build_update(self):
        """Simulate Hamiltonian dynamics using a numerical integrator.
        Correct for the integrator's discretization error using an
        acceptance ratio.

        Notes
        -----
        The updates assume each Empirical random variable is directly
        parameterized by ``tf.Variable``s.
        """
        old_sample = {
            z: tf.gather(qz.params, tf.maximum(self.t - 1, 0))
            for z, qz in six.iteritems(self.latent_vars)
        }
        old_sample = OrderedDict(old_sample)

        # Sample momentum.
        old_r_sample = OrderedDict()
        for z, qz in six.iteritems(self.latent_vars):
            event_shape = qz.event_shape
            normal = Normal(loc=tf.zeros(event_shape),
                            scale=tf.ones(event_shape))
            old_r_sample[z] = normal.sample()

        # Simulate Hamiltonian dynamics.
        new_sample, new_r_sample = leapfrog(old_sample, old_r_sample,
                                            self.step_size, self._log_joint,
                                            self.n_steps)

        # Calculate acceptance ratio.
        ratio = tf.reduce_sum([
            0.5 * tf.reduce_sum(tf.square(r))
            for r in six.itervalues(old_r_sample)
        ])
        ratio -= tf.reduce_sum([
            0.5 * tf.reduce_sum(tf.square(r))
            for r in six.itervalues(new_r_sample)
        ])
        ratio += self._log_joint(new_sample)
        ratio -= self._log_joint(old_sample)

        # Accept or reject sample.
        u = Uniform().sample()
        accept = tf.log(u) < ratio
        sample_values = tf.cond(accept,
                                lambda: list(six.itervalues(new_sample)),
                                lambda: list(six.itervalues(old_sample)))
        if not isinstance(sample_values, list):
            # ``tf.cond`` returns tf.Tensor if output is a list of size 1.
            sample_values = [sample_values]

        sample = {
            z: sample_value
            for z, sample_value in zip(six.iterkeys(new_sample), sample_values)
        }

        # Update Empirical random variables.
        assign_ops = []
        for z, qz in six.iteritems(self.latent_vars):
            variable = qz.get_variables()[0]
            assign_ops.append(tf.scatter_update(variable, self.t, sample[z]))

        # Increment n_accept (if accepted).
        assign_ops.append(self.n_accept.assign_add(tf.where(accept, 1, 0)))
        return tf.group(*assign_ops)
Example #35
    def _generate_examples_from_raw(self, filepath, **kwargs):
        process_label = self.builder_config.process_label
        label_classes = self.builder_config.label_classes
        with open(filepath, 'r', encoding="utf8") as inf:
            if self.builder_config.name in ["cmrc2018", "drcd"]:
                cmrc = json.load(inf)
                for article in cmrc["data"]:
                    title = article.get("title", "").strip()
                    for paragraph in article["paragraphs"]:
                        context = paragraph["context"].strip()
                        for qa in paragraph["qas"]:
                            question = qa["question"].strip()
                            id_ = qa["id"]

                            answer_starts = [
                                answer["answer_start"]
                                for answer in qa["answers"]
                            ]
                            answers = [
                                answer["text"].strip()
                                for answer in qa["answers"]
                            ]

                            # Features currently used are "context", "question", and "answers".
                            # Others are extracted here for the ease of future expansions.
                            yield {
                                "title": title,
                                "context": context,
                                "question": question,
                                "id": id_,
                                "answers": {
                                    "answer_start": answer_starts,
                                    "text": answers,
                                },
                            }
            else:
                for idx, line in enumerate(inf):
                    if self.builder_config.name not in ["cmrc2018"]:
                        row = json.loads(line)
                    else:
                        row = line
                    example = {
                        feat: row[col]
                        for feat, col in six.iteritems(
                            self.builder_config.text_features)
                    }
                    example["idx"] = idx
                    if self.builder_config.label_column is not None:
                        if self.builder_config.label_column in row:
                            label = row[self.builder_config.label_column]
                            # For some tasks, the label is represented as 0 and 1 in the tsv
                            # files and needs to be cast to integer to work with the feature.
                            if label_classes and label not in label_classes:
                                label = int(label) if label else None
                            example["label"] = process_label(label)
                        else:
                            example["label"] = process_label(-1)

                    # Filter out corrupted rows.
                    for value in six.itervalues(example):
                        if value is None:
                            break
                    else:
                        yield example
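The final filter relies on Python's for/else: the else branch runs only when the loop finishes without hitting break, i.e. when no field of the example is None. A minimal sketch with a hypothetical row:

# for/else as a "no corrupted field found" check.
example = {'idx': 0, 'premise': 'some text', 'hypothesis': None}  # hypothetical
for value in example.values():
    if value is None:
        break        # corrupted row: fall through without yielding it
else:
    print(example)   # reached only when every field is present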
Example #36
    def get_callbacks(self):
        opts = self.opts
        callbacks = []

        if opts.val_files:
            callbacks.append(kcbk.EarlyStopping(
                'val_loss' if opts.val_files else 'loss',
                patience=opts.early_stopping,
                verbose=1
            ))

        callbacks.append(kcbk.ModelCheckpoint(
            os.path.join(opts.out_dir, 'model_weights_train.h5'),
            save_best_only=False))
        monitor = 'val_loss' if opts.val_files else 'loss'
        callbacks.append(kcbk.ModelCheckpoint(
            os.path.join(opts.out_dir, 'model_weights_val.h5'),
            monitor=monitor,
            save_best_only=True, verbose=1
        ))

        max_time = int(opts.max_time * 3600) if opts.max_time else None
        callbacks.append(cbk.TrainingStopper(
            max_time=max_time,
            stop_file=opts.stop_file,
            verbose=1
        ))

        def learning_rate_schedule(epoch):
            lr = opts.learning_rate * opts.learning_rate_decay**epoch
            print('Learning rate: %.3g' % lr)
            return lr

        callbacks.append(kcbk.LearningRateScheduler(learning_rate_schedule))

        def save_lc(epoch, epoch_logs, val_epoch_logs):
            logs = {'lc_train.tsv': epoch_logs,
                    'lc_val.tsv': val_epoch_logs}
            for name, logs in six.iteritems(logs):
                if not logs:
                    continue
                logs = pd.DataFrame(logs)
                with open(os.path.join(opts.out_dir, name), 'w') as f:
                    f.write(perf_logs_str(logs))

        metrics = OrderedDict()
        for metric_funs in six.itervalues(self.metrics):
            for metric_fun in metric_funs:
                metrics[metric_fun.__name__] = True
        metrics = ['loss'] + list(metrics.keys())

        self.perf_logger = cbk.PerformanceLogger(
            callbacks=[save_lc],
            metrics=metrics,
            precision=LOG_PRECISION,
            verbose=not opts.no_log_outputs
        )
        callbacks.append(self.perf_logger)

        if not opts.no_tensorboard:
            callbacks.append(kcbk.TensorBoard(
                log_dir=opts.out_dir,
                histogram_freq=0,
                write_graph=True,
                write_images=True
            ))

        return callbacks
Example #37
    def make_all(self, profiler=None, input_storage=None,
                 output_storage=None, storage_map=None,
                 ):
        fgraph = self.fgraph
        order = self.schedule(fgraph)
        no_recycling = self.no_recycling

        input_storage, output_storage, storage_map = link.map_storage(
            fgraph, order, input_storage, output_storage, storage_map)
        compute_map = {}
        for k in storage_map:
            compute_map[k] = [k.owner is None]

        thunks = []

        # Collect Reallocation Info
        compute_map_re = defaultdict(lambda: [0])
        for var in fgraph.inputs:
            compute_map_re[var][0] = 1

        if getattr(fgraph.profile, 'dependencies', None):
            dependencies = getattr(fgraph.profile, 'dependencies')
        else:
            dependencies = self.compute_gc_dependencies(storage_map)

        reallocated_info = calculate_reallocate_info(
            order, fgraph, storage_map, compute_map_re, dependencies)

        for node in order:
            try:
                if self.c_thunks is False:
                    node.op._op_use_c_code = False
                thunks.append(node.op.make_thunk(node,
                                                 storage_map,
                                                 compute_map,
                                                 no_recycling))
                if not hasattr(thunks[-1], 'lazy'):
                    # We don't want every Op maker to have to think about lazy Ops.
                    # So if an Op doesn't specify whether it is lazy, it isn't.
                    # If this attribute were missing, the VM would crash later.
                    thunks[-1].lazy = False
            except Exception as e:
                e.args = ("The following error happened while"
                          " compiling the node", node, "\n") + e.args
                raise
        for node, thunk in zip(order, thunks):
            thunk.inputs = [storage_map[v] for v in node.inputs]
            thunk.outputs = [storage_map[v] for v in node.outputs]

        lazy = self.lazy
        if lazy is None:
            lazy = config.vm.lazy
        if lazy is None:
            lazy = not all([(not th.lazy) for th in thunks])
        if not (lazy or (config.profile and config.profile_memory) or
                self.use_cloop or self.callback):
            for pair in itervalues(reallocated_info):
                storage_map[pair[1]] = storage_map[pair[0]]

        computed, last_user = link.gc_helper(order)
        if self.allow_gc:
            post_thunk_clear = []
            for node in order:
                clear_after_this_thunk = []
                for input in node.inputs:
                    if (input in computed and
                            input not in fgraph.outputs and
                            node == last_user[input] and
                            input not in reallocated_info):
                        clear_after_this_thunk.append(storage_map[input])
                post_thunk_clear.append(clear_after_this_thunk)
        else:
            post_thunk_clear = None

        vm = self.make_vm(order, thunks,
                          input_storage, output_storage, storage_map,
                          post_thunk_clear,
                          computed,
                          compute_map,
                          self.updated_vars,
                          )

        vm.storage_map = storage_map

        return (vm,
                [link.Container(input, storage)
                 for input, storage in zip(fgraph.inputs, input_storage)],
                [link.Container(output, storage, True)
                 for output, storage in zip(fgraph.outputs, output_storage)],
                thunks,
                order)
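The lazy-flag reduction near the end, not all([(not th.lazy) for th in thunks]), is simply "is any thunk lazy?". A tiny standalone check using stand-in objects (not Theano thunks):

# Stand-in thunk objects, used only to illustrate the equivalence.
class Thunk(object):
    def __init__(self, lazy):
        self.lazy = lazy

thunks = [Thunk(False), Thunk(True), Thunk(False)]
assert (not all([(not th.lazy) for th in thunks])) == any(th.lazy for th in thunks)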
Пример #38
0
 def disconnect(self, listener_id):
     self.debug('[app] disconnecting event: [id=%s]', listener_id)
     for event in itervalues(self._listeners):
         event.pop(listener_id, None)
Пример #39
0
    def save(self, project, raw=False):
        from sentry.tasks.post_process import index_event_tags

        project = Project.objects.get_from_cache(id=project)

        data = self.data.copy()

        # First we pull out our top-level (non-data attr) kwargs
        event_id = data.pop('event_id')
        level = data.pop('level')

        culprit = data.pop('transaction', None)
        if not culprit:
            culprit = data.pop('culprit', None)
        logger_name = data.pop('logger', None)
        server_name = data.pop('server_name', None)
        site = data.pop('site', None)
        checksum = data.pop('checksum', None)
        fingerprint = data.pop('fingerprint', None)
        platform = data.pop('platform', None)
        release = data.pop('release', None)
        dist = data.pop('dist', None)
        environment = data.pop('environment', None)

        # unused
        time_spent = data.pop('time_spent', None)
        message = data.pop('message', '')

        if not culprit:
            # if we generate an implicit culprit, let's not call it a
            # transaction
            transaction_name = None
            culprit = generate_culprit(data, platform=platform)
        else:
            transaction_name = culprit

        recorded_timestamp = data.pop('timestamp')
        date = datetime.fromtimestamp(recorded_timestamp)
        date = date.replace(tzinfo=timezone.utc)

        kwargs = {
            'platform': platform,
        }

        event = Event(
            project_id=project.id,
            event_id=event_id,
            data=data,
            time_spent=time_spent,
            datetime=date,
            **kwargs
        )
        event._project_cache = project

        # convert this to a dict to ensure we're only storing one value per key
        # as most parts of Sentry don't currently play well with multiple values
        tags = dict(data.get('tags') or [])
        tags['level'] = LOG_LEVELS[level]
        if logger_name:
            tags['logger'] = logger_name
        if server_name:
            tags['server_name'] = server_name
        if site:
            tags['site'] = site
        if environment:
            tags['environment'] = environment
        if transaction_name:
            tags['transaction'] = transaction_name

        if release:
            # don't allow a conflicting 'release' tag
            if 'release' in tags:
                del tags['release']
            release = Release.get_or_create(
                project=project,
                version=release,
                date_added=date,
            )

            tags['sentry:release'] = release.version

        if dist and release:
            dist = release.add_dist(dist, date)
            tags['sentry:dist'] = dist.name
        else:
            dist = None

        event_user = self._get_event_user(project, data)
        if event_user:
            # don't allow a conflicting 'user' tag
            if 'user' in tags:
                del tags['user']
            tags['sentry:user'] = event_user.tag_value

        # At this point we want to normalize the in_app values in case the
        # clients did not set this appropriately so far.
        normalize_in_app(data)

        for plugin in plugins.for_project(project, version=None):
            added_tags = safe_execute(plugin.get_tags, event, _with_transaction=False)
            if added_tags:
                # plugins should not override user provided tags
                for key, value in added_tags:
                    tags.setdefault(key, value)

        # tags are stored as a tuple
        tags = tags.items()

        # XXX(dcramer): we're relying on mutation of the data object to ensure
        # this propagates into Event
        data['tags'] = tags

        data['fingerprint'] = fingerprint or ['{{ default }}']

        for path, iface in six.iteritems(event.interfaces):
            data['tags'].extend(iface.iter_tags())
            # Get rid of ephemeral interface data
            if iface.ephemeral:
                data.pop(iface.get_path(), None)

        # prioritize fingerprint over checksum as it's likely the client defaulted
        # a checksum whereas the fingerprint was explicit
        if fingerprint:
            hashes = [md5_from_hash(h) for h in get_hashes_from_fingerprint(event, fingerprint)]
        elif checksum:
            hashes = [checksum]
            data['checksum'] = checksum
        else:
            hashes = [md5_from_hash(h) for h in get_hashes_for_event(event)]

        # TODO(dcramer): temp workaround for complexity
        data['message'] = message
        event_type = eventtypes.get(data.get('type', 'default'))(data)
        event_metadata = event_type.get_metadata()
        # TODO(dcramer): temp workaround for complexity
        del data['message']

        data['type'] = event_type.key
        data['metadata'] = event_metadata

        # index components into ``Event.message``
        # See GH-3248
        if event_type.key != 'default':
            if 'sentry.interfaces.Message' in data and \
                    data['sentry.interfaces.Message']['message'] != message:
                message = u'{} {}'.format(
                    message,
                    data['sentry.interfaces.Message']['message'],
                )

        if not message:
            message = ''
        elif not isinstance(message, six.string_types):
            message = force_text(message)

        for value in six.itervalues(event_metadata):
            value_u = force_text(value, errors='replace')
            if value_u not in message:
                message = u'{} {}'.format(message, value_u)

        if culprit and culprit not in message:
            culprit_u = force_text(culprit, errors='replace')
            message = u'{} {}'.format(message, culprit_u)

        message = trim(message.strip(), settings.SENTRY_MAX_MESSAGE_LENGTH)

        event.message = message
        kwargs['message'] = message

        received_timestamp = event.data.get('received') or float(event.datetime.strftime('%s'))
        group_kwargs = kwargs.copy()
        group_kwargs.update(
            {
                'culprit': culprit,
                'logger': logger_name,
                'level': level,
                'last_seen': date,
                'first_seen': date,
                'active_at': date,
                'data': {
                    'last_received': received_timestamp,
                    'type': event_type.key,
                    # we cache the event's metadata on the group to ensure it's
                    # accessible in the stream
                    'metadata': event_metadata,
                },
            }
        )

        if release:
            group_kwargs['first_release'] = release

        try:
            group, is_new, is_regression, is_sample = self._save_aggregate(
                event=event, hashes=hashes, release=release, **group_kwargs
            )
        except HashDiscarded:
            event_discarded.send_robust(
                project=project,
                sender=EventManager,
            )

            metrics.incr(
                'events.discarded',
                skip_internal=True,
                tags={
                    'organization_id': project.organization_id,
                    'platform': platform,
                },
            )
            raise
        else:
            event_saved.send_robust(
                project=project,
                sender=EventManager,
            )

        event.group = group
        # store a reference to the group id to guarantee validation of isolation
        event.data.bind_ref(event)

        # When an event was sampled, the canonical source of truth
        # is the EventMapping table since we aren't going to be writing out an actual
        # Event row. Otherwise, if the Event isn't being sampled, we can safely
        # rely on the Event table itself as the source of truth and ignore
        # EventMapping since it's redundant information.
        if is_sample:
            try:
                with transaction.atomic(using=router.db_for_write(EventMapping)):
                    EventMapping.objects.create(project=project, group=group, event_id=event_id)
            except IntegrityError:
                self.logger.info(
                    'duplicate.found',
                    exc_info=True,
                    extra={
                        'event_uuid': event_id,
                        'project_id': project.id,
                        'group_id': group.id,
                        'model': EventMapping.__name__,
                    }
                )
                return event

        # We now always need to check the Event table for dupes
        # since EventMapping isn't exactly the canonical source of truth.
        if Event.objects.filter(
            project_id=project.id,
            event_id=event_id,
        ).exists():
            self.logger.info(
                'duplicate.found',
                exc_info=True,
                extra={
                    'event_uuid': event_id,
                    'project_id': project.id,
                    'group_id': group.id,
                    'model': Event.__name__,
                }
            )
            return event

        environment = Environment.get_or_create(
            project=project,
            name=environment,
        )

        if release:
            ReleaseEnvironment.get_or_create(
                project=project,
                release=release,
                environment=environment,
                datetime=date,
            )

            grouprelease = GroupRelease.get_or_create(
                group=group,
                release=release,
                environment=environment,
                datetime=date,
            )

        counters = [
            (tsdb.models.group, group.id),
            (tsdb.models.project, project.id),
        ]

        if release:
            counters.append((tsdb.models.release, release.id))

        tsdb.incr_multi(counters, timestamp=event.datetime, environment_id=environment.id)

        frequencies = [
            # (tsdb.models.frequent_projects_by_organization, {
            #     project.organization_id: {
            #         project.id: 1,
            #     },
            # }),
            # (tsdb.models.frequent_issues_by_project, {
            #     project.id: {
            #         group.id: 1,
            #     },
            # })
            (tsdb.models.frequent_environments_by_group, {
                group.id: {
                    environment.id: 1,
                },
            })
        ]

        if release:
            frequencies.append(
                (tsdb.models.frequent_releases_by_group, {
                    group.id: {
                        grouprelease.id: 1,
                    },
                })
            )

        tsdb.record_frequency_multi(frequencies, timestamp=event.datetime)

        UserReport.objects.filter(
            project=project,
            event_id=event_id,
        ).update(group=group)

        # save the event unless its been sampled
        if not is_sample:
            try:
                with transaction.atomic(using=router.db_for_write(Event)):
                    event.save()
            except IntegrityError:
                self.logger.info(
                    'duplicate.found',
                    exc_info=True,
                    extra={
                        'event_uuid': event_id,
                        'project_id': project.id,
                        'group_id': group.id,
                        'model': Event.__name__,
                    }
                )
                return event

            index_event_tags.delay(
                organization_id=project.organization_id,
                project_id=project.id,
                group_id=group.id,
                environment_id=environment.id,
                event_id=event.id,
                tags=tags,
            )

        if event_user:
            tsdb.record_multi(
                (
                    (tsdb.models.users_affected_by_group, group.id, (event_user.tag_value, )),
                    (tsdb.models.users_affected_by_project, project.id, (event_user.tag_value, )),
                ),
                timestamp=event.datetime,
                environment_id=environment.id,
            )

        if is_new and release:
            buffer.incr(
                ReleaseProject, {'new_groups': 1}, {
                    'release_id': release.id,
                    'project_id': project.id,
                }
            )

        safe_execute(Group.objects.add_tags, group, environment, tags, _with_transaction=False)

        if not raw:
            if not project.first_event:
                project.update(first_event=date)
                first_event_received.send(project=project, group=group, sender=Project)

            post_process_group.delay(
                group=group,
                event=event,
                is_new=is_new,
                is_sample=is_sample,
                is_regression=is_regression,
            )
        else:
            self.logger.info('post_process.skip.raw_event', extra={'event_id': event.id})

        # TODO: move this to the queue
        if is_regression and not raw:
            regression_signal.send_robust(sender=Group, instance=group)

        metrics.timing(
            'events.latency',
            received_timestamp - recorded_timestamp,
            tags={
                'project_id': project.id,
            },
        )

        return event
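One detail worth isolating from the long method above is the tag merge: plugin-provided tags are added with setdefault, so they never override user-provided tags. A minimal sketch with made-up values:

tags = {'level': 'error', 'logger': 'app.views'}              # user-provided
plugin_tags = [('logger', 'plugin'), ('browser', 'Firefox')]  # from a plugin
for key, value in plugin_tags:
    tags.setdefault(key, value)
print(tags)  # 'logger' keeps the user value, 'browser' is added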
Пример #40
0
 def packages(self):
     for package_versions in six.itervalues(self._packages_by_name):
         for package in six.itervalues(package_versions):
             yield package
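A self-contained sketch of the same two-level iteration, with a made-up registry standing in for self._packages_by_name:

import six

packages_by_name = {
    'requests': {'2.25.1': 'requests-2.25.1', '2.26.0': 'requests-2.26.0'},
    'six': {'1.16.0': 'six-1.16.0'},
}

def packages():
    # Flatten name -> version -> package lazily, exactly as above.
    for package_versions in six.itervalues(packages_by_name):
        for package in six.itervalues(package_versions):
            yield package

print(sorted(packages()))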
Пример #41
0
    def persist_and_notify_client_event(
        self,
        requester,
        event,
        context,
        ratelimit=True,
        extra_users=[],
    ):
        """Called when we have fully built the event, have already
        calculated the push actions for the event, and checked auth.

        This should only be run on master.
        """
        assert not self.config.worker_app

        if ratelimit:
            yield self.base_handler.ratelimit(requester)

        yield self.base_handler.maybe_kick_guest_users(event, context)

        if event.type == EventTypes.CanonicalAlias:
            # Check the alias is actually valid (at this time at least)
            room_alias_str = event.content.get("alias", None)
            if room_alias_str:
                room_alias = RoomAlias.from_string(room_alias_str)
                directory_handler = self.hs.get_handlers().directory_handler
                mapping = yield directory_handler.get_association(room_alias)

                if mapping["room_id"] != event.room_id:
                    raise SynapseError(
                        400, "Room alias %s does not point to the room" %
                        (room_alias_str, ))

        federation_handler = self.hs.get_handlers().federation_handler

        if event.type == EventTypes.Member:
            if event.content["membership"] == Membership.INVITE:

                def is_inviter_member_event(e):
                    return (e.type == EventTypes.Member
                            and e.sender == event.sender)

                state_to_include_ids = [
                    e_id for k, e_id in iteritems(context.current_state_ids)
                    if k[0] in self.hs.config.room_invite_state_types or k == (
                        EventTypes.Member, event.sender)
                ]

                state_to_include = yield self.store.get_events(
                    state_to_include_ids)

                event.unsigned["invite_room_state"] = [{
                    "type": e.type,
                    "state_key": e.state_key,
                    "content": e.content,
                    "sender": e.sender,
                } for e in itervalues(state_to_include)]

                invitee = UserID.from_string(event.state_key)
                if not self.hs.is_mine(invitee):
                    # TODO: Can we add signature from remote server in a nicer
                    # way? If we have been invited by a remote server, we need
                    # to get them to sign the event.

                    returned_invite = yield federation_handler.send_invite(
                        invitee.domain,
                        event,
                    )

                    event.unsigned.pop("room_state", None)

                    # TODO: Make sure the signatures actually are correct.
                    event.signatures.update(returned_invite.signatures)

        if event.type == EventTypes.Redaction:
            auth_events_ids = yield self.auth.compute_auth_events(
                event,
                context.prev_state_ids,
                for_verification=True,
            )
            auth_events = yield self.store.get_events(auth_events_ids)
            auth_events = {(e.type, e.state_key): e
                           for e in auth_events.values()}
            if self.auth.check_redaction(event, auth_events=auth_events):
                original_event = yield self.store.get_event(
                    event.redacts,
                    check_redacted=False,
                    get_prev_content=False,
                    allow_rejected=False,
                    allow_none=False)
                if event.user_id != original_event.user_id:
                    raise AuthError(
                        403, "You don't have permission to redact events")

        if event.type == EventTypes.Create and context.prev_state_ids:
            raise AuthError(
                403,
                "Changing the room create event is forbidden",
            )

        (event_stream_id,
         max_stream_id) = yield self.store.persist_event(event,
                                                         context=context)

        # this intentionally does not yield: we don't care about the result
        # and don't need to wait for it.
        run_in_background(self.pusher_pool.on_new_notifications,
                          event_stream_id, max_stream_id)

        def _notify():
            try:
                self.notifier.on_new_room_event(event,
                                                event_stream_id,
                                                max_stream_id,
                                                extra_users=extra_users)
            except Exception:
                logger.exception("Error notifying about new room event")

        run_in_background(_notify)

        if event.type == EventTypes.Message:
            # We don't want to block sending messages on any presence code. This
            # matters as sometimes presence code can take a while.
            run_in_background(self._bump_active_time, requester.user)
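The invite branch above filters room state keyed by (event type, state key); a standalone sketch of that comprehension with made-up identifiers and an illustrative config set:

from six import iteritems

room_invite_state_types = {'m.room.name', 'm.room.avatar'}  # illustrative config
sender = '@alice:example.org'
current_state_ids = {
    ('m.room.name', ''): '$name_event',
    ('m.room.member', sender): '$alice_member',
    ('m.room.member', '@bob:example.org'): '$bob_member',
}
state_to_include_ids = [
    e_id for k, e_id in iteritems(current_state_ids)
    if k[0] in room_invite_state_types or k == ('m.room.member', sender)
]
print(sorted(state_to_include_ids))  # ['$alice_member', '$name_event']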
Пример #42
0
 def stop_all_listeners(self):
     for listener in six.itervalues(self.exchange_topic_listener_map):
         listener.stop()
         listener.wait()
Пример #43
0
 def __del__(self):
     for f in six.itervalues(self.revert_ws_cap_files):
         grass.try_remove(f)
Пример #44
0
    def Populate(self, data):
        """Populate the list"""
        self.itemDataMap = dict()
        self.itemIdMap = list()

        if self.shape:
            items = self.frame.GetModel().GetItems(objType=ModelAction)
            if isinstance(self.shape, ModelCondition):
                if self.GetLabel() == "ElseBlockList":
                    shapeItems = map(lambda x: x.GetId(),
                                     self.shape.GetItems(items)["else"])
                else:
                    shapeItems = map(lambda x: x.GetId(),
                                     self.shape.GetItems(items)["if"])
            else:
                shapeItems = map(lambda x: x.GetId(),
                                 self.shape.GetItems(items))
        else:
            shapeItems = list()

        i = 0
        if len(self.columns) == 2:  # ItemCheckList
            checked = list()
        for action in data:
            if isinstance(action, ModelData) or action == self.shape:
                continue

            self.itemIdMap.append(action.GetId())

            if len(self.columns) == 2:
                self.itemDataMap[i] = [action.GetLabel(), action.GetLog()]
                aId = action.GetBlockId()
                if action.GetId() in shapeItems:
                    checked.append(aId)
                else:
                    checked.append(None)
            else:
                bId = action.GetBlockId()
                if not bId:
                    bId = _("No")
                else:
                    bId = _("Yes")
                options = action.GetParameterizedParams()
                params = []
                for f in options["flags"]:
                    params.append("-{0}".format(f["name"]))
                for p in options["params"]:
                    params.append(p["name"])

                self.itemDataMap[i] = [
                    action.GetLabel(),
                    bId,
                    ",".join(params),
                    action.GetLog(),
                ]

            i += 1

        self.itemCount = len(self.itemDataMap)
        self.DeleteAllItems()
        i = 0
        if len(self.columns) == 2:
            for name, desc in six.itervalues(self.itemDataMap):
                index = self.InsertItem(i, str(i))
                self.SetItem(index, 0, name)
                self.SetItem(index, 1, desc)
                self.SetItemData(index, i)
                if checked[i]:
                    self.CheckItem(index, True)
                i += 1
        else:
            for name, inloop, param, desc in six.itervalues(self.itemDataMap):
                index = self.InsertItem(i, str(i))
                self.SetItem(index, 0, name)
                self.SetItem(index, 1, inloop)
                self.SetItem(index, 2, param)
                self.SetItem(index, 3, desc)
                self.SetItemData(index, i)
                i += 1
Пример #45
0
    def _getCapFiles(self):
        ws_cap_files = {}
        for v in six.itervalues(self.ws_panels):
            ws_cap_files[v['panel'].GetWebService()] = v['panel'].GetCapFile()

        return ws_cap_files
Пример #46
0
    def __init__(self, activity):
        super(ReleaseActivityEmail, self).__init__(activity)
        self.organization = self.project.organization
        self.user_id_team_lookup = None
        self.email_list = {}
        self.user_ids = {}

        try:
            self.deploy = Deploy.objects.get(id=activity.data['deploy_id'])
        except Deploy.DoesNotExist:
            self.deploy = None

        try:
            self.release = Release.objects.get(
                organization_id=self.project.organization_id,
                version=activity.data['version'],
            )
        except Release.DoesNotExist:
            self.release = None
            self.repos = []
            self.projects = []
        else:
            self.projects = list(self.release.projects.all())
            self.commit_list = [
                rc.commit
                for rc in ReleaseCommit.objects.filter(
                    release=self.release,
                ).select_related('commit', 'commit__author')
            ]
            repos = {
                r_id: {
                    'name': r_name,
                    'commits': [],
                }
                for r_id, r_name in Repository.objects.filter(
                    organization_id=self.project.organization_id,
                    id__in={c.repository_id
                            for c in self.commit_list}).values_list(
                                'id', 'name')
            }

            self.email_list = set(
                [c.author.email for c in self.commit_list if c.author])
            if self.email_list:
                users = {
                    ue.email: ue.user
                    for ue in UserEmail.objects.filter(
                        in_iexact('email', self.email_list),
                        is_verified=True,
                        user__sentry_orgmember_set__organization=self.organization,
                    ).select_related('user')
                }
                self.user_ids = {u.id for u in six.itervalues(users)}

            else:
                users = {}

            for commit in self.commit_list:
                repos[commit.repository_id]['commits'].append(
                    (commit, users.get(commit.author.email)
                     if commit.author_id else None))

            self.repos = repos.values()

            self.environment = Environment.objects.get(
                id=self.deploy.environment_id).name or 'Default Environment'

            self.group_counts_by_project = dict(
                Group.objects.filter(
                    project__in=self.projects,
                    id__in=GroupLink.objects.filter(
                        linked_type=GroupLink.LinkedType.commit,
                        linked_id__in=ReleaseCommit.objects.filter(
                            release=self.release, ).values_list('commit_id',
                                                                flat=True),
                    ).values_list('group_id', flat=True),
                ).values_list('project').annotate(num_groups=Count('id')))
Пример #47
0
    def transform(self, X):
        """Calculate the kernel matrix, between given and fitted dataset.

        Parameters
        ----------
        X : iterable
            Each element must be an iterable with at most three features and at
            least one. The first, which is obligatory, is a valid graph structure
            (adjacency matrix or edge_dictionary), while the second is
            node_labels and the third edge_labels (that fit the given graph
            format). If None, the kernel matrix is calculated upon fit data.
            The test samples.

        Returns
        -------
        K : numpy array, shape = [n_targets, n_input_graphs]
            The kernel matrix computed between all pairs of the transform
            (target) graphs and the fitted graphs.

        """
        self._method_calling = 3
        # Check is fit had been called
        check_is_fitted(self, ['X', '_nx', '_inv_labels'])

        # Input validation and parsing
        if X is None:
            raise ValueError('transform input cannot be None')
        else:
            if not isinstance(X, collections.Iterable):
                raise ValueError('input must be an iterable\n')
            else:
                nx = 0
                distinct_values = set()
                Gs_ed, L = dict(), dict()
                for (i, x) in enumerate(iter(X)):
                    is_iter = isinstance(x, collections.Iterable)
                    if is_iter:
                        x = list(x)
                    if is_iter and len(x) in [0, 2, 3]:
                        if len(x) == 0:
                            warnings.warn('Ignoring empty element on index: ' +
                                          str(i))
                            continue

                        elif len(x) in [2, 3]:
                            x = Graph(x[0], x[1], {}, self._graph_format)
                    elif type(x) is Graph:
                        x.desired_format("dictionary")
                    else:
                        raise ValueError('each element of X must have at ' +
                                         'least one and at most 3 elements\n')
                    Gs_ed[nx] = x.get_edge_dictionary()
                    L[nx] = x.get_labels(purpose="dictionary")

                    # Hold all the distinct values
                    distinct_values |= set(v for v in itervalues(L[nx])
                                           if v not in self._inv_labels[0])
                    nx += 1
                if nx == 0:
                    raise ValueError('parsed input is empty')

        nl = len(self._inv_labels[0])
        WL_labels_inverse = {
            dv: idx
            for (idx, dv) in enumerate(sorted(list(distinct_values)), nl)
        }

        def generate_graphs(WL_labels_inverse, nl):
            # calculate the kernel matrix for the 0 iteration
            new_graphs = list()
            for j in range(nx):
                new_labels = dict()
                for (k, v) in iteritems(L[j]):
                    if v in self._inv_labels[0]:
                        new_labels[k] = self._inv_labels[0][v]
                    else:
                        new_labels[k] = WL_labels_inverse[v]
                L[j] = new_labels
                # produce the new graphs
                new_graphs.append([Gs_ed[j], new_labels])
            yield new_graphs

            for i in range(1, self._n_iter):
                new_graphs = list()
                L_temp, label_set = dict(), set()
                nl += len(self._inv_labels[i])
                for j in range(nx):
                    # Find unique labels and sort them for both graphs
                    # Keep for each node the temporary
                    L_temp[j] = dict()
                    for v in Gs_ed[j].keys():
                        credential = str(L[j][v]) + "," + \
                            str(sorted([L[j][n] for n in Gs_ed[j][v].keys()]))
                        L_temp[j][v] = credential
                        if credential not in self._inv_labels[i]:
                            label_set.add(credential)

                # Calculate the new label_set
                WL_labels_inverse = dict()
                if len(label_set) > 0:
                    for dv in sorted(list(label_set)):
                        idx = len(WL_labels_inverse) + nl
                        WL_labels_inverse[dv] = idx

                # Recalculate labels
                new_graphs = list()
                for j in range(nx):
                    new_labels = dict()
                    for (k, v) in iteritems(L_temp[j]):
                        if v in self._inv_labels[i]:
                            new_labels[k] = self._inv_labels[i][v]
                        else:
                            new_labels[k] = WL_labels_inverse[v]
                    L[j] = new_labels
                    # Create the new graphs with the new labels.
                    new_graphs.append([Gs_ed[j], new_labels])
                yield new_graphs

        if self._parallel is None:
            # Calculate the kernel matrix without parallelization
            K = np.sum(
                (self.X[i].transform(g)
                 for (i,
                      g) in enumerate(generate_graphs(WL_labels_inverse, nl))),
                axis=0)

        else:
            # Calculate the kernel matrix with parallelization
            K = np.sum(self._parallel(
                joblib.delayed(etransform)(self.X[i], g)
                for (i,
                     g) in enumerate(generate_graphs(WL_labels_inverse, nl))),
                       axis=0)

        self._is_transformed = True
        if self.normalize:
            X_diag, Y_diag = self.diagonal()
            old_settings = np.seterr(divide='ignore')
            K = np.nan_to_num(np.divide(K, np.sqrt(np.outer(Y_diag, X_diag))))
            np.seterr(**old_settings)

        return K
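The core of the generator above is one Weisfeiler-Lehman relabelling step: each node's new label is its old label joined with the sorted labels of its neighbours, then compressed to an integer id. A minimal sketch on a toy graph, independent of the kernel class:

edges = {0: {1: 1, 2: 1}, 1: {0: 1}, 2: {0: 1}}  # edge dictionary of a toy graph
labels = {0: 'a', 1: 'b', 2: 'b'}

credentials = {
    v: str(labels[v]) + "," + str(sorted(labels[n] for n in edges[v]))
    for v in edges
}
new_ids = {cred: i for i, cred in enumerate(sorted(set(credentials.values())))}
new_labels = {v: new_ids[credentials[v]] for v in edges}
print(new_labels)  # nodes 1 and 2 share a label, node 0 gets its own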
Пример #48
0
 def OnOutputLayerName(self, event):
     """Update layer name to web service panel
     """
     lname = event.GetString()
     for v in six.itervalues(self.ws_panels):
         v['panel'].SetOutputLayerName(lname.strip())
Пример #49
0
def NormalizeNumberFields(pb):
  """Normalizes types and precisions of number fields in a protocol buffer.

  Due to subtleties in the python protocol buffer implementation, it is possible
  for values to have different types and precision depending on whether they
  were set and retrieved directly or deserialized from a protobuf. This function
  normalizes integer values to ints and longs based on width, 32-bit floats to
  five digits of precision to account for python always storing them as 64-bit,
  and ensures doubles are floating point for when they're set to integers.

  Modifies pb in place. Recurses into nested objects.

  Args:
    pb: proto2 message.

  Returns:
    the given pb, modified in place.
  """
  for desc, values in pb.ListFields():
    is_repeated = True
    if desc.label is not descriptor.FieldDescriptor.LABEL_REPEATED:
      is_repeated = False
      values = [values]

    normalized_values = None

    # We force 32-bit values to int and 64-bit values to long to make
    # alternate implementations where the distinction is more significant
    # (e.g. the C++ implementation) simpler.
    if desc.type in (descriptor.FieldDescriptor.TYPE_INT64,
                     descriptor.FieldDescriptor.TYPE_UINT64,
                     descriptor.FieldDescriptor.TYPE_SINT64):
      normalized_values = [int(x) for x in values]
    elif desc.type in (descriptor.FieldDescriptor.TYPE_INT32,
                       descriptor.FieldDescriptor.TYPE_UINT32,
                       descriptor.FieldDescriptor.TYPE_SINT32,
                       descriptor.FieldDescriptor.TYPE_ENUM):
      normalized_values = [int(x) for x in values]
    elif desc.type == descriptor.FieldDescriptor.TYPE_FLOAT:
      normalized_values = [round(x, 6) for x in values]
    elif desc.type == descriptor.FieldDescriptor.TYPE_DOUBLE:
      normalized_values = [round(float(x), 7) for x in values]

    if normalized_values is not None:
      if is_repeated:
        pb.ClearField(desc.name)
        getattr(pb, desc.name).extend(normalized_values)
      else:
        setattr(pb, desc.name, normalized_values[0])

    if (desc.type == descriptor.FieldDescriptor.TYPE_MESSAGE or
        desc.type == descriptor.FieldDescriptor.TYPE_GROUP):
      if (desc.type == descriptor.FieldDescriptor.TYPE_MESSAGE and
          desc.message_type.has_options and
          desc.message_type.GetOptions().map_entry):
        # This is a map, only recurse if the values have a message type.
        if (desc.message_type.fields_by_number[2].type ==
            descriptor.FieldDescriptor.TYPE_MESSAGE):
          for v in six.itervalues(values):
            NormalizeNumberFields(v)
      else:
        for v in values:
          # recursive step
          NormalizeNumberFields(v)

  return pb
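The float branch above exists because a value stored as a 32-bit float and read back into Python's 64-bit float differs from the original past roughly seven significant digits; rounding both sides makes them comparable. A small standalone illustration:

import struct

original = 0.1
as_float32 = struct.unpack('f', struct.pack('f', original))[0]
print(as_float32)                                   # 0.10000000149011612
print(round(as_float32, 6) == round(original, 6))   # True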
Пример #50
0
def lint(repo_root, paths, output_format, ignore_glob):
    error_count = defaultdict(int)
    last = None

    with open(os.path.join(repo_root, "lint.whitelist")) as f:
        whitelist, ignored_files = parse_whitelist(f)

    if ignore_glob:
        ignored_files.add(ignore_glob)

    output_errors = {
        "json": output_errors_json,
        "markdown": output_errors_markdown,
        "normal": output_errors_text
    }[output_format]

    def process_errors(errors):
        """
        Filters and prints the errors, and updates the ``error_count`` object.

        :param errors: a list of error tuples (error type, message, path, line number)
        :returns: ``None`` if there were no errors, or
                  a tuple of the error type and the path otherwise
        """

        errors = filter_whitelist_errors(whitelist, errors)

        if not errors:
            return None

        output_errors(errors)
        for error_type, error, path, line in errors:
            error_count[error_type] += 1

        return (errors[-1][0], path)

    for path in paths[:]:
        abs_path = os.path.join(repo_root, path)
        if not os.path.exists(abs_path):
            paths.remove(path)
            continue

        if any(
                fnmatch.fnmatch(path, file_match)
                for file_match in ignored_files):
            paths.remove(path)
            continue

        errors = check_path(repo_root, path)
        last = process_errors(errors) or last

        if not os.path.isdir(abs_path):
            with open(abs_path, 'rb') as f:
                errors = check_file_contents(repo_root, path, f)
                last = process_errors(errors) or last

    errors = check_all_paths(repo_root, paths)
    last = process_errors(errors) or last

    if output_format in ("normal", "markdown"):
        output_error_count(error_count)
        if error_count:
            for line in (ERROR_MSG %
                         (last[0], last[1], last[0], last[1])).split("\n"):
                logger.info(line)
    return sum(itervalues(error_count))
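The return value above is simply the total number of lint errors across all error types; a sketch with made-up error types:

from collections import defaultdict
from six import itervalues

error_count = defaultdict(int)
for error_type in ['TRAILING WHITESPACE', 'INDENT TABS', 'TRAILING WHITESPACE']:
    error_count[error_type] += 1
print(sum(itervalues(error_count)))  # 3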
Пример #51
0
    def __compute_aux(self, application_graph, machine_graph, app_vertex,
                      machine_vertex, graph_mapper, routing_info):
        """ Compute all of the relevant pre-synaptic population information,\
            as well as the key of the current vertex.

        :param application_graph: \
            the entire, highest level, graph of the network to be simulated
        :type application_graph: :py:class:`ApplicationGraph`
        :param machine_graph: \
            the entire, lowest level, graph of the network to be simulated
        :type machine_graph: :py:class:`MachineGraph`
        :param app_vertex: \
            the highest level object of the post-synaptic population
        :type app_vertex: :py:class:`ApplicationVertex`
        :param machine_vertex: \
            the lowest level object of the post-synaptic population
        :type machine_vertex: :py:class:`MachineVertex`
        :param graph_mapper: for looking up application vertices
        :type graph_mapper: :py:class:`GraphMapper`
        :param routing_info: All of the routing information on the network
        :type routing_info: :py:class:`RoutingInfo`
        :return: pop info, routing key for current vertex, number of pre pops
        :rtype: tuple
        """
        # Compute the max number of presynaptic subpopulations
        population_to_subpopulation_information = collections.OrderedDict()

        # Can figure out the presynaptic subvertices (machine vertices)
        # for the current machine vertex
        # by calling graph_mapper.get_machine_edges for the relevant
        # application edges (i.e. the structural ones)
        # This allows me to find the partition (?) which then plugged
        # into routing_info can give me the keys
        presynaptic_machine_vertices = []
        structural_application_edges = []
        structural_machine_edges = []
        max_subpartitions = 0

        for app_edge in application_graph.get_edges_ending_at_vertex(
                app_vertex):
            if isinstance(app_edge, ProjectionApplicationEdge):
                for synapse_info in app_edge.synapse_information:
                    if synapse_info.synapse_dynamics is self.__weight_dynamics:
                        structural_application_edges.append(app_edge)
                        population_to_subpopulation_information[
                            app_edge.pre_vertex] = []
                        break

        no_pre_populations = len(structural_application_edges)
        # For each structurally plastic APPLICATION edge find the
        # corresponding machine edges
        for machine_edge in machine_graph.get_edges_ending_at_vertex(
                machine_vertex):
            if isinstance(machine_edge, ProjectionMachineEdge):
                for synapse_info in machine_edge._synapse_information:
                    if synapse_info.synapse_dynamics is self.__weight_dynamics:
                        structural_machine_edges.append(machine_edge)
                        # For each structurally plastic MACHINE edge find the
                        # corresponding presynaptic subvertices
                        presynaptic_machine_vertices.append(
                            machine_edge.pre_vertex)

        # For each presynaptic subvertex figure out the partition (?)
        # to retrieve the key and n_atoms
        for vertex in presynaptic_machine_vertices:
            population_to_subpopulation_information[
                graph_mapper.get_application_vertex(vertex)].append(
                    (routing_info.get_routing_info_from_pre_vertex(
                        vertex, constants.SPIKE_PARTITION_ID).first_key,
                     graph_mapper.get_slice(vertex)[2],
                     graph_mapper.get_slice(vertex)[0],
                     routing_info.get_routing_info_from_pre_vertex(
                         vertex, constants.SPIKE_PARTITION_ID).first_mask))

        for subpopulation_list in itervalues(
                population_to_subpopulation_information):
            max_subpartitions = np.maximum(max_subpartitions,
                                           len(subpopulation_list))

        # Current machine vertex key (for future checks)
        current_key = routing_info.get_routing_info_from_pre_vertex(
            machine_vertex, constants.SPIKE_PARTITION_ID)

        if current_key is not None:
            current_key = current_key.first_key
        else:
            current_key = -1
        return (population_to_subpopulation_information, current_key,
                no_pre_populations)
Пример #52
0
    def parse_input(self, X):
        """Parse input for weisfeiler lehman.

        Parameters
        ----------
        X : iterable
            For the input to pass the test, we must have:
            Each element must be an iterable with at most three features and at
            least one. The first, which is obligatory, is a valid graph structure
            (adjacency matrix or edge_dictionary), while the second is
            node_labels and the third edge_labels (that correspond to the given
            graph format). A valid input also consists of graph type objects.

        Returns
        -------
        base_graph_kernel : object
        Returns base_graph_kernel.

        """
        if self._method_calling not in [1, 2]:
            raise ValueError('method call must be called either from fit ' +
                             'or fit-transform')
        elif hasattr(self, '_X_diag'):
            # Clean _X_diag value
            delattr(self, '_X_diag')

        # Input validation and parsing
        if not isinstance(X, collections.Iterable):
            raise TypeError('input must be an iterable\n')
        else:
            nx = 0
            Gs_ed, L, distinct_values, extras = dict(), dict(), set(), dict()
            for (idx, x) in enumerate(iter(X)):
                is_iter = isinstance(x, collections.Iterable)
                if is_iter:
                    x = list(x)
                if is_iter and (len(x) == 0 or len(x) >= 2):
                    if len(x) == 0:
                        warnings.warn('Ignoring empty element on index: ' +
                                      str(idx))
                        continue
                    else:
                        if len(x) > 2:
                            extra = tuple()
                            if len(x) > 3:
                                extra = tuple(x[3:])
                            x = Graph(x[0],
                                      x[1],
                                      x[2],
                                      graph_format=self._graph_format)
                            extra = (x.get_labels(purpose=self._graph_format,
                                                  label_type="edge",
                                                  return_none=True), ) + extra
                        else:
                            x = Graph(x[0],
                                      x[1], {},
                                      graph_format=self._graph_format)
                            extra = tuple()

                elif type(x) is Graph:
                    x.desired_format(self._graph_format)
                    el = x.get_labels(purpose=self._graph_format,
                                      label_type="edge",
                                      return_none=True)
                    if el is None:
                        extra = tuple()
                    else:
                        extra = (el, )

                else:
                    raise TypeError('each element of X must be either a ' +
                                    'graph object or a list with at least ' +
                                    'a graph like object and node labels ' +
                                    'dict \n')
                Gs_ed[nx] = x.get_edge_dictionary()
                L[nx] = x.get_labels(purpose="dictionary")
                extras[nx] = extra
                distinct_values |= set(itervalues(L[nx]))
                nx += 1
            if nx == 0:
                raise ValueError('parsed input is empty')

        # Save the number of "fitted" graphs.
        self._nx = nx

        # get all the distinct values of current labels
        WL_labels_inverse = dict()

        # assign a number to each label
        label_count = 0
        for dv in sorted(list(distinct_values)):
            WL_labels_inverse[dv] = label_count
            label_count += 1

        # Initialize an inverse dictionary of labels for all iterations
        self._inv_labels = dict()
        self._inv_labels[0] = WL_labels_inverse

        def generate_graphs(label_count, WL_labels_inverse):
            new_graphs = list()
            for j in range(nx):
                new_labels = dict()
                for k in L[j].keys():
                    new_labels[k] = WL_labels_inverse[L[j][k]]
                L[j] = new_labels
                # add new labels
                new_graphs.append((Gs_ed[j], new_labels) + extras[j])
            yield new_graphs

            for i in range(1, self._n_iter):
                label_set, WL_labels_inverse, L_temp = set(), dict(), dict()
                for j in range(nx):
                    # Find unique labels and sort
                    # them for both graphs
                    # Keep for each node the temporary
                    L_temp[j] = dict()
                    for v in Gs_ed[j].keys():
                        credential = str(L[j][v]) + "," + \
                            str(sorted([L[j][n] for n in Gs_ed[j][v].keys()]))
                        L_temp[j][v] = credential
                        label_set.add(credential)

                label_list = sorted(list(label_set))
                for dv in label_list:
                    WL_labels_inverse[dv] = label_count
                    label_count += 1

                # Recalculate labels
                new_graphs = list()
                for j in range(nx):
                    new_labels = dict()
                    for k in L_temp[j].keys():
                        new_labels[k] = WL_labels_inverse[L_temp[j][k]]
                    L[j] = new_labels
                    # relabel
                    new_graphs.append((Gs_ed[j], new_labels) + extras[j])
                self._inv_labels[i] = WL_labels_inverse
                yield new_graphs

        base_graph_kernel = {
            i: self._base_graph_kernel(**self._params)
            for i in range(self._n_iter)
        }
        if self._parallel is None:
            if self._method_calling == 1:
                for (i, g) in enumerate(
                        generate_graphs(label_count, WL_labels_inverse)):
                    base_graph_kernel[i].fit(g)
            elif self._method_calling == 2:
                K = np.sum(
                    (base_graph_kernel[i].fit_transform(g)
                     for (i, g) in enumerate(
                         generate_graphs(label_count, WL_labels_inverse))),
                    axis=0)

        else:
            if self._method_calling == 1:
                self._parallel(
                    joblib.delayed(efit)(base_graph_kernel[i], g)
                    for (i, g) in enumerate(
                        generate_graphs(label_count, WL_labels_inverse)))
            elif self._method_calling == 2:
                K = np.sum(self._parallel(
                    joblib.delayed(efit_transform)(base_graph_kernel[i], g)
                    for (i, g) in enumerate(
                        generate_graphs(label_count, WL_labels_inverse))),
                           axis=0)

        if self._method_calling == 1:
            return base_graph_kernel
        elif self._method_calling == 2:
            return K, base_graph_kernel
Пример #53
0
def paint_reconstruction(data, graph, reconstruction):
    """Set the color of the points from the color of the tracks."""
    for k, point in iteritems(reconstruction.points):
        point.color = six.next(six.itervalues(graph[k]))['feature_color']
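six.next(six.itervalues(d)) pulls an arbitrary (first-iterated) value out of a dict without materialising a list; a sketch with made-up data standing in for the track graph:

import six

observations = {'image1.jpg': {'feature_color': (255, 0, 0)},
                'image2.jpg': {'feature_color': (0, 255, 0)}}
first = six.next(six.itervalues(observations))
print(first['feature_color'])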
Пример #54
0
    def __write_presynaptic_information(self, spec, application_graph,
                                        machine_graph, app_vertex, post_slice,
                                        machine_vertex, graph_mapper,
                                        routing_info):
        """ All cores which do synaptic rewiring have information about all\
            the relevant pre-synaptic populations.

        :param spec: the data spec
        :type spec: spec
        :param application_graph: \
            the entire, highest level, graph of the network to be simulated
        :type application_graph: :py:class:`ApplicationGraph`
        :param machine_graph: \
            the entire, lowest level, graph of the network to be simulated
        :type machine_graph: :py:class:`MachineGraph`
        :param app_vertex: \
            the highest level object of the post-synaptic population
        :type app_vertex: :py:class:`ApplicationVertex`
        :param post_slice: \
            the slice of the app vertex corresponding to this machine vertex
        :type post_slice: :py:class:`Slice`
        :param machine_vertex: \
            the lowest level object of the post-synaptic population
        :type machine_vertex: :py:class:`MachineVertex`
        :param graph_mapper: for looking up application vertices
        :type graph_mapper: :py:class:`GraphMapper`
        :param routing_info: All of the routing information on the network
        :type routing_info: :py:class:`RoutingInfo`
        :return: None
        :rtype: None
        """
        # Compute all the auxiliary stuff
        pop_to_subpop_info, current_key, no_prepops = self.__compute_aux(
            application_graph, machine_graph, app_vertex, machine_vertex,
            graph_mapper, routing_info)

        # Table header
        spec.write_value(data=no_prepops)

        total_words_written = 0
        for subpopulation_list in itervalues(pop_to_subpop_info):
            # Population header(s)
            # Number of subpopulations
            spec.write_value(data=len(subpopulation_list),
                             data_type=DataType.UINT16)

            # Custom header for commands / controls
            # currently, controls = True if the subvertex (on the current core)
            # is part of this population
            controls = (current_key in np.asarray(subpopulation_list)[:, 0]
                        if len(subpopulation_list) > 0 else False)
            spec.write_value(data=int(controls), data_type=DataType.UINT16)

            spec.write_value(data=np.sum(np.asarray(subpopulation_list)[:, 1])
                             if len(subpopulation_list) > 0 else 0)
            words_written = 2

            # Ensure the following values are written in ascending
            # order of low_atom (implicit)
            dt = np.dtype([('key', 'uint'), ('n_atoms', 'uint'),
                           ('lo_atom', 'uint'), ('mask', 'uint')])
            structured_array = np.array(subpopulation_list, dtype=dt)
            sorted_info_list = np.sort(structured_array, order='lo_atom')
            for subpopulation_info in sorted_info_list:
                # Subpopulation information (i.e. key and number of atoms)
                # Key
                spec.write_value(data=subpopulation_info[0])
                # n_atoms
                spec.write_value(data=subpopulation_info[1])
                # lo_atom
                spec.write_value(data=subpopulation_info[2])
                # mask
                spec.write_value(data=subpopulation_info[3])
                words_written += 4

            total_words_written += words_written

        # Now we write the probability tables for formation
        # (feedforward and lateral)
        spec.write_value(data=self.__ff_distance_probabilities.size)
        spec.write_array(
            self.__ff_distance_probabilities.view(dtype=np.uint16),
            data_type=DataType.UINT16)
        total_words_written += self.__ff_distance_probabilities.size // 2 + 1
        spec.write_value(data=self.__lat_distance_probabilities.size,
                         data_type=DataType.INT32)
        spec.write_array(
            self.__lat_distance_probabilities.view(dtype=np.uint16),
            data_type=DataType.UINT16)
        total_words_written += self.__lat_distance_probabilities.size // 2 + 1

        # Write post to pre table (inverse of synaptic matrix)
        self.__write_post_to_pre_table(spec, app_vertex, post_slice,
                                       machine_vertex, graph_mapper,
                                       pop_to_subpop_info, total_words_written)
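
A minimal, self-contained sketch (with made-up keys, masks and atom counts) of the lo_atom ordering used above: the subpopulation records are loaded into a NumPy structured array and sorted on the 'lo_atom' field before being written out.

import numpy as np

# Hypothetical subpopulation records: (key, n_atoms, lo_atom, mask)
subpopulation_list = [(0x200, 16, 16, 0xFFFFFFE0),
                      (0x100, 16, 0, 0xFFFFFFE0)]
dt = np.dtype([('key', 'uint'), ('n_atoms', 'uint'),
               ('lo_atom', 'uint'), ('mask', 'uint')])
records = np.array(subpopulation_list, dtype=dt)
for rec in np.sort(records, order='lo_atom'):
    # Written in ascending lo_atom order: key, n_atoms, lo_atom, mask
    print(rec['key'], rec['n_atoms'], rec['lo_atom'], rec['mask'])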
Example #55
0
    def get_children(self):
        'Return the Artists contained by the table'
        return list(six.itervalues(self._cells))
Example #56
0
    def iterflatvalues(self):
        ''' Return an iterator of flattened values '''
        return six.itervalues(self.flattened())
Example #57
0
    def children(self):
        return itertools.chain(six.itervalues(self.params), self.ports())
Example #58
0
    def scale(self, xscale, yscale):
        """ Scale column widths by xscale and row heights by yscale. """
        for c in six.itervalues(self._cells):
            c.set_width(c.get_width() * xscale)
            c.set_height(c.get_height() * yscale)
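
The same pattern works on any mapping of cell-like objects; a toy sketch (with a hypothetical Cell class, not the matplotlib one) of what scale does to every stored cell:

class Cell:
    # Hypothetical stand-in for a table cell with a width and a height
    def __init__(self, width, height):
        self.width = width
        self.height = height

cells = {(0, 0): Cell(1.0, 0.5), (0, 1): Cell(2.0, 0.5)}
for c in cells.values():
    c.width *= 1.5   # xscale
    c.height *= 2.0  # yscale
print([(c.width, c.height) for c in cells.values()])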
Example #59
0
def process(players, track):
    """
    Evaluate players actions and update player state and track

    players: dict of player.Player objects
    track: track.Track object
    """

    # First handle right and left actions, since they may change in_lane
    # status, used for resolving collisions.

    for player in six.itervalues(players):
        if player.action == actions.LEFT:
            if player.x > 0:
                player.x -= 1
        elif player.action == actions.RIGHT:
            if player.x < config.matrix_width - 1:
                player.x += 1

    # Now handle obstacles, preferring players in their own lane.

    sorted_players = sorted(six.itervalues(players),
                            key=lambda p: 0 if p.in_lane() else 1)
    positions = set()

    for player in sorted_players:
        player.score += config.score_move_forward
        obstacle = track.get(player.x, player.y)
        if obstacle == obstacles.CRACK:
            if player.action != actions.JUMP:
                track.clear(player.x, player.y)
                player.y += 1
                player.score += config.score_move_backward * 2
            else:
                player.score += config.score_jump
        elif obstacle in (obstacles.TRASH, obstacles.BIKE, obstacles.BARRIER):
            if player.action not in (actions.LEFT, actions.RIGHT):
                track.clear(player.x, player.y)
                player.y += 1
                player.score += config.score_move_backward * 2
        elif obstacle == obstacles.WATER:
            if player.action != actions.BRAKE:
                track.clear(player.x, player.y)
                player.y += 1
                player.score += config.score_move_backward * 2
            else:
                player.score += config.score_brake
        elif obstacle == obstacles.PENGUIN:
            if player.action == actions.PICKUP:
                track.clear(player.x, player.y)
                player.score += config.score_move_forward

        # Here we could end the game when the player goes out of the
        # track bounds. For now, just clamp the player's position to
        # the track.
        player.y = min(config.matrix_height - 1, max(2, player.y))

        # Finally forget action
        player.action = actions.NONE

        # Fix up collisions

        if (player.x, player.y) in positions:
            log.info('player %s collision at %d,%d', player.name, player.x,
                     player.y)
            player.score += config.score_move_backward
            if player.y < config.matrix_height - 1:
                player.y += 1
            elif player.x > 0:
                player.x -= 1
            elif player.x < config.matrix_width - 1:
                player.x += 1

        log.info(
            'process_actions: name=%s lane=%d pos=%d,%d score=%d '
            'response_time=%0.6f', player.name, player.lane, player.x,
            player.y, player.score, player.response_time)

        positions.add((player.x, player.y))
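
A minimal sketch (with a toy stand-in for player.Player, not the real class) of the ordering used above: players that are still in their own lane are processed first, so they win ties when obstacles and collisions are resolved.

from dataclasses import dataclass

@dataclass
class ToyPlayer:
    name: str
    lane: int
    x: int

    def in_lane(self):
        # Toy rule: a player is in lane when its x matches its lane
        return self.x == self.lane

players = {'a': ToyPlayer('a', lane=1, x=2),
           'b': ToyPlayer('b', lane=0, x=0)}
ordered = sorted(players.values(), key=lambda p: 0 if p.in_lane() else 1)
print([p.name for p in ordered])  # 'b' (in lane) is handled before 'a'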
Example #60
0
    def _get_ips(self, server):
        for network in six.itervalues(server['addresses']):
            for ip in network:
                yield ip
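
A small, self-contained sketch (with a made-up server dict; the real structure comes from whatever API populated server['addresses']) of the shape this generator expects: a mapping of network name to a list of addresses, flattened into a single stream of IPs.

import six

# Hypothetical server record: 'addresses' maps network name -> list of IPs
server = {'addresses': {'private': ['10.0.0.3'],
                        'public': ['203.0.113.7']}}

def get_ips(server):
    # Module-level variant of _get_ips above, for illustration only
    for network in six.itervalues(server['addresses']):
        for ip in network:
            yield ip

print(sorted(get_ips(server)))  # ['10.0.0.3', '203.0.113.7']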