Code example #1
    def _assign_general_arguments_to_config(
            self, general_arguments: Dict,
            attribute_name_to_config_name_mapping: Dict) -> None:
        """Assigns the values from general definitions to values within specific classes if the specific definition
        doesn't exist

        Args:
            general_arguments:
            attribute_name_to_config_name_mapping:

        Returns:
            None
        """
        for arg, value in general_arguments.items():
            config_name = attribute_name_to_config_name_mapping[arg]
            if config_name in self._arguments:
                # Specific arguments supersede general arguments
                if arg not in self._arguments[config_name]:
                    self._arguments[config_name][arg] = value
                else:
                    warn(
                        f"Ignoring general argument `{arg}` for config `{config_name}`\n"
                        f"Specific argument value preceded general arguments.",
                        SyntaxWarning,
                    )
            else:
                self._arguments[config_name] = {arg: value}
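A quick sketch of the precedence rule this method implements (config names invented for illustration): a value already present in the specific config wins, and general values only fill the gaps.

# Hypothetical configs illustrating the precedence rule above.
arguments = {"TrainConfig": {"lr": 0.01}}              # specific value already set
general_arguments = {"lr": 0.1, "seed": 42}
mapping = {"lr": "TrainConfig", "seed": "TrainConfig"}

for arg, value in general_arguments.items():
    config_name = mapping[arg]
    arguments.setdefault(config_name, {}).setdefault(arg, value)

print(arguments)  # {'TrainConfig': {'lr': 0.01, 'seed': 42}} -- the specific lr wins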
Code example #2
File: _importlib.py Project: TheVinhLuong102/pytorch
def _calc___package__(globals):
    """Calculate what __package__ should be.

    __package__ is not guaranteed to be defined or could be set to None
    to represent that its proper value is unknown.

    """
    package = globals.get("__package__")
    spec = globals.get("__spec__")
    if package is not None:
        if spec is not None and package != spec.parent:
            _warnings.warn(
                "__package__ != __spec__.parent "
                f"({package!r} != {spec.parent!r})",
                ImportWarning,
                stacklevel=3,
            )
        return package
    elif spec is not None:
        return spec.parent
    else:
        _warnings.warn(
            "can't resolve package from __spec__ or __package__, "
            "falling back on __name__ and __path__",
            ImportWarning,
            stacklevel=3,
        )
        package = globals["__name__"]
        if "__path__" not in globals:
            package = package.rpartition(".")[0]
    return package
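The fallback branch is the subtle part: for a plain module the parent package is everything before the last dot in __name__, while a package's __init__ (which has __path__) keeps its own name. A minimal sketch with made-up globals dicts, assuming neither __package__ nor __spec__ is set:

examples = {
    "submodule": {"__name__": "pkg.mod"},              # -> 'pkg'
    "top-level": {"__name__": "mod"},                  # -> '' (no parent package)
    "package":   {"__name__": "pkg", "__path__": []},  # -> 'pkg' (__init__ keeps its name)
}
for label, g in examples.items():
    package = g["__name__"]
    if "__path__" not in g:
        package = package.rpartition(".")[0]
    print(label, repr(package))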
Code example #3
File: ui.py Project: villalonreina/dipy
    def __build_icons(self, icon_fnames):
        """ Converts file names to vtkImageDataGeometryFilters.

        A pre-processing step to prevent re-read of file names during every state change.

        Parameters
        ----------
        icon_fnames : dict
            {iconname: filename, iconname: filename, ...}

        Returns
        -------
        icons : dict
            A dictionary of the corresponding vtkImageData objects.

        """
        icons = {}
        for icon_name, icon_fname in icon_fnames.items():
            if icon_fname.split(".")[-1] not in ["png", "PNG"]:
                error_msg = "A specified icon file is not in the PNG format. SKIPPING."
                warn(Warning(error_msg))
            else:
                png = vtk.vtkPNGReader()
                png.SetFileName(icon_fname)
                png.Update()
                icons[icon_name] = png.GetOutput()

        return icons
Code example #4
def result_fetch_numpy(result: monetdbe_result) -> Mapping[str, np.ndarray]:
    result_dict: Dict[str, np.ndarray] = {}
    for c in range(result.ncols):
        rcol = result_fetch(result, c)
        name = make_string(rcol.name)
        type_info = monet_c_type_map[rcol.type]

        np_mask = np.ma.nomask  # type: ignore[attr-defined]
        # for non-float/int types, first build a numpy object array, then convert it to the right numpy type
        if type_info.numpy_type.type == np.object_:
            values = [extract(rcol, r) for r in range(result.nrows)]
            np_col: np.ndarray = np.array(values)
            np_mask = np.array([v is None for v in values])
            if rcol.type == lib.monetdbe_str:
                np_col = np_col.astype(str)
            elif rcol.type == lib.monetdbe_date:
                np_col = np_col.astype('datetime64[D]')  # type: ignore
            elif rcol.type == lib.monetdbe_time:
                warn(
                    "Not converting column with type time since it has no proper numpy equivalent"
                )
            elif rcol.type == lib.monetdbe_timestamp:
                np_col = np_col.astype('datetime64[ms]')  # type: ignore
        else:
            buffer_size = result.nrows * type_info.numpy_type.itemsize  # type: ignore
            c_buffer = ffi.buffer(rcol.data, buffer_size)
            np_col = np.frombuffer(c_buffer,
                                   dtype=type_info.numpy_type)  # type: ignore
            np_mask = np_col == get_null_value(rcol)

        masked: np.ndarray = np.ma.masked_array(np_col, mask=np_mask)

        result_dict[name] = masked
    return result_dict
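The masked-array pattern above is what lets NULLs survive the trip into numpy. A small self-contained sketch (the NULL sentinel value here is invented for illustration):

import numpy as np

values = np.array([1, -2147483648, 3])  # pretend -2147483648 is the integer NULL sentinel
masked = np.ma.masked_array(values, mask=(values == -2147483648))
print(masked)         # [1 -- 3]
print(masked.mean())  # 2.0 -- the NULL entry is excluded from aggregates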
Code example #5
    def test_empregador(self):
        empregador = self.relogio.get_empregador()
        self.assertTrue(type(empregador) == Empregador)
        if not TESTAR_ALTERACAO_EMPREGADOR:
            warn(
                '\nTestes de alteracao de empregador ignorados. Altere a variavel TESTAR_ALTERACAO_EMPREGADOR para executar estes testes.\n'
            )
        else:
            alterado_empregador = Empregador()
            alterado_empregador.razao_social = u'Teste'
            alterado_empregador.local = u'Teste local'
            alterado_empregador.documento = u'00.000.000/0000-00'
            alterado_empregador.tipo_documento = 1
            alterado_empregador.cei = u'00.000.00000/00'
            with self.assertRaises(RelogioPontoException):
                self.relogio.set_empregador(alterado_empregador)

            alterado_empregador.documento = u'26.347.567/0001-22'
            self.relogio.set_empregador(alterado_empregador)
            empregador_salvo = self.relogio.get_empregador()

            self.assertEqual(empregador_salvo.razao_social,
                             alterado_empregador.razao_social)
            self.assertEqual(empregador_salvo.local, alterado_empregador.local)
            self.assertEqual(empregador_salvo.documento,
                             alterado_empregador.documento)
            self.assertEqual(empregador_salvo.tipo_documento,
                             alterado_empregador.tipo_documento)
            self.assertEqual(empregador_salvo.cei, alterado_empregador.cei)
Code example #6
    def result_fetch_numpy(monetdbe_result: ffi.CData) -> Mapping[str, np.ma.MaskedArray]:

        result = {}
        for c in range(monetdbe_result.ncols):
            rcol = Frontend.result_fetch(monetdbe_result, c)
            name = make_string(rcol.name)
            cast_string, cast_function, numpy_type, monetdbe_null = monet_numpy_map[rcol.type]

            # for non-float/int types, first build a numpy object array, then convert it to the right numpy type
            if numpy_type.type == np.object_:
                np_col: np.ndarray = np.array([extract(rcol, r) for r in range(monetdbe_result.nrows)])
                if rcol.type == lib.monetdbe_str:
                    np_col = np_col.astype(str)
                elif rcol.type == lib.monetdbe_date:
                    np_col = np_col.astype('datetime64[D]')  # type: ignore
                elif rcol.type == lib.monetdbe_time:
                    warn("Not converting column with type column since no proper numpy equivalent")
                elif rcol.type == lib.monetdbe_timestamp:
                    np_col = np_col.astype('datetime64[ns]')  # type: ignore
            else:
                buffer_size = monetdbe_result.nrows * numpy_type.itemsize  # type: ignore
                c_buffer = ffi.buffer(rcol.data, buffer_size)
                np_col = np.frombuffer(c_buffer, dtype=numpy_type)  # type: ignore

            if monetdbe_null:
                mask = np_col == monetdbe_null
            else:
                mask = np.ma.nomask  # type: ignore

            masked = np.ma.masked_array(np_col, mask=mask)

            result[name] = masked
        return result
Code example #7
 def forward(self, x):
     if self.transform_input:
         x = x.clone()
         x[:, 0] = x[:, 0] * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
         x[:, 1] = x[:, 1] * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
         x[:, 2] = x[:, 2] * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
     else: warn("Input isn't transformed")
     x = self.Conv2d_1a_3x3(x)
     x = self.Conv2d_2a_3x3(x)
     x = self.Conv2d_2b_3x3(x)
     x = F.max_pool2d(x, kernel_size=3, stride=2)
     x = self.Conv2d_3b_1x1(x)
     x = self.Conv2d_4a_3x3(x)
     x = F.max_pool2d(x, kernel_size=3, stride=2)
     x = self.Mixed_5b(x)
     x = self.Mixed_5c(x)
     x = self.Mixed_5d(x)
     x = self.Mixed_6a(x)
     x = self.Mixed_6b(x)
     x = self.Mixed_6c(x)
     x = self.Mixed_6d(x)
     x = self.Mixed_6e(x)
     x = self.Mixed_7a(x)
     x = self.Mixed_7b(x)
     x_for_attn = x = self.Mixed_7c(x)
     # 8 x 8 x 2048
     x = F.avg_pool2d(x, kernel_size=8)
     # 1 x 1 x 2048
     x_for_capt = x = x.view(x.size(0), -1)
     # 2048
     x = self.fc(x)
     # 1000 (num_classes)
     return x_for_attn, x_for_capt, x
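The three per-channel lines in the transform_input branch are the usual torchvision re-normalization trick: they map an input normalized with ImageNet statistics, (raw - mean) / std, onto the (raw - 0.5) / 0.5 convention Inception was trained with. A quick numeric check for channel 0 (a sketch, not part of the model code):

import torch

raw = torch.rand(1000)
imagenet = (raw - 0.485) / 0.229  # ImageNet-normalized channel 0
converted = imagenet * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
print(torch.allclose(converted, (raw - 0.5) / 0.5, atol=1e-6))  # True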
Code example #8
    def test_colaboradores(self):
        global TESTAR_INSERCAO_EXCLUSAO
        if not TESTAR_INSERCAO_EXCLUSAO:
            warn(
                '\nTestes de insercao e exclusao ignorados. Altere a variavel TESTAR_INSERCAO_EXCLUSAO para executar estes testes.\n'
            )
        else:
            if len(self.relogio.colaboradores.filter(matricula=112233)) > 0:
                self.t_apagarcolaborador()

            colaborador = Colaborador(self.relogio)
            colaborador.nome = u"TESTCASE Á"
            colaborador.pis = "5555.55555.55/5"
            colaborador.matriculas = [112233, 445566]
            colaborador.verificar_digital = True
            colaborador.save()
            self.assertNotEqual(colaborador.id, None)

            lista = self.relogio.colaboradores.filter(matricula=112233)
            self.assertEqual(len(lista), 1)

            colaborador_salvo = lista[0]
            self.assertNotEqual(colaborador_salvo.id, None)

            self.assertEqual(colaborador_salvo.nome, colaborador.nome)
            self.assertEqual(colaborador_salvo.pis, colaborador.pis)
            self.assertEqual(colaborador_salvo.matriculas,
                             colaborador.matriculas)
            self.assertEqual(colaborador_salvo.verificar_digital,
                             colaborador.verificar_digital)

            lista = self.relogio.colaboradores.all()
            self.assertTrue(len(lista) >= 1)
            self.t_apagarcolaborador()
Code example #9
def check_domain_fit(points: np.ndarray):
    """ checks whether a given array of points is properly formatted and spans the standard domain [-1,1]^m

    :param points: ndarray of shape (m, k) with m being the dimensionality and k the number of points
    :raises ValueError or TypeError when any of the criteria are not satisfied
    """
    # check first if the sample points are valid
    check_type_n_values(points)
    # check whether any points lie outside of the domain
    sample_max = np.max(points, axis=1)
    if not np.allclose(np.maximum(sample_max, 1.0), 1.0):
        raise ValueError(DOMAIN_WARN_MSG2 + f'violated max: {sample_max}')
    sample_min = np.min(points, axis=1)
    if not np.allclose(np.minimum(sample_min, -1.0), -1.0):
        raise ValueError(DOMAIN_WARN_MSG2 + f'violated min: {sample_min}')
    check_shape(points, dimensionality=2)
    m, nr_of_points = points.shape  # shape is (m, k) per the docstring above
    if nr_of_points == 0:
        raise ValueError('at least one point must be given')
    if nr_of_points == 1:
        return  # one point cannot span the domain
    if DEBUG:
        # check whether the points span the whole domain
        max_grid_val = np.max(sample_max)
        if not np.isclose(max_grid_val, 1.0):
            warn(
                f'the highest encountered value in the given points is {max_grid_val} (expected 1.0). '
                + DOMAIN_WARN_MSG)
        min_grid_val = np.min(sample_min)
        if not np.isclose(min_grid_val, -1.0):
            warn(
                f'the smallest encountered value in the given points is {min_grid_val} (expected -1.0). '
                + DOMAIN_WARN_MSG)
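For reference, a hedged sketch of the max-side check with invented arrays, using the (m, k) layout from the docstring (one column per point):

import numpy as np

inside = np.array([[-1.0, 0.0, 1.0],
                   [-1.0, 0.5, 1.0]])   # stays within [-1, 1]^2
outside = np.array([[-1.0, 0.0, 1.5],
                    [-1.0, 0.5, 1.0]])  # 1.5 > 1.0 would raise ValueError

for points in (inside, outside):
    sample_max = np.max(points, axis=1)
    print(np.allclose(np.maximum(sample_max, 1.0), 1.0))  # True, then False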
Code example #10
    def exportar(self, *args, **kwargs):
        relogio_banco = self.cleaned_data['relogio']
        relogio_rep = relogio_banco.get_rep()
        relogio_rep.conectar()
        salvos = []
        erros = []
        for colaborador in models.Colaborador.objects.all():
            colaborador_rep = relogioponto.base.Colaborador(relogio_rep)
            colaborador_rep.nome = colaborador.nome
            colaborador_rep.pis = colaborador.pis
            for m in colaborador.matriculas.values('numero'):
                colaborador_rep.matriculas.append(m['numero'])
            try:
                self._salvar_em_rep(colaborador_rep)
                salvos.append(colaborador)
            except HTTPError as httperr:
                warn(str(httperr))
                time.sleep(1)
                try:
                    relogio_rep.conectar()
                    self._salvar_em_rep(colaborador_rep)
                    salvos.append(colaborador)
                except Exception as ex:
                    erros.append("{0} - {1}".format(colaborador, ex))
            except Exception as e:
                erros.append("{0} - {1}".format(colaborador, e))

        return salvos, erros
Code example #11
from typing import Callable, Optional

def create_signature(data: pd.DataFrame,
                     idx: Optional[np.ndarray] = None,
                     summary_method: Optional[Callable] = None) -> dict:
    """
    Given a dataframe of FCS events, generate a signature of those events; that is, a summary of the
    dataframes columns using the given summary method.

    Parameters
    ----------
    data: Pandas.DataFrame
    idx: Numpy.array (optional)
        Array of indexes to be included in this operation; if None, the whole dataframe is used
    summary_method: callable (optional)
        Function used to summarise columns; default is numpy.median
    Returns
    -------
    dict
        Dictionary representation of signature; {column name: summary statistic}
    """
    if data.shape[0] == 0:
        warn("Cannot generate signature for empty dataframe")
        return {}
    data = pd.DataFrame(scaler(data=data.values, scale_method="norm", return_scaler=False),
                        columns=data.columns,
                        index=data.index)
    if idx is None:
        idx = data.index.values
    # ToDo this should be more robust
    for x in ["Time", "time"]:
        if x in data.columns:
            data.drop(columns=x, inplace=True)
    summary_method = summary_method or np.median
    signature = data.loc[idx].apply(summary_method)
    return dict(zip(signature.index, signature.values))
Code example #12
File: zipimport.py Project: za/cpython
    def find_loader(self, fullname, path=None):
        """find_loader(fullname, path=None) -> self, str or None.

        Search for a module specified by 'fullname'. 'fullname' must be the
        fully qualified (dotted) module name. It returns the zipimporter
        instance itself if the module was found, a string containing the
        full path name if it's possibly a portion of a namespace package,
        or None otherwise. The optional 'path' argument is ignored -- it's
        there for compatibility with the importer protocol.

        Deprecated since Python 3.10. Use find_spec() instead.
        """
        _warnings.warn(
            "zipimporter.find_loader() is deprecated and slated for "
            "removal in Python 3.12; use find_spec() instead",
            DeprecationWarning)
        mi = _get_module_info(self, fullname)
        if mi is not None:
            # This is a module or package.
            return self, []

        # Not a module or regular package. See if this is a directory, and
        # therefore possibly a portion of a namespace package.

        # We're only interested in the last path component of fullname
        # earlier components are recorded in self.prefix.
        modpath = _get_module_path(self, fullname)
        if _is_dir(self, modpath):
            # This is possibly a portion of a namespace
            # package. Return the string representing its path,
            # without a trailing separator.
            return None, [f'{self.archive}{path_sep}{modpath}']

        return None, []
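Since the docstring itself points to find_spec(), here is a minimal sketch of the replacement call (the archive and module names are invented):

import zipimport

importer = zipimport.zipimporter("plugins.zip")  # assumes plugins.zip exists
spec = importer.find_spec("plugin_module")       # None when the module is absent
if spec is not None:
    print(spec.origin)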
Code example #13
    def find_node(self,
                  ia_op_exec_context: InputAgnosticOperationExecutionContext,
                  tensor_metas: List[TensorMeta],
                  tm_comparators: List[TensorMetaComparator]) -> NNCFNode:
        op_exec_context = OperationExecutionContext(
            ia_op_exec_context.operator_name,
            ia_op_exec_context.scope_in_model,
            ia_op_exec_context.call_order,
            tensor_metas,
            tm_comparators=tm_comparators)
        nncf_node_candidates = []
        node_candidates = self._find_nodes_with_matching_context_and_inputs(
            op_exec_context)
        if not node_candidates:
            node_candidates = self._find_nodes_with_matching_context_among_inputless(
                op_exec_context)

        for nx_node in node_candidates.values():
            nncf_node_candidates.append(
                NNCFNode(nx_node[NNCFGraph.ID_NODE_ATTR], op_exec_context))
        result = None
        if nncf_node_candidates:
            if len(nncf_node_candidates) > 1:
                warn("More than one node matches input")
            result = nncf_node_candidates[0]

        return result
Code example #14
 def register(self, name, val, ignore_if_exists=False):
     if name in self.data:
         if ignore_if_exists:
             return
         warn(f'"{name}" already defined in "{self.context}"',
              OverwrittenWarning)
     self.data[name] = val
     return self
Code example #15
 def get_temp_file(self):
     if not self.__temporary_power_spectrum_file.closed:
         return self.__temporary_power_spectrum_file
     else:
         warnings.warn(
             "The power spectrum file has been closed and removed.",
             TemporaryFileClosedWarning)
         return None
Code example #16
 def nextState(self, frontValue, backValue):
     if frontValue == 1 and backValue == 0:
         return self
     # NOTE: this condition duplicates the branch above, so it never matches as written
     elif frontValue == 1 and backValue == 0:
         return WallCheckingStates_AllWall(self.dirCount)
     else:
         _warnings.warn('In State WALL: Invalid state transition {0} {1}'
                        .format(frontValue, backValue))
         return self
Code example #17
File: _caching.py Project: janluke/iccas-python
    def get(self, relative_path, force_download: bool = False) -> Path:
        """
        Ensures the latest version of a remote file is available locally in the
        cache, downloading it only if needed.
        If no internet connection is available (or the server is unreachable),
        the file available in the cache is returned with a warning; if the file
        is not in the cache, a ``ConnectionError`` is raised.

        Args:
            relative_path:
            force_download:

        Returns:
            full local path of the file
        """
        path = self.get_path_of(relative_path)
        full_json = _read_json(self.metadata_path)
        entries = full_json.get(self.folder_url, {})
        if relative_path not in entries:
            entries[relative_path] = {}

        if (
            not force_download
            and path.exists()
            and entries[relative_path].get("creationTime") == os.path.getmtime(path)
        ):
            current_etag = entries[relative_path].get("ETag", "")
        else:
            current_etag = ""

        try:
            url = urljoin(self.folder_url, relative_path)
            resp = _download_if_modified(url, path, current_etag)
            if resp:
                entries[relative_path] = {
                    "ETag": resp.headers.get("ETag"),
                    "creationTime": os.path.getmtime(path),
                }
                full_json[self.folder_url] = entries
                _write_json(self.metadata_path, full_json)
        except requests.exceptions.ConnectionError as exc:
            if force_download:
                raise ConnectionError(
                    f"set force_download=False to use the local cached version "
                    f"of the file (if available).\nError details: {exc}"
                )
            if path.exists():
                warn(
                    f"Could not check if the remote file was modified: {exc}\n"
                    f"However, a file is available in the local cache."
                )
            else:
                raise ConnectionError(
                    "unable to download the dataset and no dataset was "
                    "previously downloaded.\nDetails: {}".format(exc)
                )
        return path
Code example #18
File: carambola2.py Project: ks156/IoTPy
    def __enter__(self):
        self._caramiot = None
        try:
            self._caramiot_file = open("/dev/caramiot", "r")
            self._caramiot = Caramiot(self._caramiot_file)
        except IOError:
            warn("Could not load caramiot driver. You will not be able to use some of the Carambola2 features.")

        return self
Code example #19
    def find_node(self,
                  ia_op_exec_context: InputAgnosticOperationExecutionContext,
                  tensor_metas: List[TensorMeta],
                  tm_comparators: List[TensorMetaComparator]) -> NNCFNode:
        nncf_node_candidates = []
        iter_scopes = self._get_iteration_scopes(
            ia_op_exec_context.scope_in_model)
        # compare meta information about the first input nodes during matching; during the iteration some
        # nodes may change their number of inputs, e.g. when hidden outputs are concatenated
        input_matcher = FirstInputsMatcher()
        op_exec_context = OperationExecutionContext(
            ia_op_exec_context.operator_name,
            ia_op_exec_context.scope_in_model,
            ia_op_exec_context.call_order,
            tensor_metas,
            input_matcher=input_matcher,
            tm_comparators=tm_comparators)
        node_candidates = self._find_nodes_with_matching_context_and_inputs(
            op_exec_context)
        if not node_candidates:
            op_exec_context = OperationExecutionContext(
                ia_op_exec_context.operator_name,
                ia_op_exec_context.scope_in_model,
                ia_op_exec_context.call_order,
                tensor_metas,
                tm_comparators=tm_comparators)
            node_candidates = self._find_nodes_with_matching_context_among_inputless(
                op_exec_context)
            if not node_candidates and iter_scopes:
                # ignore information about node creator and index of input
                comparators = tm_comparators + [
                    ShapeOnlyTensorMetaComparator()
                ]
                op_exec_context = OperationExecutionContext(
                    ia_op_exec_context.operator_name,
                    ia_op_exec_context.scope_in_model,
                    ia_op_exec_context.call_order,
                    tensor_metas,
                    tm_comparators=comparators)
                # match with starting points of iteration
                iter_nodes = self._match_first_iteration_nodes(
                    op_exec_context, iter_scopes)
                for node in iter_nodes.values():
                    nncf_node_candidates.append(node)

        for nx_node in node_candidates.values():
            nncf_node_candidates.append(
                NNCFNode(nx_node[NNCFGraph.ID_NODE_ATTR], op_exec_context))

        result = None
        if nncf_node_candidates:
            if len(nncf_node_candidates) > 1:
                warn("More than one node matches input")
            result = nncf_node_candidates[0]

        return result
Code example #20
File: java.py Project: msimacek/graalpython
 def is_java_package(name):
     try:
         package = type("java.lang.Package")
         return any(p.getName().startswith(name) for p in package.getPackages())
     except KeyError:
         if sys.flags.verbose:
             from _warnings import warn
             warn("Host lookup allowed, but java.lang.Package not available. Importing from Java cannot work.")
         return False
Code example #21
def warnpy3k(message, category=None, stacklevel=1):
    """Issue a deprecation warning for Python 3.x related changes.

    Warnings are omitted unless Python is started with the -3 option.
    """
    if sys.py3kwarning:
        if category is None:
            category = DeprecationWarning
        warn(message, category, stacklevel + 1)
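Usage is a single call; under Python 2 it only emits when the interpreter was started with the -3 flag, which sets sys.py3kwarning (message invented for illustration):

# Python 2 only: a no-op unless run as `python -3 script.py`.
warnpy3k("dict.has_key() is deprecated; use the `in` operator instead")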
Code example #22
 def array_representation(self) -> ARRAY:
     if self._array_representation is None:
         warn(
             'building a full transformation matrix from a barycentric transformation. this is inefficient.'
         )
         # NOTE: 'self' must not be passed to the merging functions (@staticmethod)
         full_array = self.__class__.merging_fct(*self.transformation_data)
         self._array_representation = full_array
     return self._array_representation
Code example #23
File: carambola2.py Project: praba230890/IoTPy
    def __enter__(self):
        self._caramiot = None
        try:
            self._caramiot_file = open("/dev/caramiot", "r")
            self._caramiot = Caramiot(self._caramiot_file)
        except IOError:
            warn(
                "Could not load caramiot driver. You will not be able to use some of the Carambola2 features."
            )

        return self
Code example #24
 def knn_distance_callback(ch, method, properties, body):
     print(' [x] Received %r from %r' %
           (method.routing_key, knn_queue))
     warn(('Need to do the work in the main thread. '
           'Therefore, acked the message before processing it '
           'and temporarily disconnected from RabbitMQ. '
           'This will block all other activity.'))
     channel.basic_ack(delivery_tag=method.delivery_tag)
     connection.close()
     image = KNeighbors(body).distance_plot()
     Producer(method.routing_key, rabbitmq_host,
              rabbitmq_port).send_knn_image(image)
Code example #25
def setup_mpi_threads():
    thead_level = setup_mpi_threads.thead_level
    if thead_level is None:
        thead_level = MPI.Query_thread()
        setup_mpi_threads.thead_level = thead_level
        if thead_level < MPI.THREAD_MULTIPLE:  # pragma: no cover
            serialized.lock = threading.Lock()
    if thead_level < MPI.THREAD_SERIALIZED:  # pragma: no cover
        from _warnings import warn
        warn(
            "The level of thread support in MPI "
            "should be at least MPI_THREAD_SERIALIZED", RuntimeWarning, 2)
Code example #26
    def test_hora(self):
        if not TESTAR_ALTERACAO_DATAHORA:
            warn(
                '\nTestes de alteracao de data e hora ignorados. Altere a variavel TESTAR_ALTERACAO_DATAHORA para executar estes testes.\n'
            )

        else:
            data_relogio = self.relogio.data_hora
            self.assertEqual(type(data_relogio), datetime)
            agora = datetime.now()
            self.relogio.data_hora = agora
            data_relogio = self.relogio.data_hora
            delta = abs(data_relogio - agora)
            self.assertTrue(delta.total_seconds() <= 10)
Code example #27
File: collision.py Project: gutioliveira/FGAme
def get_collision(A, B, collision_class=Collision):
    """
    Return a collision object between A and B or None if there is no
    overlap.

    This is a multi-dispatch function. Derived classes must implement each
    collision pair or else objects will not collide.
    """

    tA = type(A).__name__
    tB = type(B).__name__
    warn('no collision defined for: (%s, %s)' % (tA, tB))

    return None
Code example #28
File: utils.py Project: parkus/muscles
def rebin(spec, newbins):
    """Rebin the spectrum, dealing with gaps in newbins if appropriate."""

    # get overlapping bins, warn if some don't overlap
    _, overnew = argoverlap(spec, newbins, method='tight')
    Nkeep = np.sum(overnew)
    if Nkeep == 0:
        warn('All newbins fall outside of spec. Returning empty spectrum.')
        return spec[0:0]
    if Nkeep < len(newbins):
        warn('Some newbins fall outside of spec and will be discarded.')
    newbins = newbins[overnew]

    # split at gaps and rebin. no bins covering a gap in spec should remain in
    # newbins, so there shouldn't be a need to split newgaps
    splitbins = gapsplit(newbins)
    if len(splitbins) > 1:
        specs = []
        for bins in splitbins:
            trim = keepranges(spec, bins[0, 0], bins[-1, 1], ends='loose')
            specs.append(rebin(trim, bins))
        return vstack(specs)

    # trim down spec to avoid gaps (gaps are handled in code block above)
    spec = keepranges(spec, newbins[0, 0], newbins[-1, 1], ends='loose')

    # rebin
    w0, w1 = newbins.T
    newedges = bins2edges(newbins)
    oldedges = wedges(spec)
    flux, error, flags = specutils.rebin(newedges, oldedges, spec['flux'],
                                         spec['error'], spec['flags'])
    insts = mnp.rebin(newedges, oldedges, spec['instrument'], 'or')
    normfac = mnp.rebin(newedges, oldedges, spec['normfac'], 'avg')
    start = mnp.rebin(newedges, oldedges, spec['minobsdate'], 'min')
    end = mnp.rebin(newedges, oldedges, spec['maxobsdate'], 'max')
    expt = mnp.rebin(newedges, oldedges, spec['exptime'], 'avg')

    newspec = vecs2spectbl(w0, w1, flux, error, expt, flags, insts, normfac,
                           start, end)
    newspec.meta = spec.meta
    if 'flux_photon' in spec.colnames:
        pflux, perror, _ = specutils.rebin(newedges, oldedges,
                                           spec['flux_photon'],
                                           spec['flux_photon_err'],
                                           spec['flags'])
        newspec['flux_photon'] = pflux
        newspec['flux_photon_err'] = perror
    return newspec
Code example #29
def _warn_unawaited_coroutine(coro):
    from _warnings import warn
    msg_lines = [f"coroutine '{coro.__qualname__}' was never awaited\n"]
    if coro.cr_origin is not None:
        import linecache, traceback

        def extract():
            for filename, lineno, funcname in reversed(coro.cr_origin):
                line = linecache.getline(filename, lineno)
                yield (filename, lineno, funcname, line)

        msg_lines.append("Coroutine created at (most recent call last)\n")
        msg_lines += traceback.format_list(list(extract()))
    msg = "".join(msg_lines).rstrip("\n")
    warn(msg, category=RuntimeWarning, stacklevel=2, source=coro)
Code example #30
    def find_module(self, fullname, path=None):
        """find_module(fullname, path=None) -> self or None.

        Search for a module specified by 'fullname'. 'fullname' must be the
        fully qualified (dotted) module name. It returns the zipimporter
        instance itself if the module was found, or None if it wasn't.
        The optional 'path' argument is ignored -- it's there for compatibility
        with the importer protocol.

        Deprecated since Python 3.10. Use find_spec() instead.
        """
        _warnings.warn("zipimporter.find_module() is deprecated and slated for "
                       "removal in Python 3.12; use find_spec() instead",
                       DeprecationWarning)
        return self.find_loader(fullname, path)[0]
Code example #31
 def visualize_graph(self, path):
     out_graph = nx.DiGraph()
     for node_name, node in self._nx_graph.nodes.items():
         op_exec_context = node[NNCFGraph.OP_EXEC_CONTEXT_NODE_ATTR]
         scope_str = str(op_exec_context.scope_in_model)
         out_graph.add_node(node_name, type=op_exec_context.operator_name,
                            id=node[NNCFGraph.ID_NODE_ATTR],
                            scope=scope_str)
     for u, v in self._nx_graph.edges:
         out_graph.add_edge(u, v, label=self._nx_graph.edges[u, v][NNCFGraph.ACTIVATION_SHAPE_EDGE_ATTR])
     try:
         A = to_agraph(out_graph)
         A.layout('dot')
         A.draw(path)
     except ImportError:
         warn("Graphviz is not installed - no graph visualization will be done")
Code example #32
File: population.py Project: ritika-giri/CytoPy
def _merge_thresholds(left: Population, right: Population,
                      new_population_name: str):
    """
    Merge two Populations with ThresholdGeom geometries.

    Parameters
    ----------
    left: Population
    right: Population
    new_population_name: str

    Returns
    -------
    Population
    """
    assert left.geom.x_threshold == right.geom.x_threshold, \
        "Threshold merge assumes that the populations are derived " \
        "from the same gate; X threshold should match between populations"
    assert left.geom.y_threshold == right.geom.y_threshold, \
        "Threshold merge assumes that the populations are derived " \
        "from the same gate; Y threshold should match between populations"
    if left.clusters or right.clusters:
        warn(
            "Associated clusters are now void. Repeat clustering on new population"
        )
        left.clusters, right.clusters = [], []
    if len(left.ctrl_index) > 0 or len(right.ctrl_index) > 0:
        warn(
            "Associated control indexes are now void. Repeat control gating on new population"
        )
    new_geom = ThresholdGeom(x=left.geom.x,
                             y=left.geom.y,
                             transform_x=left.geom.transform_x,
                             transform_y=left.geom.transform_y,
                             x_threshold=left.geom.x_threshold,
                             y_threshold=left.geom.y_threshold)

    new_population = Population(
        population_name=new_population_name,
        n=len(left.index) + len(right.index),
        parent=left.parent,
        warnings=left.warnings + right.warnings + ["MERGED POPULATION"],
        index=_merge_index(left, right),
        geom=new_geom,
        definition=",".join([left.definition, right.definition]),
        signature=_merge_signatures(left, right))
    return new_population
Code example #33
def center_crop_generator(generator, output_size):
    '''
    yields center crop of size output_size (may be int or tuple) from data and seg
    '''
    warn("using deprecated generator center_crop_generator", Warning)
    for data_dict in generator:
        assert "data" in list(
            data_dict.keys()), "your data generator needs to return a python dictionary with at least a 'data' key value pair"
        data = data_dict["data"]
        seg = None
        if "seg" in list(data_dict.keys()):
            seg = data_dict["seg"]
        data, seg = center_crop(data, output_size, seg)
        data_dict["data"] = data
        if seg is not None:
            data_dict["seg"] = seg
        yield data_dict
Code example #34
def center_crop_seg_generator(generator, output_size):
    '''
    yields center crop of size output_size from seg (forwards data with size unchanged). This generator is used if the
    output shape of a segmentation network is different from the input shape (e.g. when unpadded convolutions are used)
    '''
    warn("using deprecated generator center_crop_seg_generator", Warning)
    for data_dict in generator:
        do_seg = False
        seg = None
        if "seg" in list(data_dict.keys()):
            seg = data_dict["seg"]
            do_seg = True
        if not do_seg:
            warn("You used center_crop_seg_generator but there is no 'seg' key in your data_dict", Warning)
            yield data_dict
            continue
        data_dict["seg"] = center_crop_seg(seg, output_size)
        yield data_dict
Code example #35
File: c_motion.py Project: aclevine/ISO-space
    def is_move_link(self):
        trigger_tag, _, to_tag = self.token
        links = self.document.query_links(['MOVELINK'], trigger_tag['id'])
        if links:
            link = links[0]
            try:
                link_to_tag = self.document.query(link['mover'])
            except KeyError:
                warning = "malformed MOVELINK {} tag in {}".format(link['id'], self.document.basename)
                warn(warning, RuntimeWarning)
                link_to_tag = self.document.query('')
            if link_to_tag:
                if link_to_tag['start'] == to_tag['start'] and link_to_tag['end'] == to_tag['end']:
                    return True
        return False
Code example #36
File: dumpsys.py Project: dtmilano/AndroidViewClient
 def __init__(self, adbclient, subcommand, *args):
     self.nativeHeap = -1
     self.dalvikHeap = -1
     self.total = 0
     self.views = -1
     self.activities = -1
     self.appContexts = -1
     self.viewRootImpl = -1
     self.gfxProfileData = []
     self.framestats = []
     if args:
         args_str = ' '.join(args)
     else:
         args_str = ''
     if adbclient:
         cmd = 'dumpsys ' + subcommand + (' ' + args_str if args_str else '')
         self.parse(adbclient.shell(cmd), subcommand, *args)
     else:
         warn('No adbclient specified')
Code example #37
 def is_olink(self):
     trigger_tag, from_tag, to_tag = self.token
     links = self.document.query_links(['OLINK'], trigger_tag['id'])
     if links:
         link = links[0]
         try:
             link_from_tag = self.document.query(link['fromID'])
             link_to_tag = self.document.query(link['toID'])
         except KeyError:
             warning = "malformed OLINK {} tag in {}".format(link['id'], self.document.basename)
             warn(warning, RuntimeWarning)
             return False
         if link_from_tag and link_to_tag:
             if link_to_tag['start'] == to_tag['start'] and link_to_tag['end'] == to_tag['end'] and \
             link_from_tag['start'] == from_tag['start'] and link_from_tag['end'] == from_tag['end']:
                 return True
     return False
Code example #38
def pad_generator(generator, new_size, pad_value_data=None, pad_value_seg=None):
    '''
    pads data and seg with pad_value so that the images have the size new_size;
    if pad_value is None then the value of img[0, 0] is taken (for each channel of each sample in the minibatch
    separately), same with seg
    '''
    warn("using deprecated generator pad_generator", Warning)
    for data_dict in generator:
        assert "data" in list(
            data_dict.keys()), "your data generator needs to return a python dictionary with at least a 'data' key value pair"
        data = data_dict["data"]
        seg = None
        if "seg" in list(data_dict.keys()):
            seg = data_dict["seg"]
        data, seg = pad(data, new_size, seg, pad_value_data, pad_value_seg)
        if seg is not None:
            data_dict["seg"] = seg
        data_dict["data"] = data
        yield data_dict
Code example #39
def random_crop_generator(generator, crop_size=128, margins=(0, 0, 0)):
    '''
    yields a random crop of size crop_size; crop_size may be a tuple with one entry for each dimension of your data (2D/3D)
    :param margins: allows to give cropping margins measured symmetrically from the image boundaries, which
    restrict the 'box' from which to randomly crop
    '''
    warn("using deprecated generator random_crop_generator", Warning)
    for data_dict in generator:
        assert "data" in list(
            data_dict.keys()), "your data generator needs to return a python dictionary with at least a 'data' key value pair"
        data = data_dict["data"]
        seg = None
        if "seg" in list(data_dict.keys()):
            seg = data_dict["seg"]
        data, seg = random_crop(data, seg, crop_size, margins)
        data_dict["data"] = data
        if seg is not None:
            data_dict["seg"] = seg
        yield data_dict
Code example #40
File: helper.py Project: micgro42/ReceiptEval
def validate_date(date_text):
    '''
    Validate that the parameter is actually a date string and return the date;
    raise an exception otherwise.
    :param date_text: a datetime.date or a date string ('%Y-%m-%d' or '%d.%m.%y')
    '''
    if isinstance(date_text, date):
        return date_text
    try:
        valid_date = datetime.strptime(date_text, '%Y-%m-%d')
    except ValueError:
        try:
            valid_date = datetime.strptime(date_text, '%d.%m.%y')
        except ValueError:
            raise ValueError("Incorrect data format, should be YYYY-MM-DD" +
                             " but is " + date_text)
        else:
            warn('The date format is ambiguous and should be avoided: ' +
                 date_text, RuntimeWarning)
    return valid_date
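A short usage sketch of the two accepted formats (values invented):

print(validate_date('2015-03-07'))  # datetime(2015, 3, 7, 0, 0) -- parses silently
print(validate_date('07.03.15'))    # same date, but emits the RuntimeWarning about ambiguity
validate_date('03/07/2015')         # raises ValueError: matches neither format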
Code example #41
File: warnings.py Project: DamirAinullin/PTVS
def _warn_unawaited_coroutine(coro):
    msg_lines = [
        f"coroutine '{coro.__qualname__}' was never awaited\n"
    ]
    if coro.cr_origin is not None:
        import linecache, traceback
        def extract():
            for filename, lineno, funcname in reversed(coro.cr_origin):
                line = linecache.getline(filename, lineno)
                yield (filename, lineno, funcname, line)
        msg_lines.append("Coroutine created at (most recent call last)\n")
        msg_lines += traceback.format_list(list(extract()))
    msg = "".join(msg_lines).rstrip("\n")
    # Passing source= here means that if the user happens to have tracemalloc
    # enabled and tracking where the coroutine was created, the warning will
    # contain that traceback. This does mean that if they have *both*
    # coroutine origin tracking *and* tracemalloc enabled, they'll get two
    # partially-redundant tracebacks. If we wanted to be clever we could
    # probably detect this case and avoid it, but for now we don't bother.
    warn(msg, category=RuntimeWarning, stacklevel=2, source=coro)
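The cr_origin branch only has something to report when coroutine origin tracking is switched on; a minimal sketch of triggering the warning (coroutine name invented):

import gc
import sys
import warnings

sys.set_coroutine_origin_tracking_depth(10)  # record where coroutines are created
warnings.simplefilter("always")

async def forgotten():
    pass

forgotten()   # created but never awaited
gc.collect()  # the RuntimeWarning fires when the coroutine is collected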
Code example #42
File: evaluate_vsd.py Project: emilholmegaard/nilmtk
    def evaluate(self):
        """
        Parameters
        ----------
        
        Returns
        -------

        Raises
        ------
        """
        
        for meter in self.metergroup.meters:
            e = Evaluate_Meter_VSD(meter)
            if e.is_vsd():
                warn('Meter: {} is a possible variable speed drive.'.format(meter), RuntimeWarning)
                
            if self.path:
                if meter.is_site_meter():
                    e.plot(self.path+'/meter_main.png')
                else:
                    e.plot(self.path+'/meter_{}.png'.format(meter))
Code example #43
    def test_sanity(self):
        global EXPECTED
        try:
            with stderr_trapper() as output:
                # generate test output
                _warnings.warn("Warning Message!")
                expect(UserWarning, "Warning Message!")
                for warn_type in WARN_TYPES:
                    _warnings.warn(warn_type("Type-overriding message!"), UnicodeWarning)
                    expect(warn_type, "Type-overriding message!")
                    _warnings.warn("Another Warning Message!", warn_type)
                    expect(warn_type, "Another Warning Message!")
                    _warnings.warn_explicit("Explicit Warning!", warn_type, "nonexistent_file.py", 12)
                    expect(warn_type, "Explicit Warning!")
                    _warnings.warn_explicit("Explicit Warning!", warn_type, "test_python26.py", 34)
                    expect(warn_type, "Explicit Warning!")
                    _warnings.warn_explicit("Explicit Warning!", warn_type, "nonexistent_file.py", 56, "module.py")
                    expect(warn_type, "Explicit Warning!")
                    _warnings.warn_explicit("Explicit Warning!", warn_type, "test_python26.py", 78, "module.py")
                    expect(warn_type, "Explicit Warning!")

            temp_messages = output.messages

            #No point in going further if the number of lines is not what we expect
            nlines = len([x for x in temp_messages if not x.startswith("  ")])
            self.assertEqual(nlines, len(EXPECTED))

            # match lines
            for line in temp_messages:
                if line.startswith("  "):
                    continue
                temp = EXPECTED.pop(0).rstrip()
                self.assertTrue(line.endswith(temp), str(line) + " does not end with " + temp)

        finally:
            # remove generated files
            cleanup()
Code example #44
File: warnings.py Project: 0-T-0/TACTIC
def warn_explicit(message, category, filename, lineno,
                  module=None, registry=None, module_globals=None):
    lineno = int(lineno)
    if module is None:
        module = filename or "<unknown>"
        if module[-3:].lower() == ".py":
            module = module[:-3] # XXX What about leading pathname?
    if registry is None:
        registry = {}
    if isinstance(message, Warning):
        text = str(message)
        category = message.__class__
    else:
        text = message
        message = category(message)
    key = (text, category, lineno)
    # Quick test for common case
    if registry.get(key):
        return
    # Search the filters
    for item in filters:
        action, msg, cat, mod, ln = item
        if ((msg is None or msg.match(text)) and
            issubclass(category, cat) and
            (mod is None or mod.match(module)) and
            (ln == 0 or lineno == ln)):
            break
    else:
        action = defaultaction
    # Early exit actions
    if action == "ignore":
        registry[key] = 1
        return

    # Prime the linecache for formatting, in case the
    # "file" is actually in a zipfile or something.
    linecache.getlines(filename, module_globals)

    if action == "error":
        raise message
    # Other actions
    if action == "once":
        registry[key] = 1
        oncekey = (text, category)
        if onceregistry.get(oncekey):
            return
        onceregistry[oncekey] = 1
    elif action == "always":
        pass
    elif action == "module":
        registry[key] = 1
        altkey = (text, category, 0)
        if registry.get(altkey):
            return
        registry[altkey] = 1
    elif action == "default":
        registry[key] = 1
    else:
        # Unrecognized actions are errors
        raise RuntimeError(
              "Unrecognized action (%r) in warnings.filters:\n %s" %
              (action, item))
    # Warn if showwarning() does not support the 'line' argument.
    # Don't use 'inspect' as it relies on an extension module, which break the
    # build thanks to 'warnings' being imported by setup.py.
    fxn_code = None
    if hasattr(showwarning, 'func_code'):
        fxn_code = showwarning.func_code
    elif hasattr(showwarning, '__func__'):
        fxn_code = showwarning.__func__.func_code
    if fxn_code:
        args = fxn_code.co_varnames[:fxn_code.co_argcount]
        CO_VARARGS = 0x4
        if 'line' not in args and not fxn_code.co_flags & CO_VARARGS:
            showwarning_msg = ("functions overriding warnings.showwarning() "
                                "must support the 'line' argument")
            if message == showwarning_msg:
                _show_warning(message, category, filename, lineno)
            else:
                warn(showwarning_msg, DeprecationWarning)
    # Print message and context
    showwarning(message, category, filename, lineno)
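The filter loop above is what warnings.filterwarnings() configures; a small sketch of the "error" action, which exercises the `raise message` branch (message invented):

import warnings

warnings.filterwarnings("error", category=DeprecationWarning)
try:
    warnings.warn("old API", DeprecationWarning)
except DeprecationWarning as exc:
    print("raised:", exc)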
Code example #45
File: warnings_test.py Project: ChinaQuants/pyston
import warnings
import _warnings

print "__warningregistry__" in globals()

warnings.filterwarnings('error')

try:
    warnings.warn("hello world", Warning)
except Warning as w:
    print(w.args[0])

try:
    _warnings.warn("deperecated", Warning)
except Warning as w:
    print(w.args[0])

print "__warningregistry__" in globals()
Code example #46
File: atom_finding.py Project: pycroscopy/pycroscopy
def fit_atom_pos(single_parm):
    """
    Fits the position of a single atom.

    Parameters
    ----------
    single_parm : tuple
        atom_ind : unsigned integer
            Index of the atom being fitted
        parm_dict : dictionary
            Dictionary containing all the guess values, table of nearest neighbors for each atom, and the original image
        fitting_parms : dictionary
            Dictionary of the many fitting parameters

    Returns
    -------
    coef_guess_mat : 2D numpy array
        guess coefficients for the set of N atoms
    coef_fit_mat : 2D numpy array
        Fit coefficients for the set of N atoms

    This function also returns all intermediate results for debugging purposes if parm_dict['verbose']=True
    """
    atom_ind = single_parm[0]
    parm_dict = single_parm[1]
    fitting_parms = single_parm[2]

    all_atom_guesses = parm_dict['atom_pos_guess']
    closest_neighbors_mat = parm_dict['nearest_neighbors']
    cropped_clean_image = parm_dict['cropped_cleaned_image']

    fit_region_size = fitting_parms['fit_region_size']
    gauss_width_guess = fitting_parms['gauss_width_guess']
    num_nearest_neighbors = fitting_parms['num_nearest_neighbors']
    min_amplitude = fitting_parms['min_amplitude']
    max_amplitude = fitting_parms['max_amplitude']
    position_range = fitting_parms['position_range']
    max_function_evals = fitting_parms['max_function_evals']
    min_gauss_width_ratio = fitting_parms['min_gauss_width_ratio']
    max_gauss_width_ratio = fitting_parms['max_gauss_width_ratio']
    verbose = False
    if 'verbose' in parm_dict:
        verbose = parm_dict['verbose']

    x_center_atom = all_atom_guesses[atom_ind, 0]
    y_center_atom = all_atom_guesses[atom_ind, 1]
    x_neighbor_atoms = all_atom_guesses[closest_neighbors_mat[atom_ind], 0]
    y_neighbor_atoms = all_atom_guesses[closest_neighbors_mat[atom_ind], 1]
    x_range = slice(max(int(np.round(x_center_atom - fit_region_size)), 0),
                    min(int(np.round(x_center_atom + fit_region_size)),
                        cropped_clean_image.shape[0]))
    y_range = slice(max(int(np.round(y_center_atom - fit_region_size)), 0),
                    min(int(np.round(y_center_atom + fit_region_size)),
                        cropped_clean_image.shape[1]))

    will_fail = False
    # Stephen says that it does not matter if guesses are outside but the fit does not work
    # well when guesses are outside the window
    x_outside = np.hstack((np.where(x_neighbor_atoms < x_range.start)[0],
                           np.where(x_neighbor_atoms > x_range.stop)[0]))
    y_outside = np.hstack((np.where(y_neighbor_atoms < y_range.start)[0],
                           np.where(y_neighbor_atoms > y_range.stop)[0]))
    guesses_outside = np.unique(np.hstack((x_outside, y_outside)))
    if guesses_outside.size >= 0.5 * num_nearest_neighbors:
        if verbose:
            warn('Atom {}: Too few ({} of {}) neighbors within window to fit'.format(atom_ind, num_nearest_neighbors -
                                                                                     guesses_outside.size,
                                                                                     num_nearest_neighbors))
        will_fail = True
    else:
        guesses_inside = np.invert(np.in1d(np.arange(num_nearest_neighbors), guesses_outside))
        x_neighbor_atoms = x_neighbor_atoms[guesses_inside]
        y_neighbor_atoms = y_neighbor_atoms[guesses_inside]
        num_nearest_neighbors = x_neighbor_atoms.size

    fit_region = cropped_clean_image[x_range, y_range]

    # define x and y fitting range
    s1, s2 = np.meshgrid(range(x_range.start, x_range.stop),
                         range(y_range.start, y_range.stop))
    s_mat = np.dstack((s1.T, s2.T))

    # initial guess values
    x_guess = np.hstack((x_center_atom, x_neighbor_atoms))
    y_guess = np.hstack((y_center_atom, y_neighbor_atoms))
    a_guess = cropped_clean_image[np.uint32(x_guess), np.uint32(y_guess)]
    sigma_guess = gauss_width_guess * np.ones(num_nearest_neighbors + 1)

    coef_guess_mat = np.transpose(np.vstack((a_guess, x_guess,
                                             y_guess, sigma_guess)))
    # Set up upper and lower bounds:
    lb_mat = [min_amplitude * np.ones(num_nearest_neighbors + 1),
              coef_guess_mat[:, 1] - position_range,
              coef_guess_mat[:, 2] - position_range,
              min_gauss_width_ratio * gauss_width_guess * np.ones(num_nearest_neighbors + 1)]

    ub_mat = [max_amplitude * np.ones(num_nearest_neighbors + 1),
              coef_guess_mat[:, 1] + position_range,
              coef_guess_mat[:, 2] + position_range,
              max_gauss_width_ratio * gauss_width_guess * np.ones(num_nearest_neighbors + 1)]
    lb_mat = np.transpose(lb_mat)
    ub_mat = np.transpose(ub_mat)

    if will_fail:
        coef_fit_mat = coef_guess_mat
        plsq = None
    else:
        # Now refine the positions!

        def gauss_2d_residuals(parms_vec, orig_data_mat, x_data_mat):
            """
            Calculates the residual
            Parameters
            ----------
            parms_vec : 1D numpy array
                Raveled version of the parameters matrix
            orig_data_mat : 2D numpy array
                Section of the image being fitted
            x_data_mat : 3D numpy array

            Returns
            -------
            err_vec : 1D numpy array
                Difference between the original data and the matrix obtained by evaluating parms_vec with x_data_mat
            """
            # Only need to reshape the parms from 1D to 2D
            parms_mat = np.reshape(parms_vec, (-1, 4))

            err = orig_data_mat - multi_gauss_surface_fit(parms_mat, x_data_mat)
            return err.ravel()

        plsq = least_squares(gauss_2d_residuals,
                             coef_guess_mat.ravel(),
                             args=(fit_region, s_mat),
                             bounds=(lb_mat.ravel(), ub_mat.ravel()),
                             jac='2-point', max_nfev=max_function_evals)
        coef_fit_mat = np.reshape(plsq.x, (-1, 4))

    if verbose:
        return coef_guess_mat, lb_mat, ub_mat, coef_fit_mat, fit_region, s_mat, plsq
    else:
        return coef_guess_mat, coef_fit_mat
Code example #47
File: test_warnings.py Project: Qointum/pypy
 def test_filename_none(self):
     import _warnings
     globals()['__file__'] = 'test.pyc'
     _warnings.warn('test', UserWarning)
     globals()['__file__'] = None
     _warnings.warn('test', UserWarning)
Code example #48
def warnpy3k(message, category=None, stacklevel=1):
    if sys.py3kwarning:
        if category is None:
            category = DeprecationWarning
        warn(message, category, stacklevel + 1)
Code example #49
File: test_warnings.py Project: Debug-Orz/Sypy
 def test_warn(self):
     import _warnings
     _warnings.warn("some message", DeprecationWarning)
     _warnings.warn("some message", Warning)
Code example #50
File: test_warnings.py Project: Debug-Orz/Sypy
 def test_lineno(self):
     import warnings, _warnings, sys
     with warnings.catch_warnings(record=True) as w:
         _warnings.warn("some message", Warning)
         lineno = sys._getframe().f_lineno - 1 # the line above
         assert w[-1].lineno == lineno