Code example #1
    def __init__(self,
                 k: int = 10,
                 query_model: Optional[tf.keras.Model] = None,
                 nlist: Optional[int] = 1,
                 nprobe: Optional[int] = 1,
                 normalize: bool = False,
                 *args,
                 **kwargs):
        super().__init__(k, *args, **kwargs)

        self._query_model = query_model
        self._nlist = nlist
        self._nprobe = nprobe
        self._normalize = normalize

        mkl.get_max_threads()

        def build_searcher(
            candidates: Union[np.ndarray, tf.Tensor],
            identifiers: Optional[Union[np.ndarray, tf.Tensor]] = None,
        ) -> Union[faiss.swigfaiss.IndexIDMap, faiss.swigfaiss.IndexIVFFlat]:

            if isinstance(candidates, tf.Tensor):
                candidates = candidates.numpy()

            if candidates.dtype != "float32":
                candidates = candidates.astype(np.float32)

            d = candidates.shape[1]
            quantizer = faiss.IndexFlatIP(d)
            index = faiss.IndexIVFFlat(quantizer, d, self._nlist,
                                       faiss.METRIC_INNER_PRODUCT)
            if self._normalize is True:
                faiss.normalize_L2(candidates)
            index.train(candidates)  # pylint: disable=no-value-for-parameter

            if identifiers is not None:
                if isinstance(identifiers, tf.Tensor):
                    identifiers = identifiers.numpy()
                if identifiers.dtype != np.int64:
                    try:
                        identifiers = identifiers.astype(np.int64)
                    except (TypeError, ValueError):
                        raise ValueError("`identifiers` dtype must be `int64`. "
                                         "Got `dtype` = {}".format(
                                             identifiers.dtype))

                index = faiss.IndexIDMap(index)
                index.add_with_ids(candidates, identifiers)  # pylint: disable=no-value-for-parameter
            else:
                index.add(candidates)

            return index

        self._build_searcher = build_searcher
        self._searcher = None
        self._identifiers = None
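
The constructor above only builds the index; a small, self-contained sketch of how such an IVF inner-product index is typically trained and queried is given below (the sizes, variable names, and data are illustrative, not the project's code):

import numpy as np
import faiss

d, nlist, k = 64, 4, 10
candidates = np.random.rand(1000, d).astype(np.float32)

# Build and train an IVF index over inner product (cosine if vectors are L2-normalized).
quantizer = faiss.IndexFlatIP(d)
index = faiss.IndexIVFFlat(quantizer, d, nlist, faiss.METRIC_INNER_PRODUCT)
index.train(candidates)
index.add(candidates)

# Query it: nprobe controls how many inverted lists are visited per query.
queries = np.random.rand(5, d).astype(np.float32)
index.nprobe = 2
scores, ids = index.search(queries, k)  # scores and ids both have shape (5, 10)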
Code example #2
File: test_utils.py  Project: satorchi/pyoperators
 def func(env):
     global counter
     with env:
         nthreads = os.getenv('OMP_NUM_THREADS')
         expected = omp_num_threads()
         with pool_threading() as pool:
             assert_equal(int(os.environ['OMP_NUM_THREADS']), 1)
             if mkl is not None:
                 assert_equal(mkl.get_max_threads(), 1)
             counter = 0
             pool.map(func_thread, range(pool._processes))
         assert_equal(os.getenv('OMP_NUM_THREADS'), nthreads)
         if mkl is not None:
             assert_equal(mkl.get_max_threads(), mkl_nthreads)
         assert_equal(counter, expected)
     assert_not_in('OMP_NUM_THREADS', os.environ)
Code example #3
File: misc.py  Project: satorchi/pyoperators
@contextmanager  # requires "from contextlib import contextmanager"; used as "with pool_threading() as pool:"
def pool_threading(nthreads=None):
    if nthreads is None:
        nthreads = omp_num_threads()
    try:
        import mkl
        old_mkl_num_threads = mkl.get_max_threads()
        mkl.set_num_threads(1)
    except ImportError:
        pass
    old_omp_num_threads = os.getenv('OMP_NUM_THREADS')
    os.environ['OMP_NUM_THREADS'] = '1'

    pool = multiprocessing.dummy.Pool(nthreads)
    yield pool

    pool.close()
    pool.join()
    try:
        mkl.set_num_threads(old_mkl_num_threads)
    except NameError:
        pass
    if old_omp_num_threads is not None:
        os.environ['OMP_NUM_THREADS'] = old_omp_num_threads
    else:
        del os.environ['OMP_NUM_THREADS']
Code example #4
def pca_cpp(X):
    """
    Wrapper for cpp implementation of signal decorrelation
    """
    assert (
        X.dtype == np.complex128
    ), "cpp backend only supports double precision complex arrays"

    n_frames, n_freq, n_chan = X.shape

    if has_mkl:
        # We need to deactivate parallelization in mkl
        mkl_num_threads = mkl.get_max_threads()
        mkl.set_num_threads(1)

    # Make a copy of the input with efficient axis order
    X_T = X.transpose([1, 2, 0]).copy()

    # Create arrays to receive the output
    W = np.zeros((n_freq, n_chan, n_chan), dtype=X.dtype)

    pca_core(X_T, W)

    if has_mkl:
        mkl.set_num_threads(mkl_num_threads)

    return X_T.transpose([2, 0, 1]), W
Code example #5
    def __init__(self, rd=False, path=".", threads_num=None):
        """
        Customize the newly created instance

        Parameters
        ----------
        rd: Boolean, optional
            Whether to redirect the output information to a `txt` file
            default: False
        path: str, optional
            The destination to save the `txt` file
            default: current working directory
        threads_num: int or None, optional
            The number of threads to be used by MKL
            The default value `None` means the maximum number available on the system
            default: None
        """

        if threads_num is None:
            self._threads_num = mkl.get_max_threads()
        elif isinstance(threads_num, int) and threads_num > 0:
            self._threads_num = threads_num
        else:
            raise ValueError("Invalid `threads_num` parameter!")

        path = Path(path).resolve()
        path.mkdir(parents=True, exist_ok=True)
        self._rd = rd
        self._path = path
Code example #6
File: OP_ELM.py  Project: TomWerner/skin_detection
 def train(self, batch_size=100, use_gpu=False):
     try:
         import mkl
         print("Using:", mkl.get_max_threads(), "threads")
     except ImportError:
         pass
     HTH, HTT, self.beta_matrix = self._calculate_beta(batch_size, use_gpu)
Code example #7
from contextlib import contextmanager


@contextmanager
def one_thread_per_process():
    """Return a context manager where only one thread is allocated to a process.

    This function is intended to be used as a with statement like::

        >>> with one_thread_per_process():
        ...     do_something() # one thread per process

    Notes:
        This function only works when MKL (Intel Math Kernel Library)
        is installed and used in, for example, NumPy and SciPy.
        Otherwise this function does nothing.

    """
    try:
        import mkl

        is_mkl = True
    except ImportError:
        is_mkl = False

    if is_mkl:
        n_threads = mkl.get_max_threads()
        mkl.set_num_threads(1)
        try:
            # block nested in the with statement
            yield
        finally:
            # revert to the original value
            mkl.set_num_threads(n_threads)
    else:
        yield
Code example #8
 def __init__(self,
              tmin=None,
              tmax=None,
              fmin=None,
              fmax=None,
              method_params=None,
              n_jobs='auto',
              comment='default'):
     BaseMarkerSandbox.__init__(self, tmin=None, tmax=None, comment=comment)
     if method_params is None:
         method_params = {}
     if fmax is None:
         fmax = np.inf
     self.fmin = fmin
     self.fmax = fmax
     self.method_params = method_params
     if n_jobs == 'auto':
         try:
             import multiprocessing as mp
             import mkl
             n_jobs = int(mp.cpu_count() / mkl.get_max_threads())
             logger.info('Autodetected number of jobs {}'.format(n_jobs))
         except Exception:
             logger.info('Cannot autodetect number of jobs')
             n_jobs = 1
     self.n_jobs = n_jobs
Code example #9
File: misc.py  Project: ghisvail/pyoperators
@contextmanager  # requires "from contextlib import contextmanager"; used as "with pool_threading() as pool:"
def pool_threading(nthreads=None):
    if nthreads is None:
        nthreads = omp_num_threads()
    try:
        import mkl
        old_mkl_num_threads = mkl.get_max_threads()
        mkl.set_num_threads(1)
    except ImportError:
        pass
    old_omp_num_threads = os.getenv('OMP_NUM_THREADS')
    os.environ['OMP_NUM_THREADS'] = '1'

    pool = multiprocessing.dummy.Pool(nthreads)
    yield pool

    pool.close()
    pool.join()
    try:
        mkl.set_num_threads(old_mkl_num_threads)
    except NameError:
        pass
    if old_omp_num_threads is not None:
        os.environ['OMP_NUM_THREADS'] = old_omp_num_threads
    else:
        del os.environ['OMP_NUM_THREADS']
Code example #10
File: test_utils.py  Project: pchanial/pyoperators
 def func(env):
     global counter
     with env:
         nthreads = os.getenv("OMP_NUM_THREADS")
         expected = omp_num_threads()
         with pool_threading() as pool:
             assert_equal(int(os.environ["OMP_NUM_THREADS"]), 1)
             if mkl is not None:
                 assert_equal(mkl.get_max_threads(), 1)
             counter = 0
             pool.map(func_thread, range(pool._processes))
         assert_equal(os.getenv("OMP_NUM_THREADS"), nthreads)
         if mkl is not None:
             assert_equal(mkl.get_max_threads(), mkl_nthreads)
         assert_equal(counter, expected)
     assert_not_in("OMP_NUM_THREADS", os.environ)
Code example #11
File: komplexity.py  Project: prakash-kavi/nice
def epochs_compute_komplexity(epochs,
                              nbins,
                              tmin=None,
                              tmax=None,
                              backend='python',
                              method_params=None):
    """Compute complexity (K)

    Parameters
    ----------
    epochs : instance of mne.Epochs
        The epochs on which to compute the complexity.
    nbins : int
        Number of bins to use for symbolic transformation
    method_params : dictionary.
        Overrides default parameters for the backend used.
        OpenMP specific {'nthreads'}
    backend : {'python', 'openmp'}
        The backend to be used. Defaults to 'python'.
    """
    picks = pick_types(epochs.info, meg=True, eeg=True)

    if method_params is None:
        method_params = {}

    data = epochs.get_data()[:, picks if picks is not None else Ellipsis]
    time_mask = _time_mask(epochs.times, tmin, tmax)
    data = data[:, :, time_mask]
    logger.info("Running KolmogorovComplexity")

    if backend == 'python':
        start_time = time.time()
        komp = _komplexity_python(data, nbins)
        elapsed_time = time.time() - start_time
        logger.info("Elapsed time {} sec".format(elapsed_time))
    elif backend == 'openmp':
        from ..optimizations.ompk import komplexity as _ompk_k
        nthreads = (method_params['nthreads']
                    if 'nthreads' in method_params else 1)
        if nthreads == 'auto':
            try:
                import mkl
                nthreads = mkl.get_max_threads()
                logger.info(
                    'Autodetected number of threads {}'.format(nthreads))
            except ImportError:
                logger.info('Cannot autodetect number of threads')
                nthreads = 1
        start_time = time.time()
        komp = _ompk_k(data, nbins, nthreads)
        elapsed_time = time.time() - start_time
        logger.info("Elapsed time {} sec".format(elapsed_time))
    else:
        raise ValueError('backend %s not supported for KolmogorovComplexity' %
                         backend)
    return komp
Code example #12
File: cpu_count.py  Project: winedarksea/AutoTS
def cpu_count(modifier: float = 1):
    """Find available CPU count, running on both Windows/Linux.

    Attempts to be very conservative:
        * Remove Intel Hyperthreading logical cores
        * Find max cores allowed to the process, if less than machine has total

    Runs best with psutil installed, falls back to mkl, then os core count / 2

    Args:
        modifier (float): multiply CPU count by this value
    """
    import os

    # your basic cpu count, includes logical cores and all of machine
    num_cores = os.cpu_count()
    if num_cores is None:
        num_cores = -1

    # includes logical cores, and counts only cores available to task
    try:
        import psutil

        available_cores = len(psutil.Process().cpu_affinity())
    except Exception:
        # this only works on UNIX I believe
        try:
            available_cores = len(os.sched_getaffinity(0))
        except Exception:
            available_cores = -1

    # only physical cores, includes all available to machine
    try:
        import psutil

        ps_cores = psutil.cpu_count(logical=False)
    except Exception:
        try:
            import mkl

            ps_cores = int(mkl.get_max_threads())
        except Exception:
            ps_cores = int(num_cores / 2)

    core_list = [num_cores, available_cores, ps_cores]
    core_list = [x for x in core_list if x > 0]
    if core_list:
        core_count = min(core_list)
    else:
        core_count = 1
    if modifier != 1:
        core_count = int(modifier * core_count)
    core_count = 1 if core_count < 1 else core_count
    return core_count
Code example #13
def regress_out(adata, regr):
    debug and print(adata.obs)
    debug and print(adata.var)
    if use_fastpp and regr_type > 0:
        #myscpp.regress_out(adata, regr)
        numpy_regress_out(adata, regr)
    else:
        kthr = mkl.get_max_threads()
        debug and print("MKL threads was at", kthr, "setting to 1")
        mkl.set_num_threads(1)
        sc.pp.regress_out(adata, regr)
        mkl.set_num_threads(kthr)
        debug and print("MKL threads restored to", kthr)
Code example #14
File: five.py  Project: yexiayin/piva
def five_cpp(
    X,
    n_iter=3,
    proj_back=True,
    W0=None,
    model=defaults.model,
    return_filters=False,
    callback=None,
    callback_checkpoints=[],
    cost_callback=None,
    **kwargs,
):
    assert (X.dtype == np.complex128
            ), "FIVE only supports complex double precision arrays"

    n_frames, n_freq, n_chan = X.shape

    if has_mkl:
        # We need to deactivate parallelization in mkl
        mkl_num_threads = mkl.get_max_threads()
        mkl.set_num_threads(1)

    # Make a copy of the input with efficient axis order
    X_T = X.transpose([1, 2, 0]).copy()

    # Create arrays to receive the output
    W = np.zeros((n_freq, n_chan, n_chan), dtype=X.dtype)
    Y = np.zeros((n_freq, 1, n_frames), dtype=X.dtype)

    if W0 is not None:
        X_T = W0 @ X_T

    if model == "laplace":
        five_laplace_core(X_T, Y, W, n_iter)
    elif model == "gauss":
        five_gauss_core(X_T, Y, W, n_iter)
    else:
        raise ValueError(f"No such model {model}")

    if has_mkl:
        mkl.set_num_threads(mkl_num_threads)

    Y = Y.transpose([2, 0, 1]).copy()

    if proj_back:
        Y = project_back(Y, X[:, :, 0])

    if return_filters:
        return Y, W
    else:
        return Y
Code example #15
File: ssutils.py  Project: holehouse-lab/camparitraj
def set_numpy_threads(num_threads):
    # Currently only MKL is supported on Windows as it's installed alongside
    # the other packages via conda. A "traditional" virtual environment requires
    # access to a compiler and other libraries for successful compilation.
    if platform.system().lower() == 'windows':
        import mkl
        mkl.set_num_threads(num_threads)
        return mkl.get_max_threads(), MKL_LIBRARY

    candidates, other_candidates = _identify_library_paths()
    if len(candidates) > 0:
        set_threads, library = _set_numpy_threads(candidates, num_threads)
    else:
        set_threads, library = _set_numpy_threads(other_candidates,
                                                  num_threads)
    return set_threads, library
Code example #16
def mkl_get_nthreads():
    """wrapper around MKL ``get_max_threads``.

    Returns
    -------
    max_threads : int
        The maximum number of threads used by MKL. ``-1`` if unable to read out.
    """
    try:
        import mkl  # available in conda MKL
        return mkl.get_max_threads()
    except ImportError:
        try:
            mkl_rt = ctypes.CDLL('libmkl_rt.so')
            return mkl_rt.mkl_get_max_threads()
        except OSError:
            warnings.warn("MKL library not found: can't get nthreads")
    return -1
Code example #17
File: test_utils.py  Project: pchanial/pyoperators
def test_pool_threading():
    try:
        import mkl

        mkl_nthreads = mkl.get_max_threads()
    except ImportError:
        mkl = None
    counter = None

    def func_thread(i):
        global counter
        counter += 1

    @contextmanager
    def get_env(value):
        try:
            del os.environ["OMP_NUM_THREADS"]
        except KeyError:
            pass
        if value is not None:
            os.environ["OMP_NUM_THREADS"] = str(value)
        yield
        if value is not None:
            del os.environ["OMP_NUM_THREADS"]

    def func(env):
        global counter
        with env:
            nthreads = os.getenv("OMP_NUM_THREADS")
            expected = omp_num_threads()
            with pool_threading() as pool:
                assert_equal(int(os.environ["OMP_NUM_THREADS"]), 1)
                if mkl is not None:
                    assert_equal(mkl.get_max_threads(), 1)
                counter = 0
                pool.map(func_thread, range(pool._processes))
            assert_equal(os.getenv("OMP_NUM_THREADS"), nthreads)
            if mkl is not None:
                assert_equal(mkl.get_max_threads(), mkl_nthreads)
            assert_equal(counter, expected)
        assert_not_in("OMP_NUM_THREADS", os.environ)

    for env in get_env(None), get_env(1), get_env(3):
        yield func, env
Code example #18
File: test_utils.py  Project: satorchi/pyoperators
def test_pool_threading():
    try:
        import mkl
        mkl_nthreads = mkl.get_max_threads()
    except ImportError:
        mkl = None
    counter = None

    def func_thread(i):
        global counter
        counter += 1

    @contextmanager
    def get_env(value):
        try:
            del os.environ['OMP_NUM_THREADS']
        except KeyError:
            pass
        if value is not None:
            os.environ['OMP_NUM_THREADS'] = str(value)
        yield
        if value is not None:
            del os.environ['OMP_NUM_THREADS']

    def func(env):
        global counter
        with env:
            nthreads = os.getenv('OMP_NUM_THREADS')
            expected = omp_num_threads()
            with pool_threading() as pool:
                assert_equal(int(os.environ['OMP_NUM_THREADS']), 1)
                if mkl is not None:
                    assert_equal(mkl.get_max_threads(), 1)
                counter = 0
                pool.map(func_thread, range(pool._processes))
            assert_equal(os.getenv('OMP_NUM_THREADS'), nthreads)
            if mkl is not None:
                assert_equal(mkl.get_max_threads(), mkl_nthreads)
            assert_equal(counter, expected)
        assert_not_in('OMP_NUM_THREADS', os.environ)

    for env in get_env(None), get_env(1), get_env(3):
        yield func, env
Code example #19
def set_threads(num_threads=None, verbose=False, no_guessing=False):
    '''
    Get and set the number of threads used by FFT libraries.

    Parameters
    ----------
    num_threads : int, default None
        Number of threads requested. If None, do not set threads.
    verbose : bool, default False
        If True, output debug messages to STDOUT.
    no_guessing : bool, default False
        If False and MKL is not found at all, return a guess of 1 thread
        since numpy.fft and scipy.fftpack are single-threaded without MKL.
        If True, return len(os.sched_getaffinity(0)) or os.cpu_count().

    Returns
    -------
    (int or None, str or None)
        Tuple of the number of threads in effect and a string naming how it
        was determined, or (None, None) on failure.
    '''

    try:
        import mkl
    except ImportError:
        if hasattr(np, '__mkl_version__') or no_guessing:
            # MKL present but no mkl-service, so guess number of CPUs
            if verbose:
                print(f'TAG: WARNING: mkl-service module was not '
                      f'found. Number of threads is likely inaccurate!')
            if hasattr(os, 'sched_getaffinity'):
                return len(os.sched_getaffinity(0)), 'os.sched_getaffinity'
            else:
                return os.cpu_count(), 'os.cpu_count'
        else:
            # no MKL, so assume not threaded
            return 1, 'guessing'
    else:
        if num_threads:
            mkl.set_num_threads(num_threads)
        return mkl.get_max_threads(), 'mkl.get_max_threads'

    return None, None
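
A short usage sketch (illustrative only; it assumes NumPy is linked against MKL and the mkl-service package is importable):

# Hypothetical call: request 4 FFT threads and report how the count was determined.
nthreads, source = set_threads(4, verbose=True)
print(nthreads, source)  # e.g. 4 mkl.get_max_threads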
Code example #20
    def __exit__(self, exc_type, exc_value, traceback):
        """
        Close the opened file and restore sys.stdout before exit
        """

        mkl.set_num_threads(mkl.get_max_threads())

        if self._rd:
            print("=" * 80)
            if exc_type is None:
                print("Non exception has occurred!")
            else:
                print("Exc_type: {0}".format(exc_type))
                print("Exc_value: {0}".format(exc_value))
                print("Traceback:")
                print_tb(traceback, file=self._fp)
            print("=" * 80)
            print("Exit run time environment at: {0}".format(
                strftime(TIME_FORMAT)),
                  flush=True)
            sys.stdout = self._stdout
            self._fp.close()
        return False
Code example #21
#!/usr/bin/env python
# coding=utf-8
import json
import sys
import numpy as np
import mkl
mkl.get_max_threads()
import faiss

values = []
tokens = []
line_index = []

############# Read the JSON input line by line and parse it #############
for line in sys.stdin:
    line = line.strip()
    data = json.loads(line)

    token = ''
    value = []

    ############# Concatenate the embedding of every token in the query #############
    for i in data['features']:
        token = token + i['token']

    for j in data['features'][0]['layers']:
        value.extend(j['values'])

    ############# Save the results #############
    values.append(value)
    tokens.append(token)
Code example #22
if args['num_repeats'] > 0:
    num_repeats = args['num_repeats']
else:
    num_repeats = NUM_REPEATS
print("{} repeats specified".format(num_repeats))

try:
    import mkl
    use_mkl = True
except ImportError:
    use_mkl = False

if use_mkl:
    mkl.set_num_threads(num_threads)
    print("Number of threads is {}".format(mkl.get_max_threads()))
else:
    print("mkl unavailable")


class FidelityComparison:
    cNOT = Qobj([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
    cNOT.dims = [[2, 2], [2, 2]]
    iden = identity(2)

    def __init__(self, num_qubits):
        print("Starting init")
        self.num_qubits = num_qubits
        self.dim = 2**self.num_qubits

        self.full_cNOT = self.make_full_cNOT()
Code example #23
File: numpy_mkl_fftn.py  Project: cjayb/benchmarks
import numpy
import numpy.fft as fft
numpy.use_fastnumpy = True
import time
#from scipy.fftpack import fft
import mkl

print('Intel MKL version:', mkl.get_version_string())
print('Intel cpu_clocks:', mkl.get_cpu_clocks())
print('Intel cpu_frequency:', mkl.get_cpu_frequency())
#print('Intel MKL, freeing buffer memory:', mkl.thread_free_buffers())

print('max Intel threads:', mkl.get_max_threads())

mkl.set_num_threads(2)

N = 2**16

print('using numpy', numpy.__version__)
a = numpy.random.rand(2, N)
print(a.shape, 'items')
t0 = time.perf_counter()
for i in range(100):
    continue
base = time.perf_counter() - t0
fftn = fft.fftn
t0 = time.perf_counter()
for i in range(100):
    r = fftn(a, (N,), (1,))
print('simple loop', time.perf_counter() - t0 - base)
Code example #24
def overiva_cpp(
    X,
    n_src=None,
    n_iter=20,
    proj_back=True,
    model=defaults.model,
    return_filters=False,
    **kwargs,
):
    """
    Wrapper for the C++ implementation of AuxIVA

    Parameters
    ----------
    X: ndarray (nframes, nfrequencies, nchannels)
        STFT representation of the signal
    n_src: int, optional
        The number of sources or independent components
    n_iter: int, optional
        The number of iterations (default 20)
    proj_back: bool, optional
        Scaling on first mic by back projection (default True)
    model: str
        The model of source distribution 'gauss' or 'laplace' (default)
    return_filters: bool
        If true, the function will return the demixing matrix too

    Returns
    -------
    Returns an (nframes, nfrequencies, nsources) array. Also returns
    the demixing matrix (nfrequencies, nchannels, nchannels)
    if ``return_filters`` keyword is True.
    """

    if model not in ["laplace", "gauss"]:
        raise ValueError(f"No such model {model}")

    n_frames, n_freq, n_chan = X.shape

    if n_src is None:
        n_src = n_chan

    # new shape: (nfrequencies, nchannels, nframes)
    X_T = X.transpose([1, 2, 0]).copy()

    if has_mkl:
        # We need to deactivate parallelization in mkl
        mkl_num_threads = mkl.get_max_threads()
        mkl.set_num_threads(1)

    # Initialize the demixing matrix
    W = np.zeros((n_freq, n_chan, n_chan), dtype=np.complex128)
    W[:, :, :] = np.eye(n_chan)[None, :, :]

    if n_src == n_chan:
        Y_T = X_T.copy()

        if model == "laplace":
            auxiva_laplace_core(X_T, Y_T, W, n_iter)
        elif model == "gauss":
            auxiva_gauss_core(X_T, Y_T, W, n_iter)

    else:
        Y_T = X_T[:, :n_src, :].copy()
        W_loc = W[:, :n_src, :].copy()

        if model == "laplace":
            overiva_laplace_core(X_T, Y_T, W_loc, n_iter)
        elif model == "gauss":
            overiva_gauss_core(X_T, Y_T, W_loc, n_iter)

        if return_filters:
            # copy demixing matrix to return to user
            W[:, :n_src, :] = W_loc
            W[:, n_src:, n_src:] *= -1

            # covariance of input signal
            Cx = (X_T @ tensor_H(X_T)) / n_frames

            # build missing part of demixing matrix
            tmp = W[:, :n_src, :] @ Cx
            W[:, n_src:, :n_src] = tensor_H(
                np.linalg.solve(tmp[:, :, :n_src], tmp[:, :, n_src:]))

    if has_mkl:
        mkl.set_num_threads(mkl_num_threads)

    Y = Y_T.transpose([2, 0, 1]).copy()

    if proj_back:
        Y = project_back(Y, X[:, :, 0])

    if return_filters:
        return Y, W
    else:
        return Y
Code example #25
File: auxiva_iss.py  Project: yexiayin/piva
def auxiva_iss_cpp(
    X,
    n_src=None,
    n_iter=20,
    proj_back=True,
    model=defaults.model,
    return_filters=False,
    callback=None,
):
    """
    Wrapper for the C++ implementation of MixIVA

    Parameters
    ----------
    X: ndarray (nframes, nfrequencies, nchannels)
        STFT representation of the signal
    n_src: int, optional
        The number of sources or independent components
    n_iter: int, optional
        The number of iterations (default 20)
    proj_back: bool, optional
        Scaling on first mic by back projection (default True)
    model: str
        The model of source distribution 'gauss' or 'laplace' (default)
    return_filters: bool
        If true, the function will return the demixing matrix too
    callback: func
        A callback function called every 10 iterations, allows to monitor
        convergence

    Returns
    -------
    Returns an (nframes, nfrequencies, nsources) array. Also returns
    the demixing matrix (nfrequencies, nchannels, nsources)
    if ``return_filters`` keyword is True.
    """
    n_frames, n_freq, n_chan = X.shape

    X_T_original = X.transpose([1, 2, 0])
    X_T = X_T_original.copy()

    if has_mkl:
        # We need to deactivate parallelization in mkl
        mkl_num_threads = mkl.get_max_threads()
        mkl.set_num_threads(1)

    if model == "laplace":
        auxiva_iss_laplace_core(X_T, n_iter)
    elif model == "gauss":
        auxiva_iss_gauss_core(X_T, n_iter)
    else:
        raise ValueError(f"No such model {model}")

    if has_mkl:
        mkl.set_num_threads(mkl_num_threads)

    if return_filters:
        # Demixing matrices were not computed explicitly so far,
        # do it here, if necessary
        W = X_T[:, :, :n_chan] @ np.linalg.inv(X_T_original[:, :, :n_chan])

    Y = X_T.transpose([2, 0, 1]).copy()

    if proj_back:
        Y = project_back(Y, X[:, :, 0])

    if return_filters:
        return Y, W
    else:
        return Y
Code example #26
from trainer import Trainer
from utils import (update_task, get_max_of_db_column,
                   get_a_task, ExploitationNeeded,
                   LossIsNaN, get_task_ids_and_scores, PopulationFinished,
                   get_col_from_populations, RemainingTasksTaken,
                   print_with_time, ExploitationOcurring,
                   create_new_population)
from config import (get_optimizer, DATA_DIR, MODEL_CLASS, LOSS_FN,
                    HYPERPARAM_NAMES, EPOCHS, BATCH_SIZE, POPULATION_SIZE,
                    EXPLOIT_INTERVAL, USE_SQLITE)


if __name__ == "__main__":
    # TODO: Does this help?
    nproc = mkl.get_max_threads()  # e.g. 12
    mkl.set_num_threads(nproc)

    parser = argparse.ArgumentParser(description="Population Based Training")
    parser.add_argument("-g", "--gpu", type=int, default=0, help="Selects GPU with the given ID. IDs are those shown in nvidia-smi.")  # noqa
    parser.add_argument("-p", "--population_id", type=int, default=None, help="Resumes work on the population with the given ID. Use -1 to select the most recently created population. Without this flag, a new population will be created.")  # noqa
    parser.add_argument("-e", "--exploiter", action="store_true", help="Set this process as the exploiter. It will be responsible for running the exploit step over the entire population at the end of each interval.")  # noqa
    args = parser.parse_args()

    gpu = args.gpu
    population_id = args.population_id
    exploiter = args.exploiter
    inputs = bcolz.open(osp.join(DATA_DIR, "trn_inputs.bcolz"), 'r')
    targets = bcolz.open(osp.join(DATA_DIR, "trn_targets.bcolz"), 'r')
    pathlib.Path('checkpoints').mkdir(exist_ok=True)
    checkpoint_str = "checkpoints/pop-%03d_task-%03d.pth"
Code example #27
File: RecQ.py  Project: SuperSupeng/pythonIsAmazing
    def execute(self):
        #import the algorithm module
        try:
            importStr = 'from algorithm.rating.' + self.config[
                'recommender'] + ' import ' + self.config['recommender']
            exec(importStr)
        except ImportError:
            importStr = 'from algorithm.ranking.' + self.config[
                'recommender'] + ' import ' + self.config['recommender']
            exec(importStr)
        if self.evaluation.contains('-cv'):
            k = int(self.evaluation['-cv'])
            if k <= 1 or k > 10:
                k = 3

            mkl.set_num_threads(max(1, mkl.get_max_threads() // k))

            #create the manager
            manager = Manager()
            m = manager.dict()
            i = 1
            tasks = []

            binarized = False
            if self.evaluation.contains('-b'):
                binarized = True

            for train, test in DataSplit.crossValidation(self.trainingData,
                                                         k,
                                                         binarized=binarized):
                fold = '[' + str(i) + ']'
                if self.config.contains('social'):
                    recommender = self.config[
                        'recommender'] + "(self.config,train,test,self.relation,fold)"
                else:
                    recommender = self.config[
                        'recommender'] + "(self.config,train,test,fold)"
            #create the process
                p = Process(target=run, args=(m, eval(recommender), i))
                tasks.append(p)
                i += 1
            #start the processes
            for p in tasks:
                p.start()
                if not self.evaluation.contains('-p'):
                    p.join()
            #wait until all processes are completed
            if self.evaluation.contains('-p'):
                for p in tasks:
                    p.join()
            #compute the mean error of k-fold cross validation
            self.measure = [dict(m)[i] for i in range(1, k + 1)]
            res = []
            for i in range(len(self.measure[0])):
                if self.measure[0][i][:3] == 'Top':
                    res.append(self.measure[0][i])
                    continue
                measure = self.measure[0][i].split(':')[0]
                total = 0
                for j in range(k):
                    total += float(self.measure[j][i].split(':')[1])
                res.append(measure + ':' + str(total / k) + '\n')
            #output result
            currentTime = strftime("%Y-%m-%d %H-%M-%S", localtime(time()))
            outDir = LineConfig(self.config['output.setup'])['-dir']
            fileName = self.config[
                'recommender'] + '@' + currentTime + '-' + str(
                    k) + '-fold-cv' + '.txt'
            FileIO.writeFile(outDir, fileName, res)
            print('The result of %d-fold cross validation:\n%s' %
                  (k, ''.join(res)))

        else:
            if self.config.contains('social'):
                recommender = self.config[
                    'recommender'] + '(self.config,self.trainingData,self.testData,self.relation)'
            else:
                recommender = self.config[
                    'recommender'] + '(self.config,self.trainingData,self.testData)'
            eval(recommender).execute()
Code example #28
    def get_max_threads_count(self):
        if self.max_threads_count is None:
            self.max_threads_count = mkl.get_max_threads()

        return self.max_threads_count
Code example #29
def run_qso_sims(optim,
                 num_reps=None,
                 verbosity=None,
                 report_phys_params=None,
                 save_results=None,
                 scen_idx=None,
                 reps_idx=None,
                 num_threads=None,
                 num_tslots=None,
                 evo_time=None,
                 fid_err_targ=None,
                 numer_acc=None):
    """
    Attempts a pulse optimisation for the specified number of repetitions
    (num_reps). Where kwargs are not passed the value is taken from the
    configuration, except scen_idx and reps_idx which are only used in
    output file names.

    This function is called from within the main top-level functions
    of this module.

    Returns
    -------
    multires : MultiRepResult
        Containing RepResult object for each repetition.
        The analysis is run on multires, so the averaged statistics are
        available as attributes.
    """

    #print("run_qso_sims\nnum_reps {}, job_idx {}".format(num_reps, job_idx))

    cfg = optim.config
    dyn = optim.dynamics
    tc = optim.termination_conditions
    fid_comp = dyn.fid_computer
    pgen = optim.pulse_generator

    cfg_str = qso.get_cfg_str(optim,
                              num_tslots=num_tslots,
                              evo_time=evo_time,
                              fid_err_targ=fid_err_targ,
                              numer_acc=numer_acc)
    out_file_ext = qso.get_out_file_ext(cfg.data_file_ext,
                                        job_id=cfg.job_id,
                                        scen_idx=scen_idx,
                                        reps_idx=reps_idx)

    if num_reps is None: num_reps = cfg.num_reps
    if verbosity is None: verbosity = cfg.verbosity
    if report_phys_params is None: report_phys_params = cfg.report_phys_params
    if save_results is None: save_results = cfg.save_results
    if num_threads is None: num_threads = cfg.num_threads
    if num_tslots is not None:
        dyn.num_tslots = num_tslots
        pgen.num_tslots = num_tslots
        pgen.tau = None
    if evo_time is not None:
        dyn.evo_time = evo_time
        pgen.pulse_time = evo_time
        pgen.tau = None
    if fid_err_targ is not None: tc.fid_err_targ = fid_err_targ
    if numer_acc is not None: fid_comp.numer_acc = numer_acc

    # Only use stdout for logging messages when first process
    # (which is true when the idx vars are both None or 0)
    base_log = True
    if scen_idx is not None or reps_idx is not None:
        datetimestamp = datetime.datetime.now().strftime('%d%b_%H-%M')
        script_name = "{}-{}.{}".format(cfg_str, datetimestamp, out_file_ext)
        script_path = os.path.join(cfg.output_dir, script_name)
        lfh = open(script_path, 'a')
        base_log = False
    else:
        lfh = sys.stdout

    if verbosity > 0:
        lfh.write("want {} threads per rep\n".format(num_threads))

    try:
        import mkl
        use_mkl = True
    except ImportError:
        use_mkl = False

    if use_mkl:
        mkl.set_num_threads(num_threads)
        if verbosity > 0:
            lfh.write("Number of threads set as {}\n".format(
                mkl.get_max_threads()))
    else:
        if verbosity > 0:
            lfh.write("mkl unavailable\n")

    if verbosity > 0:
        lfh.write("Running {} reps under scen_idx {}, reps_idx {}\n".format(
            num_reps, scen_idx, reps_idx))

    multires = qsoresult.MultiRepResult(tc.fid_err_targ,
                                        fid_comp.local,
                                        num_tslots=num_tslots,
                                        evo_time=evo_time,
                                        numer_acc=numer_acc)

    if verbosity > 2:
        lfh.write("multires optional attribs: num_tslots={}, evo_time={}, "
                  "fid_err_targ={}, numer_acc={}\n".format(
                      multires.num_tslots, multires.evo_time,
                      multires.fid_err_targ, multires.numer_acc))

    # Repetition parameters and results arrays

    # force the random number generator to reseed, as reusing the parent's
    # state would cause issues when using multiprocessing
    np.random.seed()

    # set up the decoupling slots
    # dyn.num_decoup_tslots implies that a specific decoup tslot has been given
    if dyn.num_decoup_tslots is not None:
        if dyn.num_decoup_tslots == 0:
            # assume all timeslots
            dyn.decoup_tslots = np.ones([dyn.num_tslots])
        else:
            dyn.decoup_tslots = np.zeros([dyn.num_tslots])
            dyn.decoup_tslots[:dyn.num_decoup_tslots] = 1

    if verbosity > 2:
        lfh.write("Decoup timeslots: {}\n".format(dyn.decoup_tslots))

    if len(dyn.decoup_tslots) != dyn.num_tslots:
        raise RuntimeError("Number of decoupling tslots {} not equal to "
                           "number of timeslots {}".format(
                               len(dyn.decoup_tslots), dyn.num_tslots))

    try:
        for k in range(num_reps):
            # If hspace_order has random 0 index or 01 separation
            # (relating to the position and separation of the qubits
            # which the 2-qubit gate acts upon) then regenerate the
            # the hspace_order for each repetition
            if dyn.auto_hspace and (dyn.hspace_0_idx < 0
                                    or dyn.hspace_01_sep < 0):
                dyn.hspace_order = qso.get_coupling_hspace(
                    dyn.num_qubits, dyn.hspace_0_idx, dyn.hspace_01_sep)
                if verbosity > 0:
                    lfh.write("reconfiguring drift with hspace_order "
                              "= {}\n".format(dyn.hspace_order))
                dyn.drift_dyn_gen = qso.get_drift(dyn)

            # Generate pulses for each control
            init_amps = np.zeros([dyn.num_tslots, dyn.num_ctrls])
            pgen = optim.pulse_generator
            for j in range(dyn.num_ctrls):
                init_amps[:, j] = pgen.gen_pulse()
            if dyn.decoup_x > 0:
                for i in dyn.Sx_cidx:
                    init_amps[:, i] += dyn.decoup_tslots * dyn.decoup_x
            if dyn.decoup_y > 0:
                for i in dyn.Sy_cidx:
                    init_amps[:, i] += dyn.decoup_tslots * dyn.decoup_y
            if dyn.decoup_z > 0:
                for i in dyn.Sz_cidx:
                    init_amps[:, i] += dyn.decoup_tslots * dyn.decoup_z
            dyn.initialize_controls(init_amps)

            if cfg.save_initial_amps:
                pulsefile = "init_amps_{}_rep{}.{}".format(
                    cfg_str, k + 1, out_file_ext)
                pfpath = os.path.join(cfg.output_dir, pulsefile)
                dyn.save_amps(pfpath, times="exclude")
                if verbosity > 1: lfh.write("Initial amps saved\n")

            if optim.dump:
                optim.dump.clear()
                optim.dump.dump_file_ext = out_file_ext
                optim.dump.fname_base = "optim_dump_rep{}_{}".format(
                    k + 1, cfg_str)
            if dyn.dump:
                dyn.dump.clear()
                dyn.dump.dump_file_ext = out_file_ext
                dyn.dump.fname_base = "dyn_dump_rep{}_{}".format(
                    k + 1, cfg_str)

            if verbosity > 0:
                lfh.write("\nStarting pulse optimisation {} of {}\n".format(
                    k + 1, num_reps))
            if verbosity > 1:
                lfh.write("Max wall time {}\n".format(
                    optim.termination_conditions.max_wall_time))
            optres = optim.run_optimization()

            optres.optim_dump = optim.dump
            optres.dyn_dump = dyn.dump

            repres = multires.add_optim_result(optres)

            if cfg.save_final_amps:
                pulsefile = "final_amps_{}_rep{}.{}".format(
                    cfg_str, k + 1, out_file_ext)
                pfpath = os.path.join(cfg.output_dir, pulsefile)
                dyn.save_amps(pfpath, times="exclude")
                if verbosity > 1: lfh.write("Final amps saved\n")

            if verbosity > 0 and cfg.report_stats:
                lfh.write("Optimising complete. Stats follow:\n")
                optres.stats.report()

            if verbosity > 0:
                lfh.write("********* Summary *****************\n")
                lfh.write("Initial fidelity error {}\n".format(
                    optres.initial_fid_err))
                lfh.write("Final fidelity error {}\n".format(optres.fid_err))
                if fid_comp.local:
                    lfh.write("Final TRUE choi fidelity error {}\n".format(
                        1 - dyn.fid_computer.compute_global_choi_fid()))
                lfh.write("Terminated due to {}\n".format(
                    optres.termination_reason))
                lfh.write("Number of iterations {}\n".format(optres.num_iter))
                lfh.write("Completed in {} HH:MM:SS.US\n".format(
                    datetime.timedelta(seconds=optres.wall_time)))
                lfh.write("Final gradient normal {}\n".format(
                    optres.grad_norm_final))
                lfh.write("***********************************\n")

            if optres.optim_dump:
                if verbosity > 0: lfh.write("Optim dump saved\n")
                optres.optim_dump.writeout()

            if optres.dyn_dump:
                if verbosity > 0: lfh.write("Dynamics dump saved\n")
                optres.dyn_dump.writeout()

            if cfg.keep_optim_result:
                repres.optim_result = optres
            else:
                del optres

    except KeyboardInterrupt as e:
        lfh.write("\nProcessing interrupted\n")
        if not base_log:
            lfh.close()
        raise e

    if verbosity > 0:
        lfh.write("\n***** ALL SEARCHING FINISHED *****\n\n")

    multires.analyse_results()

    if save_results:
        fname = "results_{}.{}".format(cfg_str, out_file_ext)
        fpath = os.path.join(cfg.output_dir, fname)
        with open(fpath, 'w') as fh:
            multires.write_results(fh)
            if verbosity > 0:
                lfh.write("Results saved to:\n{}\n".format(fpath))

    if verbosity > 0:
        lfh.write("\nFull results\n")
        multires.write_results(lfh)
        # Print very short summary
        multires.report_analysis(f=lfh)
        if report_phys_params:
            qso.print_phys_params(optim, f=lfh)

    if not base_log:
        lfh.close()

    return multires
Code example #30
File: mp.py  Project: treverhines/PyGeoNS
def parmap(f,args,workers=None):
  '''  
  evaluates [f(a) for a in args] in parallel

  if workers is 0 then the built-in map is used. If workers is greater 
  than zero then the parent process spawns that many worker processes to 
  evaluate the map. 
  
  Parameters
  ----------
  f : callable

  args : list
    list of arguments to *f*
    
  workers : int, optional
    number of subprocesses to spawn. Defaults to half the available 
    cores plus one

  NOTES
  -----
  If the *mkl* package is installed then this function first sets the 
  maximum number of allowed threads per process to 1. This is to help 
  prevent spawned subprocesses from using multiple cores. The number 
  of allowed threads is reset after all subprocesses have finished.
    
  '''
  if workers is None:
    # default number of processes to have simultaneously running
    workers = cpu_count()//2 + 1

  if workers < 0:
    raise ValueError('number of worker processes must be 0 or greater')
    
  if workers == 0:
    # perform the map on the parent process
    return [f(i) for i in args]

  # attempt to prevent lower level functions from running in parallel
  if _HAS_MKL:
    starting_threads = mkl.get_max_threads()
    mkl.set_num_threads(1)

  # q_in has a max size of 1 so that args is not copied over to 
  # the next process until absolutely necessary
  q_in = Queue(1)
  q_out = Queue()
  # any exceptions found by the child processes are put in this queue 
  # and then raised by the parent
  q_err = Queue()

  # spawn worker processes
  procs = []
  for i in range(workers):
    p = Process(target=_f,args=(f,q_in,q_out,q_err))
    # process is starting and waiting for something to be put on q_in
    p.start()
    procs += [p] 

  submitted_tasks = 0
  for a in args:
    q_in.put((submitted_tasks,a))
    submitted_tasks += 1

  # indicate that nothing else will be added
  for i in range(workers):
    q_in.put(('DONE',None))


  # allocate list of Nones and then fill it in with the results
  val_list = [None for i in range(submitted_tasks)]
  err_list = [None for i in range(submitted_tasks)]
  for i in range(submitted_tasks):
    idx,err = q_err.get()
    err_list[idx] = err
    idx,val = q_out.get()
    val_list[idx] = val

  # terminate all processes
  for p in procs:
    p.join()

  # close queues
  q_in.close()
  q_out.close()
  q_err.close()

  # raise an error if any were found
  if any([e is not None for e in err_list]):
    raise ParmapError(err_list)

  # reset the number of threads to its original value
  if _HAS_MKL:
    mkl.set_num_threads(starting_threads)
    
  return val_list
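
A minimal usage sketch for parmap (illustrative; it relies on the module-level _f worker helper and ParmapError that mp.py defines elsewhere):

def square(x):
    return x * x

# Hypothetical call: evaluate square over 0..7 using two worker processes.
results = parmap(square, range(8), workers=2)
print(results)  # [0, 1, 4, 9, 16, 25, 36, 49]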
Code example #31
def show_info():
    try:
        import mkl
        print "MKL MAX THREADS:", mkl.get_max_threads()
    except ImportError:
        print "MKL NOT INSTALLED"
Code example #32
    except ImportError:
        pass


logger = log_utils.setup_logger('style_transfer')


def set_thread_count(threads):
    """Sets the maximum number of MKL threads for this process."""
    if MKL_THREADS is not None:
        mkl.set_num_threads(max(1, threads))


try:
    import mkl
    MKL_THREADS = mkl.get_max_threads()
    set_thread_count(1)
except ImportError:
    pass


class StatLogger:
    """Collects per-iteration statistics to be written to a CSV file on exit."""
    def __init__(self):
        self.lock = CTX.Lock()
        self.stats = []
        self.start_time = None

    def update_current_it(self, **kwargs):
        with self.lock:
            self.stats[-1].update(kwargs)
Code example #33
    def __init__(
            self,
            name=None,
            physics=None,
            nVars=1,
            realVars=False,
            # Grid resolution and extent
            nx=64,
            ny=None,
            Lx=2.0 * pi,
            Ly=None,
            # Solver parameters
            t=0.0,
            dt=1.0e-2,  # Fixed numerical time-step.
            step=0,  # Initial or current step
            timeStepper="forwardEuler",  # Time-stepping method
            nThreads=1,  # Number of threads for FFTW
            useFilter=False,  # Use exp filter rather than dealiasing
    ):

        # Default grid is square when user specifies only nx
        if Ly is None: Ly = Lx
        if ny is None: ny = nx

        # Default 'name' is the name of the script that runs the model
        if name is None:
            scriptName = os.path.basename(sys.argv[0])
            self.name = scriptName[:-3]  # Remove .py from the end.
        else:
            self.name = name

        self.physics = physics
        self.nVars = nVars
        self.realVars = realVars

        self.nx = nx
        self.ny = ny
        self.Lx = Lx
        self.Ly = Ly

        self.t = t
        self.dt = dt
        self.step = step

        self.timeStepper = timeStepper
        self.useFilter = useFilter

        if nThreads == 'maximum' or nThreads > mkl.get_max_threads():
            self.nThreads = mkl.get_max_threads()
        else:
            self.nThreads = nThreads

        self._input = {
            key: value
            for key, value in self.__dict__.items()
            if type(value) in (str, float, int,
                               bool) and key not in ('realVars', 'nVars',
                                                     'physics')
        }

        np.use_fastnumpy = True
        mkl.set_num_threads(self.nThreads)

        # Initialization routines defined in doublyPeriodic Base Class
        self._init_numerical_parameters()
        self._init_fft()

        # Initialization routines defined in the physical problem's subclass
        self._init_problem_parameters()
        self._init_linear_coeff()

        # Initialize the time-stepper
        self._timeStepper = getattr(timeStepping.methods,
                                    self.timeStepper)(self)
        self._step_forward = self._timeStepper.step_forward
Code example #34
import time
import numpy as np
import matplotlib.pyplot as plt

import sklearn.cross_decomposition

try:
    import pycifa
except ImportError:
    import sys
    sys.path.append('../')
try:
    import mkl
    # global
    _MAX_NUM_OF_THREADS = mkl.get_max_threads()
    mkl.set_num_threads(_MAX_NUM_OF_THREADS)
except ImportError:
    pass

from pycifa import JIVE
from pycifa import PMFsobi
from pycifa import cobe
from pycifa import cobec
from pycifa import call_mcca
from pycifa import CalcSIR

from pycifa.utils import princomp
from pycifa.utils import addGaussianNoise

from pycifa.tools import loadmat
Code example #35
File: mkl.py  Project: lelegan/modl
 def __init__(self, num_threads):
     self._old_num_threads = mkl.get_max_threads()
     self.num_threads = num_threads
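
Only the __init__ of this helper is captured above; a plausible completion of the pattern it implements (a context manager that temporarily caps the number of MKL threads) is sketched below. The class name and the __enter__/__exit__ bodies are assumptions for illustration, not the project's verbatim code.

import mkl


class num_threads:
    """Sketch: temporarily limit MKL to a given number of threads."""

    def __init__(self, num_threads):
        self._old_num_threads = mkl.get_max_threads()
        self.num_threads = num_threads

    def __enter__(self):
        mkl.set_num_threads(self.num_threads)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Restore the thread count recorded in __init__, even if an exception occurred.
        mkl.set_num_threads(self._old_num_threads)
        return False


# Illustrative usage:
# with num_threads(1):
#     ...  # BLAS-heavy NumPy code runs single-threaded here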
Code example #36
File: test.py  Project: s4ndhyac/lahacks-safeLA
 def test_get_max_threads(self):
     self.assertTrue(isinstance(mkl.get_max_threads(), int))
Code example #37
File: DMFX_Backtest_Ben.py  Project: bengitget/ss

# In[2]:

start_time = time.time()


# In[3]:

# add Eureka to path
if os.name == 'nt':
    sys.path.append(os.path.abspath('../../eureka'))
    
if os.name == 'posix':
    import mkl
    mkl.set_num_threads(mkl.get_max_threads())


# In[4]:

import pandas as pd
import numpy as np
import datetime as dt


# In[5]:

from eureka.risk import calc_ewma_riskmodel
from eureka.signal import calc_zscore, score_to_alpha
from eureka.optimize import mean_variance_optimizer
from eureka.backtest import backtest_metrics
Code example #38
	matfile = h5py.File(datapath + "diffusedvals.h5")
	for i in range(0,96):
		print(i)
		mats += [matfile["/traj-slot-" + str(i).zfill(3) + "-set-" + str(setselect).zfill(3)][:]]
	matfile.close()

	p = mp.Pool(processes=threads);
	g = df.groupby("agentnum")
	# print(len(g))
	out = p.map(processtraj,g,chunksize=100)
	
	p.close();

	arr = np.concatenate(out);
	outfile = h5py.File(datapath + "finalexptraj-"+str(setselect)+".h5")
	ds = outfile.create_dataset("/exptraj",data=arr,fillvalue=0.,compression='gzip',compression_opts=9)
	ds = outfile.create_dataset("/slist",data=slist,compression='gzip',compression_opts=9)
	outfile.close();

	# print("plotting")
	# for i in out: 
	# 	plt.plot(i,alpha=0.5,linewidth=0.1,color='k')

	# plt.show();



if __name__ == "__main__":
	threads = mkl.get_max_threads();
	threads = 8;
	main(threads)
Code example #39
File: run_parallel.py  Project: dengemann/aws-hacks
Example:

```bash
run_parallel.py my_script --par_args subject1 subject2 subject3 \\
    --par_target subject --args --n_jobs 2
```
"""

import shlex
import subprocess
from argparse import ArgumentParser, REMAINDER
import multiprocessing
n_cpus = multiprocessing.cpu_count()
try:
    import mkl
    n_threads = mkl.get_max_threads()
except ImportError:
    n_threads = 1
n_par = n_cpus // n_threads

parser = ArgumentParser(description='Run script in distributed fashion')
parser.add_argument(
    '--script', metavar='script', type=str, nargs='?',
    help='The name of the script to launch', required=True)
parser.add_argument(
    '--par_args', metavar='par_args', type=str, nargs='+',
    help='multiple values to parallelize over', required=True)
parser.add_argument(
    '--par_target', metavar='par_target', type=str, nargs='?',
    help='the target variable to which parallel values are passed',
    required=True)
Code example #40
File: mp.py  Project: treverhines/ModEst
def parmap(f,args,workers=None):
  '''  
  evaluates [f(a) for a in args] in parallel

  if workers is 0 then the built-in map is used. If workers is greater 
  than zero then the parent process spawns that many worker processes to 
  evaluate the map.
  '''
  starting_threads = mkl.get_max_threads()
  if workers is None:
    # starting_threads is a good estimate for the number of processes 
    # that can be simultaneously running
    workers = starting_threads

  if workers < 0:
    raise ValueError('number of worker processes must be 0 or greater')
    
  if workers == 0:
    # use the built-in sequential map 
    return list(map(f, args))

  # make sure that lower level functions are not running in parallel
  mkl.set_num_threads(1)

  # q_in has a max size of 1 so that args is not copied over to 
  # the next process until absolutely necessary
  q_in = Queue(1)
  q_out = Queue()
  # any exceptions found by the child processes are put in this queue 
  # and then raised by the parent
  q_err = Queue()

  # spawn worker processes
  procs = []
  for i in range(workers):
    p = Process(target=_f,args=(f,q_in,q_out,q_err))
    # process is starting and waiting for something to be put on q_in
    p.start()
    procs += [p] 

  submitted_tasks = 0
  for a in args:
    q_in.put((submitted_tasks,a))
    submitted_tasks += 1

  # indicate that nothing else will be added
  for i in range(workers):
    q_in.put(('DONE',None))


  # allocate list of Nones and then fill it in with the results
  val_list = [None for i in range(submitted_tasks)]
  err_list = [None for i in range(submitted_tasks)]
  for i in range(submitted_tasks):
    idx,err = q_err.get()
    err_list[idx] = err
    idx,val = q_out.get()
    val_list[idx] = val

  # terminate all processes
  for p in procs:
    p.join()

  # close queues
  q_in.close()
  q_out.close()
  q_err.close()

  # raise an error if any were found
  if any([e is not None for e in err_list]):
    raise ParmapError(err_list)

  # reset the number of threads to its original value
  mkl.set_num_threads(starting_threads)
  return val_list