Example #1
def run(expt, display=False):
    if isinstance(expt, str):
        expt = get_experiment(expt)

    storage.ensure_directory(expt.figures_dir())
    storage.ensure_directory(expt.outputs_dir())

    mkl.set_num_threads(1)
        
    v = gnp.garray(expt.dataset.load().as_matrix())
    v = 0.999 * v + 0.001 * 0.5

    if expt.permute:
        idxs = np.random.permutation(v.shape[0])
        v = v[idxs]

    if display:
        expt.diagnostics += ['objective']
        expt.show_after = expt.save_after
    visuals = Visuals(expt, v)


    if expt.init_rbm == 'base_rates':
        init_rbm = None
    elif isinstance(expt.init_rbm, TrainedRBM):
        init_rbm = load_trained_rbm(expt.init_rbm.location).convert_to_garrays()
    else:
        raise RuntimeError('Unknown init_rbm')

    assert isinstance(expt.training, rbm_training.TrainingParams)
    rbm_training.train_rbm(v, expt.nhid, expt.training, after_step=visuals.after_step, init_rbm=init_rbm)
Example #2
def handle_mkl(max_threads):
    """Set max threads if mkl is availavle"""
    try:
        import mkl
        mkl.set_num_threads(max_threads)
    except ImportError:
        pass
def configure(num_jobs=8, TEST=False, subtract=0, num_proc=None, num_thread_per_proc=None):
    '''
    num_jobs is typically the # of genes we are parallelizing over
    '''
    if num_proc is None:
        num_proc = multiprocessing.cpu_count() - subtract

    if num_jobs > num_proc:
        num_jobs = num_proc

    if num_thread_per_proc is None:
        num_thread_per_proc = int(np.floor(num_proc/num_jobs))

    if TEST:
        num_jobs = 1
        num_thread_per_proc = 1

    try:
        import mkl
        mkl.set_num_threads(num_thread_per_proc)    
    except ImportError:
        print "MKL not available, so I'm not adjusting the number of threads"

    print "Launching %d jobs with %d MKL threads each" % (num_jobs, num_thread_per_proc)

    return num_jobs
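# Hypothetical usage sketch for configure() above (assumes numpy and
# multiprocessing are imported at module level, as the snippet implies):
# split 16 gene-level jobs across the machine and keep the MKL thread count
# that configure() chose for each worker.
if __name__ == '__main__':
    import multiprocessing
    import numpy as np

    num_jobs = configure(num_jobs=16, subtract=1)
    pool = multiprocessing.Pool(processes=num_jobs)
    results = pool.map(np.linalg.det, [np.random.rand(50, 50) for _ in range(16)])
    pool.close()
    pool.join()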
Example #4
def silhouette_original_clusterings(dataset='CB1', neuropil='Antennal_lobe', clusterer_or_k=60):
    """Returns a pandas dataframe with the silhouette index of each cluster member.
    The dataframe has columns (cluster_id, member_id, silhouette).
    """

    # Read the expression matrix
    print('Reading expression matrix')
    Xdf = ExpressionDataset.dataset(dset=dataset, neuropil=neuropil).Xdf(index_type='string')

    # Generate a flat map cluster_id -> members
    print('Finding cluster assignments')
    clusters_df, _ = get_original_clustering(dataset=dataset, neuropil=neuropil,
                                             clusterer_or_k=clusterer_or_k)
    dfs = []
    for cluster_id, members in zip(clusters_df.cluster_id,
                                   clusters_df.original_voxels_in_cluster):
        dfs.append(pd.DataFrame({'cluster_id': cluster_id, 'member_id': members}))
    members_df = pd.concat(dfs).set_index('member_id').loc[Xdf.index]

    # Compute the distance matrix - this must be parameterised
    print('Computing distance')
    import mkl
    mkl.set_num_threads(6)
    D = dicedist_metric(Xdf)

    # Compute silhouette
    # Here we could go for the faster implementation in third_party, if needed
    print('Computing silhouette index')
    members_df['silhouette'] = silhouette_samples(D.values,
                                                  members_df.cluster_id.values,
                                                  metric='precomputed')
    return (members_df.
            reset_index().
            rename(columns=lambda col: {'index': 'member_id'}.get(col, col))
            [['cluster_id', 'member_id', 'silhouette']])
Example #5
def run_all(nsubs, nrois, nthreads=2):
    import mkl
    mkl.set_num_threads(nthreads)
    
    print "setup"
    cmats, design, grp = gen_data(nsubs=nsubs, nrois=nrois, nvoxs=nrois)
    
    print "degree"
    dall = time_fun(local_degree, cmats, design)
    
    print "glm"
    gall = time_fun(local_glm, cmats, design)
    
    print "mdmr"
    mall = time_fun(local_mdmr, cmats, design)
    
    print "svm"
    sall = time_fun(local_svm, cmats, grp)
    
    print "kmeans"
    kall = time_fun(local_kmeans, cmats, grp)
    
    print "end"
    times = np.vstack((dall, gall, mall, sall, kall))
    
    return times
Example #6
def pool_threading(nthreads=None):
    if nthreads is None:
        nthreads = omp_num_threads()
    try:
        import mkl
        old_mkl_num_threads = mkl.get_max_threads()
        mkl.set_num_threads(1)
    except ImportError:
        pass
    old_omp_num_threads = os.getenv('OMP_NUM_THREADS')
    os.environ['OMP_NUM_THREADS'] = '1'

    pool = multiprocessing.dummy.Pool(nthreads)
    yield pool

    pool.close()
    pool.join()
    try:
        mkl.set_num_threads(old_mkl_num_threads)
    except NameError:
        pass
    if old_omp_num_threads is not None:
        os.environ['OMP_NUM_THREADS'] = old_omp_num_threads
    else:
        del os.environ['OMP_NUM_THREADS']
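# Hedged note: pool_threading() above has the setup/yield/teardown shape of a
# context manager, so its original module presumably applies
# contextlib.contextmanager to it.  A usage sketch under that assumption
# (os and multiprocessing.dummy are assumed imported, as the function requires):
from contextlib import contextmanager

pool_threading_cm = contextmanager(pool_threading)  # hypothetical wrapper

def _single_threaded_work(x):
    return x * x  # stand-in for a task that should not spawn extra threads

with pool_threading_cm(nthreads=4) as pool:
    # inside the block MKL and OpenMP are pinned to one thread each
    squares = pool.map(_single_threaded_work, range(16))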
def fftvec(vec):
    """
    performs a fft on a vector with 3 components in the first index position
    This is really just a wrapper for fft, fftn and their inverses
    """
    try:
        from anfft import fft, fftn
        fft_type = 1
    except:
#        print "Could not import anfft, importing scipy instead."
#Update 9/18/2013 -- numpy with mkl is way faster than scipy
        import mkl
        mkl.set_num_threads(8)
        from numpy.fft import fft, fftn
        fft_type = 0
        
    if force_gpu:
        fft_type = 2 #set gpu fft's manually -- not sure what an automatic way would look like
    
    from numpy import complex64, shape, array, empty
    if vec.ndim > 2:
        if vec.shape[0] == 3:
            # "Vector": first index has size 3 so fft the other columns
            if fft_type==1:
                return array([fftn(i,measure=True) for i in vec]).astype(complex64)
#                result = empty(vec.shape, dtype=complex64)
#                result[0] = fftn(vec[0], measure=True).astype(complex64)
#                result[1] = fftn(vec[1], measure=True).astype(complex64)
#                result[2] = fftn(vec[2], measure=True).astype(complex64)
#                return result
                
            elif fft_type==0:
                return fftn(vec, axes=range(1,vec.ndim)).astype(complex64)
            elif fft_type==2:
#                return array([gpu_fft(i) for i in vec.astype(complex64)])
                result = empty(vec.shape, dtype=complex64)
                result[0] = gpu_fft(vec[0].copy())
                result[1] = gpu_fft(vec[1].copy())
                result[2] = gpu_fft(vec[2].copy())
                return result
        else: # "Scalar", fft the whole thing
            if fft_type==1:
                return fftn(vec,measure=True).astype(complex64)
            elif fft_type==0:
                return fftn(vec).astype(complex64)
            elif fft_type==2:
                return gpu_fft(vec.copy())
    elif vec.ndim == 1: #Not a vector, so use fft
        if fft_type==1:
            return fft(vec,measure = True).astype(complex64)
        elif fft_type==0:
            return fft(vec).astype(complex64)
        elif fft_type==2:
            return gpu_fft(vec.astype(complex64))
    else:
        #0th index is 3, so its a vector
        #return fft(vec, axis=1).astype(complex64)
        return array([fft(i) for i in vec])
Example #8
def set_thread_count(thread_count):
    ''' Set the number of threads to be used by OpenMP
    
    :Parameters:
    
    thread_count : int
                   Number of threads to be used by OpenMP
    '''
    
    if mkl is not None: mkl.set_num_threads(thread_count)
    _omp.set_num_threads(thread_count)
Example #9
def parallel_loop(args):

    import numpy as np
    import time

    import pysparsefht
    from utils import random_k_sparse

    try:
        import mkl as mkl_service
        # for such parallel processing, it is better 
        # to deactivate multithreading in mkl
        mkl_service.set_num_threads(1)
    except ImportError:
        pass

    n = args[0]

    b = np.arange(1, n-1)
    K = 2**b
    B = 2**b

    # compute value of C (slice bounds must be integers)
    C = np.empty_like(b)
    C[:int(np.floor(n/3))] = n / b[:int(np.floor(n/3))]
    C[int(np.floor(n/3)):int(np.floor(2*n/3))] = 3
    C[int(np.floor(2*n/3)):] = n / (n - b[int(np.floor(2*n/3)):])

    algo_name = params['algo_name']
    seed = args[1]

    if algo_name == 'RANDOM':
        algo = pysparsefht.ALGO_RANDOM
    elif algo_name == 'DETERMINISTIC':
        algo = pysparsefht.ALGO_OPTIMIZED
    else:
        raise ValueError('No such algorithm.')

    # initialize rng
    np.random.seed(seed)

    # a list for return values
    ret = []

    # generate a seed for the C RNG
    C_seed = np.random.randint(4294967295, dtype=np.uint32)

    # run the benchmark
    Tsfht, Tfht = pysparsefht.benchmark(K, B, C, 2**n, 
            loops=params['inner_loops'], warm=params['warm'], body=params['body'], max_mag=params['max_mag'],
            sfht_max_iter=params['max_iter'], seed=C_seed)

    return [Tsfht, Tfht, b, C]
def set_num_threads(nt):
    try:
        import mkl
        mkl.set_num_threads(nt)
    except Exception as e:
        print('Unable to set numthreads in mkl: ' + str(e))

    cv2.setNumThreads(nt)

    nt = str(nt)
    os.environ['OPENBLAS_NUM_THREADS'] = nt
    os.environ['NUMEXPR_NUM_THREADS'] = nt
    os.environ['OMP_NUM_THREADS'] = nt
    os.environ['MKL_NUM_THREADS'] = nt
Example #11
    def _initialize_fft(self):

        """ Define the two-dimensional FFT methods.
        """

        if self.use_mkl:
            import mkl
            mkl.set_num_threads(self.nthreads)
            import mkl_fft
            self.fft =  (lambda x : mkl_fft.fft2(x))
            self.ifft = (lambda x : mkl_fft.ifft2(x))
        else:
            self.fft =  (lambda x : np.fft.fft2(x))
            self.ifft = (lambda x : np.fft.ifft2(x))
Example #12
def set_num_threads(nt):
    "Get numpy (and others) to use `nt` threads"
    try:
        import mkl
        mkl.set_num_threads(nt)
    except:
        pass
    torch.set_num_threads(1)
    os.environ['IPC_ENABLE'] = '1'
    for o in [
            'OPENBLAS_NUM_THREADS', 'NUMEXPR_NUM_THREADS', 'OMP_NUM_THREADS',
            'MKL_NUM_THREADS'
    ]:
        os.environ[o] = str(nt)
Example #13
def parallel_montecarlo(filename, mapper, reducer, jobs_params, n_repetitions, seed=None, n_cpu = None):
    """
    This function implements a basic map-reduce framework based on multiprocessing.Pool.

    Inputs:
        filename - name of output, where the results will be saved as a pickle.
        mapper - the function that runs the computational job, given an element of jobs_params.
        reducer - function for aggregating n_repetitions runs of a job.
        jobs_params - list of job parameters.
        n_repetitions - number of times to run each job.
        seed - Random seed to be used. To have reproducible results, always specify a seed.
        n_cpu - number of processes to use. The default is to use all available cores.

    Outputs:
        reduced_results - output of reducer on the various simulations
        Also, the results will be saved to a pickle file

    Example: (this computes the means of 3 normal random variables with different means)
    
    >> parallel_montecarlo('testing', numpy.random.normal, numpy.mean, [-1,0,+1], 1000)
        n_cpu: 4
        Saving to ./pickles/testing.pickle.gz
        Saved fields:  n_repetitions, name, results, seed, xs
        Out[14]: [-0.9465148770830919, 0.03763575004851667, 1.056358627427924]
    """
    mkl.set_num_threads(1)
    if n_cpu is None:
        n_cpu = get_n_cpu()
    #print(f'n_cpu: {n_cpu}')

    SEED = seed if seed is not None else 0
    N_SEED_INTS = 4
    mkl_random.seed(SEED)
    iteration_parameters = zip(mkl_random.randint(0, 2**32, size=(len(jobs_params)*n_repetitions, N_SEED_INTS)), itertools.cycle(jobs_params)) 

    wrapped_job_computation_func = functools.partial(set_random_seed_and_apply_func, mapper)
    if n_cpu == 1:
        results = list(itertools.starmap(wrapped_job_computation_func, iteration_parameters))
    else:
        with multiprocessing.Pool(processes=n_cpu) as p:
            results = list(p.starmap(wrapped_job_computation_func, iteration_parameters))

    results_grouped_by_params = [results[i::len(jobs_params)] for i in range(len(jobs_params))]
    reduced_results = list(map(reducer, results_grouped_by_params))

    if filename is not None:
        pickler.dump(filename, name=filename, xs=jobs_params, results=np.array(reduced_results), n_repetitions=n_repetitions, seed=SEED)

    return reduced_results
Example #14
def set_fft_threads(n):
    """Sets number of threads used in fft functions."""
    out = CDDMConfig.fft_threads
    CDDMConfig.fft_threads = int(n)
    try:
        import mkl
        mkl.set_num_threads(n)
    except ImportError:
        pass
    try:
        import pyfftw
        pyfftw.config.NUM_THREADS = n
    except ImportError:
        pass
    return out
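# Hypothetical usage of set_fft_threads() above: it returns the previous
# setting, so the thread count can be raised temporarily and then restored.
previous = set_fft_threads(8)
try:
    pass  # ... run the FFT-heavy computation here ...
finally:
    set_fft_threads(previous)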
Example #15
def dtest(n=50, d=0.0, r=0.0, model_covariate=True, niters=100, nperms=4999):
    import mkl
    mkl.set_num_threads(2)
    
    d   = float(d)
    r   = float(r)
        
    # Data/Distances
    pvals = np.zeros(niters)
    Fvals = np.zeros(niters)
    for i in xrange(niters):
        # Design
        
        ## Categorical
        gp  = np.repeat([0, 1], n/2)
        np.random.shuffle(gp)
        x   = gp*d + np.random.standard_normal(n)
        
        ## Continuous
        # see http://stackoverflow.com/questions/16024677/generate-correlated-data-in-python-3-3
        # and http://stats.stackexchange.com/questions/19367/creating-two-random-sequences-with-50-correlation?lq=1
        uncorrelated    = np.random.standard_normal((2,n))
        motion          = uncorrelated[0]
        y               = r*motion + np.sqrt(1-r**2)*uncorrelated[1]
        
        ## Design Matrix
        if model_covariate:
            design = np.vstack((np.ones(n), gp, motion)).T
        else:
            design = np.vstack((np.ones(n), gp)).T
                
        # Data
        points = np.vstack((x,y)).T
        
        # Distances
        dmat  = euclidean_distances(points)
        dmats = dmat[np.newaxis,:,:]
        
        # Only the group effect is the variable of interest
        cols = [1]
        
        # Call MDMR
        pval, Fval, _, _ = mdmr(dmats, design, cols, nperms)
        
        pvals[i] = pval
        Fvals[i] = Fval
    
    return pvals, Fvals
Example #16
def dtest(n=50, d=0.0, r=0.0, model_covariate=True, niters=100, nperms=4999):
    import mkl
    mkl.set_num_threads(2)

    d = float(d)
    r = float(r)

    # Data/Distances
    pvals = np.zeros(niters)
    Fvals = np.zeros(niters)
    for i in xrange(niters):
        # Design

        ## Categorical
        gp = np.repeat([0, 1], n / 2)
        np.random.shuffle(gp)
        x = gp * d + np.random.standard_normal(n)

        ## Continuous
        # see http://stackoverflow.com/questions/16024677/generate-correlated-data-in-python-3-3
        # and http://stats.stackexchange.com/questions/19367/creating-two-random-sequences-with-50-correlation?lq=1
        uncorrelated = np.random.standard_normal((2, n))
        motion = uncorrelated[0]
        y = r * motion + np.sqrt(1 - r**2) * uncorrelated[1]

        ## Design Matrix
        if model_covariate:
            design = np.vstack((np.ones(n), gp, motion)).T
        else:
            design = np.vstack((np.ones(n), gp)).T

        # Data
        points = np.vstack((x, y)).T

        # Distances
        dmat = euclidean_distances(points)
        dmats = dmat[np.newaxis, :, :]

        # Only the group effect is the variable of interest
        cols = [1]

        # Call MDMR
        pval, Fval, _, _ = mdmr(dmats, design, cols, nperms)

        pvals[i] = pval
        Fvals[i] = Fval

    return pvals, Fvals
Example #17
def set_numpy_threads(num_threads):
    # Currently only MKL is supported on Windows as it's installed alongside
    # the other packages via conda. A "traditional" virtual environment requires
    # access to a compiler and other libraries for successful compilation.
    if platform.system().lower() == 'windows':
        import mkl
        mkl.set_num_threads(num_threads)
        return mkl.get_max_threads(), MKL_LIBRARY

    candidates, other_candidates = _identify_library_paths()
    if len(candidates) > 0:
        set_threads, library = _set_numpy_threads(candidates, num_threads)
    else:
        set_threads, library = _set_numpy_threads(other_candidates,
                                                  num_threads)
    return set_threads, library
Example #18
def set_num_threads(nt: int = 1):
    """
    Set some machine parameters which control
    the number of threads being spawned.
    
    Default is set to 1.
    
    """
    import os
    import mkl
    mkl.set_num_threads(nt)
    nt = str(nt)
    os.environ['OPENBLAS_NUM_THREADS'] = nt
    os.environ['NUMEXPR_NUM_THREADS'] = nt
    os.environ['OMP_NUM_THREADS'] = nt
    os.environ['MKL_NUM_THREADS'] = nt
Example #19
def prep_cwas_workflow(c, subject_infos):
    from CPAC.cwas import create_cwas
    import numpy as np
    
    try:
        import mkl
        mkl.set_num_threads(c.cwasThreads)
    except ImportError:
        pass
    
    print 'Preparing CWAS workflow'
    p_id, s_ids, scan_ids, s_paths = (list(tup) for tup in zip(*subject_infos))
    print 'Subjects', s_ids
    
    # Read in list of subject functionals
    lines   = open(c.cwasFuncFiles).readlines()
    spaths  = [ l.strip().strip('"') for l in lines ]
    
    # Read in design/regressor file
    regressor = np.loadtxt(c.cwasRegressorFile)

    wf = pe.Workflow(name='cwas_workflow')
    wf.base_dir = c.workingDirectory
    
    cw = create_cwas()
    cw.inputs.inputspec.roi         = c.cwasROIFile
    cw.inputs.inputspec.subjects    = spaths
    cw.inputs.inputspec.regressor   = regressor
    cw.inputs.inputspec.cols        = c.cwasRegressorCols
    cw.inputs.inputspec.f_samples   = c.cwasFSamples
    cw.inputs.inputspec.strata      = c.cwasRegressorStrata # will stay None?
    cw.inputs.inputspec.parallel_nodes = c.cwasParallelNodes
    cw.inputs.inputspec.memory_limit = c.cwasMemory
    cw.inputs.inputspec.dtype       = c.cwasDtype
    
    ds = pe.Node(nio.DataSink(), name='cwas_sink')
    out_dir = os.path.dirname(s_paths[0]).replace(s_ids[0], 'cwas_results')
    ds.inputs.base_directory = out_dir
    ds.inputs.container = ''

    wf.connect(cw, 'outputspec.F_map',
               ds, 'F_map')
    wf.connect(cw, 'outputspec.p_map',
               ds, 'p_map')

    wf.run(plugin='MultiProc',
                         plugin_args={'n_procs': c.numCoresPerSubject})
Example #20
def set_num_threads(nt=1, disp=1):
    """see https://github.com/numbbo/coco/issues/1919
    and https://twitter.com/jeremyphoward/status/1185044752753815552
    """
    try:
        import mkl
    except ImportError:
        disp and print("mkl is not installed")
    else:
        mkl.set_num_threads(nt)
    nt = str(nt)
    for name in [
            'OPENBLAS_NUM_THREADS', 'NUMEXPR_NUM_THREADS', 'OMP_NUM_THREADS',
            'MKL_NUM_THREADS'
    ]:
        os.environ[name] = nt
    disp and print("setting mkl threads num to", nt)
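# Hypothetical usage of set_num_threads() above: pin every numerical backend to
# a single thread in the parent before forking workers, then restore a larger
# count for single-process post-processing afterwards.
set_num_threads(1, disp=0)
# ... spawn multiprocessing workers here ...
set_num_threads(4, disp=0)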
Example #21
def run_generation_games(gen_dir, ga_popsize, my_network, my_timeout, train_env, cst_decks, ini_stack, sb_amount, nb_hands, q):
    mkl.set_num_threads(64)
    # Neural network layer size reference
    ref_full_dict = DeepBot(network=my_network).full_dict
    #Empty jobs list
    jobs = []
    for bot_id in range(1,ga_popsize+1):
        #Load the bot
        with open(gen_dir+'/bots/'+str(bot_id)+'/bot_'+str(bot_id)+'_flat.pkl', 'rb') as f:
            deepbot_flat = pickle.load(f)
            deepbot_dict = get_full_dict(all_params = deepbot_flat, ref_full_dict = ref_full_dict)
            deepbot = DeepBot(id_=bot_id, network=my_network, full_dict = deepbot_dict)
        #Enqueue job to play bot's games
        try:
            jobs.append(q.enqueue(run_games, timeout=my_timeout, kwargs = dict(train_env=train_env, deepbot=deepbot, cst_decks = cst_decks, ini_stack = ini_stack, sb_amount=sb_amount, nb_hands = nb_hands)))
        except ConnectionError:
            print('Currently not connected to redis server')
            continue

    last_enqueue_time = time.time()
    # Fetch jobs' statusses every second
    while True:
        for i in range(len(jobs)):
            if jobs[i].result is not None and not isinstance(jobs[i], FakeJob):
                jobs[i] = FakeJob(jobs[i])
        all_earnings = [j.result for j in jobs]
        time.sleep(1) #1 second
        # If all jobs are done, break
        if None not in all_earnings:
            break
        # If jobs are not finished after timeout threshold, reenqueue.
        # Helps when connection occasionaly breaks. May also be the sign of an error in u_training_games.py.
        if time.time() - last_enqueue_time > my_timeout:
            print('Reenqueuing unfinished jobs: ' + str(sum(x is None for x in all_earnings)))
            for i in range(len(jobs)):
                if jobs[i].result is None:
                    try:
                        jobs[i].cancel()
                        jobs.append(q.enqueue(run_games, timeout=my_timeout, kwargs = dict(train_env=train_env, deepbot=deepbot, cst_decks = cst_decks, ini_stack = ini_stack, sb_amount=sb_amount, nb_hands = nb_hands)))
                    except ConnectionError:
                        print('Currently not connected to redis server')
                        continue
            last_enqueue_time = time.time()
            if verbose:
                print("Number of jobs remaining: " + str(sum([all_earnings[i]==None for i in range(len(all_earnings))])))
    return all_earnings
Example #22
    def __enter__(self):
        """
        Construct the run time environment
        """

        mkl.set_num_threads(self._threads_num)

        if self._rd:
            file_name = "Log {0}.txt".format(strftime("%Y-%m-%d %H-%M-%S"))
            fp = open(self._path / file_name, 'w', buffering=1)
            self._stdout = sys.stdout
            sys.stdout = self._fp = fp

            print("Entering run time environment at {0}".format(
                strftime(TIME_FORMAT)))
            print("=" * 80, flush=True)
        return self
Example #23
def _set_external_fft_threads(n):
    if DTMMConfig.thread_pool:
        #disable native threading if we are to use threadpool
        num = 1
    else:
        num = n
    try:
        import mkl
        mkl.set_num_threads(num)
    except ImportError:
        pass
    try:
        import pyfftw
        #threadpool does not seem to work properly with pyfftw, so we use n and disable it in fft.py
        pyfftw.config.NUM_THREADS = n
    except ImportError:
        pass
Example #24
def dtest(pos_nodes=0, effect=0.0, dist="euclidean", n=100, nodes=400, nperms=4999, iters=100):
    import mkl
    mkl.set_num_threads(2)
    
    print "Start"

    #print "Categorical Effect"
    grp  = np.repeat([0, 1], n/2)
    np.random.shuffle(grp)

    #print "Design Matrix"
    design = np.vstack((np.ones(n), grp)).T
    cols = [1]

    #print "Distance Matrices"
    dmats = np.zeros((iters,n,n))
    for i in xrange(iters):
        #if (i % 10) == 0:
        #    print i,
        # Data
        ## First, create the matrix with the random data
        points = np.random.standard_normal((n,nodes))
        ## Second, select a random subset of nodes to add the effect to
        neg_nodes = nodes - pos_nodes
        change_nodes = np.repeat([0,1], [neg_nodes, pos_nodes])
        np.random.shuffle(change_nodes)
        ## Finally, add the effect to the selected subjects and nodes
        ## (use j here so the outer loop index i is not clobbered)
        for j in (change_nodes==1).nonzero()[0]:
            points[grp==1,j] += effect
        
        # Compute Distances
        if dist == "euclidean":
            dmat    = euclidean_distances(points)
        elif dist == "pearson":
            dmat    = compute_distances(points)
        else:
            raise Exception("Unknown distance measure %s" % dist)
        dmats[i]    = dmat
    #print ""

    #print "MDMR"
    pvals = []; Fvals = [];
    pvals, Fvals, _, _ = mdmr(dmats, design, cols, nperms)
    
    #print "Done"
    return pvals, Fvals
Example #25
def _initialize_mp_worker(mkey, func, threads, log_queue):
    _initialize_worker(log_queue)
    global __work_model, __work_func, __is_mp_worker
    __work_model = mkey
    __work_func = func
    __is_mp_worker = True

    import numba
    numba.config.NUMBA_NUM_THREADS = threads
    try:
        import mkl
        _log.debug('configuring MKL thread count')
        mkl.set_num_threads(threads)
    except ImportError:
        pass

    _log.debug('worker %d ready', os.getpid())
Example #26
    def initLUSolver(self):
        """LU solver initlization

        Due to the A matrix never change, expansive LU factorization can only run once

        Author:Bin Wang([email protected])
        Date: April. 2020
        """
        #Assemble linear system for BEM
        for domain in self.BEMobjs:
            A, b, B = domain.AssembleMatrix()
            #Compute LU factorization at the beginning
            self.LU_pivs.append(lu_factor(A))
            self.bs.append(b)
            self.Bs.append(B)

        #Disable mkl backend multithreading
        mkl.set_num_threads(1)
Example #27
def main():
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
                        level=logging.INFO)
    mkl.set_num_threads(3)

    parser = argparse.ArgumentParser("Create topics from word2vec model")
    parser.add_argument("--embedding-model", type=str)
    parser.add_argument("--topic-model", type=str)
    args = parser.parse_args()

    assert "topic" in args.topic_model, "'%s' not a topic model" % args.topic_model
    assert "embedding" in args.embedding_model, "'%s' not an embedding model" % args.embedding_model

    word2vec = Word2Vec.load_word2vec_format(args.embedding_model, binary=True)

    calculate_similarities(word2vec,
                           os.path.basename(args.embedding_model),
                           args.topic_model,
                           all_pairwise=False)
Example #28
def main(ctx, config, debug):
    '''leitmotiv - Extract Trends from Image Collections'''
    if debug:
        __LOGGER_OPTIONS['handlers']['console']['level'] = 'DEBUG'
        __LOGGER_OPTIONS['loggers']['leitmotiv']['level'] = 'DEBUG'
        __LOGGER_OPTIONS['loggers']['peewee'] = {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False
        }
        logging.config.dictConfig(__LOGGER_OPTIONS)

    yaml = YAML()
    with open(config) as f:
        config = yaml.load(f)['leitmotiv']

    mkl.set_num_threads(config['num_threads'])
    library.LIBRARY_PATH = config['library']['path']
    ctx.obj = config
Example #29
def set_threads(num_threads=None, verbose=False, no_guessing=False):
    '''
    Get and set the number of threads used by FFT libraries.

    Parameters
    ----------
    num_threads : int, default None
        Number of threads requested. If None, do not set threads.
    verbose : bool, default False
        If True, output debug messages to STDOUT.
    no_guessing : bool, default false
        If False and MKL is not found at all, return a guess of 1 thread
        since numpy.fft and scipy.fftpack are single-threaded without MKL.
        If True, return len(os.sched_getaffinity(0)) or os.cpu_count().

    Returns
    -------
    (int or None, str or None)
        The number of threads in effect and a string describing how it was
        determined, or (None, None) on failure.
    '''

    try:
        import mkl
    except ImportError:
        if hasattr(np, '__mkl_version__') or no_guessing:
            # MKL present but no mkl-service, so guess number of CPUs
            if verbose:
                print(f'TAG: WARNING: mkl-service module was not '
                      f'found. Number of threads is likely inaccurate!')
            if hasattr(os, 'sched_getaffinity'):
                return len(os.sched_getaffinity(0)), 'os.sched_getaffinity'
            else:
                return os.cpu_count(), 'os.cpu_count'
        else:
            # no MKL, so assume not threaded
            return 1, 'guessing'
    else:
        if num_threads:
            mkl.set_num_threads(num_threads)
        return mkl.get_max_threads(), 'mkl.get_max_threads'

    return None, None
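# Hypothetical usage of set_threads() above: it returns a (count, source) pair,
# where source records how the count was obtained ('mkl.get_max_threads',
# 'os.sched_getaffinity', 'os.cpu_count' or 'guessing').
nthreads, source = set_threads(num_threads=4, verbose=True)
print('FFT threads: %s (reported by %s)' % (nthreads, source))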
Example #30
    def run(self):
        """
        Run the forward simulation
        """

        # ----------------- Validate Parameters ----------------- #

        print('Validating parameters...')
        self.validate()

        sim_mesh = self.meshGenerator.mesh # grab the discretize mesh off of the mesh object
        print('      max x: {}, min z: {}, max z: {}'.format(
            sim_mesh.vectorNx.max(),
            sim_mesh.vectorNz.min(),
            sim_mesh.vectorNz.max()
        ))

        # save simulation parameters
        self.save()

        # --------------- Set the number of threads --------------- #
        mkl.set_num_threads(self.num_threads)

        # ----------------- Set up the simulation ----------------- #
        physprops = self.physprops
        prb = self.prob
        # survey = self.survey
        # prb.pair(survey)

        # ----------------- Run the simulation ----------------- #
        print('Starting Simulation')
        t = time.time()
        fields = prb.fields(physprops.model)
        np.save(
            '/'.join([self.directory, self.fields_filename]),
            fields[:, '{}Solution'.format(self.formulation)]
        )
        print('   ... Done. Elapsed time : {}'.format(time.time()-t))

        self._fields = fields
        return fields
Example #31
    def __init__(self,
                 fingerprintsA,
                 fingerprintsB=None,
                 chemicalKernelmat=None,
                 nthreads=4):

        self.dtype = 'float64'
        self.nthreads = nthreads
        try:
            import mkl
            mkl.set_num_threads(self.nthreads)
        except:
            # mkl-service is unavailable; warn instead of aborting __init__
            import warnings
            warnings.warn(
                'NUMPY DOES NOT SEEM TO BE LINKED TO MKL LIBRARY SO NTHREADS IS IGNORED'
            )

        self.fingerprintsA = fingerprintsA
        self.fingerprints_infoA = self.get_info(fingerprintsA)
        pairsA = self.fingerprints_infoA['pairs']
        Nframe = len(fingerprintsA)
        if fingerprintsB is not None:
            self.fingerprintsB = fingerprintsB
            self.fingerprints_infoB = self.get_info(fingerprintsB)
            pairsB = self.fingerprints_infoB['pairs']
            Mframe = len(fingerprintsB)
        else:
            self.fingerprintsB = None
            pairsB = pairsA
            Mframe = Nframe

        # initialize data container
        self._storage = {
            pA + pB: np.zeros((Nframe, Mframe), dtype=self.dtype)
            for pA in pairsA for pB in pairsB
        }

        self.set_partial_kernels()

        self.chemicalKernelmat = chemicalKernelmat
        self.set_kernel(chemicalKernelmat)
Example #32
def disable_multi_threading():
    import ctypes
    from ctypes.util import find_library
    import os

    # OpenBLAS-based multi-threading libraries
    try_paths = [
        '/opt/OpenBLAS/lib/libopenblas.so', '/lib/libopenblas.so',
        '/usr/lib/libopenblas.so.0',
        find_library('openblas')
    ]
    openblas_lib = None
    for libpath in try_paths:
        try:
            openblas_lib = ctypes.cdll.LoadLibrary(libpath)
            break
        except (OSError, TypeError):
            continue

    if openblas_lib is not None:
        try:
            openblas_lib.openblas_set_num_threads(1)
        except:
            pass

    # MKL-based multi-threading libraries
    try:
        import mkl
        mkl.set_num_threads(1)
    except:
        pass

    # Set OS variables
    os.environ["OMP_NUM_THREADS"] = "1"  # export OMP_NUM_THREADS=1
    os.environ["OPENBLAS_NUM_THREADS"] = "1"  # export OPENBLAS_NUM_THREADS=1
    os.environ["MKL_NUM_THREADS"] = "1"  # export MKL_NUM_THREADS=1
    os.environ[
        "VECLIB_MAXIMUM_THREADS"] = "1"  # export VECLIB_MAXIMUM_THREADS=1
    os.environ["NUMEXPR_NUM_THREADS"] = "1"  # export NUMEXPR_NUM_THREADS=1
Example #33
def _initialize_mp_worker(mkey, func, threads, log_queue):
    _initialize_worker(log_queue)
    global __work_model, __work_func

    nnt_env = os.environ.get('NUMBA_NUM_THREADS', None)
    if nnt_env is None or int(nnt_env) > threads:
        _log.debug('configuring Numba thread count')
        import numba
        numba.config.NUMBA_NUM_THREADS = threads
    try:
        import mkl
        _log.debug('configuring MKL thread count')
        mkl.set_num_threads(threads)
    except ImportError:
        pass

    __work_model = mkey
    # deferred function unpickling to minimize imports before initialization
    __work_func = pickle.loads(func)

    _log.debug('worker %d ready (process %s)', os.getpid(),
               mp.current_process())
Example #34
def mpirun(f, arguments, comm=MPI.COMM_WORLD, bcast=True):
    '''
    Wrapper for the parallel running of f using the mpi4py.

    Parameters
    ----------
    f : callable
        The function to be parallelly run using the mpi4py.
    arguments : list of tuple
        The list of arguments passed to the function f.
    comm : MPI.Comm, optional
        The MPI communicator.
    bcast : True or False
        When True, broadcast the result for all processes;
        Otherwise only the rank 0 process hold the result.

    Returns
    -------
    list
        The returned values of f with respect to the arguments.
    '''
    size = comm.Get_size()
    rank = comm.Get_rank()
    if size > 1:
        import mkl
        mkl.set_num_threads(1)
    temp = []
    for i, argument in enumerate(arguments):
        if i % size == rank:
            temp.append(f(*argument))
    temp = comm.gather(temp, root=0)
    result = []
    if rank == 0:
        for i in range(len(arguments)):
            result.append(temp[i % size][i // size])
    if bcast:
        result = comm.bcast(result, root=0)
    return result
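# Hypothetical usage of mpirun() above, launched e.g. with
# `mpiexec -n 4 python script.py`: each rank evaluates its share of the
# argument list with MKL pinned to one thread, and the gathered results are
# broadcast back to every rank.
def f(x, y):
    return x + y

values = mpirun(f, [(i, 2 * i) for i in range(8)])  # -> [0, 3, 6, ..., 21]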
Example #35
    def __init__(self, *args, **kwargs):
        super(Worker, self).__init__()

        self.nthreads = pyrat._nthreads  # number of threads for processing
        if pyrat._debug is True:
            self.nthreads = 1

        try:
            import mkl
            if self.nthreads > 1:  # switch off mkl multithreading
                mkl.set_num_threads(1)  # because we do it ourselves
            else:
                mkl.set_num_threads(999)
        except ImportError:
            pass

        # self.blockprocess = True                                               # blockprocessing on/off
        # self.blocksize = 128                                                   # size of single block

        for para in self.para:  # copy defaults to self
            setattr(self, para['var'], para['value'])
        for (k, v) in kwargs.items():  # copy keywords to self
            setattr(self, k, v)  # eventually overwriting defaults
        if not hasattr(self, 'layer'):  # if no keyword was used
            self.layer = pyrat.data.active  # use active layer
        # --------------------------------------------------

        self.name = self.__class__.__name__  # name of worker class (string)
        self.input = ''  # input layer(s)
        self.output = ''  # output layer(s)
        self.blockoverlap = 0  # block overlap
        self.vblock = False  # vertical blocks on/off
        self.blocks = []  # list of block boundaries
        self.valid = []  # valid part of each block
        # self.block = False                                                     # actual block range / validity
        self.allowed_ndim = False
        self.require_para = False
        self.allowed_dtype = False
Example #36
def mpirun(f,arguments,comm=MPI.COMM_WORLD,bcast=True):
    '''
    Wrapper for the parallel running of f using the mpi4py.

    Parameters
    ----------
    f : callable
        The function to be parallelly run using the mpi4py.
    arguments : list of tuple
        The list of arguments passed to the function f.
    comm : MPI.Comm, optional
        The MPI communicator.
    bcast : True or False
        When True, broadcast the result for all processes;
        Otherwise only the rank 0 process hold the result.

    Returns
    -------
    list
        The returned values of f with respect to the arguments.
    '''
    size=comm.Get_size()
    rank=comm.Get_rank()
    if size>1:
        import mkl
        mkl.set_num_threads(1)
    temp=[]
    for i,argument in enumerate(arguments):
        if i%size==rank:
            temp.append(f(*argument))
    temp=comm.gather(temp,root=0)
    result=[]
    if rank==0:
        for i in range(len(arguments)):
            result.append(temp[i%size][i//size])
    if bcast:
        result=comm.bcast(result,root=0)
    return result
Example #37
def main():
    file = open("w8a.txt", "r")
    
    labels = []
    features = []
	
    for line in file:
        mylist = line.split(" ")
        labels.append(int(mylist[0]))
        example = [0 for i in range(300)]
		
        for i in range(1,len(mylist)-1):	
           indexAndValue = mylist[i].split(":")
           
           index = indexAndValue[0]
           index = int(index)
           example[index-1] = int(indexAndValue[1]) 
           
        features.append(example)
    
    labels = np.array(labels)
    features = np.array(features)
    
    initial_w = np.random.random_sample(300)
    initial_w = initial_w * (math.sqrt(1/150))
    
    num_iterations = 1000
    learning_rate = 0.001
    decay = learning_rate/num_iterations
    mkl.set_num_threads(4)
    
    print("Starting Gradient Descent at loss = {}".format(compute_loss(initial_w, features,labels)))
    print("Running...")
 
    w = gradient_descent_runner(initial_w, features, labels, learning_rate, num_iterations, decay)
    
    print("After {0} iterations, loss = {1}, Elapsed Time = {2} ".format(num_iterations, compute_loss(w, features, labels), total_time))
Example #39
    def __exit__(self, exc_type, exc_value, traceback):
        """
        Close the opened file and restore sys.stdout before exit
        """

        mkl.set_num_threads(mkl.get_max_threads())

        if self._rd:
            print("=" * 80)
            if exc_type is None:
                print("Non exception has occurred!")
            else:
                print("Exc_type: {0}".format(exc_type))
                print("Exc_value: {0}".format(exc_value))
                print("Traceback:")
                print_tb(traceback, file=self._fp)
            print("=" * 80)
            print("Exit run time environment at: {0}".format(
                strftime(TIME_FORMAT)),
                  flush=True)
            sys.stdout = self._stdout
            self._fp.close()
        return False
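# A minimal self-contained sketch of the pattern in Examples #22 and #39: a
# context manager that pins the MKL thread count on entry and restores the
# previous value on exit (the class name here is illustrative, not taken from
# the project above, and the mkl-service package must be installed).
import mkl

class MKLThreads(object):
    def __init__(self, n):
        self._n = n
        self._old = None

    def __enter__(self):
        self._old = mkl.get_max_threads()
        mkl.set_num_threads(self._n)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        mkl.set_num_threads(self._old)
        return False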
Example #40
def start_benchmark():
    print "Benchmark starting timing with numpy %s\nVersion: %s" % (numpy.__version__, sys.version)
    print ("-" * 80)

    for cur_threads in threads_range:
        header_set = False

        # This doesn't work: os.environ is not adjusted        
        #os.environ[THREADS_LIMIT_ENV] = '%d' % cur_threads
        
        mkl.set_num_threads(cur_threads)
        print "Maximum number of threads used for computation is : %d" % cur_threads

        header_str = "%20s" % "Function"
        header_str += ' - %9s - Speedup' % 'Time [ms]'

        if cur_threads == 1:        
            timings_single = []

        for ii,fun in enumerate(tests):

            result_str = "%20s" % fun.__name__
            t = timeit.Timer(stmt="%s()" % fun.__name__, setup="from __main__ import %s" % fun.__name__)
            res = t.repeat(repeat=3, number=1)
            timing =  1000.0 * sum(res)/len(res)
            
            if cur_threads == 1:        
                timings_single.append(timing)
        
            result_str += ' - %9.1f - %5.1f' % (timing, timings_single[ii]/timing)
                 
            if not header_set:
                print header_str
                header_set = True
        
            print result_str    
Example #41
### TODO: Still needs to be tested!
import mkl
mkl.set_num_threads(56)
import json
import torch
from sqlnet.utils import *
from sqlnet.model.seq2sql import Seq2SQL
from sqlnet.model.sqlnet import SQLNet
import numpy as np
import datetime
from logger import Logger
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--constraint', action='store_true', 
            help='If set, use constraint data; used to test constraint query translation.')
    parser.add_argument('--toy', action='store_true', 
            help='If set, use small data; used for fast debugging.')
    parser.add_argument('--suffix', type=str, default='',
            help='The suffix at the end of saved model name.')
    parser.add_argument('--ca', action='store_true',
            help='Use conditional attention.')
    parser.add_argument('--dataset', type=int, default=0,
            help='0: original dataset, 1: re-split dataset')
    parser.add_argument('--rl', action='store_true',
            help='Use RL for Seq2SQL(requires pretrained model).')
    parser.add_argument('--baseline', action='store_true', 
            help='If set, then train Seq2SQL model; default is SQLNet model.')
    parser.add_argument('--train_emb', action='store_true',
            help='Train word embedding for SQLNet(requires pretrained model).')
Example #42
from rglib.mps import MPS
sys.path.insert(0,'../')
from model_rashba.views import majorana4mps,majorana4vec,dos4mps,dos4vec,sweep_fidelity,sweep_fidelity2
from model_bizard.views import smajorana4mps
from localsetting import ONSV
from models import *

#MPI setting
try:
    from mpi4py import MPI
    COMM=MPI.COMM_WORLD
    SIZE=COMM.Get_size()
    RANK=COMM.Get_rank()
    #if SIZE!=1:
    import mkl
    mkl.set_num_threads(1)
except:
    print 'WARNING, NOT USING MULTITHREADING.'
    COMM=None
    SIZE=1
    RANK=0


def heisenberg_vmps(J,Jz,h,nsite,append=False):
    '''
    Run vMPS for Heisenberg model.
    '''
    filename='mps_heisenberg_%s.dat'%(nsite)
    model=HeisenbergModel(J=J,Jz=Jz,h=h,nsite=nsite)
    if append:
        mps=MPS.load(filename)
Example #43
File: mkl.py Project: lelegan/modl
    def __enter__(self):
        mkl.set_num_threads(self.num_threads)
Example #44
    def execute(self):
        #import the algorithm module
        try:
            importStr = 'from algorithm.rating.' + self.config[
                'recommender'] + ' import ' + self.config['recommender']
            exec(importStr)
        except ImportError:
            importStr = 'from algorithm.ranking.' + self.config[
                'recommender'] + ' import ' + self.config['recommender']
            exec(importStr)
        if self.evaluation.contains('-cv'):
            k = int(self.evaluation['-cv'])
            if k <= 1 or k > 10:
                k = 3

            mkl.set_num_threads(max(1, mkl.get_max_threads() // k))

            #create the manager
            manager = Manager()
            m = manager.dict()
            i = 1
            tasks = []

            binarized = False
            if self.evaluation.contains('-b'):
                binarized = True

            for train, test in DataSplit.crossValidation(self.trainingData,
                                                         k,
                                                         binarized=binarized):
                fold = '[' + str(i) + ']'
                if self.config.contains('social'):
                    recommender = self.config[
                        'recommender'] + "(self.config,train,test,self.relation,fold)"
                else:
                    recommender = self.config[
                        'recommender'] + "(self.config,train,test,fold)"
            #create the process
                p = Process(target=run, args=(m, eval(recommender), i))
                tasks.append(p)
                i += 1
            #start the processes
            for p in tasks:
                p.start()
                if not self.evaluation.contains('-p'):
                    p.join()
            #wait until all processes are completed
            if self.evaluation.contains('-p'):
                for p in tasks:
                    p.join()
            #compute the mean error of k-fold cross validation
            self.measure = [dict(m)[i] for i in range(1, k + 1)]
            res = []
            for i in range(len(self.measure[0])):
                if self.measure[0][i][:3] == 'Top':
                    res.append(self.measure[0][i])
                    continue
                measure = self.measure[0][i].split(':')[0]
                total = 0
                for j in range(k):
                    total += float(self.measure[j][i].split(':')[1])
                res.append(measure + ':' + str(total / k) + '\n')
            #output result
            currentTime = strftime("%Y-%m-%d %H-%M-%S", localtime(time()))
            outDir = LineConfig(self.config['output.setup'])['-dir']
            fileName = self.config[
                'recommender'] + '@' + currentTime + '-' + str(
                    k) + '-fold-cv' + '.txt'
            FileIO.writeFile(outDir, fileName, res)
            print('The result of %d-fold cross validation:\n%s' %
                  (k, ''.join(res)))

        else:
            if self.config.contains('social'):
                recommender = self.config[
                    'recommender'] + '(self.config,self.trainingData,self.testData,self.relation)'
            else:
                recommender = self.config[
                    'recommender'] + '(self.config,self.trainingData,self.testData)'
            eval(recommender).execute()
Example #45
	def __init__(self): 

		import os, sys
		MoleculeName = ''
		if len(sys.argv) < 2:
			print "Ideally you give me a molecule name, Defaulting to /Integrals"
		else: 
			MoleculeName = sys.argv[1]

		self.MoleculeName = MoleculeName # Defined Globally in Main.py 
		self.nocc = 4
		self.nvirt = 4
		self.nmo = 8    # These will be determined on-the-fly reading from disk anyways. 
		self.occ = []
		self.virt = []
		self.all = []
		self.alpha = []
		self.beta = []

		# Try to get a faster timestep by freezing the CoreOrbitals. 
		# if it's not None, then freeze that many orbitals (evenly alpha and beta.) 
		self.FreezeCore = 8 

		self.AvailablePropagators = ["phTDA2TCL","Whole2TCL", "AllPH"]
		self.Propagator = "phTDA2TCL"
		self.Correlated = True
		self.SecularApproximation = 0 # 0 => Nonsecular 1=> Secular 
		self.ImaginaryTime = True
		
		self.Temperature = 303.15*2
		self.TMax =  250.0 
		self.TStep = 0.01
		self.tol = 1e-9
		self.Safety = .9 #Ensures that if Error = Emax, the step size decreases slightly
		self.RK45 = True
		self.LoadPrev = False
#		self.MarkovEta = 0.05
		self.MarkovEta = 0.001
		self.Tc = 3000.0
		self.Compiler = "gcc"
		self.Flags = ["-mtune=native", "-O3"]
		self.latex = True
		self.fluorescence = True # If true, this performs the fluorescence calculations in SpectralAnalysis
		# above a certain energy 
		# no relaxation is included and the denominator is the bare electronic 
		# denominator to avoid integration issues.
		self.UVCutoff = 300.0/27.2113 # No Relaxation above 1eV		

		try:
			mkl.set_num_threads(8)
		except NameError:
			print "No MKL so I can't set the number of threads"
		
		# This is a hack switch to shut off the Boson Correlation Function 
		# ------------------------------------------------------------------------
		# Adiabatic = 0 # Implies numerical integration of expdeltaS and bosons. 
		# Adiabatic = 1 # Implies numerical integration of expdeltaS no bosons. 
		# Adiabatic = 2 # Implies analytical integration of expdeltaS, no bosons.  
		# Adiabatic = 3 # Implies analytical integration of expdeltaS, no bosons, and the perturbative terms are forced to be anti-hermitian. 		
		# Adiabatic = 4 # Implies Markov Approximation. 
		self.Adiabatic = 0

		self.Inhomogeneous = False
		self.InhomogeneousTerms = self.Inhomogeneous
		self.Inhomogenous = False
		self.Undressed = True
		self.ContBath = True # Whether the bath is continuous/There is a continuous bath. 
		self.ReOrg = True 
		self.FiniteDifferenceBosons = False

		self.DipoleGuess = True # if False, a superposition of bright states will be used. 
		self.AllDirections = True # Will initialize three concurrent propagations. 
		self.DirectionSpecific = True		
		
		self.InitialDirection = -1 # 0 = x etc. Only excites in the x direction -1=isotropic
		self.BeginWithStatesOfEnergy = None 
#		self.BeginWithStatesOfEnergy = 18.7/27.2113
#		self.BeginWithStatesOfEnergy = 18.2288355687/27.2113
		self.PulseWidth = 1.7/27.2113
		
		self.Plotting = True
		self.DoCisDecomposition = True
		self.DoBCT = True # plot and fourier transform the bath correlation tensor. 
		self.DoEntropies = True
		if (self.Undressed): 
			self.DoEntropies = False
		self.FieldThreshold = pow(10.0,-7.0)
		self.ExponentialStep = False  #This will be set automatically if a matrix is made. 

		self.LegendFontSize = 14 
		self.LabelFontSize = 16
		
		print "--------------------------------------------"		
		print "Running With Overall Parameters: "
		print "--------------------------------------------"
		print "self.MoleculeName", self.MoleculeName 
		print "self.AllDirections", self.AllDirections 
		print "self.Propagator", self.Propagator 
		print "self.Temperature", self.Temperature 
		print "self.TMax", self.TMax 
		print "self.TStep", self.TStep 
		print "self.MarkovEta", self.MarkovEta
		print "self.Adiabatic", self.Adiabatic
		print "self.Inhomogeneous", self.Inhomogeneous
		print "self.Undressed", self.Undressed
		print "self.Correlated", self.Correlated 
		print "self.SecularApproximation", self.SecularApproximation
		print "self.DipoleGuess", self.DipoleGuess
		print "self.BeginWithStatesOfEnergy ", self.BeginWithStatesOfEnergy
		print "self.DoCisDecomposition", self.DoCisDecomposition 
		print "self.DoBCT", self.DoBCT 
		print "self.DoEntropies", self.DoEntropies 
		print "self.FieldThreshold", self.FieldThreshold 
		return 
Example #46
#!/usr/bin/env python

import os, sys
from os import path
sys.path.append("/home2/data/Projects/CWAS/pyClusterROI")

if len(sys.argv) != 2:
    sys.exit("Usage: %s num-threads" % sys.argv[0])

# control mkl
import mkl
mkl.set_num_threads(int(sys.argv[1]))


###
# 1. SETUP
###

print "1. Setup"

obase = "/home2/data/Projects/CWAS/development+motion/spatial_cluster"

# functions for connectivity metric
from make_local_connectivity_ones import *

# name of the maskfile that we will be using
roidir = "/home2/data/Projects/CWAS/share/development+motion/rois"
maskfile = path.join(roidir, "mask_gray_4mm.nii.gz")


###
Example #47
def parmap(f,args,workers=None):
  '''  
  evaluates [f(a) for a in args] in parallel

  if workers is 0 then the built-in map is used. If workers is greater 
  than one then the parent process spawns that many worker processes to 
  evaluate the map.
  '''
  starting_threads = mkl.get_max_threads()
  if workers is None:
    # starting_threads is a good estimate for the number of processes 
    # that can be simultaneously running
    workers = starting_threads

  if workers < 0:
    raise ValueError('number of worker processes must be 0 or greater')
    
  if workers == 0:
    # use the built-in sequential map 
    return map(f,args)

  # make sure that lower level functions are not running in parallel
  mkl.set_num_threads(1)

  # q_in has a max size of 1 so that args is not copied over to 
  # the next process until absolutely necessary
  q_in = Queue(1)
  q_out = Queue()
  # any exceptions found by the child processes are put in this queue 
  # and then raised by the parent
  q_err = Queue()

  # spawn worker processes
  procs = []
  for i in range(workers):
    p = Process(target=_f,args=(f,q_in,q_out,q_err))
    # process is starting and waiting for something to be put on q_in
    p.start()
    procs += [p] 

  submitted_tasks = 0
  for a in args:
    q_in.put((submitted_tasks,a))
    submitted_tasks += 1

  # indicate that nothing else will be added
  for i in range(workers):
    q_in.put(('DONE',None))


  # allocate list of Nones and then fill it in with the results
  val_list = [None for i in range(submitted_tasks)]
  err_list = [None for i in range(submitted_tasks)]
  for i in range(submitted_tasks):
    idx,err = q_err.get()
    err_list[idx] = err
    idx,val = q_out.get()
    val_list[idx] = val

  # terminate all processes
  for p in procs:
    p.join()

  # close queues
  q_in.close()
  q_out.close()
  q_err.close()

  # raise an error if any were found
  if any([e is not None for e in err_list]):
    raise ParmapError(err_list)

  # reset the number of threads to its original value
  mkl.set_num_threads(starting_threads)
  return val_list
Example #48
def parmap(f,args,workers=None):
  '''  
  evaluates [f(a) for a in args] in parallel

  if workers is 0 then the built-in map is used. If workers is greater 
  than one then the parent process spawns that many worker processes to 
  evaluate the map. 
  
  Parameters
  ----------
  f : callable

  args : list
    list of arguments to *f*
    
  workers : int, optional
    number of subprocess to spawn. Defaults to half the available 
    cores plus one

  NOTES
  -----
  If the *mkl* package is installed then this function first sets the 
  maximum number of allowed threads per process to 1. This is to help 
  prevents spawned subprocesses from using multiple cores. The number 
  of allowed threads is reset after all subprocesses have finished.
    
  '''
  if workers is None:
    # default number of processes to have simultaneously running
    workers = cpu_count()//2 + 1

  if workers < 0:
    raise ValueError('number of worker processes must be 0 or greater')
    
  if workers == 0:
    # perform the map on the parent process
    return [f(i) for i in args]

  # attempt to prevent lower level functions from running in parallel
  if _HAS_MKL:
    starting_threads = mkl.get_max_threads()
    mkl.set_num_threads(1)

  # q_in has a max size of 1 so that args is not copied over to 
  # the next process until absolutely necessary
  q_in = Queue(1)
  q_out = Queue()
  # any exceptions found by the child processes are put in this queue 
  # and then raised by the parent
  q_err = Queue()

  # spawn worker processes
  procs = []
  for i in range(workers):
    p = Process(target=_f,args=(f,q_in,q_out,q_err))
    # process is starting and waiting for something to be put on q_in
    p.start()
    procs += [p] 

  submitted_tasks = 0
  for a in args:
    q_in.put((submitted_tasks,a))
    submitted_tasks += 1

  # indicate that nothing else will be added
  for i in range(workers):
    q_in.put(('DONE',None))


  # allocate list of Nones and then fill it in with the results
  val_list = [None for i in range(submitted_tasks)]
  err_list = [None for i in range(submitted_tasks)]
  for i in range(submitted_tasks):
    idx,err = q_err.get()
    err_list[idx] = err
    idx,val = q_out.get()
    val_list[idx] = val

  # terminate all processes
  for p in procs:
    p.join()

  # close queues
  q_in.close()
  q_out.close()
  q_err.close()

  # raise an error if any were found
  if any([e is not None for e in err_list]):
    raise ParmapError(err_list)

  # reset the number of threads to its original value
  if _HAS_MKL:
    mkl.set_num_threads(starting_threads)
    
  return val_list
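A short usage sketch for the parmap above (the squaring function is only an illustration); note that f and its arguments must be picklable, so f should be defined at module level:

def square(x):
  return x**2

if __name__ == '__main__':
  # spawn four workers; workers=0 would fall back to a plain list comprehension
  print(parmap(square, range(10), workers=4))
  # -> [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]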
Example #49
0
# How to use mkl
# The mkl module exposes functions which are declared in mkl_service.h.
# For now, mkl.set_num_threads is the one call worth remembering.

# import mkl
import mkl
# set the number of threads
# this pays off on multi-core CPUs
mkl.set_num_threads(2)  # matching the number of CPU cores is a good choice
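Several examples in this collection guard the import so that the same script still runs on builds of NumPy/SciPy without MKL; a minimal sketch of that pattern:

try:
    import mkl
    mkl.set_num_threads(4)  # cap MKL at 4 threads when it is available
except ImportError:
    pass  # no MKL on this installation; nothing to configure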
Example #50
0
                    help="Number of threads to parallel processes for Intel MKL")

# Output
parser.add_argument('-o', '--outdir', default=os.getcwd(), help="Output directory")



###
# Parse and Read User Args
###

args = parser.parse_args()

try:
    import mkl
    mkl.set_num_threads(args.nthreads)
except ImportError:
    pass

if not args.degree and not args.eigen:
    raise SystemExit("--degree and/or --eigen must be specified")
method_options = [args.degree, args.eigen]

if not args.binarize and not args.weighted:
    raise SystemExit("--binarize and/or --weighted must be specified")
weight_options = [args.binarize, args.weighted]

if args.pvalue is not None:
    option = 0
    threshold = args.pvalue
elif args.sparsity is not None:
Example #51
0
import numpy
import numpy.fft as fft
numpy.use_fastnumpy = True
import time
#from scipy.fftpack import fft
import mkl

print 'Intel MKL version:', mkl.get_version_string()
print 'Intel cpu_clocks:', mkl.get_cpu_clocks()
print 'Intel cpu_frequency:', mkl.get_cpu_frequency()
#print 'Intel MKL, freeing buffer memory:', mkl.thread_free_buffers()

print 'max Intel threads:', mkl.get_max_threads()

mkl.set_num_threads(2)

N = 2**16

print 'using numpy', numpy.__version__
a = numpy.random.rand(2, N)
print a.shape, 'items'
t0 = time.clock()
for i in range(100):
    continue
base = time.clock()-t0
fftn = fft.fftn
t0 = time.clock()
for i in range(100):
    r = fftn(a, (N,), (1,))
print 'simple loop', time.clock()-t0-base
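Whether the FFT above picks up the MKL thread setting depends on the NumPy build (here an MKL-linked Enthought build with use_fastnumpy). A small sketch, assuming such a build, for timing the same transform under different thread counts:

import time
import numpy as np
import numpy.fft as fft
import mkl

a = np.random.rand(2, 2**16)
for nthreads in (1, mkl.get_max_threads()):
    mkl.set_num_threads(nthreads)
    t0 = time.perf_counter()
    for _ in range(100):
        fft.fftn(a, (2**16,), (1,))
    print(nthreads, 'thread(s):', time.perf_counter() - t0, 'seconds')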
Example #52
0
# imports inferred from usage; FileIO, Constants and NUM_SAMPLES come from the
# surrounding project and are not shown in this excerpt
import numpy as np
import mkl
import matplotlib.pyplot as plt
import plotly.graph_objs as go
import plotly.plotly as py
from sklearn.decomposition import IncrementalPCA

def main(date, takeSubset=False):
    """
    Reduces the dimensionality of the training data to 3 dimensions, 
    plots the transformed data in 3d space. The idea is to bring
    out separability between the resistance classes which may be 
    hidden in the dimensionality of the data.

    :param date: (string) Data collection date YYYY_MMDD
    :param takeSubset: (boolean) Transform and plot a random subset of
                                 the training data?

    :return: (None)
    """

    mkl.set_num_threads(8)

    # Load the training and testing data into memory
    trainX, trainY = FileIO.loadTrainingData(date)

    if takeSubset:
        indices = np.random.choice(range(0, len(trainY)), size=NUM_SAMPLES, replace=False)
        X = trainX[indices,:]
        y = trainY[indices]
    else:
        X = trainX
        y = trainY

    X = np.nan_to_num(X)

    # Break the data into resistance classes
    susIndex = Constants.LABEL_TO_INDEX[Constants.SUSCEPTIBLE]
    drIndex = Constants.LABEL_TO_INDEX[Constants.DR_RESISTANT]
    grIndex = Constants.LABEL_TO_INDEX[Constants.GR_RESISTANT]

    susX = X[y==susIndex, :]
    drX = X[y==drIndex, :]
    grX = X[y==grIndex, :]

    # Transform the data using PCA
    pca = IncrementalPCA(n_components=6)

    pointsSUS = pca.fit_transform(susX)
    pointsGR = pca.fit_transform(grX)
    pointsDR = pca.fit_transform(drX)

    # Plot the transformed data in 3D space
    traceSUS = go.Scatter3d(
        x=pointsSUS[:, 0],
        y=pointsSUS[:, 1],
        z=pointsSUS[:, 2],
        mode='markers',
        marker=dict(
            size=5,
            line=dict(
                color='rgba(255, 0, 0, 0)',
                width=0.1
            ),
            opacity=0
        )
    )

    traceDR = go.Scatter3d(
        x=pointsDR[:, 0],
        y=pointsDR[:, 1],
        z=pointsDR[:, 2],
        mode='markers',
        marker=dict(
            size=5,
            line=dict(
                color='rgba(0, 255, 0, 0)',
                width=0.1
            ),
            opacity=0
        )
    )

    traceGR = go.Scatter3d(
        x=pointsGR[:, 0],
        y=pointsGR[:, 1],
        z=pointsGR[:, 2],
        mode='markers',
        marker=dict(
            size=5,
            line=dict(
                color='rgba(0, 0, 255, 0)',
                width=0.1
            ),
            opacity=0
        )
    )

    data = [traceSUS, traceDR, traceGR]
    fig = go.Figure(data=data)
    py.iplot(fig, filename='3D PCA Wavelength Plot')

    # Plot the principal components
    eigenSpectra = pca.components_

    plt.subplot(3,1,1)
    plt.plot(Constants.WAVELENGTHS, eigenSpectra[0, :])
    plt.title("Principle Components 1 - 3")
    plt.subplot(3,1,2)
    plt.plot(Constants.WAVELENGTHS, eigenSpectra[1, :])
    plt.subplot(3,1,3)
    plt.plot(Constants.WAVELENGTHS, eigenSpectra[2, :])
    plt.xlabel("Wavelength (nm)")
    plt.show()

    plt.clf()
    plt.subplot(3,1,1)
    plt.plot(Constants.WAVELENGTHS, eigenSpectra[3, :])
    plt.title("Principle Components 4 - 6")
    plt.subplot(3,1,2)
    plt.plot(Constants.WAVELENGTHS, eigenSpectra[4, :])
    plt.subplot(3,1,3)
    plt.plot(Constants.WAVELENGTHS, eigenSpectra[5, :])
    plt.xlabel("Wavelength (nm)")
    plt.show()
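Note that the example above calls fit_transform separately on each resistance class, so every IncrementalPCA refit places its class in its own component space (and pca.components_ afterwards reflects only the last fit). If the goal is to compare the classes within one coordinate system, a variant would fit once on the combined matrix and only transform each class; a sketch reusing the names from the example:

pca = IncrementalPCA(n_components=6)
pca.fit(X)                        # fit once on all classes together
pointsSUS = pca.transform(susX)   # project each class into the shared space
pointsDR = pca.transform(drX)
pointsGR = pca.transform(grX)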
Example #53
0
def parallel_loop(args):

    import numpy as np
    import scipy.linalg as la
    import time

    import pysparsefht
    from utils import random_k_sparse

    try:
        import mkl as mkl_service
        # for such parallel processing, it is better 
        # to deactivate multithreading in mkl
        mkl_service.set_num_threads(1)
    except ImportError:
        pass

    K = int(np.round(2**(args[0]*params['n'])))
    B = int(2**params['b'])
    C = int(params['C'])
    algo_name = params['algo_name']
    seed = args[1]

    if algo_name == 'RANDOM':
        algo = pysparsefht.ALGO_RANDOM
    elif algo_name == 'DETERMINISTIC':
        algo = pysparsefht.ALGO_OPTIMIZED
    else:
        raise ValueError('No such algorithm.')

    # initialize rng
    np.random.seed(seed)

    # a list for return values
    ret = []

    # Run the inner loops
    for i in range(params['inner_loops']):

        # generate a seed for the C RNG
        C_seed = np.random.randint(4294967295, dtype=np.uint32)

        # create sparse vector
        x_hat, y_hat, supp_hat = random_k_sparse(params['N'], K, params['sigma2'])

        # compute WHT
        x = pysparsefht.fht(x_hat)

        # Now apply the SparseFHT
        y_hat2, supp_hat2, unsat, loops = pysparsefht.sparse_fht(x, int(K), int(B), int(C),
                                    max_iter=params['max_iter'],
                                    algo=algo,
                                    req_loops=True, req_unsat=True,
                                    seed=C_seed)

        supp_r = supp_hat2[supp_hat2 > 0]
        y_r = y_hat2[supp_hat2 > 0]

        # reconstructed vector
        x_hat2 = np.zeros(params['N'])
        x_hat2[supp_r] = y_r

        mse = la.norm(x_hat - x_hat2)**2
        supp_size = supp_r.shape[0]
        bit_error = len(set(supp_r).symmetric_difference(set(supp_hat)))
        success = (unsat[0] == 0)

        ret.append([mse, supp_size, bit_error, success, unsat, loops])

    return ret
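parallel_loop above expects a two-element args tuple (sparsity exponent, RNG seed) and reads everything else from a module-level params dict. A hypothetical driver, with purely illustrative parameter values, might look like:

import multiprocessing as mp
import numpy as np

# illustrative values only; params must live at module level so that the
# worker processes can see it
params = dict(n=10, b=4, C=3, N=2**10, sigma2=1.0,
              inner_loops=5, max_iter=20, algo_name='RANDOM')

if __name__ == '__main__':
    arg_list = [(frac, seed) for frac in np.linspace(0.1, 0.9, 5)
                for seed in range(10)]
    with mp.Pool(processes=4) as pool:
        results = pool.map(parallel_loop, arg_list)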
Example #54
0
def parallel_loop(filename, algo_names, pmt):
    '''
    This is one loop of the computation
    extracted for parallelization
    '''

    # We need to do a bunch of imports
    import pyroomacoustics as pra
    import os
    import numpy as np
    from scipy.io import wavfile
    import mkl as mkl_service
    import copy

    import doa
    from tools import rfft

    # for such parallel processing, it is better 
    # to deactivate multithreading in mkl
    mkl_service.set_num_threads(1)

    # extract the speaker names from the filename
    name = os.path.splitext(os.path.basename(filename))[0]
    sources = name.split('-')

    # number of sources
    K = len(sources)

    # Import speech signal
    fs_file, rec_signals = wavfile.read(filename)

    # sanity check
    if pmt['fs'] != fs_file:
        raise ValueError("The sampling frequency of the files doesn't match that of the script")
    
    speech_signals = np.array(rec_signals[:,pmt['mic_select']], dtype=np.float32)

    # Remove the DC bias
    for s in speech_signals.T:
        s[:] = pra.highpass(s, pmt['fs'], 100.)

    if pmt['stft_win']:
        stft_win = np.hanning(pmt['nfft'])
    else:
        stft_win = None

    # Normalize the amplitude
    speech_signals *= pmt['scaling']

    # Compute STFT of signal
    # -------------------------
    y_mic_stft = []
    for k in range(speech_signals.shape[1]):
        y_stft = pra.stft(speech_signals[:, k], pmt['nfft'], pmt['stft_hop'],
                          transform=rfft, win=stft_win).T / np.sqrt(pmt['nfft'])
        y_mic_stft.append(y_stft)
    y_mic_stft = np.array(y_mic_stft)

    # estimate SNR in dB (on 1st microphone)
    sig_var = np.var(speech_signals)
    SNR = 10*np.log10( (sig_var - pmt['noise_var']) / pmt['noise_var'] )

    freq_bins = copy.copy(pmt['freq_bins'][K-1])

    # dict for output
    phi_recon = {}

    for alg in algo_names:

        # Use the convenient dictionary of algorithms defined
        d = doa.algos[alg](
                L=pmt['mic_array'], 
                fs=pmt['fs'], 
                nfft=pmt['nfft'], 
                num_src=K, 
                c=pmt['c'], 
                theta=pmt['phi_grid'], 
                max_four=pmt['M'], 
                num_iter=pmt['num_iter'],
                G_iter = pmt['G_iter']
                )

        # perform localization
        d.locate_sources(y_mic_stft, freq_bins=freq_bins[alg])

        # store result
        phi_recon[alg] = d.phi_recon

    return SNR, sources, phi_recon
Example #55
0
import os, sys
from os import path as op

import scipy
import nibabel as nb
import numpy as np
from pandas import read_table, read_csv
from patsy import dmatrices, dmatrix

from CPAC.cwas import cwas
from CPAC.cwas.utils import calc_subdists, calc_mdmrs
from CPAC.cwas.subdist import *
from CPAC.cwas.mdmr import mdmr, gen_perms

import mkl
mkl.set_num_threads(8)


####
# Cool Functions
####

def load_subject(filepath, dtype='float64'):
    return nb.load(filepath).get_data().astype(dtype)

def load_subjects(filepaths, dtype='float64'):
    print "Loading Subjects"
    funcs = [ load_subject(fp, dtype) for fp in filepaths ]
    return funcs

def rois2voxels(dat, rois):
Example #56
0
# standard-library and third-party imports inferred from usage; they are not
# shown in this excerpt
import argparse
import os.path as osp
import pathlib

import bcolz
import mkl
import numpy as np

from trainer import Trainer
from utils import (update_task, get_max_of_db_column,
                   get_a_task, ExploitationNeeded,
                   LossIsNaN, get_task_ids_and_scores, PopulationFinished,
                   get_col_from_populations, RemainingTasksTaken,
                   print_with_time, ExploitationOcurring,
                   create_new_population)
from config import (get_optimizer, DATA_DIR, MODEL_CLASS, LOSS_FN,
                    HYPERPARAM_NAMES, EPOCHS, BATCH_SIZE, POPULATION_SIZE,
                    EXPLOIT_INTERVAL, USE_SQLITE)


if __name__ == "__main__":
    # TODO: Does this help?
    nproc = mkl.get_max_threads()  # e.g. 12
    mkl.set_num_threads(nproc)

    parser = argparse.ArgumentParser(description="Population Based Training")
    parser.add_argument("-g", "--gpu", type=int, default=0, help="Selects GPU with the given ID. IDs are those shown in nvidia-smi.")  # noqa
    parser.add_argument("-p", "--population_id", type=int, default=None, help="Resumes work on the population with the given ID. Use -1 to select the most recently created population. Without this flag, a new population will be created.")  # noqa
    parser.add_argument("-e", "--exploiter", action="store_true", help="Set this process as the exploiter. It will be responsible for running the exploit step over the entire population at the end of each interval.")  # noqa
    args = parser.parse_args()

    gpu = args.gpu
    population_id = args.population_id
    exploiter = args.exploiter
    inputs = bcolz.open(osp.join(DATA_DIR, "trn_inputs.bcolz"), 'r')
    targets = bcolz.open(osp.join(DATA_DIR, "trn_targets.bcolz"), 'r')
    pathlib.Path('checkpoints').mkdir(exist_ok=True)
    checkpoint_str = "checkpoints/pop-%03d_task-%03d.pth"
    interval_limit = int(np.ceil(EPOCHS / EXPLOIT_INTERVAL))
Example #57
0
#!/usr/bin/env python

# Create spatially constrained ROIs using Cameron's pyClusterROI

import os, sys
from os import path
sys.path.append("/home2/data/Projects/CWAS/pyClusterROI")

# control mkl
import mkl
mkl.set_num_threads(4)


###
# 1. SETUP
###

obase = "/home2/data/Projects/CWAS/age+gender/01_resolution/spatial_cluster"
rbase = "/home2/data/Projects/CWAS/share/age+gender/analysis/01_resolution/rois"

# functions to save to nifti
from make_image_from_bin_renum import *


###
# 2. Generate Individual Connectivity Matrices
###

# Done in 04*

Example #58
0
        mute_error=False)

    ###########################################################################################
    ## Remove the fake datasets because we don't need them anymore
    ###########################################################################################
    spdc_glob = glob(dir_fakes + os.path.sep +
                     "S*_spdc_distorcorr_{0}_ROC.fits".format(fakes_spectrum))
    for filename in spdc_glob:
        print("Removing {0}".format(filename))
        os.remove(filename)


if __name__ == "__main__":
    try:
        import mkl
        mkl.set_num_threads(1)
    except:
        pass

    print(platform.system())

    OS = platform.system()
    if OS == "Windows":
        print("Using WINDOWS!!")
    else:
        print("I hope you are using a UNIX OS")

    print('Number of arguments:', len(sys.argv), 'arguments.')
    print('Argument List:', str(sys.argv))
    print("CPU COUNT: {0}".format(mp.cpu_count()))
Example #59
0
    def get_projection_operator(self, sampling, scene, i_band=None, verbose=True):
        """
        Return the peak sampling operator.

        Parameters
        ----------
        sampling : QubicSampling
            The pointing information.
        scene : QubicScene
            The observed scene.
        verbose : bool, optional
            If true, display information about the memory allocation.

        """
        if not isinstance(scene.nu, float) and i_band is None:
            mask = [self.detector.index < 2295//2,
                    self.detector.index > 2295//2]
            dtype = self.synthetic_beam.dtype
            if scene.nside > 8192:
                dtype_index = np.dtype(np.int64)
            else:
                dtype_index = np.dtype(np.int32)
            theta0, phi, vals = _peak_angles_fraction(self, scene, self.synthetic_beam.fraction, 0)
            theta1, phi, vals = _peak_angles_fraction(self, scene, self.synthetic_beam.fraction, 1)
            ncolmax = max(theta0.shape[-1], theta1.shape[-1])

            cls = {'I'  : FSRMatrix,
                   'QU' : FSRRotation2dMatrix,
                   'IQU': FSRRotation3dMatrix}[scene.kind]
            ndims = len(scene.kind)
            s = cls((len(self) * len(sampling) * ndims, 12 * scene.nside**2 * ndims),
                    ncolmax=ncolmax, 
                    dtype=dtype,
                    dtype_index=dtype_index,
                    verbose=verbose)
            for i in [0, 1]:
                focal_plane = self[mask[i]]
                p = focal_plane.get_projection_operator(sampling, scene, i_band=i)
                smask = np.multiply.outer(mask[i], np.ones(len(sampling)), dtype=bool)
                smask = np.asarray(smask).reshape(-1)
                s.data[smask, :p.matrix.data.shape[-1]] = p.matrix.data
#                set_trace()
#                sh = s.data[mask[i], p.matrix.data.shape[-1]:].shape
#                tp = {'I'   : "int, float",
#                      'IQ'  : "int, float, float",
#                      'IQU' : "int, float, float, float"}[scene.kind]
#                s.data[mask[i], p.matrix.data.shape[-1]:] = np.ones(sh, dtype=tp) 
            shapeout = {'I'   : (len(self), len(sampling)),
                        'IQ'  : (len(self), len(sampling), 2),
                        'IQU' : (len(self), len(sampling), 3)}[scene.kind]
            return ProjectionOperator(s, shapeout=shapeout)
        else:
            rotation = sampling.cartesian_galactic2instrument
            dtype = self.synthetic_beam.dtype
            fraction = self.synthetic_beam.fraction
            ndetectors = len(self)
            ntimes = len(sampling)
            nside = scene.nside

            theta, phi, vals = _peak_angles_fraction(self, scene, fraction, i_band)
            ncolmax = theta.shape[-1]
            thetaphi = _pack_vector(theta, phi)  # (ndetectors, ncolmax, 2)
            direction = Spherical2CartesianOperator('zenith,azimuth')(thetaphi)
            e_nf = direction[:, None, :, :]
            if nside > 8192:
                dtype_index = np.dtype(np.int64)
            else:
                dtype_index = np.dtype(np.int32)

            cls = {'I': FSRMatrix,
                'QU': FSRRotation2dMatrix,
                'IQU': FSRRotation3dMatrix}[scene.kind]
            ndims = len(scene.kind)
            s = cls((ndetectors * ntimes * ndims, 12 * nside**2 * ndims),
                    ncolmax=ncolmax, dtype=dtype, dtype_index=dtype_index,
                    verbose=verbose)

            index = s.data.index.reshape((ndetectors, ntimes, ncolmax))

            nthreads = openmp_num_threads()
            try:
                import mkl
                mkl.set_num_threads(1)
            except:
                pass

            def func_thread(i):
                # e_nf[i] shape: (1, ncolmax, 3)
                # e_ni shape: (ntimes, ncolmax, 3)
                e_ni = rotation.T(e_nf[i].swapaxes(0, 1)).swapaxes(0, 1)
                index[i] = Cartesian2HealpixOperator(nside)(e_ni)

            pool = Pool(nthreads)
            pool.map(func_thread, xrange(ndetectors))
            pool.close()
            pool.join()

            try:
                mkl.set_num_threads(nthreads)
            except:
                pass

            if scene.kind == 'I':
                value = s.data.value.reshape(ndetectors, ntimes, ncolmax)
                value[...] = vals[:, None, :]
                shapeout = (ndetectors, ntimes)
            else:
                func = 'pointing_matrix_rot{0}d_i{1}_m{2}'.format(
                    ndims, dtype_index.itemsize, dtype.itemsize)
                try:
                    getattr(flib.polarization, func)(
                        rotation.data.T, direction.T, s.data.ravel().view(np.int8),
                        vals.T)
                except AttributeError:
                    raise TypeError(
                        'The projection matrix cannot be created with types: {0} a'
                        'nd {1}.'.format(dtype, dtype_index))
                if scene.kind == 'QU':
                    shapeout = (ndetectors, ntimes, 2)
                else:
                    shapeout = (ndetectors, ntimes, 3)

            return ProjectionOperator(s, shapeout=shapeout)
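The set-to-one / restore sequence around the thread pool above recurs across these examples, so it can be wrapped once; a small context-manager sketch (not part of the original instrument code):

import contextlib

@contextlib.contextmanager
def single_threaded_mkl():
    """Temporarily force MKL to a single thread, restoring the old limit on exit."""
    try:
        import mkl
    except ImportError:
        yield  # no MKL available; nothing to limit
        return
    old = mkl.get_max_threads()
    mkl.set_num_threads(1)
    try:
        yield
    finally:
        mkl.set_num_threads(old)

# usage, e.g. around the Pool in get_projection_operator:
#   with single_threaded_mkl():
#       pool.map(func_thread, range(ndetectors))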