Example 1
    def checkAllDatasets(self):
        """
        Look for corrupted files in the whole catalog.
        """
        catalog = self.readCatalog()
        
        self.parallel_ = Parallel(50,self.queue_)
        ## self.parallel_ = Parallel(1,self.queue_)

        print "Checking all datasets"
        for dataset in catalog.keys():            
            self.checkDatasetFiles(dataset,catalog)
        
        outcomes = self.parallel_.wait(printOutput=False)

        ## for dsetName,ifile,fName,ret,out in outcomes:
        for ign1, ign2, outcome in outcomes:
            dsetName,ifile,fName,ret,out = outcome
            info = catalog[dsetName]["files"][ifile]
            if info["name"] != fName:
                print "Inconsistent outcome ", info["name"], dsetName,ifile,fName,ret,out
            else:
                if ret != 0:
                    info["bad"] = True
                else:
                    extraInfo = json.loads(str(out))
                    for key,val in extraInfo.iteritems():
                        info[key] = val

        print "Writing catalog"
        self.writeCatalog(catalog)
        print "Done"
Example 2
class USARTTest(unittest.TestCase):

    par = Parallel()

    def __init__(self, *args):
        unittest.TestCase.__init__(self, *args)
        self.ser = Serial(_SERIAL_PATH,
                          baudrate=_BAUDRATE,
                          bytesize=8,
                          parity='N',
                          stopbits=1,
                          timeout=1,
                          xonxoff=0,
                          rtscts=0)
        self._toWrite = "Write_TEST123"
        self._toRead = "Read_TEST123"

    def testread(self):
        time.sleep(0.1)
        USARTTest.par.setData(0x01)
        time.sleep(0.1)
        self.ser.write(self._toRead + '\n')
        USARTTest.par.setData(0x00)

        print "See avr console..."

    def testwrite(self):
        time.sleep(0.1)
        USARTTest.par.setData(0x01)
        line = self.ser.readline(len(self._toWrite) + 1, '\n')
        USARTTest.par.setData(0x00)

        self.assertNotEqual(len(line), 0)

        self.assertNotEqual(line.find(self._toWrite), -1, "Received %s" % line)
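The test drives pyparallel's `setData` as a handshake line around pyserial I/O. Here is the same strobe pattern in isolation, as a sketch (the device path and baud rate are assumptions; `setData` is pyparallel's real API):

# Sketch: strobe parallel-port data pin D0 around a serial write.
import time
import parallel
import serial

par = parallel.Parallel()  # opens the default port (/dev/parport0 on Linux)
ser = serial.Serial('/dev/ttyUSB0', baudrate=9600, timeout=1)

par.setData(0x01)          # raise D0 to signal the device under test
time.sleep(0.1)
ser.write(b'PING\n')
par.setData(0x00)          # drop all data pins again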
Example 3
    def __init__(self,
                 mode='dummy',
                 address=None,
                 high_duration=0.005,
                 verbose=None):
        if mode == 'parallel':
            if sys.platform.startswith('linux'):
                address = '/dev/parport0' if address is None else address
                if not isinstance(address, string_types):
                    raise ValueError('address must be a string or None, got %s '
                                     'of type %s' % (address, type(address)))
                from parallel import Parallel
                self._port = Parallel(address)
                self._portname = address
                self._set_data = self._port.setData
            elif sys.platform.startswith('win'):
                from ctypes import windll
                if not hasattr(windll, 'inpout32'):
                    raise SystemError(
                        'Must have inpout32 installed, see:\n\n'
                        'http://www.highrez.co.uk/downloads/inpout32/')

                base = 0x378 if address is None else address
                if isinstance(base, string_types):
                    base = int(base, 16)
                if not isinstance(base, int):
                    raise ValueError('address must be int or None, got %s of '
                                     'type %s' % (base, type(base)))
                self._port = windll.inpout32
                mask = np.uint8(1 << 5 | 1 << 6 | 1 << 7)
                # Use ECP to put the port into byte mode
                val = int((self._port.Inp32(base + 0x402) & ~mask) | (1 << 5))
                self._port.Out32(base + 0x402, val)

                # Now to make sure the port is in output mode we need to make
                # sure that bit 5 of the control register is not set
                val = int(self._port.Inp32(base + 2) & ~np.uint8(1 << 5))
                self._port.Out32(base + 2, val)
                self._set_data = lambda data: self._port.Out32(base, data)
                self._portname = str(base)
            else:
                raise NotImplementedError('Parallel port triggering only '
                                          'supported on Linux and Windows')
        else:  # mode == 'dummy':
            self._port = self._portname = None
            self._trigger_list = list()
            self._set_data = lambda x: (self._trigger_list.append(x)
                                        if x != 0 else None)
        self.high_duration = high_duration
        self.mode = mode
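In 'dummy' mode no hardware is touched: `_set_data` appends every nonzero value to `_trigger_list` and silently drops zeros (the "all pins low" state). A sketch of what that buys you in tests, assuming the enclosing class is called `Trigger` (the name is a placeholder):

# Hypothetical usage of the dummy mode; 'Trigger' stands in for the real class.
t = Trigger(mode='dummy')
t._set_data(4)   # recorded: nonzero trigger values are appended
t._set_data(0)   # ignored: zeros are not logged
assert t._trigger_list == [4]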
Example 4
    def __init__(self, motor_inputs, state=0, delay=0.05):
        '''
        :param motor_inputs: Ordered list of parallel values to turn motor
        :type motor_inputs: list or tuple
        :param state: Initial starting state of motor position
        :type state: int
        :param delay: Delay between steps (speed)
        :type delay: float
        '''
        self.MOTOR_INPUTS = motor_inputs
        self.state = state
        self.delay = delay
        # Set up the parallel interface on first init
        self.parallel_interface = Parallel()
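Stepping the motor then amounts to walking `state` through `MOTOR_INPUTS`, writing each coil pattern to the port, and sleeping `delay` between writes. A sketch of that loop as a standalone helper (`step` is hypothetical; `setData` is pyparallel's API):

import time

def step(motor, n_steps=1):
    # advance 'motor' through its coil patterns, one port write per step
    for _ in range(n_steps):
        motor.state = (motor.state + 1) % len(motor.MOTOR_INPUTS)
        motor.parallel_interface.setData(motor.MOTOR_INPUTS[motor.state])
        time.sleep(motor.delay)  # pacing between patterns sets the speed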
Example 5
    def __call__(self):
        """
        __call__
        Run all jobs.
        """
        self.parallel = Parallel(self.options.ncpu,
                                 lsfQueue=self.options.queue,
                                 lsfJobName="%s/runJobs" %
                                 self.options.outputDir,
                                 asyncLsf=False)

        self.jobs = None
        if not self.options.cont:
            self.firstRun()

        self.monitor()
Example 6
    def getFilesFomEOS(self,dsetName):
        """
        Read dataset files crawling EOS.
        @dsetName: dataset name
        Note: not implemented
        """
        
        if not self.parallel_:
            self.parallel_ = Parallel(200,self.queue_)
        
        ret,out = self.parallel_.run("/afs/cern.ch/project/eos/installation/0.3.15/bin/eos.select",["find",dsetName],interactive=True)[2]
        ## print out
        files = []
        for line in out.split("\n"):
            if line.endswith(".root"):
                files.append( {"name":line.replace("/eos/cms",""), "nevents":0} )

        return files
Example 7
    def monitor(self):

        (options, args) = (self.options, self.args)
        parallel = self.parallel

        with open("%s/task_config.json" % (options.outputDir), "r") as cfin:
            task_config = json.loads(cfin.read())

        doutfiles = task_config["datasets_output"]
        poutfiles = task_config["process_output"]
        outfiles = task_config["output"]
        outputPfx = task_config["outputPfx"]

        if not options.dry_run:
            ## FIXME: job resubmission
            self.jobs = task_config["jobs"]
            returns = self.wait(parallel, self)
            task_config["jobs"] = self.jobs

        if options.hadd:
            print "All jobs finished. Merging output."
            p = Parallel(options.ncpu)
            hadd = "hadd -f "
            if options.hadd_process:
                for proc, out in poutfiles.iteritems():
                    outfile, outfiles = out
                    p.run("%s %s" % (hadd, outfile), outfiles)
            if options.hadd_dataset:
                if options.hadd_process:
                    hadd += " -T"
                for dset, out in doutfiles.iteritems():
                    outfile, outfiles = out
                    p.run("%s %s" % (hadd, outfile), outfiles)
            if not (options.hadd_process or options.hadd_dataset):
                p.run("%s %s.root" % (hadd, outputPfx), outfiles)

            self.wait(p)

        with open("%s/task_config.json" % (options.outputDir), "w+") as cfout:
            cfout.write(json.dumps(task_config, indent=4))

        self.parallel.stop()
Example 8
def insertMarks(expInfo, nombreEDF):
    ponermarcas = []
    if expInfo[EXPERIMENT_TYPE] == EMOTIV:
        from multiprocessing import Process, Queue
        import guardar
        q_marcas = Queue()
        p = Process(target=guardar.save_data, args=(
            nombreEDF,
            q_marcas,
        ))
        p.start()
        ponermarcas = 1
    elif expInfo[EXPERIMENT_TYPE] == TRADITIONAL_EEG:
        from parallel import Parallel  # version suggested by Fede (see mail 02/08/2016)
        q_marcas = Parallel()  # version suggested by Fede (see mail 02/08/2016)
        q_marcas.setData(0)  # just to make sure it starts with all pins down
        ponermarcas = 2
    elif expInfo[EXPERIMENT_TYPE] == CONDUCTUAL:
        q_marcas = 1
        ponermarcas = 0
    return ponermarcas, q_marcas
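With the TRADITIONAL_EEG branch, event marks are then typically written as short TTL pulses: drive the mark value onto the data pins, hold briefly, and return to zero. A sketch of that pulse (the pulse width is an assumption; only `setData` comes from the snippet):

import time

def send_mark(q_marcas, value, width_s=0.01):
    q_marcas.setData(value)  # raise the data pins to the mark value
    time.sleep(width_s)      # hold long enough for the EEG amp to sample it
    q_marcas.setData(0)      # back to all pins low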
Example 9
    warnings.simplefilter("ignore")

    tweets = None
    sentiment_dir = "../sentiment/"
    sentiment_models = {
        "text_blob": find_text_blob_sentiment,
        "vader": find_vader_sentiment,
    }

    for model_name, model_function in sentiment_models.items():
        sentiment_path = os.path.join(sentiment_dir, model_name) + ".pickle"
        if not os.path.exists(sentiment_path):
            if tweets is None:
                tweets = load_tweets()
                tweets = list(tweets.items())

            results = Parallel(model_function, tweets, model_name)  # use the looped-over function, not a hard-coded one

            sentiment = {tweet_id: value for tweet_id, value in results}
            save_pickle(sentiment, sentiment_path)

    model_name = "flair"
    sentiment_path = os.path.join(sentiment_dir, model_name) + ".pickle"
    if not os.path.exists(sentiment_path):
        if tweets is None:
            tweets = load_tweets()
            tweets = list(tweets.items())
        sentiment = find_flair_sentiment(tweets, chunk_len=100000)
        sentiment_models[model_name] = sentiment
        save_pickle(sentiment, sentiment_path)
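The project's `Parallel(func, items, label)` helper is not shown here; judging by how the results unpack, it maps `func` over the tweet items and returns the per-tweet values. A rough standard-library equivalent, as a sketch (the signature is an assumption):

from multiprocessing import Pool

def run_parallel(func, items, label, processes=4):
    # map func over the (tweet_id, tweet) pairs across worker processes
    print("running %s on %d items" % (label, len(items)))
    with Pool(processes) as pool:
        return pool.map(func, items)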
Example 10
def process_mentions():
    """retrieves all mentions and generates captions for those who are fighting
        fit"""
    if not cfg('twitter:user_requests:bool'):
        return

    params = dict(count=200)
    sources_whitelist = cfg('twitter:sources_whitelist:list')
    mention_prefix = '@%s ' % twitter.me.screen_name.lower()

    try:
        with open('state_mentions_timeline.txt') as fp:
            since_id = int(fp.read())
        utils.logging.info('State: since_id=%d', since_id)
        params['since_id'] = since_id
    except Exception as exc:
        utils.logging.warning("There's no last id saved, so I will save the "
                              'last id I see and then quit.')
        since_id = None

    filtered_statuses = []
    statuses = [
        status
        for page in tweepy.Cursor(twitter.api.mentions_timeline, **params).pages()
        for status in page
    ]
    # they are in reverse chronological order, so put them straight
    statuses = statuses[::-1]
    if not since_id:
        since_id = statuses[-1].id
        with open('state_mentions_timeline.txt', 'wt') as fp:
            fp.write(str(since_id))
        utils.logging.info('New since_id=%d. Goodbye!', since_id)
        return

    for status in statuses:
        # ignore mentions that are not directed at me
        if not status.text.lower().startswith(mention_prefix):
            continue

        # ignore retweets
        if hasattr(status, 'retweeted_status'):
            continue

        # if the sources whitelist is enabled, ignore those who aren't on it
        if (sources_whitelist and status.source not in sources_whitelist):
            continue

        # store this status
        filtered_statuses.append(status)

    if filtered_statuses:
        utils.logging.info('Retrieved %d new mentions (from %d to %d).',
                           len(filtered_statuses), filtered_statuses[0].id,
                           filtered_statuses[-1].id)
        with open('state_mentions_timeline.txt', 'wt') as fp:
            fp.write(str(filtered_statuses[-1].id))

        Akari.warmup()

        parallel = Parallel(process_request, filtered_statuses,
                            cfg('twitter:process_threads:int') or 3)
        parallel.start()
    else:
        utils.logging.info('Retrieved no new mentions.')
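`Parallel(process_request, filtered_statuses, n)` is this bot's own thread-pool wrapper. A rough equivalent of what `start()` presumably does, sketched with the standard library (names assumed):

from concurrent.futures import ThreadPoolExecutor

def start_parallel(worker, items, n_threads=3):
    # fan the filtered statuses out over a small pool of worker threads
    with ThreadPoolExecutor(max_workers=n_threads) as pool:
        list(pool.map(worker, items))  # drain the iterator so exceptions surface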
Example 11
    print CommandSequence([Loop('x', 1, 10, 0.5, Comment("Hello"))])

    print CommandSequence(
        [Loop('x', 1, 10, 0.5, Comment("Hello"), completion=True)])

    print CommandSequence([
        Loop('x',
             1,
             10,
             0.5, [Comment("Hello"), Comment("World")],
             completion=True,
             timeout=10)
    ])

    print CommandSequence([
        Loop('x', 2, 20, 5, [
            Loop('y',
                 1,
                 10,
                 0.5, [Comment("Hello"), Comment("World")],
                 completion=True,
                 timeout=10)
        ])
    ])

    print CommandSequence([
        Parallel(Loop('x', 1, 10, 0.5, Comment("Hello")),
                 Loop('y', 1, 10, 0.5, Comment("There")))
    ])
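`Parallel(...)` in this command DSL runs its child commands concurrently, and like any command it can sit inside a `Loop` body. A small composition built only from the constructors already shown, as a sketch:

    print CommandSequence([
        Loop('x', 1, 10, 0.5,
             [Parallel(Comment("Hello"), Comment("World"))])
    ])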
Example 12
        dataset = Dataset(dataset_name=args.dataset)
        mlp_aux = MLP_AUX(dataset, args.negative_sampling_size,
                          eval(args.layers), args.epochs, args.batch_size,
                          args.validation_split, args.user_sampling_size,
                          args.core_number, args.sim_threshold)
        model = mlp_aux.train_model()
        hits, ndcgs = evaluate_model(model, dataset.test_data,
                                     dataset.test_negatives, 10, 1, True)
        print("Hitrate: {}".format(sum(hits) / len(hits)))
        print("NDCG: {}".format(sum(ndcgs) / len(ndcgs)))
    elif args.network_type == 'parallel':

        dataset = Dataset(dataset_name=args.dataset)
        parallel = Parallel(dataset, args.negative_sampling_size,
                            eval(args.layers), args.epochs, args.batch_size,
                            args.validation_split)
        model = parallel.train_model()
        hits, ndcgs = evaluate_model(model, dataset.test_data,
                                     dataset.test_negatives, 10, 1)
        print("Hitrate: {}".format(sum(hits) / len(hits)))
        print("NDCG: {}".format(sum(ndcgs) / len(ndcgs)))
    elif args.network_type == 'parallel-aux':
        dataset = Dataset(dataset_name=args.dataset)
        parallel_aux = Parallel_AUX(dataset, args.negative_sampling_size,
                                    eval(args.layers), args.epochs,
                                    args.batch_size, args.validation_split,
                                    args.user_sampling_size, args.core_number,
                                    args.sim_threshold)
        model = parallel_aux.train_model()
        hits, ndcgs = evaluate_model(model, dataset.test_data,
Example 13
def main():
    # Session info
    expInfo = {
        NAME: 'nombre',
        BIRTHDATE: 'DD/MM/AA',
        HAND: 'mano',
        EXPERIMENT_TYPE: 'conductual',
        OPERATOR: ''
    }

    # Show a dialog to fill in
    dlg = gui.DlgFromDict(expInfo, title='Formulario')
    if not (dlg.OK):
        core.quit()
    else:
        fileName = expInfo[NAME]
        if not os.path.exists('./Datos/' + fileName):
            os.makedirs('./Datos/' + fileName)
        dataFile = open(
            './Datos/' + fileName + '/' + str(datetime.date.today()) + '_' +
            fileName + '.csv', 'a')
        nombreEDF = './Datos/' + fileName + "/" + str(
            datetime.date.today()) + '_' + fileName
        texto = expInfo[NAME] + '; ' + str(datetime.datetime.now(
        )) + '; ' + expInfo[BIRTHDATE] + '; ' + expInfo[HAND] + '; ' + expInfo[
            EXPERIMENT_TYPE] + '; ' + expInfo[OPERATOR]
    dataFile.write(texto)

    ##########################
    ##  Screen parameters   ##
    ##########################
    res = [gtk.gdk.screen_width(), gtk.gdk.screen_height()]
    pantCompleta = True

    #win = visual.Window(res, monitor="Mi Monitor", units="pix",  color=gris, colorSpace='hex', fullscr=pantCompleta)
    win = visual.Window(res,
                        units="pix",
                        color=gris,
                        colorSpace='hex',
                        fullscr=pantCompleta,
                        monitor="testMonitor")
    win.setMouseVisible(False)

    ###########################################
    ## Initialize experiment parameters      ##
    ###########################################
    proporcion = 0.7
    pruebas = 30
    Nsess = 12

    # Timings
    #StimDur = 0.184
    #ISI = 0.986
    StimDur = 0.404
    ISI = 0.986

    ponermarcas = []
    if expInfo[EXPERIMENT_TYPE] == EMOTIV:
        from multiprocessing import Process, Queue
        import guardar
        q_marcas = Queue()
        p = Process(target=guardar.save_data, args=(
            nombreEDF,
            q_marcas,
        ))
        p.start()
        ponermarcas = 1
    elif expInfo[EXPERIMENT_TYPE] == TRADITIONAL_EEG:
        from parallel import Parallel  # version suggested by Fede (see mail 02/08/2016)
        #from psychopy import parallel
        # BIOSEMI
        #q_marcas=parallel.ParallelPort(address=u'/dev/parport0')
        #q_marcas=parallel.PParallelDLPortIO(address=888) # check that the parallel port address is correct
        q_marcas = Parallel()  # version suggested by Fede (see mail 02/08/2016)
        q_marcas.setData(0)  # just to make sure it starts with all pins down
        ponermarcas = 2
    elif expInfo[EXPERIMENT_TYPE] == CONDUCTUAL:
        q_marcas = 1
        ponermarcas = 0

    cond = pacman
    stimuli = [
        pacmanImage, "./estimulo/fantasma_naranja.png",
        "./estimulo/fantasma_rosado.png", "./estimulo/fantasma_verde.png",
        "./estimulo/fantasma_azul.png"
    ]
    pantalla_inicio = "./estimulo/pantini_pacman.png"
    run_training(win, proporcion, 10, 6, StimDur, ISI, res, gris, negro,
                 blanco, stimuli, pantalla_inicio)
    run_experiment(dataFile, win, proporcion, pruebas, Nsess, StimDur, ISI,
                   q_marcas, ponermarcas, res, gris, negro, blanco, stimuli,
                   pantalla_inicio, cond)

    cond = angry
    stimuli = [
        birdImage, "./estimulo/cerdo_naranja.png",
        "./estimulo/cerdo_rosado.png", "./estimulo/cerdo_verde.png",
        "./estimulo/cerdo_azul.png"
    ]
    pantalla_inicio = "./estimulo/pantini_angry.png"
    run_training(win, proporcion, 10, 6, StimDur, ISI, res, gris, negro,
                 blanco, stimuli, pantalla_inicio)
    run_experiment(dataFile, win, proporcion, pruebas, Nsess, StimDur, ISI,
                   q_marcas, ponermarcas, res, gris, negro, blanco, stimuli,
                   pantalla_inicio, cond)
Example 14
    def do_task(self):
        if self.task is not None:
            p = Parallel()
            print(p)
            Parallel().run(self.task.logic)
Example 15
    def checkDatasetFiles(self,dsetName,catalog=None):
        """
        Look for corrupted files in dataset.
        @dsetName: dataset name
        Note: not implemented
        """
        writeCatalog = False
        if not catalog:
            catalog = self.readCatalog()
            writeCatalog = True
        
        wait = False
        if not self.parallel_:
            self.parallel_ = Parallel(16,self.queue_)
            wait = True

        print 
        print "Checking dataset",dsetName
        info = catalog[dsetName]
        files = info["files"]
        print "Number of files: ", len(files)
        
        toremove = []
        for ifil,eifil in enumerate(files):
            if ifil in toremove:
                continue
            for jfil,ejfil in enumerate(files[ifil+1:]):
                jidx = ifil + 1 + jfil  # actual index of ejfil in files
                if jidx in toremove:
                    continue
                if eifil["name"] == ejfil["name"]:
                    toremove.append(ifil)
                else:
                    # strip the ".root" suffix; rstrip(".root") would drop any trailing '.','r','o','t'
                    iid = eifil["name"].replace(".root","").rsplit("_",1)[-1]
                    jid = ejfil["name"].replace(".root","").rsplit("_",1)[-1]
                    if iid == jid:
                        print "duplicated file index ", iid
                        print eifil["name"]
                        print ejfil["name"]
                        reply=ask_user("keep both? ")
                        if reply == "n":
                            if ask_user( "keep %s? " % ejfil["name"] ) == "n":
                                ## files.pop(ifil+jfil)
                                toremove.append(ifil+jfil)
                            if ask_user( "keep %s? " % eifil["name"] ) == "n":
                                toremove.append(ifil)
                                ## files.pop(ifil)
                                
        for ifile in sorted(toremove,reverse=True):
            ## print ifile
            files.pop(ifile)
            
        print "After duplicates removal: ", len(files)
        info = catalog[dsetName]["files"] = files
        for ifile,finfo in enumerate(files):            
            name = finfo["name"]
            self.parallel_.run(SamplesManager.checkFile,[self,name,dsetName,ifile])

        if wait:
            self.parallel_.wait(printOutput=False)            
            self.parallel_ = None
            
        if writeCatalog:
            self.writeCatalog(catalog)
Example 16
    def run(self,
            genome_files,
            output_dir,
            called_genes=False,
            translation_table=None,
            meta=False,
            closed_ends=False):
        """Call genes with Prodigal.

        Call genes with prodigal and store the results in the
        specified output directory. For convenience, the
        called_gene flag can be used to indicate genes have
        previously been called and simply need to be copied
        to the specified output directory.

        Parameters
        ----------
        genome_files : list of str
            Nucleotide fasta files to call genes on.
        output_dir : str
            Directory to store called genes.
        called_genes : boolean
            Flag indicating if genes are already called.
        translation_table : int
            Specifies desired translation table, use None to automatically
            select between tables 4 and 11.
        meta : boolean
            Flag indicating if prodigal should call genes with the metagenomics procedure.
        closed_ends : boolean
            If True, do not allow genes to run off edges (throws -c flag).

        Returns
        -------
        d[genome_id] -> namedtuple(best_translation_table
                                            coding_density_4
                                            coding_density_11)
            Summary statistics of called genes for each genome.
        """

        self.called_genes = called_genes
        self.translation_table = translation_table
        self.meta = meta
        self.closed_ends = closed_ends
        self.output_dir = output_dir

        make_sure_path_exists(self.output_dir)

        progress_func = None
        if self.verbose:
            file_type = 'genomes'
            self.progress_str = '  Finished processing %d of %d (%.2f%%) genomes.'
            if meta:
                file_type = 'scaffolds'
                if len(genome_files):
                    file_type = ntpath.basename(genome_files[0])

                self.progress_str = '  Finished processing %d of %d (%.2f%%) files.'

            self.logger.info('Identifying genes within %s: ' % file_type)
            progress_func = self._progress

        parallel = Parallel(self.cpus)
        summary_stats = parallel.run(
            self._producer, self._consumer, genome_files, progress_func)

        # An error was encountered during Prodigal processing, clean up.
        if not summary_stats:
            shutil.rmtree(self.output_dir)

        return summary_stats
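The `Parallel.run` call above follows a producer/consumer protocol: the producer is mapped over `genome_files` in worker processes, and each result is folded through the consumer in the parent. A toy sketch of that contract (both functions hypothetical, shaped to match the call):

def producer(genome_file):
    # runs in a worker process; does the per-genome work
    return call_prodigal(genome_file)  # call_prodigal is hypothetical

def consumer(produced_data, consumer_data):
    # runs in the parent; folds each worker result into the accumulator
    if consumer_data is None:
        consumer_data = []
    consumer_data.append(produced_data)
    return consumer_data

# summary_stats = Parallel(cpus).run(producer, consumer, genome_files, progress_func)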
Example 17
    def fit(self, X, y, sample_weight=None):  # linear-regression fit
        """
        Fit linear model.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Training data

        y : array_like, shape (n_samples, n_targets)
            Target values. Will be cast to X's dtype if necessary

        sample_weight : numpy array of shape [n_samples]
            Individual weights for each sample

            .. versionadded:: 0.17
               parameter *sample_weight* support to LinearRegression.

        Returns
        -------
        self : returns an instance of self.
        """

        n_jobs_ = self.n_jobs
        X, y = check_X_y(X,
                         y,
                         accept_sparse=['csr', 'csc', 'coo'],
                         y_numeric=True,
                         multi_output=True)  # validate the data and convert formats as needed

        # numpy's atleast_xd helpers (atleast_1d, atleast_2d, atleast_3d)
        # treat the input as having at least x dimensions
        if sample_weight is not None and np.atleast_1d(sample_weight).ndim > 1:
            raise ValueError("Sample weights must be 1D array or scalar"
                             )  # sampple_weight 必须为一维 array

        X, y, X_offset, y_offset, X_scale = self._preprocess_data(
            X,
            y,
            fit_intercept=self.fit_intercept,
            normalize=self.normalize,
            copy=self.copy_X,
            sample_weight=sample_weight)  # center and rescale the X data

        if sample_weight is not None:
            # Sample weight can be implemented via a simple rescaling.
            X, y = _rescale_data(X, y, sample_weight)  # apply sample_weight by rescaling the data

        if sp.issparse(X):

            if y.ndim < 2:

                out = sparse_lsqr(X, y)  # delegates straight to scipy's sparse
                # least-squares solver (lsqr)
                self.coef_ = out[0]
                self._residues = out[3]

            else:
                # sparse_lstsq cannot handle y with shape (M, K)
                outs = Parallel(n_jobs=n_jobs_)(
                    delayed(sparse_lsqr)(X, y[:, j].ravel())
                    for j in range(y.shape[1]))
                self.coef_ = np.vstack([out[0] for out in outs])
                self._residues = np.vstack([out[3] for out in outs])

        else:
            self.coef_, self._residues, self.rank_, self.singular_ = \
                linalg.lstsq(X, y)  # least-squares solution to Ax = b
            self.coef_ = self.coef_.T

        if y.ndim == 1:
            self.coef_ = np.ravel(self.coef_)
        self._set_intercept(X_offset, y_offset, X_scale)

        return self
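The sparse branch uses joblib's Parallel/delayed idiom: one `delayed(...)` task per target column, fed to a `Parallel` worker pool from a generator. The same idiom in isolation:

# Standalone illustration of the joblib Parallel/delayed pattern used above.
from math import sqrt
from joblib import Parallel, delayed

# one delayed(sqrt)(i) task per value, run across 2 worker processes
results = Parallel(n_jobs=2)(delayed(sqrt)(i) for i in range(10))
print(results)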
Example 18
    def update(self,
               nprocs=1,
               factor=None,
               bw_hz=None,
               foi_hz=None,
               fs_hz=None,
               f_ord=None,
               ftype=None,
               n_freqs=None,
               n_samples=None,
               n_channels=None):

        self.n_channels = n_channels if n_channels is not None else self.n_channels
        self.n_freqs = n_freqs if n_freqs is not None else self.n_freqs
        self.n_processes = (min(Parallel.check_nprocs() - 1, self.n_freqs)
                            if nprocs != 1 else 1)

        # Signal process properties
        self.decimate_by = factor
        self.n_samples = int(n_samples / self.decimate_by)
        self.sample_rate = fs_hz / self.decimate_by if fs_hz is not None else self.sample_rate

        self.bandwidth = bw_hz if bw_hz is not None else self.bandwidth

        self.w_, self.H_ = self.create_filter(f_ord,
                                              self.bandwidth / 2.0,
                                              self.sample_rate / 2.0,
                                              self.n_samples,
                                              ftype='fir',
                                              output='freq')
        self.Hwin = self.H_[np.logical_and(self.w_ >= -self.bandwidth / 2.0,
                                           self.w_ < self.bandwidth / 2.0)]

        self.n_samples_procs = self.Hwin.size

        # Setup center frequencies
        if len(foi_hz) > 1:
            cf = np.arange(*foi_hz, np.diff(foi_hz) / self.n_freqs, dtype=int)
            diff = cf.shape[0] - self.n_freqs
            if diff > 0:
                cf = cf[:-diff]
        else:
            cf = foi_hz

        self.freqs = np.asarray([
            (f - self.bandwidth / 2, f + self.bandwidth / 2) for f in cf
        ])

        # Create rules for how to handle the data
        self._encoder_rule()
        self._decoder_rule()

        if self.n_processes > 1:
            self.pfunc = Parallel(self.multiply,
                                  nprocs=self.n_processes,
                                  axis=0,
                                  flag=0,
                                  ins_shape=[(self.n_channels, self.n_freqs,
                                              self.n_samples_procs),
                                             (1, self.n_samples_procs)],
                                  ins_dtype=[np.complex64, np.complex64],
                                  out_shape=(self.n_channels, self.n_freqs,
                                             self.n_samples_procs),
                                  out_dtype=np.complex64)
Example 19
# !setup!
import neworder
from parallel import Parallel  # import our model definition

#neworder.verbose()
#neworder.checked(False)

# must be MPI enabled
assert neworder.mpi.size() > 1, "This configuration requires MPI with >1 process"
# !setup!

# !run!
population_size = 100
p = 0.01
timeline = neworder.LinearTimeline(0, 10, 10)
model = Parallel(timeline, p, population_size)
neworder.run(model)
# !run!
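Because the script asserts `neworder.mpi.size() > 1`, it must be launched through an MPI runner rather than plain `python`; for example (the script name is an assumption):

# launch with two MPI ranks, e.g.:
#   mpiexec -n 2 python parallel_model.py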