Example #1
    def _reinit(self, source, numrepeat, type, numtile, indexmapping):
        self._original_init_arguments = (source, numrepeat, type, numtile, indexmapping)
        if isinstance(source, Bufferable):
            source = (source,)
        if indexmapping is None:
            nchannels = array([s.nchannels for s in source])
            idx = hstack(([0], cumsum(nchannels)))
            I = [arange(start, stop) for start, stop in zip(idx[:-1], idx[1:])]
            I = tuple(repeat(i, numrepeat) for i in I)
            if type == "serial":
                indexmapping = hstack(I)
            elif type == "interleave":
                if len(unique(nchannels)) != 1:
                    raise ValueError("For interleaving, all inputs must have an equal number of channels.")
                I0 = len(I[0])
                indexmapping = zeros(I0 * len(I), dtype=int)
                for j, i in enumerate(I):
                    indexmapping[j :: len(I)] = i
            else:
                raise ValueError('Type must be "serial" or "interleave"')
            indexmapping = tile(indexmapping, numtile)
        if not isinstance(indexmapping, ndarray):
            indexmapping = array(indexmapping, dtype=int)
        # optimisation to reduce multiple RestructureFilterbanks into a single
        # one, by collating the sources and reconstructing the indexmapping
        # from the individual indexmappings
        if all(isinstance(s, RestructureFilterbank) for s in source):
            newsource = ()
            newsourcesizes = ()
            for s in source:
                s._has_been_optimised = True
                s._optimisation_target = self
                newsource += s.source
                inputsourcesize = sum(inpsource.nchannels for inpsource in s.source)
                newsourcesizes += (inputsourcesize,)
            newsourcesizes = array(newsourcesizes)
            newsourceoffsets = hstack((0, cumsum(newsourcesizes)))
            new_indexmapping = zeros_like(indexmapping)
            sourcesizes = array(tuple(s.nchannels for s in source))
            sourceoffsets = hstack((0, cumsum(sourcesizes)))
            # gives the index of the source of each element of indexmapping
            sourceindices = digitize(indexmapping, cumsum(sourcesizes))
            for i in xrange(len(indexmapping)):
                source_index = sourceindices[i]
                s = source[source_index]
                relative_index = indexmapping[i] - sourceoffsets[source_index]
                source_relative_index = s.indexmapping[relative_index]
                new_index = source_relative_index + newsourceoffsets[source_index]
                new_indexmapping[i] = new_index
            source = newsource
            indexmapping = new_indexmapping

        self.indexmapping = indexmapping
        self.nchannels = len(indexmapping)
        self.samplerate = source[0].samplerate
        for s in source:
            if int(s.samplerate) != int(self.samplerate):
                raise ValueError("All sources must have the same samplerate.")
        self._source = source
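
To make the index-mapping construction above concrete, here is a minimal standalone sketch (with two hypothetical three-channel sources; numpy is imported explicitly here, whereas the excerpt uses unqualified names) showing what the 'serial' and 'interleave' branches produce:

import numpy as np

# Two hypothetical sources with 3 channels each: global channels 0-2 and 3-5.
nchannels = np.array([3, 3])
idx = np.hstack(([0], np.cumsum(nchannels)))
I = [np.arange(start, stop) for start, stop in zip(idx[:-1], idx[1:])]

serial = np.hstack(I)                                # [0 1 2 3 4 5]
interleave = np.zeros(len(I[0]) * len(I), dtype=int)
for j, i in enumerate(I):
    interleave[j::len(I)] = i                        # [0 3 1 4 2 5]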
Example #2
def all(sequence):
    """
    Returns True if all elements of sequence are True.  It is equivalent
    to calling reduce(op_and, sequence, True).

        >>> all([True, False, False])
        False

        >>> all([])
        True
    """
    return __builtin__.all(sequence)
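
Note that this wrapper simply delegates to the builtin; on Python 3 the __builtin__ module was renamed builtins, which is why a later example below calls builtins.all instead.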
Example #3
def all(iterable):
    warnings.warn("django.utils.itercompat.all is deprecated; use the native version instead",
                  DeprecationWarning)
    return __builtin__.all(iterable)
Example #4
def all(f, x):
    return __builtin__.all(__builtin__.map(f, x))
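
For illustration: this variant maps the predicate over the iterable before reducing, so all(lambda n: n > 0, [1, 2, 3]) returns True while all(lambda n: n > 0, [1, -2, 3]) returns False.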
Example #5
def all(iterable):
    warnings.warn(
        "django.utils.itercompat.all is deprecated; use the native version instead",
        PendingDeprecationWarning)
    return __builtin__.all(iterable)
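
The only difference from Example #3 is the warning class: in Django's deprecation cycle a feature first emits PendingDeprecationWarning and is promoted to DeprecationWarning a release later, so this snippet comes from an earlier Django version.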
Example #6
File: run.py Project: CKAKA/pyspider
def bench(ctx, fetcher_num, processor_num, result_worker_num, run_in, total, show):
    from pyspider.libs import bench
    from pyspider.webui import bench_test

    ctx.obj['debug'] = False
    g = ctx.obj
    if result_worker_num == 0:
        g['processor2result'] = None

    if run_in == 'subprocess' and os.name != 'nt':
        run_in = run_in_subprocess
    else:
        run_in = run_in_thread

    g.projectdb.insert('bench', {
        'name': 'bench',
        'status': 'RUNNING',
        'script': bench.bench_script % {'total': total, 'show': show},
        'rate': total,
        'burst': total,
        'updatetime': time.time()
    })

    # disable log
    logging.getLogger().setLevel(logging.ERROR)
    logging.getLogger('scheduler').setLevel(logging.ERROR)
    logging.getLogger('fetcher').setLevel(logging.ERROR)
    logging.getLogger('processor').setLevel(logging.ERROR)
    logging.getLogger('result').setLevel(logging.ERROR)
    logging.getLogger('webui').setLevel(logging.ERROR)

    threads = []

    # result worker
    result_worker_config = g.config.get('result_worker', {})
    for i in range(result_worker_num):
        threads.append(run_in(ctx.invoke, result_worker,
                              ResultWorker=bench.BenchResultWorker, **result_worker_config))

    # processor
    processor_config = g.config.get('processor', {})
    for i in range(processor_num):
        threads.append(run_in(ctx.invoke, processor,
                              Processor=bench.BenchProcessor, **processor_config))

    # fetcher
    fetcher_config = g.config.get('fetcher', {})
    fetcher_config.setdefault('xmlrpc_host', '127.0.0.1')
    for i in range(fetcher_num):
        threads.append(run_in(ctx.invoke, fetcher,
                              Fetcher=bench.BenchFetcher, **fetcher_config))

    # scheduler
    scheduler_config = g.config.get('scheduler', {})
    scheduler_config.setdefault('xmlrpc_host', '127.0.0.1')
    threads.append(run_in(ctx.invoke, scheduler,
                          Scheduler=bench.BenchScheduler, **scheduler_config))

    # webui
    webui_config = g.config.get('webui', {})
    webui_config.setdefault('scheduler_rpc', 'http://localhost:%s/'
                            % g.config.get('scheduler', {}).get('xmlrpc_port', 23333))
    threads.append(run_in(ctx.invoke, webui, **webui_config))

    # run project
    time.sleep(1)
    import requests
    rv = requests.post('http://localhost:5000/run', data={
        'project': 'bench',
    })
    assert rv.status_code == 200, 'run project error'

    # wait bench test finished
    while True:
        time.sleep(1)
        if __builtin__.all(getattr(g, x) is None or getattr(g, x).empty() for x in (
                'newtask_queue', 'status_queue', 'scheduler2fetcher',
                'fetcher2processor', 'processor2result')):
            break

    # exit components run in threading
    for each in g.instances:
        each.quit()

    # exit components run in subprocess
    for each in threads:
        if hasattr(each, 'terminate'):
            each.terminate()
        each.join(1)
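
The wait loop above uses __builtin__.all as a drain check: it polls until every inter-component queue is either absent (None) or empty. A minimal sketch of the same pattern, with hypothetical queue names standing in for pyspider's:

import time
import queue

# Hypothetical stand-ins for pyspider's inter-component queues.
queues = {'newtask_queue': queue.Queue(), 'status_queue': None}

def drained(qs):
    # A queue counts as drained if it was never created or holds no items.
    return all(q is None or q.empty() for q in qs.values())

while not drained(queues):
    time.sleep(1)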
Example #7
def all(iterable, pred):
    "Returns True if ALL elements in the given iterable are true for the given pred function"
    return builtins.all(pred(x) for x in iterable)
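
For example, all([2, 4, 6], lambda x: x % 2 == 0) returns True because every element satisfies the predicate, whereas all([2, 3], lambda x: x % 2 == 0) returns False.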
Example #8
    def contains (self, instance, *instances):
        return Filter(lambda x: __builtin__.all(inst in x.field_get(self.name)
                                                for inst in (instance,) + instances))
Example #9
    def all (self, constraint, *constraints):
        for extra in constraints:
            constraint = constraint & extra

        return Filter(lambda x: __builtin__.all(constraint(inst) for inst in
                                                x.field_get(self.name)))
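
Here the extra constraints are first folded into one combined constraint with & (which the constraint objects presumably overload), so the resulting Filter accepts an object only when every element of the named field satisfies all of them.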