Example #1
def test_get_context_using_python3_posix():
    """ get_context() respects configuration.

    If default context is changed this test will need to change too.
    """
    assert get_context() is multiprocessing.get_context(None)
    with dask.config.set({"multiprocessing.context": "forkserver"}):
        assert get_context() is multiprocessing.get_context("forkserver")
    with dask.config.set({"multiprocessing.context": "spawn"}):
        assert get_context() is multiprocessing.get_context("spawn")
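For reference, the default-context behavior this test relies on holds in the standard library alone; a minimal standalone sketch:

import multiprocessing

# get_context() and get_context(None) both resolve to the same default-context
# object, so an identity check passes.
assert multiprocessing.get_context() is multiprocessing.get_context(None)
# Available start methods vary by platform, e.g. ['fork', 'spawn', 'forkserver'] on Linux.
print(multiprocessing.get_all_start_methods())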
Example #2
def get_multiproc_context(capabilities):
    best_concurrency = capabilities.get('Process Startup Method', 'fork')
    if hasattr(multiprocessing, 'get_context'):
        for each in (best_concurrency, 'fork', 'spawn'):
            if hasattr(multiprocessing, 'get_all_start_methods'):
                if each in multiprocessing.get_all_start_methods():
                    return multiprocessing.get_context(each)
            else:
                try:
                    return multiprocessing.get_context(each)
                except ValueError:
                    pass # invalid concurrency for this system
    return None
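A short usage sketch of the helper above; the capabilities dict shape comes from the snippet itself, and 'spawn' is only an illustrative preference:

capabilities = {'Process Startup Method': 'spawn'}
ctx = get_multiproc_context(capabilities)
if ctx is not None:
    # Queues, processes, etc. should all come from the selected context.
    q = ctx.Queue()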
Example #3
def testfs(tmpdir):

    # We can't use forkserver or spawn because of
    # https://github.com/pytest-dev/pytest/issues/958.
    if hasattr(multiprocessing, 'get_context'):
        mp = multiprocessing.get_context('fork')
    else:
        # Older versions only support *fork* anyway
        mp = multiprocessing
    if threading.active_count() != 1:
        raise RuntimeError("Multi-threaded test running is not supported")

    mnt_dir = str(tmpdir)
    with mp.Manager() as mgr:
        cross_process = mgr.Namespace()
        mount_process = mp.Process(target=run_fs,
                                   args=(mnt_dir, cross_process))

        mount_process.start()
        try:
            wait_for_mount(mount_process, mnt_dir)
            yield (mnt_dir, cross_process)
        except:
            cleanup(mnt_dir)
            raise
        else:
            umount(mount_process, mnt_dir)
Example #4
def test_read_text(hdfs):
    db = pytest.importorskip('dask.bag')
    import multiprocessing as mp
    pool = mp.get_context('spawn').Pool(2)

    with hdfs.open('%s/text.1.txt' % basedir, 'wb') as f:
        f.write('Alice 100\nBob 200\nCharlie 300'.encode())

    with hdfs.open('%s/text.2.txt' % basedir, 'wb') as f:
        f.write('Dan 400\nEdith 500\nFrank 600'.encode())

    with hdfs.open('%s/other.txt' % basedir, 'wb') as f:
        f.write('a b\nc d'.encode())

    b = db.read_text('hdfs://%s/text.*.txt' % basedir)
    with dask.config.set(pool=pool):
        result = b.str.strip().str.split().map(len).compute()

    assert result == [2, 2, 2, 2, 2, 2]

    b = db.read_text('hdfs://%s/other.txt' % basedir)
    with dask.config.set(pool=pool):
        result = b.str.split().flatten().compute()

    assert result == ['a', 'b', 'c', 'd']
Example #5
    def _exec_task_process(self, ctxt, task_id, task_type, origin, destination,
                           instance, task_info):
        mp_ctx = multiprocessing.get_context('spawn')
        mp_q = mp_ctx.Queue()
        mp_log_q = mp_ctx.Queue()
        p = mp_ctx.Process(
            target=_task_process,
            args=(ctxt, task_id, task_type, origin, destination, instance,
                  task_info, mp_q, mp_log_q))

        p.start()
        LOG.info("Task process started: %s", task_id)
        self._rpc_conductor_client.set_task_host(
            ctxt, task_id, self._server, p.pid)

        self._handle_mp_log_events(p, mp_log_q)
        p.join()

        if mp_q.empty():
            raise exception.CoriolisException("Task canceled")
        result = mp_q.get(False)

        if isinstance(result, str):
            raise exception.TaskProcessException(result)
        return result
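The parent above treats an empty queue after join() as cancellation and a string result as an error. A sketch of the child-side contract that implies (run_task is a hypothetical stand-in for the real task body):

def _task_process(ctxt, task_id, task_type, origin, destination, instance,
                  task_info, mp_q, mp_log_q):
    try:
        result = run_task(task_type, origin, destination, instance, task_info)
        mp_q.put(result)   # non-string result: success
    except Exception as ex:
        mp_q.put(str(ex))  # string: surfaced as TaskProcessException in the parent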
Example #6
    def __init__(self, host='127.0.0.1', http_port=9786, tcp_port=8786,
                 bokeh_port=8787, bokeh_whitelist=[], log_level='info',
                 show=False, prefix=None, use_xheaders=False):
        self.port = bokeh_port
        ip = socket.gethostbyname(host)

        hosts = ['localhost',
                 '127.0.0.1',
                 ip,
                 host]

        with ignoring(Exception):
            hosts.append(socket.gethostbyname(ip))
        with ignoring(Exception):
            hosts.append(socket.gethostbyname(socket.gethostname()))

        hosts = ['%s:%d' % (h, bokeh_port) for h in hosts]

        hosts.append("*")

        hosts.extend(map(str, bokeh_whitelist))

        args = ([binname, 'serve'] + paths +
                ['--log-level', 'warning',
                 '--check-unused-sessions=50',
                 '--unused-session-lifetime=1',
                 '--port', str(bokeh_port)] +
                 sum([['--host', h] for h in hosts], []))

        if prefix:
            args.extend(['--prefix', prefix])

        if show:
            args.append('--show')

        if use_xheaders:
            args.append('--use-xheaders')

        if log_level in ('debug', 'info', 'warning', 'error', 'critical'):
            args.extend(['--log-level', log_level])

        bokeh_options = {'host': host,
                         'http-port': http_port,
                         'tcp-port': tcp_port,
                         'bokeh-port': bokeh_port}
        with open('.dask-web-ui.json', 'w') as f:
            json.dump(bokeh_options, f, indent=2)

        if sys.version_info[0] >= 3:
            from bokeh.command.bootstrap import main
            ctx = multiprocessing.get_context('spawn')
            self.process = ctx.Process(target=main, args=(args,))
            self.process.daemon = True
            self.process.start()
        else:
            import subprocess
            self.process = subprocess.Popen(args)

        logger.info(" Bokeh UI at:  http://%s:%d/status/"
                    % (ip, bokeh_port))
Example #7
    def test_ipc_handle_serialization(self):
        # prepare data for IPC
        arr = np.arange(10, dtype=np.intp)
        devarr = cuda.to_device(arr)

        # create IPC handle
        ctx = cuda.current_context()
        ipch = ctx.get_ipc_handle(devarr.gpu_data)

        # pickle
        buf = pickle.dumps(ipch)
        ipch_recon = pickle.loads(buf)
        self.assertIs(ipch_recon.base, None)
        self.assertEqual(tuple(ipch_recon.handle), tuple(ipch.handle))
        self.assertEqual(ipch_recon.size, ipch.size)

        # spawn new process for testing
        ctx = mp.get_context('spawn')
        result_queue = ctx.Queue()
        args = (ipch, result_queue)
        proc = ctx.Process(target=serialize_ipc_handle_test, args=args)
        proc.start()
        succ, out = result_queue.get()
        if not succ:
            self.fail(out)
        else:
            np.testing.assert_equal(arr, out)
        proc.join(3)
Example #8
    def test_ipc_handle(self):
        # prepare data for IPC
        arr = np.arange(10, dtype=np.intp)
        devarr = cuda.to_device(arr)

        # create IPC handle
        ctx = cuda.current_context()
        ipch = ctx.get_ipc_handle(devarr.gpu_data)

        # manually prepare for serialization as bytes
        handle_bytes = bytes(ipch.handle)
        size = ipch.size

        # spawn new process for testing
        ctx = mp.get_context('spawn')
        result_queue = ctx.Queue()
        args = (handle_bytes, size, result_queue)
        proc = ctx.Process(target=base_ipc_handle_test, args=args)
        proc.start()
        succ, out = result_queue.get()
        if not succ:
            self.fail(out)
        else:
            np.testing.assert_equal(arr, out)
        proc.join(3)
Example #9
 def start_child(self):
     ctx = mp.get_context(self.SPAWN_METHOD)
     state = ctx.Value(_State, False, False, False)
     process = ctx.Process(target=_worker,
                           args=(state, self.loader))
     process.start()
     asyncio.ensure_future(self.connect(process, state))
Example #10
    def __init__(self, initialValue = None):
        ctx = multiprocessing.get_context()
        self.mgr = multiprocessing.Manager()
        self.data = self.mgr.dict()

        self.data["value"] = initialValue
        self.condition = ctx.Condition()
Example #11
def main():
    ctx = mp.get_context('spawn')
    q = ctx.Queue()
    p = ctx.Process(target=foo, args=(q,))
    p.start()
    print(q.get())
    p.join()
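This is the canonical example from the multiprocessing documentation; the foo target is omitted above, but in the documented version it simply puts a string on the queue:

def foo(q):
    q.put('hello')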
Example #12
 def __init__(self):
     self.ctx = multiprocessing.get_context()
     self.consumers = {}
     self.queue = self.ctx.Queue()
     self.broadcast = BroadcastValue()
     self.threads = []
     self.object = None
Example #13
def main(center, host, port, http_port, bokeh_port, show, _bokeh, bokeh_whitelist):
    given_host = host
    host = host or get_ip()
    ip = socket.gethostbyname(host)
    loop = IOLoop.current()
    scheduler = Scheduler(center, ip=ip,
                          services={('http', http_port): HTTPScheduler})
    if center:
        loop.run_sync(scheduler.sync_center)
    scheduler.start(port)

    if _bokeh:
        try:
            import bokeh
            import distributed.bokeh
            hosts = ['%s:%d' % (h, bokeh_port) for h in
                     ['localhost', '127.0.0.1', ip, socket.gethostname(),
                      host] + list(bokeh_whitelist)]
            dirname = os.path.dirname(distributed.__file__)
            paths = [os.path.join(dirname, 'bokeh', name)
                     for name in ['status', 'tasks']]
            binname = sys.argv[0][:-len('dscheduler')] + 'bokeh'
            args = ([binname, 'serve'] + paths +
                    ['--log-level', 'warning',
                     '--check-unused-sessions=50',
                     '--unused-session-lifetime=1',
                     '--port', str(bokeh_port)] +
                     sum([['--host', host] for host in hosts], []))
            if show:
                args.append('--show')

            bokeh_options = {'host': host if given_host else '127.0.0.1',
                             'http-port': http_port,
                             'tcp-port': port,
                             'bokeh-port': bokeh_port}
            with open('.dask-web-ui.json', 'w') as f:
                json.dump(bokeh_options, f, indent=2)

            if sys.version_info[0] >= 3:
                from bokeh.command.bootstrap import main
                ctx = multiprocessing.get_context('spawn')
                bokeh_proc = ctx.Process(target=main, args=(args,))
                bokeh_proc.daemon = True
                bokeh_proc.start()
            else:
                bokeh_proc = subprocess.Popen(args)

            logger.info(" Bokeh UI at:  http://%s:%d/status/"
                        % (ip, bokeh_port))
        except ImportError:
            logger.info("Please install Bokeh to get Web UI")
        except Exception:
            logger.warning("Could not start Bokeh web UI", exc_info=True)

    loop.start()
    loop.close()
    scheduler.stop()
    bokeh_proc.terminate()

    logger.info("End scheduler at %s:%d", ip, port)
Example #14
    def loadAttributes( self, numberOfThreads = 7 ):
        
        # Put the driver directories in a queue
        ctx = multiprocessing.get_context('fork')
        driversInQueue = ctx.Queue()
        driverFiles = os.listdir( self.__dir )
        numberOfDriversToProcess = 0
        for driverFile in driverFiles:
            driverId = int(driverFile.split('.')[0])
            driver = Driver( driverId )
            driversInQueue.put( driver )
            numberOfDriversToProcess += 1
        
        # The worker-process reading function
        def readDataFunction( inputQueue, outputQueue, driverTopDir ):
            while True:
                driver = inputQueue.get()
                driver.readTripsFromDirectory( self.__dir )
                numberOfTrips = driver.numberOfTrips()
                tripData = []
                for i in range(numberOfTrips):
                    trip = driver.getTrip( i + 1 )
                    values, labels = trip.attributes()
                    tripData.append( ( trip.id(), values, labels ) )
                outputQueue.put( (driver.id(), tripData ) )
            return

        # Start the reader processes
        threads = []
        driversOutQueue = ctx.Queue()

        for i in range( numberOfThreads):
            thread = ctx.Process( target = readDataFunction, args = (driversInQueue, driversOutQueue, self.__dir ) )
            thread.start()
            threads.append( thread )

        # Set up the logger
        log = ProcessLogger( numberOfDriversToProcess, "Drivers processed : " )
        outputData = []
        labels = []
        for i in range( numberOfDriversToProcess ):
            driverId, tripData = driversOutQueue.get()
            # Loop over the trips for this driver
            for trip in tripData:
                if len(labels) == 0: # This is the first entry. Retrieve the header.
                    labels.append( 'driverId' )
                    labels.append( 'tripId' )
                    for label in trip[2]:
                        labels.append(label)
                    outputData = numpy.array([]).reshape(0,len(labels))
                tripId = trip[0]
                attributes = trip[1]
                tripRow = numpy.hstack( (driverId, tripId, attributes) )
                outputData = numpy.vstack( (outputData, tripRow) )
            log.taskEnded()

        for t in threads:
            t.terminate()

        return pandas.DataFrame(outputData, columns=labels)
Example #15
 def __init__(self, env_fns, spaces=None, context='spawn'):
     """
     If you don't specify observation_space, we'll have to create a dummy
     environment to get it.
     """
     ctx = mp.get_context(context)
     if spaces:
         observation_space, action_space = spaces
     else:
         logger.log('Creating dummy env object to get spaces')
         with logger.scoped_configure(format_strs=[]):
             dummy = env_fns[0]()
             observation_space, action_space = dummy.observation_space, dummy.action_space
             dummy.close()
             del dummy
     VecEnv.__init__(self, len(env_fns), observation_space, action_space)
     self.obs_keys, self.obs_shapes, self.obs_dtypes = obs_space_info(observation_space)
     self.obs_bufs = [
         {k: ctx.Array(_NP_TO_CT[self.obs_dtypes[k].type], int(np.prod(self.obs_shapes[k]))) for k in self.obs_keys}
         for _ in env_fns]
     self.parent_pipes = []
     self.procs = []
     with clear_mpi_env_vars():
         for env_fn, obs_buf in zip(env_fns, self.obs_bufs):
             wrapped_fn = CloudpickleWrapper(env_fn)
             parent_pipe, child_pipe = ctx.Pipe()
             proc = ctx.Process(target=_subproc_worker,
                         args=(child_pipe, parent_pipe, wrapped_fn, obs_buf, self.obs_shapes, self.obs_dtypes, self.obs_keys))
             proc.daemon = True
             self.procs.append(proc)
             self.parent_pipes.append(parent_pipe)
             proc.start()
             child_pipe.close()
     self.waiting_step = False
     self.viewer = None
Example #16
 def _do():
   mp_ctx = multiprocessing.get_context('spawn')
   cpu_count = multiprocessing.cpu_count()
   with ThreadPoolExecutor(max_workers=cpu_count) as executor:
     yield from yield_pkgs(
         executor, submit_pkgs(executor, [Package.root_package()], mp_ctx),
         mp_ctx)
Example #17
 def pool(self):
     return Pool(
         processes=self.processes,
         initializer=initializer,
         initargs=self.initargs,
         context=get_context('forkserver'),
     )
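The same effect is available without a wrapper, since context objects carry their own Pool factory; a minimal standalone sketch (note forkserver exists on Unix only):

from multiprocessing import get_context

if __name__ == '__main__':
    with get_context('forkserver').Pool(processes=2) as pool:
        print(pool.map(abs, [-1, -2, 3]))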
Example #18
 def __init__(self, testcase_suite, manager):
     self.keep_alive_parent_end, self.keep_alive_child_end = Pipe(
         duplex=False)
     self.finished_parent_end, self.finished_child_end = Pipe(duplex=False)
     self.result_parent_end, self.result_child_end = Pipe(duplex=False)
     self.testcase_suite = testcase_suite
     if sys.version[0] == '2':
         self.stdouterr_queue = manager.StreamQueue()
     else:
         from multiprocessing import get_context
         self.stdouterr_queue = manager.StreamQueue(ctx=get_context())
     self.logger = get_parallel_logger(self.stdouterr_queue)
     self.child = Process(target=test_runner_wrapper,
                          args=(testcase_suite,
                                self.keep_alive_child_end,
                                self.stdouterr_queue,
                                self.finished_child_end,
                                self.result_child_end,
                                self.logger)
                          )
     self.child.start()
     self.last_test_temp_dir = None
     self.last_test_vpp_binary = None
     self._last_test = None
     self.last_test_id = None
     self.vpp_pid = None
     self.last_heard = time.time()
     self.core_detected_at = None
     self.testcases_by_id = {}
     self.testclasess_with_core = {}
     for testcase in self.testcase_suite:
         self.testcases_by_id[testcase.id()] = testcase
     self.result = TestResult(testcase_suite, self.testcases_by_id)
Example #19
def testfs(tmpdir):

    # We can't use forkserver because we have to make sure
    # that the server inherits the per-test stdout/stderr file
    # descriptors.
    if hasattr(multiprocessing, 'get_context'):
        mp = multiprocessing.get_context('fork')
    else:
        # Older versions only support *fork* anyway
        mp = multiprocessing
    if threading.active_count() != 1:
        raise RuntimeError("Multi-threaded test running is not supported")

    mnt_dir = str(tmpdir)
    with mp.Manager() as mgr:
        cross_process = mgr.Namespace()
        mount_process = mp.Process(target=run_fs,
                                   args=(mnt_dir, cross_process))

        mount_process.start()
        try:
            wait_for_mount(mount_process, mnt_dir)
            yield (mnt_dir, cross_process)
        except:
            cleanup(mnt_dir)
            raise
        else:
            umount(mount_process, mnt_dir)
Example #20
 def _launch(self):
     context = multiprocessing.get_context('fork')
     client_ch, server_ch = context.Pipe()
     self.process = context.Process(
         target=self.run_server, args=(server_ch, ), daemon=True)
     self.process.start()
     server_ch.close()
     self.client_ch = Connection.from_Connection(client_ch)
Example #21
 def pool(self):
     return Pool(
         processes=self.queue_worker.processes,
         initializer=initializer,
         initargs=self.initargs,
         maxtasksperchild=self.maxtasks,
         context=get_context('forkserver'),
     )
Example #22
 def init_pool(self):
     return Pool(
         processes=self.processes,
         initializer=initializer,
         initargs=self.initargs,
         maxtasksperchild=self.maxtasks,
         context=get_context('spawn'),
     )
Example #23
    def test_no_collision(self):
        bar1 = self.import_bar1()
        bar2 = self.import_bar2()
        with capture_cache_log() as buf:
            res1 = bar1()
        cachelog = buf.getvalue()
        # bar1 should save new index and data
        self.assertEqual(cachelog.count('index saved'), 1)
        self.assertEqual(cachelog.count('data saved'), 1)
        self.assertEqual(cachelog.count('index loaded'), 0)
        self.assertEqual(cachelog.count('data loaded'), 0)
        with capture_cache_log() as buf:
            res2 = bar2()
        cachelog = buf.getvalue()
        # bar2 should save new index and data
        self.assertEqual(cachelog.count('index saved'), 1)
        self.assertEqual(cachelog.count('data saved'), 1)
        self.assertEqual(cachelog.count('index loaded'), 0)
        self.assertEqual(cachelog.count('data loaded'), 0)
        self.assertNotEqual(res1, res2)

        try:
            # Make sure we can spawn a new process without inheriting
            # the parent context.
            mp = multiprocessing.get_context('spawn')
        except ValueError:
            # Skip rather than fall through with `mp` unbound.
            self.skipTest("missing spawn context")

        q = mp.Queue()
        # Start new process that calls `cache_file_collision_tester`
        proc = mp.Process(target=cache_file_collision_tester,
                          args=(q, self.tempdir,
                                self.modname_bar1,
                                self.modname_bar2))
        proc.start()
        # Get results from the process
        log1 = q.get()
        got1 = q.get()
        log2 = q.get()
        got2 = q.get()
        proc.join()

        # The remote execution result of bar1() and bar2() should match
        # the one executed locally.
        self.assertEqual(got1, res1)
        self.assertEqual(got2, res2)

        # The remote should have loaded bar1 from cache
        self.assertEqual(log1.count('index saved'), 0)
        self.assertEqual(log1.count('data saved'), 0)
        self.assertEqual(log1.count('index loaded'), 1)
        self.assertEqual(log1.count('data loaded'), 1)

        # The remote should have loaded bar2 from cache
        self.assertEqual(log2.count('index saved'), 0)
        self.assertEqual(log2.count('data saved'), 0)
        self.assertEqual(log2.count('index loaded'), 1)
        self.assertEqual(log2.count('data loaded'), 1)
Example #24
def get_context():
    """ Return the current multiprocessing context."""
    if sys.platform == "win32" or sys.version_info.major == 2:
        # Just do the default, since we can't change it:
        if config.get("multiprocessing.context", None) is not None:
            warn(_CONTEXT_UNSUPPORTED, UserWarning)
        return multiprocessing
    context_name = config.get("multiprocessing.context", None)
    return multiprocessing.get_context(context_name)
Example #25
def main():
    files = os.listdir(XML_FILES)
    ctx = mp.get_context('fork')
    q = ctx.Queue()

    for host in files:
        p = ctx.Process(target=worker, args=(host,q))
        p.start()
        p.join()
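Note that calling p.start() and then p.join() inside the same loop processes the hosts one at a time; a sketch of the fully parallel variant, using the same names:

    procs = [ctx.Process(target=worker, args=(host, q)) for host in files]
    for p in procs:
        p.start()
    for p in procs:
        p.join()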
Example #26
 def test(self):
   mp_ctx = multiprocessing.get_context('spawn')
   with ThreadPoolExecutor(max_workers=2) as executor:
     futs = [
         executor.submit(util.isolated(task, i, mp_ctx)) for i in range(10)
     ]
     time.sleep(2)
     self.assertTrue(all(f.done() for f in futs))
     self.assertTrue(all(f.result() % 2 == 0 for f in futs))
Example #27
 def __init__(self, *args, **kwargs):
     if sys.version_info[0] <= 2:
         super(Queue, self).__init__(*args, **kwargs)
     else:
         super(Queue, self).__init__(*args, ctx=multiprocessing.get_context(),
                                     **kwargs)
     self._reader = ConnectionWrapper(self._reader)
     self._writer = ConnectionWrapper(self._writer)
     self._send = self._writer.send
     self._recv = self._reader.recv
Example #28
 def __init__(self, cards):
     super(CardProgressQueue, self).__init__(
             len(cards), ctx=multiprocessing.get_context())
     self._cw = CardWidget()
     widgets = [self._cw, ' ', progressbar.widgets.Bar(left='[', right=']'), ' ',
                progressbar.widgets.SimpleProgress(), ' ', progressbar.widgets.ETA()]
     self._pbar = progressbar.bar.ProgressBar(widgets=widgets, max_value=len(cards))
     self._pbar.start()
     for c in cards:
         self.put(c)
Example #29
 def __init__(self, processes=None, initializer=None, initargs=(),
              maxtasksperchild=None, context=None):
     if context is None:
         context = mp.get_context()
     context = _nondaemon_context_mapper[context._name]
     super(NonDaemonPool, self).__init__(processes=processes,
                                         initializer=initializer,
                                         initargs=initargs,
                                         maxtasksperchild=maxtasksperchild,
                                         context=context)
Example #30
    def __init__(self,
                 n_channels=2,
                 sample_rate=44100,
                 sample_width=2,
                 output_device_id=0,
                 input_device_id=0,
                 master_volume=1.0):

        self.n_channels = n_channels
        self.sample_rate = sample_rate
        self.sample_width = sample_width

        self.source_bank = SourceBank()

        self.chunks_queue_size = 100

        ctx = multiprocessing.get_context('spawn')

        self.playback_queue = ctx.Queue(self.chunks_queue_size)
        self.recording_queue = ctx.Queue(self.chunks_queue_size)

        self.n_frames_per_chunk = 1024

        self.playback_producer = PlaybackQueueProducer(
            self.source_bank,
            self.playback_queue,
            self.n_channels,
            self.sample_rate,
            self.n_frames_per_chunk,
            master_volume=master_volume)

        self.audio_interface_process = ctx.Process(
            target=playback_consumer,
            args=(
                self.playback_queue,
                None, #self.recording_queue,
                self.n_channels,
                self.sample_rate,
                self.sample_width,
                self.n_frames_per_chunk,
                output_device_id,
                input_device_id))

        self.recording_process = ctx.Process(
            target=recording_consumer,
            args=(
                self.recording_queue,
                self.sample_rate))

        print("Launching processes")
        self.audio_interface_process.daemon = True
        self.audio_interface_process.start()

        self.recording_process.daemon = True
        self.recording_process.start()
Example #31
class NoDaemonContext(type(multiprocessing.get_context())):
    Process = NoDaemonProcess
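NoDaemonProcess is not part of this snippet; a common formulation pins the daemon flag to False so that pool workers built from this context may spawn children of their own (a sketch under that assumption):

class NoDaemonProcess(multiprocessing.Process):
    # Always report non-daemonic and ignore attempts to change it.
    @property
    def daemon(self):
        return False

    @daemon.setter
    def daemon(self, value):
        pass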
Example #32
def main():
    graph, sess = load_graph(FLAGS.pre_trained_model_path)
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, FLAGS.width)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, FLAGS.height)
    mp = _mp.get_context("spawn")
    v = mp.Value('i', 0)
    lock = mp.Lock()
    process = mp.Process(target=mario, args=(v, lock))
    process.start()
    line.sendtext('Start Game')
    while True:
        key = cv2.waitKey(1)
        if key == ord("q"):
            line.sendtext('End Game')
            break
        _, frame = cap.read()
        frame = cv2.flip(frame, 1)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        boxes, scores, classes = detect_hands(frame, graph, sess)
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        results = predict(boxes, scores, classes, FLAGS.threshold, FLAGS.width,
                          FLAGS.height)

        if len(results) == 1:
            x_min, x_max, y_min, y_max, category = results[0]
            x = int((x_min + x_max) / 2)
            y = int((y_min + y_max) / 2)
            cv2.circle(frame, (x, y), 5, RED, -1)

            if category == "Open" and x <= FLAGS.width / 3:
                action = 7  # Left jump
                text = "Jump Backward"
                line.sendtext('Jump Backward')
            elif category == "Closed" and x <= FLAGS.width / 3:
                action = 6  # Left
                text = "Run Backward"
                line.sendtext('Run Backward')
            elif category == "Open" and FLAGS.width / 3 < x <= 2 * FLAGS.width / 3:
                action = 5  # Jump
                text = "Jump"
                line.sendtext('Jump')
            elif category == "Closed" and FLAGS.width / 3 < x <= 2 * FLAGS.width / 3:
                action = 0  # Do nothing
                text = "Stop"
                line.sendtext('Stop')
            elif category == "Open" and x > 2 * FLAGS.width / 3:
                action = 2  # Right jump
                text = "Jump Forward"
                line.sendtext('Jump Forward')
            elif category == "Closed" and x > 2 * FLAGS.width / 3:
                action = 1  # Right
                text = "Run Forward"
                line.sendtext('Run Forward')
            else:
                action = 0
                text = "Stop"
                line.sendtext('Stop')
            with lock:
                v.value = action
            cv2.putText(frame, "{}".format(text), (x_min, y_min - 5),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, GREEN, 2)
        overlay = frame.copy()
        cv2.rectangle(overlay, (0, 0), (int(FLAGS.width / 3), FLAGS.height),
                      BLUE, -1)
        cv2.rectangle(overlay, (int(2 * FLAGS.width / 3), 0),
                      (FLAGS.width, FLAGS.height), BLUE, -1)
        cv2.addWeighted(overlay, FLAGS.alpha, frame, 1 - FLAGS.alpha, 0, frame)
        cv2.imshow('Detection', frame)

    cap.release()
    cv2.destroyAllWindows()
Example #33
 def get_context(self):
     return get_context(self.ctx)
Example #34
def run(config_cls=ConfigBuilder,
        route_builder=None,
        mp_context=None,
        log_handlers=None,
        **kwargs):
    logger = get_logger("INFO", log_handlers)

    if mp_context is None:
        if hasattr(multiprocessing, "get_context"):
            mp_context = multiprocessing.get_context()
        else:
            mp_context = MpContext()

    with build_config(logger,
                      os.path.join(repo_root, "config.json"),
                      config_cls=config_cls,
                      **kwargs) as config:
        # This sets the right log level
        logger = get_logger(config.log_level, log_handlers)

        bind_address = config["bind_address"]

        if kwargs.get("alias_file"):
            with open(kwargs["alias_file"], 'r') as alias_file:
                for line in alias_file:
                    alias, doc_root = [x.strip() for x in line.split(',')]
                    config["aliases"].append({
                        'url-path': alias,
                        'local-dir': doc_root,
                    })

        if route_builder is None:
            route_builder = get_route_builder
        routes = route_builder(logger, config.aliases, config).get_routes()

        if config["check_subdomains"]:
            check_subdomains(logger, config, routes, mp_context, log_handlers)

        stash_address = None
        if bind_address:
            stash_address = (config.server_host, get_port(""))
            logger.debug("Going to use port %d for stash" % stash_address[1])

        with stash.StashServer(stash_address, authkey=str(uuid.uuid4())):
            servers = start(logger, config, routes, mp_context, log_handlers,
                            **kwargs)

            if not kwargs["exit_after_start"]:
                try:
                    # Periodically check if all the servers are alive
                    server_process_exited = False
                    while not server_process_exited:
                        for server in iter_servers(servers):
                            server.proc.join(1)
                            if not server.proc.is_alive():
                                server_process_exited = True
                                break
                except KeyboardInterrupt:
                    pass

            failed_subproc = 0
            for server in iter_servers(servers):
                subproc = server.proc
                if subproc.is_alive():
                    logger.info('Status of subprocess "%s": running',
                                subproc.name)
                    server.stop(timeout=1)

                if server.proc.exitcode == 0:
                    logger.info('Status of subprocess "%s": exited correctly',
                                subproc.name)
                else:
                    logger.warning(
                        'Status of subprocess "%s": failed. Exit with non-zero status: %d',
                        subproc.name, subproc.exitcode)
                    failed_subproc += 1
            return failed_subproc
Example #35
    def map_unordered(cls,
                      function,
                      items,
                      multiprocess=False,
                      file=None,
                      step=100,
                      ipython_widget=False,
                      multiprocessing_start_method=None):
        """Map function over items, reporting the progress.

        Does a `map` operation while displaying a progress bar with
        percentage complete. The map operation may run in arbitrary order
        on the items, and the results may be returned in arbitrary order.

        ::

            def work(i):
                print(i)

            ProgressBar.map(work, range(50))

        Parameters
        ----------
        function : function
            Function to call for each step

        items : sequence
            Sequence where each element is a tuple of arguments to pass to
            *function*.

        multiprocess : bool, int, optional
            If `True`, use the `multiprocessing` module to distribute each task
            to a different processor core. If a number greater than 1, then use
            that number of cores.

        ipython_widget : bool, optional
            If `True`, the progress bar will display as an IPython
            notebook widget.

        file : writable file-like, optional
            The file to write the progress bar to.  Defaults to
            `sys.stdout`.  If ``file`` is not a tty (as determined by
            calling its `isatty` member, if any), the progress bar will
            be completely silent.

        step : int, optional
            Update the progress bar at least every *step* steps (default: 100).
            If ``multiprocess`` is `True`, this will affect the size
            of the chunks of ``items`` that are submitted as separate tasks
            to the process pool.  A large step size may make the job
            complete faster if ``items`` is very long.

        multiprocessing_start_method : str, optional
            Useful primarily for testing; if in doubt leave it as the default.
            When using multiprocessing, certain anomalies occur when starting
            processes with the "spawn" method (the only option on Windows);
            other anomalies occur with the "fork" method (the default on
            Linux).
        """
        # concurrent.futures import here to avoid import failure when running
        # in pyodide/Emscripten
        from concurrent.futures import ProcessPoolExecutor, as_completed

        results = []

        if file is None:
            file = _get_stdout()

        with cls(len(items), ipython_widget=ipython_widget, file=file) as bar:
            if bar._ipython_widget:
                chunksize = step
            else:
                default_step = max(int(float(len(items)) / bar._bar_length), 1)
                chunksize = min(default_step, step)
            if not multiprocess or multiprocess < 1:
                for i, item in enumerate(items):
                    results.append(function(item))
                    if (i % chunksize) == 0:
                        bar.update(i)
            else:
                ctx = multiprocessing.get_context(multiprocessing_start_method)
                kwargs = dict(mp_context=ctx)

                with ProcessPoolExecutor(
                        max_workers=(int(multiprocess)
                                     if multiprocess is not True else None),
                        **kwargs) as p:
                    for i, f in enumerate(
                            as_completed(
                                p.submit(function, item) for item in items)):
                        bar.update(i)
                        results.append(f.result())

        return results
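A short usage sketch of the method above, assuming it is bound as the classmethod its cls parameter suggests; work is a hypothetical task function:

def work(i):
    return i * i

# Four worker processes started with the 'spawn' method; results arrive unordered.
results = ProgressBar.map_unordered(work, list(range(50)),
                                    multiprocess=4,
                                    multiprocessing_start_method='spawn')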
Example #36
    async def __anit__(self, core):

        await s_base.Base.__anit__(self)

        self.core = core
        self.iden = s_common.guid()
        self.proc = None

        self.ready = asyncio.Event()
        self.mpctx = multiprocessing.get_context('spawn')

        name = f'SpawnProc#{self.iden[:8]}'
        self.threadpool = concurrent.futures.ThreadPoolExecutor(
            max_workers=2, thread_name_prefix=name)

        self.todo = self.mpctx.Queue()
        self.done = self.mpctx.Queue()
        self.proc = None  # type: multiprocessing.Process
        self.procstat = None
        self.obsolete = False

        spawninfo = await core.getSpawnInfo()
        self.finievent = threading.Event()

        @s_common.firethread
        def procwaiter():
            '''
            Wait for child process to exit
            '''
            self.procstat = self.proc.join()
            self.proc.close()
            if not self.isfini:
                self.schedCoroSafe(self.fini())

        @s_common.firethread
        def finiwaiter():
            '''
            Wait for the SpawnProc to complete on another thread (so we can block)
            '''
            self.finievent.wait()
            self.todo.put(None)
            self.todo.close()
            self.done.put(None)
            self.done.close()
            self.todo.join_thread()
            self.done.join_thread()
            if self.procstat is None:
                try:
                    self.proc.terminate()
                except ValueError:
                    pass
            self.threadpool.shutdown()

        # avoid blocking the ioloop during process construction
        def getproc():
            self.proc = self.mpctx.Process(target=corework,
                                           args=(spawninfo, self.todo,
                                                 self.done))
            self.proc.start()

        await self.executor(getproc)
        finiwaiter()
        procwaiter()

        async def fini():
            self.obsolete = True
            self.finievent.set()

        self.onfini(fini)
Example #37
    def prepare(self):
        """Prepares task for training, populates all derived attributes """

        pin_memory = self.use_gpu and torch.cuda.device_count() > 1

        self.phases = self._build_phases()
        self.train = False if self.test_only else self.train
        self.dataloaders = self.build_dataloaders(
            current_phase_id=0,
            pin_memory=pin_memory,
            multiprocessing_context=mp.get_context(self.dataloader_mp_context),
        )

        if self.batch_norm_sync_mode == BatchNormSyncMode.PYTORCH:
            self.base_model = nn.SyncBatchNorm.convert_sync_batchnorm(
                self.base_model)
        elif self.batch_norm_sync_mode == BatchNormSyncMode.APEX:
            sync_bn_process_group = apex.parallel.create_syncbn_process_group(
                self.batch_norm_sync_group_size)
            self.base_model = apex.parallel.convert_syncbn_model(
                self.base_model, process_group=sync_bn_process_group)

        # move the model and loss to the right device
        if self.use_gpu:
            self.base_model, self.loss = copy_model_to_gpu(
                self.base_model, self.loss)
        else:
            self.loss.cpu()
            self.base_model.cpu()

        if self.optimizer is not None:
            self.prepare_optimizer(optimizer=self.optimizer,
                                   model=self.base_model,
                                   loss=self.loss)

        if self.amp_args is not None:
            # Initialize apex.amp. This updates the model and the PyTorch optimizer (
            # if training, which is wrapped by the ClassyOptimizer in self.optimizer).
            # Please note this must happen before loading the checkpoint, cause
            # there's amp state to be restored.

            if self.optimizer is None:
                self.base_model = apex.amp.initialize(self.base_model,
                                                      optimizers=None,
                                                      **self.amp_args)
            else:
                self.base_model, self.optimizer.optimizer = apex.amp.initialize(
                    self.base_model, self.optimizer.optimizer, **self.amp_args)

        if self.checkpoint_path:
            self.checkpoint_dict = load_and_broadcast_checkpoint(
                self.checkpoint_path)

        classy_state_dict = (None if self.checkpoint_dict is None else
                             self.checkpoint_dict["classy_state_dict"])

        if classy_state_dict is not None:
            state_load_success = update_classy_state(self, classy_state_dict)
            assert (state_load_success
                    ), "Update classy state from checkpoint was unsuccessful."

        self.init_distributed_data_parallel_model()
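For reference, multiprocessing_context here is the standard torch DataLoader parameter, which accepts a start-method name or a context object in recent PyTorch versions; a minimal sketch (dataset is a placeholder):

import multiprocessing as mp
from torch.utils.data import DataLoader

loader = DataLoader(dataset,  # any torch Dataset; placeholder here
                    num_workers=4,
                    multiprocessing_context=mp.get_context('spawn'))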
Example #38
        n_bins = opts.nbins
        n_channels = header['channels'][-1]

        fs = 2 * n_channels * 125e6 / 2048
        time_bin_size = 2 * n_bins / fs
        # might be useful


        data_new = {
        'pol0' : [],
        'pol1' : []
        }
        print(np.shape(data['pol0']))
        ## the actual rebinning
        ## for both polarizations
        with get_context("spawn").Pool() as pool:
            for key in ['pol0', 'pol1']:
                print("re-binning:", key)
                pol = data[key]
                ##split up the job into lines per core
                job = [(np.clip(pol[x:(x+lines_per_core), :],-0.1,0.1), n_bins) for x in range(0,np.shape(pol)[0]-1,lines_per_core)]
                ##assign the job
                result = pool.starmap(spec_resolve, job)
                ## collapse the result
                data_new[key] = np.vstack(result)

        print(np.shape(data_new['pol0']))



        # print 'Data dimensions are:', np.shape(data['pol0']), 'and', np.shape(data['pol1'])
Example #39
import random
import multiprocessing as mp
import StringMatcher_Interface as smi


def cpu_heavy(x, q):
    print('Starting')
    # Put a result on the queue so the q.get() calls below do not block forever.
    q.put(x)
    print("Done")


if __name__ == '__main__':
    procs = 4
    jobs = list()
    queues = list()
    for i in range(0, procs):
        ctx = mp.get_context('spawn')
        q = ctx.Queue()
        process = ctx.Process(target=cpu_heavy, args=('hf', q))
        jobs.append(process)
        queues.append(q)
    for j in jobs:
        j.start()
    for q in queues:
        print(q.get())
    for j in jobs:
        j.join()
Example #40
    def runModelsChunksSkipMP(self, dataOfInterest, daysToCheck = None, earlyStop=False):
        xVals, yVals, yIndex, xToday = self.generateWindows(dataOfInterest)
        mpEngine = mp.get_context('fork')
        with mpEngine.Manager() as manager:
            returnDict = manager.dict()
            
            identifiersToCheck = []
            
            for i in range(len(xVals) - 44): ##44 is lag...should not overlap with any other predictions or will ruin validity of walkforward optimization
                if i < 600:
                    ##MIN TRAINING
                    continue
                identifiersToCheck.append(str(i))
                
            if daysToCheck is not None:
                identifiersToCheck = identifiersToCheck[-daysToCheck:]


            ##FIRST CHECK FIRST 500 IDENTIFIERS AND THEN IF GOOD CONTINUE
            

            identifierWindows = [identifiersToCheck[:252], identifiersToCheck[252:600], identifiersToCheck[600:900], identifiersToCheck[900:1200], identifiersToCheck[1200:]] ##EXACTLY TWO YEARS
            if earlyStop == False:
                identifierWindows = [identifiersToCheck]
            returnStream = None
            factorReturn = None
            predictions = None
            slippageAdjustedReturn = None
            rawPredictions = None
            shortSeen = 0 if earlyStop == True else -1
            for clippedIdentifiers in identifierWindows:
                
                splitIdentifiers = np.array_split(np.array(clippedIdentifiers), 4)
                
                
                runningP = []
                k = 0
                for identifiers in splitIdentifiers:
                    p = mpEngine.Process(target=CurvePredictor.runDayChunking, args=(self, xVals, yVals, identifiers, returnDict,k))
                    p.start()
                    runningP.append(p)
                    
                    k += 1
                    

                while len(runningP) > 0:
                    newP = []
                    for p in runningP:
                        if p.is_alive() == True:
                            newP.append(p)
                        else:
                            p.join()
                    runningP = newP
                    
                
                preds = []
                actuals = []
                days = []
                for i in clippedIdentifiers:
                    preds.append(returnDict[i])
                    actuals.append(yVals[int(i) + 44])
                    days.append(yIndex[int(i) + 44])

                ##CREATE ACCURATE BLENDING ACROSS DAYS
                predsTable = pd.DataFrame(preds, index=days, columns=["Predictions"])
                
                i = 1
                tablesToJoin = []
                while i < self.predictionDistance:
                    thisTable = predsTable.shift(i)
                    thisTable.columns = ["Predictions_" + str(i)]
                    tablesToJoin.append(thisTable)
                    i += 1
                
                predsTable = predsTable.join(tablesToJoin)
                transformedPreds = pd.DataFrame(predsTable.apply(lambda x:dataAck.computePosition(x), axis=1), columns=["Predictions"]).dropna()
                dailyFactorReturn = dataAck.getDailyFactorReturn(self.targetTicker, dataOfInterest)
                transformedPreds = transformedPreds.join(dailyFactorReturn).dropna()
                returnStream = pd.DataFrame(transformedPreds.apply(lambda x:x[0] * x[1], axis=1), columns=["Algo Return"]) if returnStream is None else pd.concat([returnStream, pd.DataFrame(transformedPreds.apply(lambda x:x[0] * x[1], axis=1), columns=["Algo Return"])])
                factorReturn = pd.DataFrame(transformedPreds[["Factor Return"]]) if factorReturn is None else pd.concat([factorReturn, pd.DataFrame(transformedPreds[["Factor Return"]])])
                predictions = pd.DataFrame(transformedPreds[["Predictions"]]) if predictions is None else pd.concat([predictions, pd.DataFrame(transformedPreds[["Predictions"]])])
                rawPredictions = pd.DataFrame(preds, index=days, columns=["Predictions"]) if rawPredictions is None else pd.concat([rawPredictions, pd.DataFrame(preds, index=days, columns=["Predictions"])])
                
                alpha, beta = empyrical.alpha_beta(returnStream, factorReturn)
                activity = np.count_nonzero(returnStream)/float(len(returnStream))
                rawBeta = abs(empyrical.alpha_beta(returnStream.apply(lambda x:dataAck.applyBinary(x), axis=0), factorReturn.apply(lambda x:dataAck.applyBinary(x), axis=0))[1])
                shortSharpe = empyrical.sharpe_ratio(returnStream)
                algoAnnualReturn = empyrical.annual_return(returnStream.values)[0]
                algoVol = empyrical.annual_volatility(returnStream.values)
                factorAnnualReturn = empyrical.annual_return(factorReturn.values)[0]
                factorVol = empyrical.annual_volatility(factorReturn.values)
                treynor = ((empyrical.annual_return(returnStream.values)[0] - empyrical.annual_return(factorReturn.values)[0]) \
                           / abs(empyrical.beta(returnStream, factorReturn)))
                sharpeDiff = empyrical.sharpe_ratio(returnStream) - empyrical.sharpe_ratio(factorReturn)
                relativeSharpe = sharpeDiff / empyrical.sharpe_ratio(factorReturn) * (empyrical.sharpe_ratio(factorReturn)/abs(empyrical.sharpe_ratio(factorReturn)))
                stability = empyrical.stability_of_timeseries(returnStream)

                ##CALCULATE SHARPE WITH SLIPPAGE
                estimatedSlippageLoss = portfolioGeneration.estimateTransactionCost(predictions)
                estimatedSlippageLoss.columns = returnStream.columns
                slippageAdjustedReturn = (returnStream - estimatedSlippageLoss).dropna()
                slippageSharpe = empyrical.sharpe_ratio(slippageAdjustedReturn)
                sharpeDiffSlippage = empyrical.sharpe_ratio(slippageAdjustedReturn) - empyrical.sharpe_ratio(factorReturn)
                relativeSharpeSlippage = sharpeDiffSlippage / empyrical.sharpe_ratio(factorReturn) * (empyrical.sharpe_ratio(factorReturn)/abs(empyrical.sharpe_ratio(factorReturn)))
                profitability = len((returnStream.values)[returnStream.values > 0])/len(returnStream.values)
                

                rollingProfitability = returnStream.rolling(45, min_periods=45).apply(lambda x:len((x)[x > 0])/len(x)).dropna().values
                minRollingProfitability = np.percentile(rollingProfitability, 1)
                twentyFifthPercentileRollingProfitablity = np.percentile(rollingProfitability, 25)


                if np.isnan(shortSharpe) == True:
                    return None, {"sharpe":shortSharpe}, None, None, None

                elif (profitability < 0.4  or activity < 0.3 or abs(rawBeta) > 0.4 or stability < 0.3) and shortSeen == 0:
                    return None, {
                            "sharpe":shortSharpe, ##OVERLOADED IN FAIL
                            "activity":activity,
                            "factorSharpe":empyrical.sharpe_ratio(factorReturn),
                            "sharpeSlippage":slippageSharpe,
                            "beta":abs(beta),
                            "alpha":alpha,
                            "activity":activity,
                            "treynor":treynor,
                            "period":"first 252 days",
                            "algoReturn":algoAnnualReturn,
                            "algoVol":algoVol,
                            "factorReturn":factorAnnualReturn,
                            "factorVol":factorVol,
                            "sharpeDiff":sharpeDiff,
                            "relativeSharpe":relativeSharpe,
                            "sharpeDiffSlippage":sharpeDiffSlippage,
                            "relativeSharpeSlippage":relativeSharpeSlippage,
                            "rawBeta":rawBeta,
                            "minRollingProfitability":minRollingProfitability,
                            "stability":stability,
                            "twentyFifthPercentileRollingProfitablity":twentyFifthPercentileRollingProfitablity,
                            "profitability":profitability
                    }, None, None, None
                
                elif abs(rawBeta) > 0.33 or activity < 0.3 or stability < 0.4 or twentyFifthPercentileRollingProfitablity < 0.41 \
                     or minRollingProfitability < 0.3 or profitability < 0.46:
                    periodName = "first 600 days"
                    if shortSeen == 2:
                        periodName = "first 900 days"
                    elif shortSeen == 3:
                        periodName = "first 1200 days"
                    return None, {
                            "sharpe":shortSharpe, ##OVERLOADED IN FAIL
                            "activity":activity,
                            "factorSharpe":empyrical.sharpe_ratio(factorReturn),
                            "sharpeSlippage":slippageSharpe,
                            "alpha":alpha,
                            "beta":abs(beta),
                            "activity":activity,
                            "treynor":treynor,
                            "period":periodName,
                            "algoReturn":algoAnnualReturn,
                            "algoVol":algoVol,
                            "factorReturn":factorAnnualReturn,
                            "factorVol":factorVol,
                            "minRollingProfitability":minRollingProfitability,
                            "sharpeDiff":sharpeDiff,
                            "relativeSharpe":relativeSharpe,
                            "sharpeDiffSlippage":sharpeDiffSlippage,
                            "relativeSharpeSlippage":relativeSharpeSlippage,
                            "rawBeta":rawBeta,
                            "stability":stability,
                            "twentyFifthPercentileRollingProfitablity":twentyFifthPercentileRollingProfitablity,
                            "profitability":profitability
                    }, None, None, None
                    
                elif shortSeen < 4:
                    print("CONTINUING", "SHARPE:", shortSharpe, "SHARPE DIFF:", sharpeDiff, "RAW BETA:", rawBeta, "TREYNOR:", treynor)
               
                shortSeen += 1

            return returnStream, factorReturn, predictions, slippageAdjustedReturn, rawPredictions
Example #41
def test_sketch_search_policy_basic_spawn():
    ctx = multiprocessing.get_context("spawn")
    p = ctx.Process(target=sketch_search_policy_basic_spawn)
    p.start()
    p.join()
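When isolating a test in a spawned child like this, the parent commonly also verifies the child's exit status; a one-line sketch:

    assert p.exitcode == 0, 'child test process failed'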
Example #42
 def __init__(self, ctx):
     mgr = get_context(ctx).Manager()
     self.event = mgr.Event()