Example #1
    def __init__(
        self,
        config: Config,
        real_job: typing.Callable[..., typing.Any],
    ) -> None:
        self.config = config

        local_read_pipe, local_write_pipe = Pipe(duplex=False)
        process_read_pipe, process_write_pipe = Pipe(duplex=False)

        self.local_read_pipe = local_read_pipe  # type: Connection
        self.local_write_pipe = local_write_pipe  # type: Connection
        self.process_read_pipe = process_read_pipe  # type: Connection
        self.process_write_pipe = process_write_pipe  # type: Connection

        self.real_job = real_job
        self.process = Process(
            target=self.work,
            args=(
                self.local_write_pipe,
                self.process_read_pipe,
            ),
            kwargs={'seed': random.random()},
            name='Worker',
        )
        self.db = None  # TODO delete
        self.process.start()
Example #2
 def __init__(self,
              cluster_cls: Type[Cluster],
              cluster_kwargs: Optional[dict] = None):
     self.cluster_cls = cluster_cls
     self.cluster_kwargs = cluster_kwargs or {}
     # must initialize the pipes before calling `run()`
     self.__cmd_pipe = Pipe()
     self.__result_pipe = Pipe()
     super().__init__()
Example #3
    def load(self):
        control_receiver, control_sender = Pipe(duplex=False)
        events_receiver, events_sender = Pipe(duplex=False)
        self.add_control_connection(control_receiver, "Manager")
        self.add_events_connection(events_sender, "Manager")

        # Start the process; this will call _run()
        self.process.start()

        return control_sender, events_receiver
Example #4
    def test_cython_wrapper(self):
        serials = bladerf.get_device_list()
        print("Connected serials", serials)

        bladerf.open()

        bladerf.set_tx(True)
        bladerf.set_channel(0)
        print("set gain", bladerf.set_gain(20))
        print("set gain", bladerf.set_gain(21))

        bladerf.set_tx(False)
        bladerf.set_channel(1)
        print("Sample Rate", bladerf.get_sample_rate())
        print("Set sample rate to 2e6", bladerf.set_sample_rate(int(2e6)))
        print("sample rate", bladerf.get_sample_rate())
        print("Set sample rate to 40e6", bladerf.set_sample_rate(int(40e6)))
        print("sample rate", bladerf.get_sample_rate())
        print("Set sample rate to 200e6", bladerf.set_sample_rate(int(200e6)))
        print("sample rate", bladerf.get_sample_rate())

        bladerf.set_tx(True)
        bladerf.set_channel(1)
        print("Bandwidth", bladerf.get_bandwidth())
        print("Set Bandwidth to 2e6", bladerf.set_bandwidth(int(2e6)))
        print("Bandwidth", bladerf.get_bandwidth())

        bladerf.set_tx(False)
        bladerf.set_channel(0)
        print("Frequency", bladerf.get_center_freq())
        print("Set Frequency to 433.92e6",
              bladerf.set_center_freq(int(433.92e6)))
        print("Frequency", bladerf.get_center_freq())

        bladerf.prepare_sync()

        parent_conn, child_conn = Pipe()

        for i in range(3):
            bladerf.receive_sync(child_conn, 4096)
            data = parent_conn.recv_bytes()
            print(data)

        bladerf.close()

        bladerf.open()
        bladerf.set_tx(True)
        bladerf.set_channel(0)
        bladerf.prepare_sync()

        for i in range(10):
            print("Send", bladerf.send_sync(np.fromstring(data,
                                                          dtype=np.int16)))
        bladerf.close()
Example #5
 def create_worker(self):
     a, b = Pipe()
     with a:
         cmd = 'from {0} import run_main, {1}; run_main({2!r}, {1})'.format(
             self.__class__.__module__, 'worker_main', a.fileno())
         p = start_worker(cmd, (a.fileno(), ))
     sys.stdout.flush()
     p.stdin.close()
     w = Worker(p, b, self.events, self.name)
     if self.common_data != pickle_dumps(None):
         w.set_common_data(self.common_data)
     return w
Example #6
def get_state_value(conn: Optional[Connection] = None, key=None, prgh_queue=None):
    h, c = Pipe(duplex=True)

    state_message = ModuleMessage("PRGH", "get_state_data", (key, c))
    if conn is not None:
        conn.send(state_message)
    elif prgh_queue is not None:
        prgh_queue.put(state_message)
    else:
        raise Exception("get_state_value() was called without a pipe or queue")

    msg = h.recv()
    h.close()
    return msg
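The serving side of this request/reply pattern is not shown. A hypothetical sketch, assuming only the (key, reply_connection) message shape visible above; the handler name and state object are illustrative:

def handle_get_state_data(state: dict, key, reply_conn):
    # answer on the Connection that travelled inside the ModuleMessage,
    # then close it; get_state_value() blocks in h.recv() until this runs
    reply_conn.send(state.get(key))
    reply_conn.close()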
Example #7
    def test_c_wrapper(self):
        def pycallback(data):
            # convert interleaved int16 IQ samples to complex64 in [-1, 1)
            arr = np.asarray(data)
            result = np.empty(len(arr) // 2, dtype=np.complex64)
            result.real = (arr[::2] + 0.5) / 32767.5
            result.imag = (arr[1::2] + 0.5) / 32767.5
            return result

        print(sdrplay.get_api_version())
        print(sdrplay.get_devices())
        print(sdrplay.set_device_index(0))

        parent_conn, child_conn = Pipe()
        p = Process(target=recv, args=(parent_conn, ))
        p.daemon = True
        p.start()

        null_ptr = ctypes.POINTER(ctypes.c_void_p)()
        print("Init stream",
              sdrplay.init_stream(50, 2e6, 433.92e6, 2e6, 500, child_conn))

        time.sleep(2)
        print("settings sample rate")
        print("Set sample rate", sdrplay.set_sample_rate(2e6))

        time.sleep(1)
        p.terminate()
        p.join()
Example #8
    def __enter__(self):
        parent_pipe, child_pipe = Pipe()
        childpid = os.fork()

        if childpid != 0:
            self.childpid = childpid
            self._parent_setup()
            self.__pipe = parent_pipe
            frame = self.__get_context_frame()
            self.__inject_trace_func(frame, self.__exit_context)
            return self
        else:
            frame = self.__get_context_frame()
            self.__child_orig_locals = dict(frame.f_locals)
            self.__pipe = child_pipe

            try:
                self._child_setup()
            # pylint: disable=W0703
            # need to catch all exceptions here since we are passing them to
            # the parent process
            except Exception as exc:
                exc.__traceback_list__ = traceback.format_exc()
                self._child_exit(exc)

            return self
Example #9
def get_config_value(conn: Optional[Connection] = None, key=None, prgh_queue=None):
    h, c = Pipe(duplex=True)
    cfg_msg = cfg_transaction(key, is_set_var=False, reply=c)

    config_message = ModuleMessage("PRGH", CFGT.CFG_GET, cfg_msg)
    if conn is not None:
        conn.send(config_message)
    elif prgh_queue is not None:
        prgh_queue.put(config_message)
    else:
        raise Exception(
            "get_config_value() was called without a pipe or queue")

    msg = h.recv()
    h.close()
    return msg
Example #10
 def __init__(self):
     self._reader, self._writer = Pipe(duplex=False)
     self._rlock = Lock()
     self._poll = self._reader.poll
     if sys.platform == 'win32':
         self._wlock = None
     else:
         self._wlock = Lock()
Example #11
 def __init__(self):
     self._reader, self._writer = Pipe(duplex=False)
     self._rlock = Lock()
     self._poll = self._reader.poll
     if sys.platform == 'win32':
         self._wlock = None
     else:
         self._wlock = Lock()
     self._make_methods()
Example #12
def Pipe(duplex=True):
    '''
    Returns two connection objects connected by a pipe
    '''
    if sys.version_info[0] == 3:
        from multiprocessing.connection import Pipe
    else:
        from billiard._connection import Pipe
    return Pipe(duplex)
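A minimal usage sketch of this wrapper: in Python 3 it simply re-exports multiprocessing.connection.Pipe, so with the default duplex=True both returned Connection objects can send and receive.

a, b = Pipe()
a.send('ping')
assert b.recv() == 'ping'   # either end can receive
b.send('pong')
assert a.recv() == 'pong'   # and either end can send
a.close()
b.close()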
Example #13
    def test_cython_wrapper(self):
        print(usrp.find_devices(""))

        usrp.set_tx(False)

        return_code = usrp.open("addr=192.168.10.2")
        print("open", return_code)

        usrp.setup_stream()
        print("Made rx_streame handler")

        print(usrp.get_device_representation())

        print("Set sample rate", usrp.set_sample_rate(2e6))
        print("Set freq", usrp.set_center_freq(433.92e6))
        print("Set bandwidth", usrp.set_bandwidth(1e6))
        print("Set gain", usrp.set_rf_gain(0.5))



        buffer = bytearray()

        num_samples = 32768 // 2

        usrp.start_stream(num_samples)
        parent_conn, child_conn = Pipe()

        for i in range(500):
            usrp.recv_stream(child_conn, num_samples)
            received_bytes = parent_conn.recv_bytes()
            #print(received_bytes)
            print(i)
            buffer.extend(received_bytes)
            #print(USRP.unpack_complex(received_bytes, len(received_bytes) // 8))

        with open("/tmp/test.complex", "wb") as f:
            f.write(buffer)

        usrp.destroy_stream()
        print("Freed rx streamer handler")

        return_code = usrp.close()
        print("close", return_code)
Example #14
 def on_input(self, func):
     *_, last = self.all_steps  # Get the last step
     receiver, sender = Pipe(duplex=False)
     for plugin in last[1]:
         connection = ("Manager", sender)
         control_manager = self.control_manager(plugin)
         control_manager.send("output")
         control_manager.send(connection)
     self.receiver = receiver
     self.on_input_func = func
Example #15
    def __init__(self, maxsize=0):
        if maxsize <= 0:
            maxsize = _multiprocessing.SemLock.SEM_VALUE_MAX
        self._maxsize = maxsize
        self._reader, self._writer = Pipe(duplex=False)
        self._rlock = Lock()
        self._opid = os.getpid()
        if sys.platform == 'win32':
            self._wlock = None
        else:
            self._wlock = Lock()
        self._sem = BoundedSemaphore(maxsize)
        # For use by concurrent.futures
        self._ignore_epipe = False

        self._after_fork()

        if sys.platform != 'win32':
            register_after_fork(self, Queue._after_fork)
Example #16
 def __init__(self, source):
     self.source = source
     self.tmp_dir = PropertyManager.getSharedInstance()["temporary_directory"]
     (self.wavefilename, self.wavefile) = self.getWaveFile()
     self.switchingLock = threading.Lock()
     self.timer = None
     (self.outputReader, self.outputWriter) = Pipe()
     self.doRun = True
     super().__init__()
Example #17
    def __enter__(self):
        parent_pipe, child_pipe = Pipe()

        if pid := os.fork():
            self.childpid = pid
            self._parent_setup()
            self.__pipe = parent_pipe
            frame = self.__get_context_frame()
            self.__inject_trace_func(frame, self.__exit_context)
            return self
        # child: report any setup failure back to the parent
        frame = self.__get_context_frame()
        self.__child_orig_locals = dict(frame.f_locals)
        self.__pipe = child_pipe
        try:
            self._child_setup()
        except Exception as exc:  # forwarded to the parent process
            exc.__traceback_list__ = traceback.format_exc()
            self._child_exit(exc)
        return self
Example #18
 def __init__(self, dsp, source, profile: AudioChopperProfile):
     self.dsp = dsp
     self.source = source
     self.profile = profile
     self.tmp_dir = Config.get()["temporary_directory"]
     self.wavefile = None
     self.wavefilename = None
     self.switchingLock = threading.Lock()
     self.timer = None
     (self.outputReader, self.outputWriter) = Pipe()
Example #19
    def test_terminate_method_with_Pipe(self, num_threads=50):
        readable, writable = Pipe(duplex=False)
        threads = self._start_threads(readable.recv_bytes, num_threads)

        writable.send_bytes(b"spam")
        time.sleep(self.WAITSEC)
        self.assertNumAliveThreads(num_threads - 1, threads,
                                   "No thread has ended, %r" % threads)

        self._terminate_still_alive(threads)
        for t in threads:
            self.assertRaises(OSError, t.terminate)
        self.assertNumAliveThreads(
            0, threads, "Some threads are still alive, %r" % threads)

        for i in range(num_threads):
            data = ("%d" % i).encode("ascii")
            writable.send_bytes(data)
            self.assertEqual(data, readable.recv_bytes(),
                             "Someone seems to intercept pipe, %r" % threads)
Example #20
 def __init__(self, active_dsp, mode_str: str):
     self.read_fn = None
     self.doRun = True
     self.dsp = active_dsp
     self.writers = []
     mode = Modes.findByModulation(mode_str)
     if mode is None or not isinstance(mode, AudioChopperMode):
         raise ValueError(
             "Mode {} is not an audio chopper mode".format(mode_str))
     self.profile_source = mode.get_profile_source()
     (self.outputReader, self.outputWriter) = Pipe()
     super().__init__()
Example #21
    def _connect(self, node: Node) -> bool:
        if isinstance(node, Analyzer):
            from_analyzer, node.output = Pipe(duplex=False)
            self._from_analyzers.append(from_analyzer)

            node.input, to_analyzer = Pipe(duplex=False)
            self._to_analyzers.append(to_analyzer)
            self._analyzer_counter[to_analyzer] = 0

        elif isinstance(node, Fetcher):
            # Fetcher does not need to listen from Router
            self._from_fetcher, node.output = Pipe(duplex=False)

        elif isinstance(node, Broker):
            self._from_broker, node.output = Pipe(duplex=False)
            node.input, self._to_broker = Pipe(duplex=False)

        elif isinstance(node, Ledger):
            node.input, self._to_ledger = Pipe(duplex=False)

        elif isinstance(node, Router):
            pass

        else:
            raise TypeError(type(node))

        logger.debug(f'Connected to {node.name}')

        return True
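Note the ordering this example leans on throughout: Pipe(duplex=False) returns the read-only end first and the write-only end second. A minimal sketch:

from multiprocessing import Pipe

receiver, sender = Pipe(duplex=False)   # (read end, write end)
sender.send({'tick': 1})
print(receiver.recv())                  # {'tick': 1}
# sender.recv() would raise OSError here: that end is write-only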
Example #22
    def connect(self, plugin1, plugin2, extra_label=''):
        """ create plugin1 output to plugin2 input """

        receiver, sender = Pipe(duplex=False)

        connection = (plugin2.instance_id, sender)
        control_manager = self.control_manager(plugin1)

        control_manager.send("output " + extra_label)
        control_manager.send(connection)

        connection = (plugin1.instance_id, receiver)
        control_manager = self.control_manager(plugin2)
        control_manager.send("input")
        control_manager.send(connection)
Example #23
def mult_fast(A, B):
    process_cnt = 4
    n = len(A) // process_cnt
    processes = []
    cs = []
    for i in range(process_cnt):
        p_conn, c_conn = Pipe()
        p = Process(target=mult_s, args=(A[n * i:n * (i + 1)], B, c_conn))
        cs.append(p_conn)
        processes.append(p)
        p.start()
    res = []
    for (p, c) in zip(processes, cs):
        res += c.recv()
        p.join()
    return res
Example #24
    def __init__(self, maxsize=0):
        if maxsize <= 0:
            maxsize = _multiprocessing.SemLock.SEM_VALUE_MAX
        self._maxsize = maxsize
        self._reader, self._writer = Pipe(duplex=False)
        self._rlock = Lock()
        self._opid = os.getpid()
        if sys.platform == 'win32':
            self._wlock = None
        else:
            self._wlock = Lock()
        self._sem = BoundedSemaphore(maxsize)
        # For use by concurrent.futures
        self._ignore_epipe = False

        self._after_fork()

        if sys.platform != 'win32':
            register_after_fork(self, Queue._after_fork)
Example #25
    def create_local_worker(self, wclass, *args, **kwargs):
        ours, theirs = Pipe()
        self.pipes.append(ours)

        spawnf = kwargs.pop('spawnf', multiprocessing.Process)

        proc = spawnf(target=create_worker,
                      args=(
                          theirs,
                          wclass,
                          *args,
                      ),
                      kwargs=kwargs)
        self.local_processes.append(proc)
        proc.start()
        self.on_new_worker(len(self.pipes) - 1)
Example #26
    def _process_execute(child_pipe: Connection,
                         *args,
                         network_type: NetworkTypes = None,
                         **kwargs):

        last_update = time.time()
        AGENT = network_factory(network_type=network_type)
        AGENT.load()

        while True:
            if not child_pipe.poll():
                time.sleep(0.01)
                continue

            request = child_pipe.recv()

            if isinstance(request, PredictionRequest):

                if np.random.rand() <= AGENT._session_epsilon or request.no_random:
                    result = AGENT.predict_random(request.data)
                else:
                    result = AGENT.predict(request.data)

                child_pipe.send(PredictionResult(result=result))

            elif isinstance(request, EpisodeResult):
                for item in request.game_data:
                    AGENT.memory.append(item)
                    if len(AGENT.memory) > AGENT.config.observe_frames_before_learning:
                        AGENT.fit_batch()
                        # logger.debug("[KerasProcess] Fit Batch Complete", runtime=time.time()-start_time, batch_size=batch_size)
                # Let the runner know that we're done and ready for another task.
                child_pipe.send(True)
            elif request is None:
                # shutdown request: persist the model and stop the loop
                AGENT.save()
                return

            if (time.time() - last_update) / 60 > 5:
                # Only print updates and save every 5 minutes
                last_update = time.time()
                logger.debug("KERAS PROCESS UPDATE",
                             epsilon=AGENT._session_epsilon,
                             memory_len=len(AGENT.memory))
                # logger.debug("Stats", loss=np.mean(AGENT.loss_history), acc=np.mean(AGENT.acc_history))
                AGENT.save()
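The parent side of this request/response loop is not included. A hypothetical driver, assuming the same message classes; network_type and observation stand in for real values:

from multiprocessing import Pipe, Process

parent_conn, child_conn = Pipe()
worker = Process(target=_process_execute, args=(child_conn,),
                 kwargs={'network_type': network_type})
worker.start()

parent_conn.send(PredictionRequest(data=observation, no_random=False))
action = parent_conn.recv().result   # PredictionResult sent back by the child

parent_conn.send(None)               # shutdown request: the child saves and exits
worker.join()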
Example #27
    def __init__(
        self,
        name: str,
        target: Callable,
        context: SpawnContext,
        log_queue: Queue,
        signal_handler: Callable[[int, FrameType], None],
        event_callback: Callable[[Event], None],
    ):
        self._name = name
        self._target = target
        self._context = context
        self._log_queue = log_queue
        self._signal_handler = signal_handler

        self._process = None
        self._ep_conn, self._mp_conn = Pipe()
        self._event_listener = PipeListener(
            conn=self._mp_conn,
            action=event_callback,
            name=f"{name} listener",
        )
Example #28
    def __enter__(self):
        parent_pipe, child_pipe = Pipe()
        childpid = os.fork()

        if childpid != 0:
            self.parent_setup()
            self.childpid = childpid
            self.__pipe = parent_pipe
            frame = self.__get_context_frame()
            self.__inject_trace_func(frame, self.__exit_context)

            return self

        else:
            self.__pipe = child_pipe

            try:
                self.child_setup()

            # pylint: disable=W0703
            # need to catch all exceptions here since we are passing them to
            # the parent process
            except Exception as e:
                e.__traceback_list__ = traceback.format_exc()
                self.__pipe.send(e)
                try:
                    self.__pipe.send(SystemExit())
                except (
                        BrokenPipeError if sys.hexversion >= 0x03030000  # pylint: disable=E0602
                        else OSError,
                        IOError) as e:
                    if e.errno in (errno.EPIPE, errno.ESHUTDOWN):
                        pass
                    else:
                        raise
                os._exit(0)  # pylint: disable=W0212
                # we don't want SystemExit being caught here

            return self
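The core of this pattern, forking and shipping a child-side exception back through the pipe, can be sketched in isolation (POSIX only; the __traceback_list__ attribute follows the code above):

import os
import traceback
from multiprocessing import Pipe

parent_pipe, child_pipe = Pipe()
pid = os.fork()
if pid == 0:                      # child
    try:
        raise RuntimeError('boom')
    except Exception as exc:
        # exception instances pickle cleanly, extra attributes included
        exc.__traceback_list__ = traceback.format_exc()
        child_pipe.send(exc)
    os._exit(0)
else:                             # parent
    err = parent_pipe.recv()
    print('child failed:', err)
    print(err.__traceback_list__)
    os.waitpid(pid, 0)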
Example #29
 def launch_worker_process(self):
     from calibre.utils.ipc.pool import start_worker
     control_a, control_b = Pipe()
     data_a, data_b = Pipe()
     with control_a, data_a:
         pass_fds = control_a.fileno(), data_a.fileno()
         self.worker_process = p = start_worker(
             'from {0} import run_main, {1}; run_main({2}, {3}, {1})'.
             format(self.__class__.__module__, self.worker_entry_point,
                    *pass_fds), pass_fds)
         p.stdin.close()
     self.control_conn = control_b
     self.data_conn = data_b
     self.data_thread = t = Thread(name='CWData',
                                   target=self.handle_data_requests)
     t.daemon = True
     t.start()
     self.connected.set()
Example #30
class SimpleQueue(object):

    def __init__(self):
        self._reader, self._writer = Pipe(duplex=False)
        self._rlock = Lock()
        self._poll = self._reader.poll
        if sys.platform == 'win32':
            self._wlock = None
        else:
            self._wlock = Lock()

    def empty(self):
        return not self._poll()

    def __getstate__(self):
        assert_spawning(self)
        return (self._reader, self._writer, self._rlock, self._wlock)

    def __setstate__(self, state):
        (self._reader, self._writer, self._rlock, self._wlock) = state

    def get(self):
        with self._rlock:
            res = self._reader.recv_bytes()
        # unserialize the data after having released the lock
        return ForkingPickler.loads(res)

    def put(self, obj):
        # serialize the data before acquiring the lock
        obj = ForkingPickler.dumps(obj)
        if self._wlock is None:
            # writes to a message oriented win32 pipe are atomic
            self._writer.send_bytes(obj)
        else:
            with self._wlock:
                self._writer.send_bytes(obj)
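A brief usage sketch of this SimpleQueue (fork start method assumed, since __setstate__ above does not rebuild _poll the way Example #11's _make_methods does):

from multiprocessing import Process

def produce(q):
    for i in range(3):
        q.put(i)

q = SimpleQueue()
worker = Process(target=produce, args=(q,))
worker.start()
print([q.get() for _ in range(3)])   # [0, 1, 2]
worker.join()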
Example #31
def runBaseline(config, data_dictionary, train_test_folds):
    program_start_time = time()

    # assign all program arguments to local variables
    Runs = config['runs']
    LeadTimes = config['lead_times']
    SliceLength = config['slice_size']
    TestFraction = config['test_fraction']
    BatchSize = config['batch_size']
    stations = config['stations']
    n_stations = len(stations)
    n_loaders = config['n_loaders']
    n_workers = config['n_workers']

    # update general static model information
    experiment_info = config
    experiment_info['experiment'] = 'bias_corrected_baseline'
    experiment_info['train_test_distribution'] = []

    # output path for experiment information
    setting_string = 'bias_corrected_baseline_tf_%s_sl_%s' % (TestFraction, SliceLength)
    output_path = '%s/%s' % (config['experiment_path'], setting_string)

    # time for the set up until first run
    experiment_info['set_up_time'] = time() - program_start_time
    print('[Time]: Set-up %s' % strftime("%H:%M:%S", gmtime(experiment_info['set_up_time'])))
    sys.stdout.flush()

    # initialize runtime statistic
    run_times = None

    # cross validation
    for run in range(Runs):
        # time stamp at start of run
        run_start_time = time()

        print('[Run %s] Cross-validation test fold %s' % (str(run + 1), str(run + 1)))

        # take the right preprocessed train/test data set for the current run
        train_fold, test_fold = train_test_folds[run]
        n_train, n_test = len(train_fold), len(test_fold)

        # initialize train and test dataloaders
        trainset = DataLoaders.BiasCorrectionCosmoData(
            config=config,
            files=train_fold,
            station_data_dict=data_dictionary)
        trainloader = DataLoader(trainset, batch_size=BatchSize, shuffle=False,
                                 num_workers=n_loaders, collate_fn=DataLoaders.collate_fn)

        testset = DataLoaders.BiasCorrectionCosmoData(
            config=config,
            files=test_fold,
            station_data_dict=data_dictionary)
        testloader = DataLoader(testset, batch_size=BatchSize, shuffle=False,
                                num_workers=n_loaders, collate_fn=DataLoaders.collate_fn)

        # -----------------------------------#
        # ----------- TRAINING  ----------- #
        # ----------------------------------#

        # initialize variables for epoch statistics
        train_start_time = time()

        run_output_path = output_path + '/run_%s' % run
        # load existing bias data
        if (os.path.exists(run_output_path + '/station_bias.pkl') and
                os.path.exists(run_output_path + '/station_hour_bias.pkl') and
                os.path.exists(run_output_path + '/station_hour_month_bias.pkl')):
            print('Load existing bias data and skip training...')
            with open(run_output_path + '/station_bias.pkl', 'rb') as handle:
                station_bias = pkl.load(handle)
            with open(run_output_path + '/station_hour_bias.pkl', 'rb') as handle:
                station_hour_bias = pkl.load(handle)
            with open(run_output_path + '/station_hour_month_bias.pkl', 'rb') as handle:
                station_hour_month_bias = pkl.load(handle)
            print('Existing bias data loaded.')
        # calculate bias on training data and store it
        else:
            # set up multiprocessing workers, distributing the stations round-robin
            pipes = []
            processes = []
            station_mapping = {}
            for p in range(n_workers):
                parent, child = Pipe()
                pipes += [parent]
                process_stations = [stations[s] for s in range(p, n_stations, n_workers)]
                for ps in process_stations:
                    station_mapping[ps] = p
                processes += [TrainWorker(child, process_stations, n_train // n_stations, n_test // n_stations, run)]

            for p in processes:
                p.start()

            # loop over complete train set and sample all data points the same way as with the NN models
            for i, data in enumerate(trainloader, 0):
                try:
                    # get training batch, e.g. label, cosmo-1 output and external features
                    Label, Prediction, Station, Month, Hour = tuple(map(lambda x: x.numpy().squeeze(), data[:5]))
                except ValueError:
                    # with a small batch size it can happen that all labels are
                    # corrupted, in which case collate_fn returns an empty list
                    print('Skipped ValueError in train.')
                    continue

                for batch_sample in range(Label.shape[0]):
                    station = Station[batch_sample]
                    process_station = station_mapping[station]
                    pipe = pipes[process_station]
                    pipe.send((LeadTimes, Prediction[batch_sample], Label[batch_sample], station,
                               Hour[batch_sample], Month[batch_sample]))

            print("All training samples have been queued. Waiting on results...")
            sys.stdout.flush()

            # send poison pill to the workers to signal the end of the training phase
            for pipe in pipes:
                pipe.send(None)

            station_hour_month_bias_data_set = None
            for pipe_idx, pipe in enumerate(pipes):
                while True:
                    result = pipe.recv()

                    # check if it is the last result sent by worker
                    if result is None:
                        print("All results of process %s have been consumed." % pipe_idx)
                        break

                    try:
                        station_hour_month_bias_data_set = station_hour_month_bias_data_set.combine_first(result)
                    except AttributeError:
                        station_hour_month_bias_data_set = result

            for pipe in pipes:
                pipe.close()

            sys.stdout.flush()

            station_labels = station_hour_month_bias_data_set.coords['station'].data
            hour_labels = station_hour_month_bias_data_set.coords['hour'].data
            month_labels = station_hour_month_bias_data_set.coords['month'].data

            # calculate the average error made for a specific station
            station_bias = xr.DataArray(np.nanmean(station_hour_month_bias_data_set.data, axis=(2, 3, 4))[..., None],
                                        dims=('lead', 'station', 'bias'),
                                        coords=(LeadTimes, station_labels, ['bias']))

            # calculate the average error made for specific station at a specific hour
            station_hour_bias = xr.DataArray(np.nanmean(station_hour_month_bias_data_set.data, axis=(3, 4))[..., None],
                                             dims=('lead', 'station', 'hour', 'bias'),
                                             coords=(LeadTimes, station_labels, hour_labels, ['bias']))

            station_hour_month_bias = xr.DataArray(np.nanmean(station_hour_month_bias_data_set.data, axis=(4))[..., None],
                                                   dims=('lead', 'station', 'hour', 'month', 'bias'),
                                                   coords=(LeadTimes, station_labels, hour_labels, month_labels, ['bias']))

            # fill missing values (nan) with 0
            station_hour_month_bias = station_hour_month_bias.fillna(0)
            station_bias = station_bias.fillna(0)
            station_hour_bias = station_hour_bias.fillna(0)

            # store time to process complete train set
            train_time = time() - train_start_time
            experiment_info['run_time_train'] = train_time

            # dump experiment statistic
            print('Store bias data after training...')
            run_output_path = output_path + '/run_%s' % run
            if not os.path.exists(run_output_path):
                os.makedirs(run_output_path)
            with open(run_output_path + '/station_bias.pkl', 'wb') as handle:
                pkl.dump(station_bias, handle, protocol=pkl.HIGHEST_PROTOCOL)
            with open(run_output_path + '/station_hour_bias.pkl', 'wb') as handle:
                pkl.dump(station_hour_bias, handle, protocol=pkl.HIGHEST_PROTOCOL)
            with open(run_output_path + '/station_hour_month_bias.pkl', 'wb') as handle:
                pkl.dump(station_hour_month_bias, handle, protocol=pkl.HIGHEST_PROTOCOL)
            print('Stored bias data after training.')

        # -----------------------------------#
        # -----------  TESTING  ----------- #
        # ----------------------------------#

        test_start_time = time()

        # set up multiprocessing workers, distributing the stations round-robin
        pipes = []
        processes = []
        station_mapping = {}
        for p in range(n_workers):
            parent, child = Pipe()
            pipes += [parent]
            process_stations = [stations[s] for s in range(p, n_stations, n_workers)]
            for ps in process_stations:
                station_mapping[ps] = p
            processes += [TestWorker(child, process_stations, n_test // n_stations, n_test // n_stations, run)]

        for p in processes:
            p.start()

        trained_stations = set()

        # loop over complete test set and sample all data points the same way as with the NN models
        for i, data in enumerate(testloader, 0):
            try:
                # get test batch, i.e. label, cosmo-1 output and external features
                Label, Prediction, Station, Month, Hour = tuple(map(lambda x: x.numpy().squeeze(), data[:5]))
            except ValueError:
                # with a small batch size it can happen that all labels are
                # corrupted, in which case collate_fn returns an empty list
                print('Skipped ValueError in test.')
                continue

            for batch_sample in range(Label.shape[0]):
                station = Station[batch_sample]
                trained_stations.add(station)
                prediction = Prediction[batch_sample]
                hour = Hour[batch_sample]
                month = Month[batch_sample]
                label = Label[batch_sample]

                # calculate the prediction by subtracting the learned mean error of the station over all hours
                try:
                    bias_corrected_output = prediction - station_bias.sel(station=station).data.squeeze()
                except KeyError:
                    # if we have no data for a station and hour, we do not correct the data
                    print('Station bias not corrected, because no bias for station %s available.' % station)
                    bias_corrected_output = prediction

                # calculate the prediction by subtracting the learned mean error of the station at a specific hour
                time_bias_corrected_output = []
                time_month_bias_corrected_output = []

                for lead_idx, lead in enumerate(LeadTimes):
                    lead_hour = hour[lead_idx]
                    lead_month = month[lead_idx]

                    try:
                        time_bias_corrected_output += [
                            prediction[lead_idx] - station_hour_bias.sel(lead=lead, station=station,
                                                                         hour=lead_hour).data.squeeze()]
                    except KeyError:
                        # if we have no data for a station and hour, we do not correct the data
                        print('Station time-bias not corrected, because no bias for station %s and time %s available.'
                              % (station, lead_hour))
                        time_bias_corrected_output += [prediction[lead_idx]]

                    # calculate the prediction by subtracting the learned mean bias
                    # of the station at a specific hour in a specific month
                    try:
                        time_month_bias_corrected_output += [
                            prediction[lead_idx] - station_hour_month_bias.sel(lead=lead, station=station,
                                                                               hour=lead_hour,
                                                                               month=lead_month).data.squeeze()]
                    except KeyError:
                        # if we have no data for a station and hour, we do not correct the data
                        print(
                            'Station time-month-bias not corrected, because no bias for station %s, time %s and month %s available.'
                            % (station, lead_hour, lead_month))
                        time_month_bias_corrected_output += [prediction[lead_idx]]

                process_station = station_mapping[station]
                pipe = pipes[process_station]
                pipe.send((LeadTimes, label, prediction, station, hour, month, bias_corrected_output,
                           time_bias_corrected_output, time_month_bias_corrected_output))

        # send poison pill to the workers to signal the end of the test phase
        for pipe in pipes:
            pipe.send(None)

        # reset error statistics so results from a previous run are not carried over
        error_statistics = None

        for pipe_idx, pipe in enumerate(pipes):
            while True:
                result = pipe.recv()

                # check if it is the last result sent by worker
                if result is None:
                    print("All results of process %s have been consumed." % pipe_idx)
                    break

                try:
                    error_statistics = error_statistics.combine_first(result)
                except AttributeError:
                    error_statistics = result

        for pipe in pipes:
            pipe.close()

        # flush output to see progress
        sys.stdout.flush()

        # time for test
        test_time = time() - test_start_time
        run_time = time() - run_start_time

        # flush output to see progress
        sys.stdout.flush()

        experiment_info['run_time_test'] = test_time
        experiment_info['run_time'] = run_time

        # generate data set of all experiment statistics and additional information
        experiment_statistic = xr.Dataset({
            'error_statistic': error_statistics,
            'run_time_statistic': run_times,
        })
        experiment_statistic.attrs['experiment_info'] = experiment_info

        with open(run_output_path + '/experiment_statistic.pkl', 'wb') as handle:
            pkl.dump(experiment_statistic, handle, protocol=pkl.HIGHEST_PROTOCOL)

    # print program execution time
    m, s = divmod(time() - program_start_time, 60)
    h, m = divmod(m, 60)
    print('Program has successfully finished in %dh %02dmin %02ds' % (h, m, s))
Example #32
def Pipe(duplex=True):
    """
    Returns two connection objects connected by a pipe
    """
    from multiprocessing.connection import Pipe
    return Pipe(duplex)
Example #33
from multiprocessing.connection import Client, Listener, wait, Pipe
from multiprocessing import Queue, Process, Pool, Lock, Value, Array, Manager
import socket
import time
#=====================================pipe and queue===============================================================
switch_to_queue = False
parent_conn, child_conn = Pipe()
queue = Queue()
address = ('localhost', 6000)
family = 'AF_UNIX'

def server_start(child_conn=None, queue=None):
    def task(conn):
        print('server side start')
        # if switch_to_queue and queue:
        #     for i in range(0,10):
        #         queue.put('[queue] coming from server side')
        #         time.sleep(1)
        # elif conn:
        #     for i in range(0,10):
        #         conn.send('[pip] coming from server side...{}'.format(i))
        #         time.sleep(0.3)
        #     conn.send('exit')

        with Client(address) as client:
            for i in range(0, 10):
                client.send('push {} msg to client '.format(i))

    p = Process(target=task, args=(child_conn,))
    p.start()
    # p.join()
Example #34
class Queue(object):

    def __init__(self, maxsize=0):
        if maxsize <= 0:
            maxsize = _multiprocessing.SemLock.SEM_VALUE_MAX
        self._maxsize = maxsize
        self._reader, self._writer = Pipe(duplex=False)
        self._rlock = Lock()
        self._opid = os.getpid()
        if sys.platform == 'win32':
            self._wlock = None
        else:
            self._wlock = Lock()
        self._sem = BoundedSemaphore(maxsize)
        # For use by concurrent.futures
        self._ignore_epipe = False

        self._after_fork()

        if sys.platform != 'win32':
            register_after_fork(self, Queue._after_fork)

    def __getstate__(self):
        assert_spawning(self)
        return (self._ignore_epipe, self._maxsize, self._reader, self._writer,
                self._rlock, self._wlock, self._sem, self._opid)

    def __setstate__(self, state):
        (self._ignore_epipe, self._maxsize, self._reader, self._writer,
         self._rlock, self._wlock, self._sem, self._opid) = state
        self._after_fork()

    def _after_fork(self):
        debug('Queue._after_fork()')
        self._notempty = threading.Condition(threading.Lock())
        self._buffer = collections.deque()
        self._thread = None
        self._jointhread = None
        self._joincancelled = False
        self._closed = False
        self._close = None
        self._send = self._writer.send
        self._recv = self._reader.recv
        self._poll = self._reader.poll

    def put(self, obj, block=True, timeout=None):
        assert not self._closed
        if not self._sem.acquire(block, timeout):
            raise Full

        self._notempty.acquire()
        try:
            if self._thread is None:
                self._start_thread()
            self._buffer.append(obj)
            self._notempty.notify()
        finally:
            self._notempty.release()

    def get(self, block=True, timeout=None):
        if block and timeout is None:
            self._rlock.acquire()
            try:
                res = self._recv()
                self._sem.release()
                return res
            finally:
                self._rlock.release()

        else:
            if block:
                deadline = time.time() + timeout
            if not self._rlock.acquire(block, timeout):
                raise Empty
            try:
                if block:
                    timeout = deadline - time.time()
                    if timeout < 0 or not self._poll(timeout):
                        raise Empty
                elif not self._poll():
                    raise Empty
                res = self._recv()
                self._sem.release()
                return res
            finally:
                self._rlock.release()

    def qsize(self):
        # Raises NotImplementedError on Mac OSX because of broken sem_getvalue()
        return self._maxsize - self._sem._semlock._get_value()

    def empty(self):
        return not self._poll()

    def full(self):
        return self._sem._semlock._is_zero()

    def get_nowait(self):
        return self.get(False)

    def put_nowait(self, obj):
        return self.put(obj, False)

    def close(self):
        self._closed = True
        self._reader.close()
        if self._close:
            self._close()

    def join_thread(self):
        debug('Queue.join_thread()')
        assert self._closed
        if self._jointhread:
            self._jointhread()

    def cancel_join_thread(self):
        debug('Queue.cancel_join_thread()')
        self._joincancelled = True
        try:
            self._jointhread.cancel()
        except AttributeError:
            pass

    def _start_thread(self):
        debug('Queue._start_thread()')

        # Start thread which transfers data from buffer to pipe
        self._buffer.clear()
        self._thread = threading.Thread(
            target=Queue._feed,
            args=(self._buffer, self._notempty, self._send,
                  self._wlock, self._writer.close, self._ignore_epipe),
            name='QueueFeederThread'
            )
        self._thread.daemon = True

        debug('doing self._thread.start()')
        self._thread.start()
        debug('... done self._thread.start()')

        # On process exit we will wait for data to be flushed to pipe.
        #
        # However, if this process created the queue then all
        # processes which use the queue will be descendants of this
        # process.  Therefore waiting for the queue to be flushed
        # is pointless once all the child processes have been joined.
        created_by_this_process = (self._opid == os.getpid())
        if not self._joincancelled and not created_by_this_process:
            self._jointhread = Finalize(
                self._thread, Queue._finalize_join,
                [weakref.ref(self._thread)],
                exitpriority=-5
                )

        # Send sentinel to the thread queue object when garbage collected
        self._close = Finalize(
            self, Queue._finalize_close,
            [self._buffer, self._notempty],
            exitpriority=10
            )

    @staticmethod
    def _finalize_join(twr):
        debug('joining queue thread')
        thread = twr()
        if thread is not None:
            thread.join()
            debug('... queue thread joined')
        else:
            debug('... queue thread already dead')

    @staticmethod
    def _finalize_close(buffer, notempty):
        debug('telling queue thread to quit')
        notempty.acquire()
        try:
            buffer.append(_sentinel)
            notempty.notify()
        finally:
            notempty.release()

    @staticmethod
    def _feed(buffer, notempty, send, writelock, close, ignore_epipe):
        debug('starting thread to feed data to pipe')
        from .util import is_exiting

        nacquire = notempty.acquire
        nrelease = notempty.release
        nwait = notempty.wait
        bpopleft = buffer.popleft
        sentinel = _sentinel
        if sys.platform != 'win32':
            wacquire = writelock.acquire
            wrelease = writelock.release
        else:
            wacquire = None

        try:
            while 1:
                nacquire()
                try:
                    if not buffer:
                        nwait()
                finally:
                    nrelease()
                try:
                    while 1:
                        obj = bpopleft()
                        if obj is sentinel:
                            debug('feeder thread got sentinel -- exiting')
                            close()
                            return

                        if wacquire is None:
                            send(obj)
                        else:
                            wacquire()
                            try:
                                send(obj)
                            finally:
                                wrelease()
                except IndexError:
                    pass
        except Exception as e:
            if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE:
                return
            # Since this runs in a daemon thread the resources it uses
            # may be become unusable while the process is cleaning up.
            # We ignore errors which happen after the process has
            # started to cleanup.
            try:
                if is_exiting():
                    info('error in queue thread: %s', e)
                else:
                    import traceback
                    traceback.print_exc()
            except Exception:
                pass
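A short sketch of the blocking and timeout paths above, assuming Empty and Full come from the standard queue module as in the stdlib original:

q = Queue(maxsize=2)
q.put('a')
q.put('b')
print(q.get())               # 'a'
print(q.get(timeout=0.1))    # 'b'
try:
    q.get(timeout=0.1)       # nothing left: raises Empty
except Empty:
    print('queue drained')
q.close()
q.join_thread()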