Example #1
def get_temp_dir():
    if current_process()._tempdir is None:
        import shutil, tempfile
        tempdir = tempfile.mkdtemp(prefix='pymp-')
        info('created temp directory %s', tempdir)
        Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100)
        current_process()._tempdir = tempdir
    return current_process()._tempdir
Example #2
def get_temp_dir():
    # get name of a temp directory which will be automatically cleaned up
    if current_process()._tempdir is None:
        import shutil, tempfile
        tempdir = tempfile.mkdtemp(prefix='pymp-')
        info('created temp directory %s', tempdir)
        Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100)
        current_process()._tempdir = tempdir
    return current_process()._tempdir
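Both variants above show the lazy-creation pattern used inside multiprocessing itself. If you only need the directory, the stock helper can be called directly; a minimal sketch, assuming the standard CPython multiprocessing.util module:

from multiprocessing.util import get_temp_dir

# Created on first call (e.g. '/tmp/pymp-xxxxxxxx'); the registered
# Finalize(shutil.rmtree, ...) removes it when the process shuts down.
path = get_temp_dir()
print('per-process scratch directory:', path)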
Example #3
def worker(worker_queue, result_queue):
    try:
        for number in iter(worker_queue.get, None):
            print number
            Crawl.crawl_web(number)
            result_queue.put("%s success with: %s" % (number,
                                                      current_process().name))
    except Exception, e:
        result_queue.put("%s failed with: %s" % (current_process().name,
                                                       e.message))
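The iter(queue.get, None) idiom above stops a worker when it receives a None sentinel. A hedged, self-contained sketch of how such a worker is usually wired up (Python 3, names are illustrative and not from the original project):

from multiprocessing import Process, Queue

def echo_worker(worker_queue, result_queue):
    # Stop as soon as the None sentinel arrives.
    for item in iter(worker_queue.get, None):
        result_queue.put(item * 2)

if __name__ == '__main__':
    work, results = Queue(), Queue()
    procs = [Process(target=echo_worker, args=(work, results)) for _ in range(2)]
    for p in procs:
        p.start()
    for n in range(5):
        work.put(n)
    for _ in procs:
        work.put(None)          # one sentinel per worker
    for p in procs:
        p.join()
    print(sorted(results.get() for _ in range(5)))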
Example #4
    def run(self):
        import time

        print "self.arr[10] = %f, Process = %s" % (self.arr[10], current_process())

        print self.arr.shape

        n = self.arr.shape[0] - 1

        print "self.arr[%d] = %f, Process = %s" % (n, self.arr[n], current_process())
        time.sleep(10)
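The run() above reads from a shared array attribute. A hedged sketch of how such a process is typically constructed, using a numpy view over a shared multiprocessing.Array (class and attribute names are illustrative, not from the original project):

import numpy as np
from multiprocessing import Array, Process, current_process

class ArrayWorker(Process):
    def __init__(self, shared):
        super().__init__()
        self.shared = shared

    def run(self):
        # Zero-copy float64 view over the shared ctypes buffer.
        arr = np.frombuffer(self.shared.get_obj())
        print("arr[10] = %f, Process = %s" % (arr[10], current_process()))

if __name__ == '__main__':
    shared = Array('d', 100)     # 100 doubles in shared memory
    shared[10] = 3.14
    worker = ArrayWorker(shared)
    worker.start()
    worker.join()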
Example #5
def prepare(data):
    """
    Try to get current process ready to unpickle process object
    """
    old_main_modules.append(sys.modules['__main__'])
    if 'name' in data:
        process.current_process().name = data['name']
    if 'authkey' in data:
        process.current_process()._authkey = data['authkey']
    if 'log_to_stderr' in data and data['log_to_stderr']:
        util.log_to_stderr()
    if 'log_level' in data:
        util.get_logger().setLevel(data['log_level'])
    if 'sys_path' in data:
        sys.path = data['sys_path']
    if 'sys_argv' in data:
        sys.argv = data['sys_argv']
    if 'dir' in data:
        os.chdir(data['dir'])
    if 'orig_dir' in data:
        process.ORIGINAL_DIR = data['orig_dir']
    if 'main_path' in data:
        main_path = data['main_path']
        main_name = os.path.splitext(os.path.basename(main_path))[0]
        if main_name == '__init__':
            main_name = os.path.basename(os.path.dirname(main_path))

        if main_name != 'ipython':
            import imp

            if main_path is None:
                dirs = None
            elif os.path.basename(main_path).startswith('__init__.py'):
                dirs = [os.path.dirname(os.path.dirname(main_path))]
            else:
                dirs = [os.path.dirname(main_path)]

            assert main_name not in sys.modules, main_name
            # Load under a temporary name so that "if __name__ == '__main__'"
            # blocks in the parent's main module are not re-executed here.
            file, path_name, etc = imp.find_module(main_name, dirs)
            try:
                main_module = imp.load_module('__parents_main__', file,
                                              path_name, etc)
            finally:
                if file:
                    file.close()

            sys.modules['__main__'] = main_module
            main_module.__name__ = '__main__'

            # Make objects defined in the main module report '__main__' as
            # their module so they pickle correctly in child processes.
            for obj in main_module.__dict__.values():
                try:
                    if obj.__module__ == '__parents_main__':
                        obj.__module__ = '__main__'
                except Exception:
                    pass
Example #6
    def get_preparation_data(name):
        '''
        Return info about parent needed by child to unpickle process object
        '''
        from .util import _logger, _log_to_stderr

        d = dict(
            name=name,
            sys_path=sys.path,
            sys_argv=sys.argv,
            log_to_stderr=_log_to_stderr,
            orig_dir=process.ORIGINAL_DIR,
            authkey=process.current_process().authkey,
            )

        if _logger is not None:
            d['log_level'] = _logger.getEffectiveLevel()

        if not WINEXE and not WINSERVICE:
            main_path = getattr(sys.modules['__main__'], '__file__', None)
            if not main_path and sys.argv[0] not in ('', '-c'):
                main_path = sys.argv[0]
            if main_path is not None:
                if not os.path.isabs(main_path) and \
                                          process.ORIGINAL_DIR is not None:
                    main_path = os.path.join(process.ORIGINAL_DIR, main_path)
                d['main_path'] = os.path.normpath(main_path)

        return d
Example #7
 def main():
     """
     Run code specified by data received over pipe
     """
     assert is_forking(sys.argv)
     handle = int(sys.argv[-1])
     fd = msvcrt.open_osfhandle(handle, os.O_RDONLY)
     from_parent = os.fdopen(fd, 'rb')
     process.current_process()._inheriting = True
     preparation_data = load(from_parent)
     prepare(preparation_data)
     self = load(from_parent)
     process.current_process()._inheriting = False
     from_parent.close()
     exitcode = self._bootstrap()
     exit(exitcode)
Example #8
def get_preparation_data(name):
    """
    Return info about parent needed by child to unpickle process object.
    Monkey-patch from
    """
    d = dict(
        name=name,
        sys_path=sys.path,
        sys_argv=sys.argv,
        log_to_stderr=_log_to_stderr,
        orig_dir=process.ORIGINAL_DIR,
        authkey=process.current_process().authkey,
    )

    if _logger is not None:
        d["log_level"] = _logger.getEffectiveLevel()

    if not WINEXE:
        main_path = getattr(sys.modules["__main__"], "__file__", None)
        if not main_path and sys.argv[0] not in ("", "-c"):
            main_path = sys.argv[0]
        if main_path is not None:
            if not os.path.isabs(main_path) and process.ORIGINAL_DIR is not None:
                main_path = os.path.join(process.ORIGINAL_DIR, main_path)
            if not main_path.endswith(".exe"):
                d["main_path"] = os.path.normpath(main_path)

    return d
Example #9
 def makeRecord(self, *args, **kwds):
     record = OldLoggerClass.makeRecord(self, *args, **kwds)
     if current_process:
         record.processName = current_process()._name
     else:
         record.processName = ""
     return record
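Patching makeRecord like this predates convenient access to the process name in log output; the standard logging module already records processName, so a formatter can show it directly. A minimal sketch:

import logging
from multiprocessing import current_process

logging.basicConfig(format='%(processName)s %(levelname)s %(message)s',
                    level=logging.INFO)
logging.info('running in %s', current_process().name)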
Example #10
def get_preparation_data(name):
    '''
    Return info about parent needed by child to unpickle process object.
    Monkey-patch from
    
    http://www.velocityreviews.com/forums/t669125-using-multiprocessing-from-a-windows-service.html
    '''
    d = dict(
        name=name,
        sys_path=sys.path,
        sys_argv=sys.argv,
        log_to_stderr=_log_to_stderr,
        orig_dir=process.ORIGINAL_DIR,
        authkey=process.current_process().authkey,
    )
    
    if _logger is not None:
        d['log_level'] = _logger.getEffectiveLevel()
    
    if not WINEXE:
        main_path = getattr(sys.modules['__main__'], '__file__', None)
        if not main_path and sys.argv[0] not in ('', '-c'):
            main_path = sys.argv[0]
        if main_path is not None:
            if not os.path.isabs(main_path) and process.ORIGINAL_DIR is not None:
                main_path = os.path.join(process.ORIGINAL_DIR, main_path)
            if not main_path.endswith('.exe'):
                d['main_path'] = os.path.normpath(main_path)
                
    return d
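The docstring notes this is a monkey-patch (for running multiprocessing from a Windows service). A hedged sketch of how such a replacement is typically installed on the Python 2-era module layout (multiprocessing.forking no longer exists on Python 3):

import multiprocessing.forking as forking

# Replace the stock implementation with the patched one defined above.
forking.get_preparation_data = get_preparation_data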
Example #11
    def launch(self, proc_number):
        """Runs a simulation
        :return: double: the ratio of successful hands over total hands
        """

        logging.info(process.current_process().name + ': Plot data will be collected every {} runs'.
                     format(self.collect_frequency))

        success_count = 0
        deck = Deck()

        for sim_nb in range(self.number_simulations):
            deck.initialise()
            card_rules = CardRules(deck)
            card_rules.set_target(self.target_rank)

            cards_in_hand = self.get_starting_hand(deck, self.starting_cards)

            for _ in range(self.number_of_draws):
                retained_cards = card_rules.apply_rules(cards_in_hand)
                # draw additional cards from the deck to make a full hand
                dealer_cards = [deck.get_card() for _ in
                                range(self.MAX_CARDS_IN_HAND - len(retained_cards))]
                cards_in_hand = retained_cards + dealer_cards

            # at the end of the last draw we check the final hand to see if we hit the target
            is_success = card_rules.check_success(cards_in_hand)
            if is_success:
                success_count += 1
            if self.is_plot and sim_nb % self.collect_frequency == 0 and sim_nb > 0:
                self._intermediate_results.append(success_count / sim_nb)

        self._simulation_result = success_count / self.number_simulations
        return self
Example #12
def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers,
                   active_children=active_children,
                   current_process=current_process):
    # NB: we hold on to references to functions in the arglist due to the
    # situation described below, where this function is called after this
    # module's globals are destroyed.

    global _exiting

    info('process shutting down')
    debug('running all "atexit" finalizers with priority >= 0')
    _run_finalizers(0)

    if current_process() is not None:
        # NB: we check if the current process is None here because if
        # it's None, any call to ``active_children()`` will throw an
        # AttributeError (active_children winds up trying to get
        # attributes from util._current_process).  This happens in a
        # variety of shutdown circumstances that are not well-understood
        # because module-scope variables are not apparently supposed to
        # be destroyed until after this function is called.  However,
        # they are indeed destroyed before this function is called.  See
        # issues 9775 and 15881.  Also related: 4106, 9205, and 9207.

        for p in active_children():
            if p._daemonic:
                info('calling terminate() for daemon %s', p.name)
                p._popen.terminate()

        for p in active_children():
            info('calling join() for process %s', p.name)
            p.join()

    debug('running the remaining "atexit" finalizers')
    _run_finalizers()
Example #13
    def get_command_line():
        '''
        Returns prefix of command line used for spawning a child process
        '''
        if getattr(process.current_process(), '_inheriting', False):
            raise RuntimeError('''
            Attempt to start a new process before the current process
            has finished its bootstrapping phase.

            This probably means that you are on Windows and you have
            forgotten to use the proper idiom in the main module:

                if __name__ == '__main__':
                    freeze_support()
                    ...

            The "freeze_support()" line can be omitted if the program
            is not going to be frozen to produce a Windows executable.''')

        if getattr(sys, 'frozen', False):
            return [sys.executable, '--multiprocessing-fork']
        else:
            prog = 'from multiprocessing.forking import main; main()'
            opts = util._args_from_interpreter_flags()
            return [_python_exe] + opts + ['-c', prog, '--multiprocessing-fork']
Example #14
    def get_command_line():
        """
        Returns prefix of command line used for spawning a child process
        """
        if getattr(process.current_process(), "_inheriting", False):
            raise RuntimeError(
                """
            Attempt to start a new process before the current process
            has finished its bootstrapping phase.

            This probably means that you are on Windows and you have
            forgotten to use the proper idiom in the main module:

                if __name__ == '__main__':
                    freeze_support()
                    ...

            The "freeze_support()" line can be omitted if the program
            is not going to be frozen to produce a Windows executable."""
            )

        if getattr(sys, "frozen", False):
            return [sys.executable, "--multiprocessing-fork"]
        else:
            prog = "from multiprocessing.forking import main; main()"
            return [_python_exe, "-c", prog, "--multiprocessing-fork"]
Example #15
    def imply_start(self):
        _aspect = cs_aspect(self.sub)

        tracker = self.state_persist.start_aspect(_aspect)
        if tracker:
            self.state_persist.set_state_data(_aspect, {"state": "started", "by": current_process().pid})
            return tracker
Example #16
def get_preparation_data(name, init_main_module=True):
    '''
    Return info about parent needed by child to unpickle process object
    '''
    _check_not_importing_main()
    d = dict(
        log_to_stderr=util._log_to_stderr,
        authkey=bytes(process.current_process().authkey),
    )

    if util._logger is not None:
        d['log_level'] = util._logger.getEffectiveLevel()
        if len(util._logger.handlers) > 0:
            h = util._logger.handlers[0]
            d['log_fmt'] = h.formatter._fmt

    sys_path = [p for p in sys.path]
    try:
        i = sys_path.index('')
    except ValueError:
        pass
    else:
        sys_path[i] = process.ORIGINAL_DIR

    d.update(
        name=name,
        sys_path=sys_path,
        sys_argv=sys.argv,
        orig_dir=process.ORIGINAL_DIR,
        dir=os.getcwd()
    )

    if sys.platform != "win32":
        # Pass the semaphore_tracker pid to avoid re-spawning it in every child
        from . import semaphore_tracker
        semaphore_tracker.ensure_running()
        d['tracker_pid'] = semaphore_tracker._semaphore_tracker._pid

    # Figure out whether to initialise main in the subprocess as a module
    # or through direct execution (or to leave it alone entirely)
    if init_main_module:
        main_module = sys.modules['__main__']
        try:
            main_mod_name = getattr(main_module.__spec__, "name", None)
        except BaseException:
            main_mod_name = None
        if main_mod_name is not None:
            d['init_main_from_name'] = main_mod_name
        elif sys.platform != 'win32' or (not WINEXE and not WINSERVICE):
            main_path = getattr(main_module, '__file__', None)
            if main_path is not None:
                if (not os.path.isabs(main_path) and
                        process.ORIGINAL_DIR is not None):
                    main_path = os.path.join(process.ORIGINAL_DIR, main_path)
                d['init_main_from_path'] = os.path.normpath(main_path)
                # Compat for python2.7
                d['main_path'] = d['init_main_from_path']

    return d
Example #17
def prepare(data):
    '''
    Try to get current process ready to unpickle process object
    '''
    if 'name' in data:
        process.current_process().name = data['name']

    if 'authkey' in data:
        process.current_process().authkey = data['authkey']

    if 'log_to_stderr' in data and data['log_to_stderr']:
        util.log_to_stderr()

    if 'log_level' in data:
        util.get_logger().setLevel(data['log_level'])

    if 'log_fmt' in data:
        import logging
        util.get_logger().handlers[0].setFormatter(
            logging.Formatter(data['log_fmt'])
        )

    if 'sys_path' in data:
        sys.path = data['sys_path']

    if 'sys_argv' in data:
        sys.argv = data['sys_argv']

    if 'dir' in data:
        os.chdir(data['dir'])

    if 'orig_dir' in data:
        process.ORIGINAL_DIR = data['orig_dir']

    if hasattr(mp, 'set_start_method'):
        mp.set_start_method('loky', force=True)

    if 'tracker_pid' in data:
        from . import semaphore_tracker
        semaphore_tracker._semaphore_tracker._pid = data['tracker_pid']

    if 'init_main_from_name' in data:
        _fixup_main_from_name(data['init_main_from_name'])
    elif 'init_main_from_path' in data:
        _fixup_main_from_path(data['init_main_from_path'])
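Examples #16 and #17 appear to come from the loky backend (note set_start_method('loky')); end-user code normally goes through its executor API instead. A hedged sketch, assuming the third-party loky package is installed:

from loky import get_reusable_executor
from multiprocessing import current_process

def who(_):
    # Returns the worker's process name, e.g. 'LokyProcess-1'.
    return current_process().name

if __name__ == '__main__':
    executor = get_reusable_executor(max_workers=2)
    print(set(executor.map(who, range(8))))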
Example #18
def set_mp_process_title(progname, info=None):
    """Set the ps name using the multiprocessing process name.

    Only works if :mod:`setproctitle` is installed.

    """
    from multiprocessing.process import current_process
    return set_process_title("%s.%s" % (progname, current_process().name),
                             info=info)
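set_process_title is presumably a thin wrapper; the underlying call looks roughly like this (a hedged sketch assuming the third-party setproctitle package mentioned in the docstring):

import setproctitle
from multiprocessing import current_process

# Make the ps/top name reflect the program and the worker process name.
setproctitle.setproctitle('myprog.%s' % current_process().name)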
Example #19
def sim_mask(args):
    # Properly Parallel
    # http://stackoverflow.com/questions/444591/convert-a-string-of-bytes-into-an-int-python
    myid = current_process()._identity[0]
    np.random.seed(myid ^ struct.unpack("<L", os.urandom(4))[0])

    # Be Nice
    niceness = os.nice(0)
    os.nice(5 - niceness)

    lives = 10
    kwargs, pp_defaults, retain_data = args
    sim_time = kwargs.pop("sim_time", None)
    while True:
        try:
            sim = Simulation(**kwargs)
            logging.info("{0} starting {1}".format(current_process(), sim.title))
            prep_stats = sim.prepare(sim_time=sim_time)
            sim_time = sim.simulate()
            return_dict = sim.postprocess(**pp_defaults)
            if retain_data is True:  # Implicitly implies boolean datatype
                return_val = sim.generate_datapackage()
            elif retain_data == "additional_only":
                dp = sim.generate_datapackage()
                return_val = dp.additional.copy()
            elif retain_data == "file":
                return_val = sim.generate_datapackage().write(
                    kwargs.get("title"))
            else:
                return_val = return_dict
            del sim
            return return_val
        except (KeyboardInterrupt, SystemExit):
            raise
        except RuntimeError:
            lives -= 1
            if lives <= 0:
                raise
            else:
                logging.critical(
                    "{0} died, restarting: {1} lives remain".format(current_process(), lives))
                del sim
        gc.collect()
Example #20
 def _getPool(self):
     '''
     Provides the pool for the current process.
     '''
     process = current_process()
     try: return process._ally_db_pool
     except AttributeError: pass
     pool = process._ally_db_pool = self._wrapped.recreate()
     self._pools.add(pool)
     return pool
Example #21
def set_mp_process_title(progname, info=None, hostname=None):
    """Set the ps name using the multiprocessing process name.

    Only works if :mod:`setproctitle` is installed.

    """
    from multiprocessing.process import current_process
    if hostname:
        progname = "%s@%s" % (progname, hostname.split(".")[0])
    return set_process_title("%s:%s" % (progname, current_process().name),
                             info=info)
Example #22
    def set_mp_process_title(progname, info=None, hostname=None, rate_limit=False):  # noqa
        """Set the ps name using the multiprocessing process name.

        Only works if :mod:`setproctitle` is installed.

        """
        if not rate_limit or _setps_bucket.can_consume(1):
            if hostname:
                progname = "%s@%s" % (progname, hostname.split(".")[0])
            if current_process is not None:
                return set_process_title("%s:%s" % (progname, current_process().name), info=info)
            else:
                return set_process_title(progname, info=info)
Example #23
    def start(self):
        """
        Start child process
        """
        assert self._popen is None, 'cannot start a process twice'
        assert self._parent_pid == os.getpid(), \
               'can only start a process object created by current process'

        # The assert below is deliberately disabled (kept inside a string) so
        # that this process can perform the otherwise-forbidden task of
        # forking inside a daemon process.
        """
        assert not current_process()._daemonic, \
               'daemonic processes are not allowed to have children'
        """

        _cleanup()
        if self._Popen is not None:
            Popen = self._Popen
        else:
            from multiprocessing.forking import Popen
        self._popen = Popen(self)
        current_process()._children.add(self)
Example #24
def write_text(event_file):
    """Takes event file as input, writes text from all queries contained in
    event file to TEXT_DIR"""
    for line in open(event_file):
        query = line.strip()
        logger.info('Writing query from %s: "%s"' % (current_process(), query))
        qi = QueryIterator('http://search-s10.prod.wikia.net:8983/solr/main/', {'query': query, 'fields': 'id,wid,html_en,indexed', 'sort': 'id asc'})
        for doc in qi:
            # Sanitize and write text
            text = '\n'.join(clean_list(doc.get('html_en', '')))
            localpath = os.path.join(TEXT_DIR, doc['id'])
            logger.debug('Writing text from %s to %s' % (doc['id'], localpath))
            with open(localpath, 'w') as f:
                f.write(text)
    return 'Finished event file %s' % event_file
Example #25
 def __repr__(self):
     try:
         if self._semlock._is_mine():
             name = current_process().name
             if threading.current_thread().name != 'MainThread':
                 name += '|' + threading.current_thread().name
         elif self._semlock._get_value() == 1:
             name = 'None'
         elif self._semlock._count() > 0:
             name = 'SomeOtherThread'
         else:
             name = 'SomeOtherProcess'
     except Exception:
         name = 'unknown'
     return '<Lock(owner=%s)>' % name
Example #26
    def __repr__(self):
        try:
            if self._semlock._is_mine():
                name = current_process().name
                if threading.current_thread().name != "MainThread":
                    name += "|" + threading.current_thread().name
            elif self._semlock._get_value() == 1:
                name = "None"
            elif self._semlock._count() > 0:
                name = "SomeOtherThread"
            else:
                name = "SomeOtherProcess"
        except Exception:
            name = "unknown"

        return "<Lock(owner=%s)>" % name
Example #27
 def __repr__(self):
     try:
         if self._semlock._is_mine():
             name = current_process().name
             if threading.current_thread().name != 'MainThread':
                 name += '|' + threading.current_thread().name
             count = self._semlock._count()
         elif self._semlock._get_value() == 1:
             name, count = 'None', 0
         elif self._semlock._count() > 0:
             name, count = 'SomeOtherThread', 'nonzero'
         else:
             name, count = 'SomeOtherProcess', 'nonzero'
     except Exception:
         name, count = 'unknown', 'unknown'
     return '<RLock(%s, %s)>' % (name, count)
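A short demo of the owner-reporting reprs shown in Examples #25-#27, using only the public API (exact output may vary slightly by Python version):

from multiprocessing import Lock, RLock

lock = Lock()
print(lock)        # <Lock(owner=None)>
lock.acquire()
print(lock)        # <Lock(owner=MainProcess)>
lock.release()

rlock = RLock()
rlock.acquire()
rlock.acquire()
print(rlock)       # <RLock(MainProcess, 2)>
rlock.release()
rlock.release()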
Example #28
def _check_not_importing_main():
    if getattr(process.current_process(), '_inheriting', False):
        raise RuntimeError('''
        An attempt has been made to start a new process before the
        current process has finished its bootstrapping phase.

        This probably means that you are not using fork to start your
        child processes and you have forgotten to use the proper idiom
        in the main module:

            if __name__ == '__main__':
                freeze_support()
                ...

        The "freeze_support()" line can be omitted if the program
        is not going to be frozen to produce an executable.''')
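The guard that this check enforces looks like the following; freeze_support() is only needed when the program will be frozen into an executable:

from multiprocessing import Process, current_process, freeze_support

def task():
    print('child is', current_process().name)

if __name__ == '__main__':
    freeze_support()
    p = Process(target=task)
    p.start()
    p.join()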
Example #29
def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers,
                   active_children=active_children,
                   current_process=current_process):
    info('process shutting down')
    debug('running all "atexit" finalizers with priority >= 0')
    _run_finalizers(0)
    if current_process() is not None:
        for p in active_children():
            if p._daemonic:
                info('calling terminate() for daemon %s', p.name)
                p._popen.terminate()

        for p in active_children():
            info('calling join() for process %s', p.name)
            p.join()

    debug('running the remaining "atexit" finalizers')
    _run_finalizers()
    return
Example #30
    def __repr__(self):
        try:
            if self._semlock._is_mine():
                name = current_process().name
                if threading.current_thread().name != "MainThread":
                    name += "|" + threading.current_thread().name
                count = self._semlock._count()
            elif self._semlock._get_value() == 1:
                name, count = ("None", 0)
            elif self._semlock._count() > 0:
                name, count = ("SomeOtherThread", "nonzero")
            else:
                name, count = ("SomeOtherProcess", "nonzero")
        except Exception:
            name, count = ("unknown", "unknown")

        return "<RLock(%s, %s)>" % (name, count)
Example #31
 def makeRecord(self, *args, **kwds):
     record = OldLoggerClass.makeRecord(self, *args, **kwds)
     record.processName = current_process()._name
     return record
Example #32
def main(listener_fd, alive_r, preload, main_path=None, sys_path=None):
    '''Run forkserver.'''
    if preload:
        if '__main__' in preload and main_path is not None:
            process.current_process()._inheriting = True
            try:
                spawn.import_main_path(main_path)
            finally:
                del process.current_process()._inheriting
        for modname in preload:
            try:
                __import__(modname)
            except ImportError:
                pass

    util._close_stdin()

    sig_r, sig_w = os.pipe()
    os.set_blocking(sig_r, False)
    os.set_blocking(sig_w, False)

    def sigchld_handler(*_unused):
        # Dummy signal handler, doesn't do anything
        pass

    handlers = {
        # unblocking SIGCHLD allows the wakeup fd to notify our event loop
        signal.SIGCHLD:
        sigchld_handler,
        # protect the process from ^C
        signal.SIGINT:
        signal.SIG_IGN,
    }
    old_handlers = {
        sig: signal.signal(sig, val)
        for (sig, val) in handlers.items()
    }

    # calling os.write() in the Python signal handler is racy
    signal.set_wakeup_fd(sig_w)

    # map child pids to client fds
    pid_to_fd = {}

    with socket.socket(socket.AF_UNIX, fileno=listener_fd) as listener, \
         selectors.DefaultSelector() as selector:
        _forkserver._forkserver_address = listener.getsockname()

        selector.register(listener, selectors.EVENT_READ)
        selector.register(alive_r, selectors.EVENT_READ)
        selector.register(sig_r, selectors.EVENT_READ)

        while True:
            try:
                while True:
                    rfds = [key.fileobj for (key, events) in selector.select()]
                    if rfds:
                        break

                if alive_r in rfds:
                    # EOF because no more client processes left
                    assert os.read(alive_r, 1) == b'', "Not at EOF?"
                    raise SystemExit

                if sig_r in rfds:
                    # Got SIGCHLD
                    os.read(sig_r, 65536)  # exhaust
                    while True:
                        # Scan for child processes
                        try:
                            pid, sts = os.waitpid(-1, os.WNOHANG)
                        except ChildProcessError:
                            break
                        if pid == 0:
                            break
                        child_w = pid_to_fd.pop(pid, None)
                        if child_w is not None:
                            if os.WIFSIGNALED(sts):
                                returncode = -os.WTERMSIG(sts)
                            else:
                                if not os.WIFEXITED(sts):
                                    raise AssertionError(
                                        "Child {0:n} status is {1:n}".format(
                                            pid, sts))
                                returncode = os.WEXITSTATUS(sts)
                            # Send exit code to client process
                            try:
                                write_signed(child_w, returncode)
                            except BrokenPipeError:
                                # client vanished
                                pass
                            os.close(child_w)
                        else:
                            # This shouldn't happen really
                            warnings.warn('forkserver: waitpid returned '
                                          'unexpected pid %d' % pid)

                if listener in rfds:
                    # Incoming fork request
                    with listener.accept()[0] as s:

                        # XXX Thing that changed - be tolerant of socket disconnects
                        try:
                            # Receive fds from client
                            fds = reduction.recvfds(s, MAXFDS_TO_SEND + 1)
                        except EOFError:
                            # broken socket due to reconnection on client-side
                            continue
                        # XXX Thing that changed - be tolerant of socket disconnects

                        if len(fds) > MAXFDS_TO_SEND:
                            raise RuntimeError(
                                "Too many ({0:n}) fds to send".format(
                                    len(fds)))
                        child_r, child_w, *fds = fds
                        s.close()
                        pid = os.fork()
                        if pid == 0:
                            # Child
                            code = 1
                            try:
                                listener.close()
                                selector.close()
                                unused_fds = [alive_r, child_w, sig_r, sig_w]
                                unused_fds.extend(pid_to_fd.values())
                                code = _serve_one(child_r, fds, unused_fds,
                                                  old_handlers)
                            except Exception:
                                sys.excepthook(*sys.exc_info())
                                sys.stderr.flush()
                            finally:
                                os._exit(code)
                        else:
                            # Send pid to client process
                            try:
                                write_signed(child_w, pid)
                            except BrokenPipeError:
                                # client vanished
                                pass
                            pid_to_fd[pid] = child_w
                            os.close(child_r)
                            for fd in fds:
                                os.close(fd)

            except OSError as e:
                if e.errno != errno.ECONNABORTED:
                    raise
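The loop above implements the server side of the 'forkserver' start method; client code only has to select it. A minimal sketch (POSIX only, since forkserver is unavailable on Windows):

import multiprocessing as mp

def task(i):
    return i, mp.current_process().name

if __name__ == '__main__':
    mp.set_start_method('forkserver')
    with mp.Pool(2) as pool:
        print(pool.map(task, range(4)))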
Example #33
                        type=int,
                        required=True,
                        help='File handle for the pipe')
    parser.add_argument('--process-name',
                        type=str,
                        default=None,
                        help='Identifier for debugging purpose')

    args = parser.parse_args()

    info = dict()

    exitcode = 1
    try:
        with os.fdopen(args.pipe, 'rb') as from_parent:
            process.current_process()._inheriting = True
            try:
                prep_data = pickle.load(from_parent)
                spawn.prepare(prep_data)
                process_obj = pickle.load(from_parent)
            finally:
                del process.current_process()._inheriting

        exitcode = process_obj._bootstrap()
    except Exception as e:
        print('\n\n' + '-' * 80)
        print('{} failed with traceback: '.format(args.process_name))
        print('-' * 80)
        import traceback
        print(traceback.format_exc())
        print('\n' + '-' * 80)
Example #34

import logging
import logging.config
import os.path
import sys

from multiprocessing.process import current_process

LOG_DEBUG    = logging.DEBUG
LOG_INFO     = logging.INFO
LOG_WARNING  = logging.WARNING
LOG_ERROR    = logging.ERROR
LOG_CRITICAL = logging.CRITICAL

if sys.platform == 'win32' and current_process().name != 'MainProcess':
    _mode = 'a'  # Avoid mangling by subprocesses.
else:
    _mode = 'w'
_filename = 'openmdao_log.txt'

# Ensure we can write to the log file.
try:
    _tmplog = open(_filename, _mode)
except IOError:
    _filename = 'openmdao_log_%d.txt' % os.getpid()
else:
    _tmplog.close()

# Allow everything through, typical UNIX-ish timestamp, typical log format.
logging.basicConfig(level=logging.WARNING,
Example #35
import matplotlib.animation as animation
import progressbar
import dill as pickle
from scipy.spatial import KDTree
from sklearn.neighbors import NearestNeighbors as NN
from helper import *
from helper_mitchellschaeffer import *

from multiprocessing import Process, Queue, Manager, Pool  #we require Pathos version >=0.2.6. Otherwise we will get an "EOFError: Ran out of input" exception
#from pathos.multiprocessing import Pool
import multiprocessing
import ctypes

from multiprocessing import process

process.current_process(
)._config['tempdir'] = '/dev/shm/'  #'/data.bmp/roland/temp/'

id = 18

N = 150
ndata = 30000
sigma = [
    3, 5, 7, 5, 7, 7, 3, 5, 7, 5, 7, 7, 3, 5, 7, 5, 7, 7, 3, 5, 7, 5, 7, 7, 3,
    5, 7, 5, 7, 7, 3, 5, 7, 5, 7, 7, 3, 5, 7, 5, 7, 7, 3, 5, 7, 5, 7, 7, 3, 5,
    7, 5, 7, 7, 3, 5, 7, 5, 7, 7, 3, 5, 7, 5, 7, 7, 3, 5, 7, 5, 7, 7
][id - 1]
sigma_skip = [
    1, 1, 1, 2, 2, 3, 1, 1, 1, 2, 2, 3, 1, 1, 1, 2, 2, 3, 1, 1, 1, 2, 2, 3, 1,
    1, 1, 2, 2, 3, 1, 1, 1, 2, 2, 3, 1, 1, 1, 2, 2, 3, 1, 1, 1, 2, 2, 3, 1, 1,
    1, 2, 2, 3, 1, 1, 1, 2, 2, 3, 1, 1, 1, 2, 2, 3, 1, 1, 1, 2, 2, 3
][id - 1]
Example #36
def prepare(data):
    '''
    Try to get current process ready to unpickle process object
    '''
    old_main_modules.append(sys.modules['__main__'])

    if 'name' in data:
        process.current_process().name = data['name']

    if 'authkey' in data:
        process.current_process()._authkey = data['authkey']

    if 'log_to_stderr' in data and data['log_to_stderr']:
        util.log_to_stderr()

    if 'log_level' in data:
        util.get_logger().setLevel(data['log_level'])

    if 'sys_path' in data:
        sys.path = data['sys_path']

    if 'sys_argv' in data:
        sys.argv = data['sys_argv']

    if 'dir' in data:
        os.chdir(data['dir'])

    if 'orig_dir' in data:
        process.ORIGINAL_DIR = data['orig_dir']

    if 'main_path' in data:
        main_path = data['main_path']
        main_name = os.path.splitext(os.path.basename(main_path))[0]
        if main_name == '__init__':
            main_name = os.path.basename(os.path.dirname(main_path))

        if main_name != 'ipython':
            import imp

            if main_path is None:
                dirs = None
            elif os.path.basename(main_path).startswith('__init__.py'):
                dirs = [os.path.dirname(os.path.dirname(main_path))]
            else:
                dirs = [os.path.dirname(main_path)]

            assert main_name not in sys.modules, main_name
            file, path_name, etc = imp.find_module(main_name, dirs)
            try:
                # We would like to do "imp.load_module('__main__', ...)"
                # here.  However, that would cause 'if __name__ ==
                # "__main__"' clauses to be executed.
                main_module = imp.load_module('__parents_main__', file,
                                              path_name, etc)
            finally:
                if file:
                    file.close()

            sys.modules['__main__'] = main_module
            main_module.__name__ = '__main__'

            # Try to make the potentially picklable objects in
            # sys.modules['__main__'] realize they are in the main
            # module -- somewhat ugly.
            for obj in main_module.__dict__.values():
                try:
                    if obj.__module__ == '__parents_main__':
                        obj.__module__ = '__main__'
                except Exception:
                    pass
def prepare(data):
    '''
    Try to get current process ready to unpickle process object
    '''
    old_main_modules.append(sys.modules['__main__'])

    if 'name' in data:
        process.current_process().name = data['name']

    if 'authkey' in data:
        process.current_process()._authkey = data['authkey']

    if 'log_to_stderr' in data and data['log_to_stderr']:
        util.log_to_stderr()

    if 'log_level' in data:
        util.get_logger().setLevel(data['log_level'])

    if 'sys_path' in data:
        sys.path = data['sys_path']

    if 'sys_argv' in data:
        sys.argv = data['sys_argv']

    if 'dir' in data:
        os.chdir(data['dir'])

    if 'orig_dir' in data:
        process.ORIGINAL_DIR = data['orig_dir']

    if 'main_path' in data:
        # XXX (ncoghlan): The following code makes several bogus
        # assumptions regarding the relationship between __file__
        # and a module's real name. See PEP 302 and issue #10845
        # The problem is resolved properly in Python 3.4+, as
        # described in issue #19946

        main_path = data['main_path']
        main_name = os.path.splitext(os.path.basename(main_path))[0]
        if main_name == '__init__':
            main_name = os.path.basename(os.path.dirname(main_path))

        if main_name == '__main__':
            # For directory and zipfile execution, we assume an implicit
            # "if __name__ == '__main__':" around the module, and don't
            # rerun the main module code in spawned processes
            main_module = sys.modules['__main__']
            main_module.__file__ = main_path
        elif main_name != 'ipython':
            # Main modules not actually called __main__.py may
            # contain additional code that should still be executed
            import imp

            if main_path is None:
                dirs = None
            elif os.path.basename(main_path).startswith('__init__.py'):
                dirs = [os.path.dirname(os.path.dirname(main_path))]
            else:
                dirs = [os.path.dirname(main_path)]

            assert main_name not in sys.modules, main_name
            file, path_name, etc = imp.find_module(main_name, dirs)
            try:
                # We would like to do "imp.load_module('__main__', ...)"
                # here.  However, that would cause 'if __name__ ==
                # "__main__"' clauses to be executed.
                main_module = imp.load_module('__parents_main__', file,
                                              path_name, etc)
            finally:
                if file:
                    file.close()

            sys.modules['__main__'] = main_module
            main_module.__name__ = '__main__'

            # Try to make the potentially picklable objects in
            # sys.modules['__main__'] realize they are in the main
            # module -- somewhat ugly.
            for obj in main_module.__dict__.values():
                try:
                    if obj.__module__ == '__parents_main__':
                        obj.__module__ = '__main__'
                except Exception:
                    pass
import bocf_helper as bocfh
import barkley_helper as bh
import mitchell_helper as mh

import cross_prediction_avg_settings as cpmtps

from numpy.linalg.linalg import LinAlgError
#get V animation data -> [N, 150, 150]
#create 2d delay coordinates -> [N, 150, 150, d]
#create new dataset with small data groups -> [N, 150, 150, d*sigma*sigma]
#create d*sigma*sigma-k tree from this data
#search nearest neighbours (1 or 2) and predict new U value

#set the temporary buffer for the multiprocessing module manually to the shm
#to solve "no enough space"-problems
process.current_process()._config['tempdir'] = '/dev/shm/'

tau = cpmtps.tau
N = cpmtps.N
ndata = cpmtps.ndata
trainLength = cpmtps.trainLength
predictionLength = cpmtps.predictionLength
testLength = cpmtps.testLength
#the testLength is included in the predictionLength => testLength < predictionLength, and predictionLength-testLength is the validation length

useInputScaling = False

#will be set by the *_p.py file
direction, prediction_mode, patch_radius, eff_sigma, sigma, sigma_skip = None, None, None, None, None, None,
k, width, basis_points, ddim = None, None, None, None
noise = None