Example #1
	def _bootstrap(self):
		from multiprocessing import util
		global _current_process

		try:
			self._children = set()
			self._counter = itertools.count(1)
			try:
				# sys.stdin.close()
				sys.stdin = open(os.devnull)
			except (OSError, ValueError):
				pass
			_current_process = self
			util._finalizer_registry.clear()
			util._run_after_forkers()
			util.info('child process calling self.run()')
			try:
				self.run()
				exitcode = 0
			finally:
				pass
				# util._exit_function()
		except SystemExit as e:
			if not e.args:
				exitcode = 1
			elif isinstance(e.args[0], int):
				exitcode = e.args[0]
			else:
				sys.stderr.write(str(e.args[0]) + '\n')
				sys.stderr.flush()
				exitcode = 1
Example #2
    def set_shared(self, params, tasks, force=False):
        """Configures the mandatory shared resources.

        Updates `params`. Places tasks in `taskq`.

        Args:
            params (dict): Parameters.
            tasks (dict): Tasks.

        Keyword Args:
            force (bool): Forces task into `taskq`, regardless of whether it is already complete.

        """
        # Create a client that will set the actual resource data.
        params["save"] = self.save
        client = SCManager(user=self.user, address=self.address,
                           authkey=self.authkey)
        client.connect()
        # Update params.
        client.params().update(params)
        # Update tasks.
        taskq = client.taskq()
        for task in tasks.itervalues():
            if force or not task.get("complete", False):
                taskq.put(task)
        util.info("[Server] Shared data successfully created.")
        del client
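
A minimal, self-contained sketch of the manager pattern set_shared relies on. SCManager is project-specific, so the stdlib BaseManager stands in for it here; the address, authkey and task fields are illustrative. The server registers callables that always return the same objects, so any client connecting with the same address and authkey sees the same params dict and task queue.

from multiprocessing.managers import BaseManager
try:
    import Queue as queue          # Python 2
except ImportError:
    import queue                   # Python 3

_params = {}
_taskq = queue.Queue()

def _get_params():
    return _params

def _get_taskq():
    return _taskq

class DemoManager(BaseManager):
    """Stand-in for SCManager, exposing params()/taskq() accessors."""

DemoManager.register("params", callable=_get_params)
DemoManager.register("taskq", callable=_get_taskq)

if __name__ == "__main__":
    server = DemoManager(address=("127.0.0.1", 50000), authkey=b"secret")
    server.start()

    client = DemoManager(address=("127.0.0.1", 50000), authkey=b"secret")
    client.connect()
    client.params().update({"save": True})
    client.taskq().put({"task_name": "task_0", "complete": False})
    print("save=%s qsize=%d" % (client.params().get("save"),
                                client.taskq().qsize()))
    server.shutdown()
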
Example #3
def _send_result(local, remote, retries):
    """Send a task's result file to remote location.

    Args:
        local (str): Path of local file.
        remote (str): Path of remote file.
        retries (int): Number of retries to perform.

    Return:
        (bool): Indicates whether send was successful.

    """
    send_str = "{} (local) > {} (remote)".format(local, remote)
    sent_pths = ()
    for r in xrange(retries):
        sent_pths = ssh.put(local, remote)
        if sent_pths.succeeded and remote in sent_pths:
            sent = True
            break
        else:
            msg = "[Client] {}/{} failed send retries ({}){{}}".format(
                r + 1, retries, send_str)
            if r + 1 < retries:
                util.info(msg.format(", retrying..."))
            else:
                util.info(msg.format(", exiting."))
    else:
        sent = False
        # sys.exit(0)  # raise NetworkError(msg.format("."))
    return sent
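
The for/else in _send_result is the part most easily misread; here is a tiny stand-alone sketch of the idiom (attempt is a hypothetical callable, not the ssh.put helper used above): the else branch runs only when the loop finishes without break, i.e. when no retry succeeded.

def send_with_retries(attempt, retries):
    for i in range(retries):
        if attempt():
            sent = True
            break
    else:
        sent = False   # reached only if no attempt succeeded (or retries == 0)
    return sent

print(send_with_retries(lambda: True, 3))    # True
print(send_with_retries(lambda: False, 3))   # False
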
Example #4
    def bootstrap_2_6_6(self):
        """Pulled from python 2.6.6. Needed to ensure we have the fix from
        http://bugs.python.org/issue5313 when running on python version 2.6.2
        or lower."""

        try:
            self._children = set()
            self._counter = itertools.count(1)
            try:
                sys.stdin.close()
                sys.stdin = open(os.devnull)
            except (OSError, ValueError):
                pass
            multiprocessing._current_process = self
            util._finalizer_registry.clear()
            util._run_after_forkers()
            util.info("child process calling self.run()")
            try:
                self.run()
                exitcode = 0
            finally:
                util._exit_function()
        except SystemExit as e:
            if not e.args:
                exitcode = 1
            elif type(e.args[0]) is int:
                exitcode = e.args[0]
            else:
                sys.stderr.write(e.args[0] + "\n")
                sys.stderr.flush()
                exitcode = 1
Example #5
 def _after_fork(self):
     self._manager = None
     try:
         self._incref()
     except Exception as e:
         # the proxy may just be for a manager which has shutdown
         util.info('incref failed: %s' % e)
Example #6
    def _bootstrap(self):
        from multiprocessing import util
        global _current_process

        try:
            self._children = set()
            self._counter = itertools.count(1)
            try:
                sys.stdin.close()
                sys.stdin = open(os.devnull)
            except (OSError, ValueError):
                pass
            _current_process = self
            util._finalizer_registry.clear()
            util._run_after_forkers()
            util.info('child process calling self.run()')
            try:
                self.run()
                exitcode = 0
            finally:
                util._exit_function()
        except SystemExit as e:
            if not e.args:
                exitcode = 1
            elif type(e.args[0]) is int:
                exitcode = e.args[0]
            else:
                sys.stderr.write(e.args[0] + '\n')
                sys.stderr.flush()
                exitcode = 1
Example #7
    def shutdown(self, c):
        try:
            try:
                util.debug('manager received shutdown message')
                c.send(('#RETURN', None))
                if sys.stdout != sys.__stdout__:
                    util.debug('resetting stdout, stderr')
                    sys.stdout = sys.__stdout__
                    sys.stderr = sys.__stderr__
                util._run_finalizers(0)
                for p in active_children():
                    util.debug('terminating a child process of manager')
                    p.terminate()

                for p in active_children():
                    util.debug('terminating a child process of manager')
                    p.join()

                util._run_finalizers()
                util.info('manager exiting with exitcode 0')
            except:
                import traceback
                traceback.print_exc()

        finally:
            exit(0)

        return
Example #8
    def shutdown(self, c):
        '''
        Shutdown this process
        '''
        try:
            try:
                util.debug('manager received shutdown message')
                c.send(('#RETURN', None))

                if sys.stdout != sys.__stdout__:
                    util.debug('resetting stdout, stderr')
                    sys.stdout = sys.__stdout__
                    sys.stderr = sys.__stderr__

                util._run_finalizers(0)

                for p in active_children():
                    util.debug('terminating a child process of manager')
                    p.terminate()

                for p in active_children():
                    util.debug('terminating a child process of manager')
                    p.join()

                util._run_finalizers()
                util.info('manager exiting with exitcode 0')
            except:
                import traceback
                traceback.print_exc()
        finally:
            exit(0)
Example #9
 def _after_fork(self):
     self._manager = None
     try:
         self._incref()
     except Exception as e:
         # the proxy may just be for a manager which has shutdown
         util.info('incref failed: %s' % e)
Example #10
 def _feed(buffer, notempty, send_bytes, writelock, close, ignore_epipe,
           queue_sem):
     while True:
         try:
             with notempty:
                 if not buffer:
                     notempty.wait()
             try:
                 while True:
                     obj = buffer.popleft()
                     if obj is _sentinel:
                         debug('feeder thread got sentinel -- exiting')
                         close()
                         return
                     obj = ForkingPickler.dumps(obj)
                     if sys.platform == 'win32':
                         send_bytes(obj)
                     else:
                         with writelock:
                             send_bytes(obj)
             except IndexError:  # popleft raises IndexError when the buffer is empty
                 pass
         except Exception as e:
             if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE:
                 return
             # Since this runs in a daemon thread the resources it uses may become unusable while the process is cleaning up.
             # We ignore errors which happen after the process has started to clean up.
             if is_exiting():
                 info('error in queue thread: %s', e)
                 return
             else:
                 # Since the object has not been sent in the queue, we need to decrease the size of the queue.
                 # The error acts as if the object had been silently removed from the queue and this step is necessary to have a properly working queue.
                 queue_sem.release()
                 traceback.print_exc()
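
All of the _feed variants in these examples share the same shutdown convention: the producer pushes a unique sentinel object into the buffer, and the feeder thread exits when it pops it. A minimal, self-contained sketch of that pattern (the names below are illustrative, not multiprocessing's own):

import collections
import threading

_sentinel = object()
buffer = collections.deque()
notempty = threading.Condition()

def feeder():
    while True:
        with notempty:
            while not buffer:
                notempty.wait()
            obj = buffer.popleft()
        if obj is _sentinel:
            print('feeder got sentinel -- exiting')
            return
        print('feeder sending %r' % (obj,))

thread = threading.Thread(target=feeder)
thread.start()
for item in ('a', 'b', _sentinel):
    with notempty:
        buffer.append(item)
        notempty.notify()
thread.join()
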
Example #11
    def _feed(buffer, notempty, send, writelock, close, ignore_epipe):
        debug('starting thread to feed data to pipe')
        from .util import is_exiting

        nacquire = notempty.acquire
        nrelease = notempty.release
        nwait = notempty.wait
        bpopleft = buffer.popleft
        sentinel = _sentinel
        if sys.platform != 'win32':
            wacquire = writelock.acquire
            wrelease = writelock.release
        else:
            wacquire = None

        try:
            while 1:
                nacquire()
                try:
                    if not buffer:
                        nwait()
                finally:
                    nrelease()
                try:
                    while 1:
                        obj = bpopleft()
                        if obj is sentinel:
                            debug('feeder thread got sentinel -- exiting')
                            close()
                            return

                        if wacquire is None:
                            send(obj)
                            # Delete references to object. See issue16284
                            del obj
                        else:
                            wacquire()
                            try:
                                send(obj)
                                # Delete references to object. See issue16284
                                del obj
                            finally:
                                wrelease()
                except IndexError:
                    pass
        except Exception as e:
            if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE:
                return
            # Since this runs in a daemon thread the resources it uses
            # may become unusable while the process is cleaning up.
            # We ignore errors which happen after the process has
            # started to clean up.
            try:
                if is_exiting():
                    info('error in queue thread: %s', e)
                else:
                    import traceback
                    traceback.print_exc()
            except Exception:
                pass
Example #12
    def _after_fork(self):
        self._manager = None
        try:
            self._incref()
        except Exception as e:
            util.info('incref failed: %s' % e)

        return
Example #13
 def _run_server(cls, registry, address, authkey, serializer, writer, initializer=None, initargs=()):
     if initializer is not None:
         initializer(*initargs)
     server = cls._Server(registry, address, authkey, serializer)
     writer.send(server.address)
     writer.close()
     util.info('manager serving at %r', server.address)
     server.serve_forever()
Example #14
    def _after_fork(self):
        self._manager = None
        try:
            self._incref()
        except Exception as e:
            util.info('incref failed: %s' % e)

        return
Example #15
    def _feed(buffer, notempty, send_bytes, writelock, close, reducers,
              ignore_epipe, onerror, queue_sem):
        util.debug('starting thread to feed data to pipe')
        nacquire = notempty.acquire
        nrelease = notempty.release
        nwait = notempty.wait
        bpopleft = buffer.popleft
        sentinel = _sentinel
        if sys.platform != 'win32':
            wacquire = writelock.acquire
            wrelease = writelock.release
        else:
            wacquire = None

        while 1:
            try:
                nacquire()
                try:
                    if not buffer:
                        nwait()
                finally:
                    nrelease()
                try:
                    while 1:
                        obj = bpopleft()
                        if obj is sentinel:
                            util.debug('feeder thread got sentinel -- exiting')
                            close()
                            return

                        # serialize the data before acquiring the lock
                        obj_ = CustomizableLokyPickler.dumps(
                            obj, reducers=reducers)
                        if wacquire is None:
                            send_bytes(obj_)
                        else:
                            wacquire()
                            try:
                                send_bytes(obj_)
                            finally:
                                wrelease()
                        # Remove references early to avoid leaking memory
                        del obj, obj_
                except IndexError:
                    pass
            except BaseException as e:
                if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE:
                    return
                # Since this runs in a daemon thread the resources it uses
                # may become unusable while the process is cleaning up.
                # We ignore errors which happen after the process has
                # started to clean up.
                if util.is_exiting():
                    util.info('error in queue thread: %s', e)
                    return
                else:
                    queue_sem.release()
                    onerror(e, obj)
Example #16
    def _feed(buffer, notempty, send, writelock, close, ignore_epipe):
        debug('starting thread to feed data to pipe')
        from .util import is_exiting

        nacquire = notempty.acquire
        nrelease = notempty.release
        nwait = notempty.wait
        bpopleft = buffer.popleft
        sentinel = _sentinel
        if sys.platform != 'win32':
            wacquire = writelock.acquire
            wrelease = writelock.release
        else:
            wacquire = None

        try:
            while 1:
                nacquire()
                try:
                    if not buffer:
                        nwait()
                finally:
                    nrelease()
                try:
                    while 1:
                        obj = bpopleft()
                        if obj is sentinel:
                            debug('feeder thread got sentinel -- exiting')
                            close()
                            return

                        if wacquire is None:
                            send(obj)
                        else:
                            wacquire()
                            try:
                                send(obj)
                            finally:
                                wrelease()
                except IndexError:
                    pass
        except Exception as e:
            if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE:
                return
            # Since this runs in a daemon thread the resources it uses
            # may become unusable while the process is cleaning up.
            # We ignore errors which happen after the process has
            # started to clean up.
            try:
                if is_exiting():
                    info('error in queue thread: %s', e)
                else:
                    import traceback
                    traceback.print_exc()
            except Exception:
                pass
Example #17
 def _run_server(cls, registry, address, authkey, serializer, writer, initializer=None, initargs=()):
     """
     Create a server, report its address and run it
     """
     if initializer is not None:
         initializer(*initargs)
     server = cls._Server(registry, address, authkey, serializer)
     writer.send(server.address)
     writer.close()
     util.info('manager serving at %r', server.address)
     server.serve_forever()
     return
Example #18
    def _feed(buffer, notempty, send, writelock, close):
        debug('starting thread to feed data to pipe')
        from .util import is_exiting
        nacquire = notempty.acquire
        nrelease = notempty.release
        nwait = notempty.wait
        bpopleft = buffer.popleft
        sentinel = _sentinel
        if sys.platform != 'win32':
            wacquire = writelock.acquire
            wrelease = writelock.release
        else:
            wacquire = None
        try:
            while 1:
                nacquire()
                try:
                    if not buffer:
                        nwait()
                finally:
                    nrelease()

                try:
                    while 1:
                        obj = bpopleft()
                        if obj is sentinel:
                            debug('feeder thread got sentinel -- exiting')
                            close()
                            return
                        if wacquire is None:
                            send(obj)
                        else:
                            wacquire()
                            try:
                                send(obj)
                            finally:
                                wrelease()

                except IndexError:
                    pass

        except Exception as e:
            try:
                if is_exiting():
                    info('error in queue thread: %s', e)
                else:
                    import traceback
                    traceback.print_exc()
            except Exception:
                pass

        return
Example #19
    def _feed(buffer, notempty, send, writelock, close):
        debug('starting thread to feed data to pipe')
        from .util import is_exiting
        nacquire = notempty.acquire
        nrelease = notempty.release
        nwait = notempty.wait
        bpopleft = buffer.popleft
        sentinel = _sentinel
        if sys.platform != 'win32':
            wacquire = writelock.acquire
            wrelease = writelock.release
        else:
            wacquire = None
        try:
            while 1:
                nacquire()
                try:
                    if not buffer:
                        nwait()
                finally:
                    nrelease()

                try:
                    while 1:
                        obj = bpopleft()
                        if obj is sentinel:
                            debug('feeder thread got sentinel -- exiting')
                            close()
                            return
                        if wacquire is None:
                            send(obj)
                        else:
                            wacquire()
                            try:
                                send(obj)
                            finally:
                                wrelease()

                except IndexError:
                    pass

        except Exception as e:
            try:
                if is_exiting():
                    info('error in queue thread: %s', e)
                else:
                    import traceback
                    traceback.print_exc()
            except Exception:
                pass

        return
Example #20
    def _finalize_manager(process, address, authkey, state, _Client):
        '''
        Shutdown the manager process; will be registered as a finalizer
        '''
        if process.is_alive():
            util.info('sending shutdown message to manager')
            try:
                conn = _Client(address, authkey=authkey)
                try:
                    managers.dispatch(conn, None, 'shutdown')
                finally:
                    conn.close()
            except Exception:
                pass

            process.join(timeout=0.2)
            if process.is_alive():
                util.info('manager still alive')
                if hasattr(process, 'terminate'):
                    util.info('trying to `terminate()` manager process')

                    try:
                        process.terminate()
                        process.join(timeout=0.1)
                    # XXX: catch the OS error ... something weird is going on here..
                    except OSError:
                        pass
                    if process.is_alive():
                        util.info('manager still alive after terminate')

        state.value = managers.State.SHUTDOWN
        try:
            del managers.BaseProxy._address_to_local[address]
        except KeyError:
            pass
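
The docstring notes that _finalize_manager "will be registered as a finalizer"; in multiprocessing that registration goes through util.Finalize. A small runnable sketch of the mechanism itself (the owner/cleanup names are made up for illustration):

from multiprocessing import util

class Owner(object):
    pass

def cleanup(tag):
    print("finalizing %s" % tag)

owner = Owner()
finalizer = util.Finalize(owner, cleanup, args=("manager",), exitpriority=0)
finalizer()   # runs cleanup("manager") once, then unregisters itself
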
Example #21
 def _run_server(cls, registry, address, authkey, serializer, writer,
                 conn_writer, initializer=None, initargs=()):
     """ Create a server, report its address and run it."""
     if initializer is not None:
         initializer(*initargs)
     # Create server.
     server = cls._Server(registry, fqaddr(address), authkey, serializer,
                          conn_writer)
     # Inform parent process of the server's address.
     writer.send(server.address)
     writer.close()
     # Run the manager.
     util.info("Server running at {}:{}.".format(*server.address))
     server.serve_forever()
Example #22
 def _finalize_manager(process, address, authkey, state, conns, _Client):
     """ Shutdown the manager process; will be registered as a
     finalizer."""
     if process.is_alive():
         util.info("Sending shutdown message to manager.")
         try:
             conn = _Client(fqaddr(address), authkey=authkey)
             try:
                 dispatch(conn, None, "shutdown")
             finally:
                 conn.close()
         except Exception:
             pass
         for conn in conns:
             conn.close()
         process.join(timeout=0.2)
         if process.is_alive():
             util.info("Manager still alive.")
             if hasattr(process, "terminate"):
                 util.info("Trying to `terminate()` manager process.")
                 process.terminate()
                 process.join(timeout=0.1)
                 if process.is_alive():
                     util.info("Manager still alive after terminate.")
     state.value = State.SHUTDOWN
     try:
         del BaseProxy._address_to_local[fqaddr(address)]
     except KeyError:
         pass
Example #23
    def _finalize_manager(process, address, authkey, state, _Client):
        '''
        Shutdown the manager process; will be registered as a finalizer
        '''
        if process.is_alive():
            util.info('sending shutdown message to manager')
            try:
                conn = _Client(address, authkey=authkey)
                try:
                    managers.dispatch(conn, None, 'shutdown')
                finally:
                    conn.close()
            except Exception:
                pass

            process.join(timeout=0.2)
            if process.is_alive():
                util.info('manager still alive')
                if hasattr(process, 'terminate'):
                    util.info('trying to `terminate()` manager process')

                    try:
                        process.terminate()
                        process.join(timeout=0.1)
                    # XXX: catch the OS error ... something weird is going on here..
                    except OSError:
                        pass
                    if process.is_alive():
                        util.info('manager still alive after terminate')

        state.value = managers.State.SHUTDOWN
        try:
            del managers.BaseProxy._address_to_local[address]
        except KeyError:
            pass
Example #24
    def _finalize_manager(process, address, authkey, state, _Client):
        if process.is_alive():
            util.info('sending shutdown message to manager')
            try:
                conn = _Client(address, authkey=authkey)
                try:
                    dispatch(conn, None, 'shutdown')
                finally:
                    conn.close()

            except Exception:
                pass

            process.join(timeout=0.2)
            if process.is_alive():
                util.info('manager still alive')
                if hasattr(process, 'terminate'):
                    util.info('trying to `terminate()` manager process')
                    process.terminate()
                    process.join(timeout=0.1)
                    if process.is_alive():
                        util.info('manager still alive after terminate')
        state.value = State.SHUTDOWN
        try:
            del BaseProxy._address_to_local[address]
        except KeyError:
            pass
Example #25
    def _run_server(cls, registry, address, authkey, serializer, writer):
        '''
        Create a server, report its address and run it
        '''
        # create server
        server = cls._Server(registry, address, authkey, serializer)

        # inform parent process of the server's address
        writer.send(server.address)
        writer.close()

        # run the manager
        util.info('manager serving at %r', server.address)
        server.serve_forever()
Example #26
    def _finalize_manager(process, address, authkey, state, _Client):
        if process.is_alive():
            util.info('sending shutdown message to manager')
            try:
                conn = _Client(address, authkey=authkey)
                try:
                    dispatch(conn, None, 'shutdown')
                finally:
                    conn.close()

            except Exception:
                pass

            process.join(timeout=0.2)
            if process.is_alive():
                util.info('manager still alive')
                if hasattr(process, 'terminate'):
                    util.info('trying to `terminate()` manager process')
                    process.terminate()
                    process.join(timeout=0.1)
                    if process.is_alive():
                        util.info('manager still alive after terminate')
        state.value = State.SHUTDOWN
        try:
            del BaseProxy._address_to_local[address]
        except KeyError:
            pass

        return
Example #27
 def _worker_process(self, finish, job, taskq, doneq, max_run_retries=3,
                     max_send_retries=10):
     """Run a set of jobs."""
     job.setup()
     run_retries = 0
     exitcode = 0
     # Loop over tasks.
     while not taskq.empty():
         # Pop task off queue.
         task = taskq.get()
         success = False
         T0 = Time.time()
         try:
             progress, tmp_fid = job.run(task)
         except EOFError as err:
             # Report what went wrong and retry.
             util.debug("[Client] {}".format(err.msg))
             msg = "[Client] {}/{} failed run retries, {{}}".format(
                 run_retries, max_run_retries)
             if run_retries < max_run_retries:
                 run_retries += 1
                 util.debug(msg.format("retrying..."))
             else:
                 util.debug(msg.format("exiting."))
                 raise err
         else:
             # Send results.
             sent = send_result(tmp_fid, task, retries=max_send_retries)
             if sent or not self.save:
                 # Mark simulation as complete.
                 task["complete"] = True
                 # Task is done.
                 doneq.put(task)
                 success = True
                 # Report progress.
                 progress.task = (taskq.qsize(), T0.delta())
                 progress.report()
         finally:
             if not success:
                 # Task did not complete successfully: put it back in taskq.
                 taskq.put(task)
             taskq.task_done()
             exitcode = 0
             if finish.is_set():
                 exitcode = 100
                 break
     job.teardown()
     util.info("[Client] Process complete: {}.".format(ProcLabel()))
     sys.exit(exitcode)
Example #28
def _global_after_fork():
    # Previously every app would call:
    #    `register_after_fork(app, app._after_fork)`
    # but this created a leak as `register_after_fork` stores concrete object
    # references and once registered an object cannot be removed without
    # touching and iterating over the private afterfork registry list.
    #
    # See Issue #1949
    from celery import _state
    from multiprocessing.util import info
    for app in _state.apps:
        try:
            app._after_fork()
        except Exception as exc:
            info('after forker raised exception: %r' % (exc, ), exc_info=1)
Example #29
def _global_after_fork():
    # Previously every app would call:
    #    `register_after_fork(app, app._after_fork)`
    # but this created a leak as `register_after_fork` stores concrete object
    # references and once registered an object cannot be removed without
    # touching and iterating over the private afterfork registry list.
    #
    # See Issue #1949
    from celery import _state
    from multiprocessing.util import info
    for app in _state.apps:
        try:
            app._after_fork()
        except Exception as exc:
            info('after forker raised exception: %r' % (exc, ), exc_info=1)
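
The comment above explains the leak: multiprocessing.util.register_after_fork keeps a strong reference to whatever object it is handed. A hedged sketch of the alternative it describes, registering a single module-level hook and holding the apps weakly (the apps collection here is illustrative, not celery's actual registry):

import weakref
from multiprocessing.util import register_after_fork

apps = weakref.WeakSet()

def _global_after_fork(_):
    # Runs once in the child after os.fork(); fans out to the apps still alive.
    for app in list(apps):
        app._after_fork()

# Register the hook itself as the "object", so no app instance is pinned.
register_after_fork(_global_after_fork, _global_after_fork)
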
Example #30
    def _feed(buffer, notempty, send, writelock, close):
        debug('starting thread to feed data to pipe')
        from .util import is_exiting

        nacquire = notempty.acquire
        nrelease = notempty.release
        nwait = notempty.wait
        bpopleft = buffer.popleft
        sentinel = _sentinel
        wacquire = writelock.acquire
        wrelease = writelock.release

        try:
            while 1:
                try:
                    while 1:
                        obj = bpopleft()
                        if obj is sentinel:
                            debug('feeder thread got sentinel -- exiting')
                            close()
                            return

                        if wacquire is None:
                            send(obj)
                        else:
                            wacquire()
                            try:
                                send(obj)
                            finally:
                                wrelease()
                except IndexError:
                    pass
                yield "Done for now"
        except Exception as e:
            # Since this runs in a daemon thread the resources it uses
            # may become unusable while the process is cleaning up.
            # We ignore errors which happen after the process has
            # started to clean up.
            try:
                if is_exiting():
                    info('error in queue thread: %s', e)
                else:
                    import traceback
                    traceback.print_exc()
            except Exception:
                pass
Example #31
 def _malloc(self, size):
     i = bisect.bisect_left(self._lengths, size)
     if i == len(self._lengths):
         length = self._roundup(max(self._size, size), mmap.PAGESIZE)
         self._size *= 2
         info('allocating a new mmap of length %d', length)
         arena = Arena(length)
         self._arenas.append(arena)
         return (arena, 0, length)
     length = self._lengths[i]
     seq = self._len_to_seq[length]
     block = seq.pop()
     if not seq:
         del self._len_to_seq[length]
         del self._lengths[i]
     arena, start, stop = block
     del self._start_to_block[arena, start]
     del self._stop_to_block[arena, stop]
     return block
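
The allocator's fast path is just a sorted-list lookup; this small example shows what bisect.bisect_left returns in the two cases _malloc handles (the block lengths are made up):

import bisect

lengths = [4096, 16384, 65536]      # sorted lengths of currently free blocks
for size in (10000, 100000):
    i = bisect.bisect_left(lengths, size)
    if i == len(lengths):
        print("%6d: nothing fits -- allocate a new arena" % size)
    else:
        print("%6d: reuse the smallest block that fits (%d)" % (size, lengths[i]))
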
Example #32
    def get_shared(self):
        """Get shared data from server.

        Return:
            (tuple: dict, multiprocessing.JoinableQueue,
            multiprocessing.Queue, multiprocessing.JoinableQueue):
            Parameters, shared queue for tasks, shared queue for done
            tasks, shared queue for active connections.

        """
        params = self.params()
        self.Job = params["Job"]
        taskq = self.taskq()
        doneq = self.doneq()
        activeq = self.activeq()
        util.info("[Client] Received shared data.")
        # Notify the server that this client is active.
        activeq.put(self.host)
        return params, taskq, doneq, activeq
Example #33
 def _malloc(self, size):
     i = bisect.bisect_left(self._lengths, size)
     if i == len(self._lengths):
         length = self._roundup(max(self._size, size), mmap.PAGESIZE)
         self._size *= 2
         info('allocating a new mmap of length %d', length)
         arena = Arena(length)
         self._arenas.append(arena)
         return (arena, 0, length)
     length = self._lengths[i]
     seq = self._len_to_seq[length]
     block = seq.pop()
     if not seq:
         del self._len_to_seq[length]
         del self._lengths[i]
     arena, start, stop = block
     del self._start_to_block[arena, start]
     del self._stop_to_block[arena, stop]
     return block
Example #34
    def start(self, params, tasks, force=False):
        """Start server.

        Args:
            params (dict): Parameters.
            tasks (dict): Tasks.

        Keyword Args:
            force (bool): Forces task into `taskq`, regardless of whether
                          it is already complete.

        """
        super(ServerManager, self).start()
        self.set_shared(params, tasks, force=force)
        # Monitor the tasks and update the tasks file as completed tasks
        # arrive from the clients.
        self.manage_tasks(tasks)
        self.shutdown()
        util.info("[Server] Jobs complete. Shutting down.")
        sys.exit(0)
Example #35
    def _malloc(self, size):
        # returns a large enough block -- it might be much larger
        i = bisect.bisect_left(self._lengths, size)
        if i == len(self._lengths):
            length = self._roundup(max(self._size, size), mmap.PAGESIZE)
            self._size *= 2
            info('allocating a new mmap of length %d', length)
            arena = Arena(length)
            self._arenas.append(arena)
            return (arena, 0, length)
        else:
            length = self._lengths[i]
            seq = self._len_to_seq[length]
            block = seq.pop()
            if not seq:
                del self._len_to_seq[length], self._lengths[i]

        (arena, start, stop) = block
        del self._start_to_block[(arena, start)]
        del self._stop_to_block[(arena, stop)]
        return block
Example #36
    def _malloc(self, size):
        # returns a large enough block -- it might be much larger
        i = bisect.bisect_left(self._lengths, size)
        if i == len(self._lengths):
            length = self._roundup(max(self._size, size), mmap.PAGESIZE)
            self._size *= 2
            info('allocating a new mmap of length %d', length)
            arena = Arena(length)
            self._arenas.append(arena)
            return (arena, 0, length)
        else:
            length = self._lengths[i]
            seq = self._len_to_seq[length]
            block = seq.pop()
            if not seq:
                del self._len_to_seq[length], self._lengths[i]

        (arena, start, stop) = block
        del self._start_to_block[(arena, start)]
        del self._stop_to_block[(arena, stop)]
        return block
Example #37
    def _spawn(self, finish, params, taskq, doneq, args, kwargs):
        """Spawn a job process.

        Args:
            finish (multiprocessing.Event): Signals that process is finished.
            params (dict): Parameters.
            taskq (multiprocessing.JoinableQueue): Shared queue for tasks.
            doneq (multiprocessing.Queue): Shared queue for done tasks.
            args (tuple): Arguments for ``_worker_process``.
            kwargs (dict): Keyword arguments for ``_worker_process``.

        Return:
            (multiprocessing.Process): Worker process.

        """
        # Initialize the job.
        job = self.Job(params, *args, **kwargs)
        proc = Process(target=self._worker_process,
                       args=(finish, job, taskq, doneq))
        util.info("[Client] Spawning process: '{}'".format(proc.name))
        return proc
Example #38
    def handle_request(self, c):
        funcname = result = request = None
        try:
            connection.deliver_challenge(c, self.authkey)
            connection.answer_challenge(c, self.authkey)
            request = c.recv()
            ignore, funcname, args, kwds = request
            assert funcname in self.public, '%r unrecognized' % funcname
            func = getattr(self, funcname)
        except Exception:
            msg = ('#TRACEBACK', format_exc())
        else:
            try:
                result = func(c, *args, **kwds)
            except Exception:
                msg = ('#TRACEBACK', format_exc())
            else:
                msg = ('#RETURN', result)

        try:
            c.send(msg)
        except Exception as e:
            try:
                c.send(('#TRACEBACK', format_exc()))
            except Exception:
                pass

            util.info('Failure to send message: %r', msg)
            util.info(' ... request was %r', request)
            util.info(' ... exception was %r', e)

        c.close()
Example #39
 def report(self):
     """ Outputs formatted progress text."""
     util.info("\n")
     if self.task:
         util.info(self.task)
     if self.msg:
         util.info(self.msg)
Example #40
    def bootstrap_2_6_6(self):
        """Pulled from python 2.6.6. Needed to ensure we have the fix from
        http://bugs.python.org/issue5313 when running on python version 2.6.2
        or lower."""

        try:
            self._children = set()
            self._counter = itertools.count(1)
            try:
                sys.stdin.close()
                sys.stdin = open(os.devnull)
            except (OSError, ValueError):
                pass
            multiprocessing._current_process = self
            util._finalizer_registry.clear()
            util._run_after_forkers()
            util.info('child process calling self.run()')
            try:
                self.run()
                exitcode = 0
            finally:
                util._exit_function()
        except SystemExit as e:
            if not e.args:
                exitcode = 1
            elif type(e.args[0]) is int:
                exitcode = e.args[0]
            else:
                sys.stderr.write(e.args[0] + '\n')
                sys.stderr.flush()
                exitcode = 1
        except:
            exitcode = 1
            import traceback
            sys.stderr.write('Process %s:\n' % self.name)
            sys.stderr.flush()
            traceback.print_exc()

        util.info('process exiting with exitcode %d' % exitcode)
        return exitcode
Example #41
    def handle_request(self, c):
        funcname = result = request = None
        try:
            connection.deliver_challenge(c, self.authkey)
            connection.answer_challenge(c, self.authkey)
            request = c.recv()
            ignore, funcname, args, kwds = request
            func = getattr(self, funcname)
        except Exception:
            msg = ('#TRACEBACK', format_exc())
        else:
            try:
                result = func(c, *args, **kwds)
            except Exception:
                msg = ('#TRACEBACK', format_exc())
            else:
                msg = ('#RETURN', result)

        try:
            c.send(msg)
        except Exception as e:
            try:
                c.send(('#TRACEBACK', format_exc()))
            except Exception:
                pass

            util.info('Failure to send message: %r', msg)
            util.info(' ... request was %r', request)
            util.info(' ... exception was %r', e)

        c.close()
        return
Example #42
 def handle_request(self, c):
     '''
     Handle a new connection
     '''
     funcname = result = request = None
     try:
         connection.deliver_challenge(c, self.authkey)
         connection.answer_challenge(c, self.authkey)
         request = c.recv()
         ignore, funcname, args, kwds = request
         assert funcname in self.public, '%r unrecognized' % funcname
         func = getattr(self, funcname)
     except Exception:
         msg = ('#TRACEBACK', format_exc())
     else:
         try:
             result = func(c, *args, **kwds)
         except Exception:
             msg = ('#TRACEBACK', format_exc())
         else:
             msg = ('#RETURN', result)
     try:
         c.send(msg)
     except Exception as e:
         try:
             c.send(('#TRACEBACK', format_exc()))
         except Exception:
             pass
         util.info('Failure to send message: %r', msg)
         util.info(' ... request was %r', request)
         util.info(' ... exception was %r', e)
Example #43
    def start(self, n_procs=None, args=(), kwargs={}, timeout=1e5):
        """ Start the client.

        Keyword Args:
            n_procs (int): Number of processes to run.
            args (tuple): Arguments for ``_worker_process``.
            kwargs (dict): Keyword arguments for ``_worker_process``.
            timeout (float): Time to run process before signaling that it
                             should be restarted. This circumvents memory leaks.

        """
        if n_procs is None:
            n_procs = cpu_count()
        self.connect()
        params, taskq, doneq, activeq = self.get_shared()
        kwargs["save"] = params.pop("save", False)
        if kwargs["save"]:
            setup_send(self.user, self.address[0])
        # Arguments for each thread's worker.
        worker_args = (params, taskq, doneq, args, kwargs)
        worker_kwargs = dict(timeout=timeout)
        threads = []
        # Control each process with a Thread that can time itself.
        util.info("[Client] Starting  processes...")
        for iproc in xrange(n_procs):
            name = "Thread_{:02d}".format(iproc)
            thread = Thread(name=name, target=self._worker_thread,
                            args=worker_args, kwargs=worker_kwargs)
            threads.append(thread)
            thread.start()
            thread.join(timeout=0.2)
        # Wait until all threads have completed.
        for thread in threads:
            thread.join()
        # Notifies the server that this client is done.
        activeq.task_done()
        util.info("[Client] Jobs complete.")
        sys.exit(0)
Example #44
    def manage_tasks(self, tasks):
        """Receives completed tasks from the clients and updates the
        tasks file.

        Args:
            tasks (dict): Tasks.

        """
        t0 = Time.time()
        tasks_pth = path(self.params().get("tasks_pth", ""))
        n_tasks = len(tasks)
        n_done0 = n_tasks - self.taskq().qsize()
        n_done = n_done0
        util.info("[Server] Tasks queued. {}/{} ({:.1f}%%) complete.".format(
            n_done, n_tasks, n_done / n_tasks * 100))
        save_time = Time.time()
        # Set up a thread that joins self.taskq and only returns once
        # all the tasks have completed. The while loop continues as
        # long as taskq_thread is alive.
        taskq_thread = Thread(name="taskq", target=self._taskq_worker,
                              args=(self.taskq,))
        taskq_thread.start()
        while taskq_thread.is_alive():
            # Wait for a done task to arrive.
            task = self.doneq().get()
            # Update the master tasks dict.
            task_name = task["task_name"]
            tasks[task_name].update(task)
            if tasks_pth and save_time.delta() > 10.:
                # Save task to disk.
                update_tasks_file(tasks_pth, tasks, overwrite=True)
                save_time = Time.time()
            # Report progress.
            n_done += 1
            percent = float(n_done) / n_tasks * 100.
            dt = t0.delta()
            time_per_task = dt / float(n_done - n_done0)
            n_left = n_tasks - n_done
            t_left = Time(time_per_task * n_left)
            util.info("[Server] Task `{}` complete:\n\t\t {}/{} ({:.2f}%) {} "
                      "\n\t\t Time left: {}.".format(
                          task_name, n_done, n_tasks, percent, dt, t_left))
        # Save tasks to disk one last time.
        update_tasks_file(tasks_pth, tasks, overwrite=True)
        # Wait for the clients to disconnect.
        clients = []
        while not self.activeq().empty():
            clients.append(self.activeq().get())
        util.debug("[Server] Waiting for clients to disconnect:\n\t{}.".format(
            "\n\t".join(clients)))
        self.activeq().join()
        time.sleep(0.5)
        self.activeq().close()
        self.taskq().close()
        self.doneq().close()
        util.info("[Server] Tasks completed.")
Example #45
    def serve_client(self, conn):
        util.debug('starting server thread to service %r', threading.current_thread().name)
        recv = conn.recv
        send = conn.send
        id_to_obj = self.id_to_obj
        while not self.stop:
            try:
                methodname = obj = None
                request = recv()
                ident, methodname, args, kwds = request
                obj, exposed, gettypeid = id_to_obj[ident]
                if methodname not in exposed:
                    raise AttributeError('method %r of %r object is not in exposed=%r' % (methodname, type(obj), exposed))
                function = getattr(obj, methodname)
                try:
                    res = function(*args, **kwds)
                except Exception as e:
                    msg = ('#ERROR', e)
                else:
                    typeid = gettypeid and gettypeid.get(methodname, None)
                    if typeid:
                        rident, rexposed = self.create(conn, typeid, res)
                        token = Token(typeid, self.address, rident)
                        msg = ('#PROXY', (rexposed, token))
                    else:
                        msg = ('#RETURN', res)

            except AttributeError:
                if methodname is None:
                    msg = ('#TRACEBACK', format_exc())
                else:
                    try:
                        fallback_func = self.fallback_mapping[methodname]
                        result = fallback_func(self, conn, ident, obj, *args, **kwds)
                        msg = ('#RETURN', result)
                    except Exception:
                        msg = ('#TRACEBACK', format_exc())

            except EOFError:
                util.debug('got EOF -- exiting thread serving %r', threading.current_thread().name)
                sys.exit(0)
            except Exception:
                msg = ('#TRACEBACK', format_exc())

            try:
                try:
                    send(msg)
                except Exception as e:
                    send(('#UNSERIALIZABLE', repr(msg)))

            except Exception as e:
                util.info('exception in thread serving %r', threading.current_thread().name)
                util.info(' ... message was %r', msg)
                util.info(' ... exception was %r', e)
                conn.close()
                sys.exit(1)
Example #46
    def _worker_thread(self, params, taskq, doneq, args, kwargs, timeout=1e5):
        """Target function to be run in a thread, which manages the job
        process.

        Args:
            params (dict): Parameters.
            taskq (multiprocessing.JoinableQueue): Shared queue for tasks.
            doneq (multiprocessing.Queue): Shared queue for done tasks.
            args (tuple): Arguments for ``_worker_process``.
            kwargs (dict): Keyword arguments for ``_worker_process``.

        Keyword Args:
            timeout (float): Time to run process before signaling that it
                             should be restarted. This circumvents memory leaks.

        """
        util.info("[Client] Starting thread: {}.".format(current_thread()))
        finish = Event()
        # Loop until taskq is empty.
        while True:
            try:
                if taskq.empty():
                    break
            except (EOFError, IOError):
                break
            finish.clear()
            # Spawn a process.
            proc = self._spawn(finish, params, taskq, doneq, args, kwargs)
            proc.start()
            # Wait for timeout (or until the proc finishes), then
            # signal it to finish and wait until it returns.
            proc.join(timeout=timeout)
            finish.set()
            proc.join()
            # Check that the exit was not an error.
            if proc.exitcode == 100:
                util.info("[Client] Process killed, restarting.")
            elif proc.exitcode > 0:
                raise ProcError(proc)
        util.info("[Client] Ending thread: {}.".format(current_thread()))
        sys.exit(0)
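
The thread above restarts its worker whenever the process exits with code 100, which _worker_process (Example #27) uses to mean "stopped on request, please respawn me". A stripped-down, self-contained sketch of that timeout/restart handshake:

import sys
import time
from multiprocessing import Process, Event

def worker(finish):
    while not finish.is_set():
        time.sleep(0.1)          # stand-in for one unit of work
    sys.exit(100)                # "stopped on request, restart me"

if __name__ == "__main__":
    finish = Event()
    proc = Process(target=worker, args=(finish,))
    proc.start()
    proc.join(timeout=0.5)       # let it run for its time slice
    finish.set()                 # then ask it to wrap up
    proc.join()
    print("restart" if proc.exitcode == 100 else "done")
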
            if not e.args:
                exitcode = 1
            elif type(e.args[0]) is int:
                exitcode = e.args[0]
            else:
                sys.stderr.write(e.args[0] + '\n')
                sys.stderr.flush()
                exitcode = 1
        except:
            exitcode = 1
            import traceback
            sys.stderr.write('Process %s:\n' % self.name)
            sys.stderr.flush()
            traceback.print_exc()

        util.info('process exiting with exitcode %d' % exitcode)
        return exitcode


#
# We subclass bytes to avoid accidental transmission of auth keys over network
#


class AuthenticationString(bytes):
    def __reduce__(self):
        from multiprocessing.forking import Popen
        if not Popen.thread_is_spawning():
            raise TypeError('Pickling an AuthenticationString object is '
                            'disallowed for security reasons')
        return AuthenticationString, (bytes(self), )
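
A quick illustration of the guard in __reduce__ above: outside of the spawning machinery, pickling the authentication key raises TypeError, so it cannot be serialized onto a connection by accident.

import pickle
from multiprocessing.process import AuthenticationString

try:
    pickle.dumps(AuthenticationString(b"secret"))
except TypeError as exc:
    print(exc)   # "Pickling an AuthenticationString object is disallowed ..."
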
            except EOFError:
                util.debug('got EOF -- exiting thread serving %r',
                           threading.current_thread().name)
                sys.exit(0)

            except Exception:
                msg = ('#TRACEBACK', format_exc())

            try:
                try:
                    send(msg)
                except Exception as e:
                    send(('#UNSERIALIZABLE', repr(msg)))
            except Exception as e:
                util.info('exception in thread serving %r',
                        threading.current_thread().name)
                util.info(' ... message was %r', msg)
                util.info(' ... exception was %r', e)
                conn.close()
                sys.exit(1)

    def fallback_getvalue(self, conn, ident, obj):
        return obj

    def fallback_str(self, conn, ident, obj):
        return str(obj)

    def fallback_repr(self, conn, ident, obj):
        return repr(obj)

    fallback_mapping = {
    def serve_client(self, conn):
        util.debug('starting server thread to service %r',
                   threading.current_thread().name)
        recv = conn.recv
        send = conn.send
        id_to_obj = self.id_to_obj
        while not self.stop:
            try:
                methodname = obj = None
                request = recv()
                ident, methodname, args, kwds = request
                obj, exposed, gettypeid = id_to_obj[ident]
                if methodname not in exposed:
                    raise AttributeError(
                        'method %r of %r object is not in exposed=%r' %
                        (methodname, type(obj), exposed))
                function = getattr(obj, methodname)
                try:
                    res = function(*args, **kwds)
                except Exception as e:
                    msg = ('#ERROR', e)
                else:
                    typeid = gettypeid and gettypeid.get(methodname, None)
                    if typeid:
                        rident, rexposed = self.create(conn, typeid, res)
                        token = Token(typeid, self.address, rident)
                        msg = ('#PROXY', (rexposed, token))
                    else:
                        msg = ('#RETURN', res)

            except AttributeError:
                if methodname is None:
                    msg = ('#TRACEBACK', format_exc())
                else:
                    try:
                        fallback_func = self.fallback_mapping[methodname]
                        result = fallback_func(self, conn, ident, obj, *args,
                                               **kwds)
                        msg = ('#RETURN', result)
                    except Exception:
                        msg = ('#TRACEBACK', format_exc())

            except EOFError:
                util.debug('got EOF -- exiting thread serving %r',
                           threading.current_thread().name)
                sys.exit(0)
            except Exception:
                msg = ('#TRACEBACK', format_exc())

            try:
                try:
                    send(msg)
                except Exception as e:
                    send(('#UNSERIALIZABLE', repr(msg)))

            except Exception as e:
                util.info('exception in thread serving %r',
                          threading.current_thread().name)
                util.info(' ... message was %r', msg)
                util.info(' ... exception was %r', e)
                conn.close()
                sys.exit(1)

        return