Example 1
def test_pickle_threshold():
    import numpy
    from numpy.testing.utils import assert_array_equal
    A = numpy.ones((5, 5))
    bufs = serialize_object(A, 1024)
    nt.assert_equal(len(bufs), 1)
    B, _ = deserialize_object(bufs)
    assert_array_equal(A, B)

    A = numpy.ones((512, 512))
    bufs = serialize_object(A, 1024)
    nt.assert_equal(len(bufs), 2)
    B, _ = deserialize_object(bufs)
    assert_array_equal(A, B)
Example 2
def test_pickle_threshold():
    numpy = pytest.importorskip('numpy')
    from numpy.testing.utils import assert_array_equal
    A = numpy.ones((5, 5))
    bufs = serialize_object(A, 1024)
    assert len(bufs) == 1
    B, _ = deserialize_object(bufs)
    assert_array_equal(A, B)

    A = numpy.ones((512, 512))
    bufs = serialize_object(A, 1024)
    assert len(bufs) == 2
    B, _ = deserialize_object(bufs)
    assert_array_equal(A, B)
Example 3
def test_tuple():
    tup = (lambda x:x, 1)
    bufs = serialize_object(tup)
    canned = pickle.loads(bufs[0])
    assert isinstance(canned, tuple)
    t2, r = deserialize_object(bufs)
    assert t2[0](t2[1]) == tup[0](tup[1])
Example 4
def test_list():
    lis = [lambda x:x, 1]
    bufs = serialize_object(lis)
    canned = pickle.loads(bufs[0])
    assert isinstance(canned, list)
    l2, r = deserialize_object(bufs)
    assert l2[0](l2[1]) == lis[0](lis[1])
Example 5
def test_list():
    lis = [lambda x:x, 1]
    bufs = serialize_object(lis)
    canned = pickle.loads(bufs[0])
    nt.assert_is_instance(canned, list)
    l2, r = deserialize_object(bufs)
    nt.assert_equal(l2[0](l2[1]), lis[0](lis[1]))
Example 6
def test_tuple():
    tup = (lambda x:x, 1)
    bufs = serialize_object(tup)
    canned = pickle.loads(bufs[0])
    nt.assert_is_instance(canned, tuple)
    t2, r = deserialize_object(bufs)
    nt.assert_equal(t2[0](t2[1]), tup[0](tup[1]))
Example 7
def test_roundtrip_memoryview():
    b = b'asdf' * 1025
    view = memoryview(b)
    bufs = serialize_object(view)
    nt.assert_equal(len(bufs), 2)
    v2, remainder = deserialize_object(bufs)
    nt.assert_equal(remainder, [])
    nt.assert_equal(v2.tobytes(), b)
Example 8
def test_namedtuple():
    p = point(1,2)
    bufs = serialize_object(p)
    canned = pickle.loads(bufs[0])
    nt.assert_is_instance(canned, point)
    p2, r = deserialize_object(bufs, globals())
    nt.assert_equal(p2.x, p.x)
    nt.assert_equal(p2.y, p.y)
Example 9
def test_namedtuple():
    p = point(1,2)
    bufs = serialize_object(p)
    canned = pickle.loads(bufs[0])
    assert isinstance(canned, point)
    p2, r = deserialize_object(bufs, globals())
    assert p2.x == p.x
    assert p2.y == p.y
Example 10
def test_roundtrip_memoryview():
    b = b'asdf' * 1025
    view = memoryview(b)
    bufs = serialize_object(view)
    assert len(bufs) == 2
    v2, remainder = deserialize_object(bufs)
    assert remainder == []
    assert v2.tobytes() == b
Example 11
    def serialize(self, obj):
        """serialize objects.

        Must return list of sendable buffers.

        Can be extended for more efficient/noncopying serialization of numpy arrays, etc.
        """
        return serialize_object(obj)
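
The docstring above invites a more efficient, non-copying override for numpy arrays. A minimal sketch of what that might look like, assuming numpy is available and using a pickled (dtype, shape) header; this is illustrative, not the library's actual canning machinery:

import pickle

import numpy

class NumpyAwareSerializer:
    """Sketch: ship array metadata as a pickled header and the raw
    data as a separate buffer, avoiding a copy via memoryview."""

    def serialize(self, obj):
        if isinstance(obj, numpy.ndarray) and obj.flags['C_CONTIGUOUS']:
            header = pickle.dumps((obj.dtype.str, obj.shape))
            return [header, memoryview(obj)]   # data buffer is not copied
        return [pickle.dumps(obj)]

    def deserialize(self, bufs):
        if len(bufs) == 2:
            dtype, shape = pickle.loads(bufs[0])
            return numpy.frombuffer(bufs[1], dtype=dtype).reshape(shape)
        return pickle.loads(bufs[0])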
Example 12
def test_class():
    @interactive
    class C(object):
        a=5
    bufs = serialize_object(dict(C=C))
    canned = pickle.loads(bufs[0])
    nt.assert_is_instance(canned['C'], CannedClass)
    d, r = deserialize_object(bufs)
    C2 = d['C']
    nt.assert_equal(C2.a, C.a)
Example 13
def test_class():
    @interactive
    class C(object):
        a=5
    bufs = serialize_object(dict(C=C))
    canned = pickle.loads(bufs[0])
    assert isinstance(canned['C'], CannedClass)
    d, r = deserialize_object(bufs)
    C2 = d['C']
    assert C2.a == C.a
Example 14
def test_roundtrip_buffered():
    for obj in [
        dict(a=b"x"*1025),
        b"hello"*500,
        [b"hello"*501, 1,2,3]
    ]:
        bufs = serialize_object(obj)
        assert len(bufs) == 2
        obj2, remainder = deserialize_object(bufs)
        assert remainder == []
        assert obj == obj2
Example 15
def test_roundtrip_buffered():
    for obj in [
        dict(a=b"x"*1025),
        b"hello"*500,
        [b"hello"*501, 1,2,3]
    ]:
        bufs = serialize_object(obj)
        nt.assert_equal(len(bufs), 2)
        obj2, remainder = deserialize_object(bufs)
        nt.assert_equal(remainder, [])
        nt.assert_equal(obj, obj2)
Example 16
def test_numpy():
    from numpy.testing.utils import assert_array_equal
    for shape in SHAPES:
        for dtype in DTYPES:
            A = new_array(shape, dtype=dtype)
            bufs = serialize_object(A)
            bufs = [memoryview(b) for b in bufs]
            B, r = deserialize_object(bufs)
            nt.assert_equal(r, [])
            nt.assert_equal(A.shape, B.shape)
            nt.assert_equal(A.dtype, B.dtype)
            assert_array_equal(A,B)
Example 17
def test_numpy():
    pytest.importorskip('numpy')
    from numpy.testing.utils import assert_array_equal
    for shape in SHAPES:
        for dtype in DTYPES:
            A = new_array(shape, dtype=dtype)
            bufs = serialize_object(A)
            bufs = [memoryview(b) for b in bufs]
            B, r = deserialize_object(bufs)
            assert r == []
            assert A.shape == B.shape
            assert A.dtype == B.dtype
            assert_array_equal(A,B)
Example 18
    def do_apply(self, content, bufs, msg_id, reply_metadata):
        shell = self.shell
        try:
            working = shell.user_ns

            prefix = "_"+str(msg_id).replace("-","")+"_"

            f,args,kwargs = unpack_apply_message(bufs, working, copy=False)

            fname = getattr(f, '__name__', 'f')

            fname = prefix+"f"
            argname = prefix+"args"
            kwargname = prefix+"kwargs"
            resultname = prefix+"result"

            ns = { fname : f, argname : args, kwargname : kwargs , resultname : None }
            # print ns
            working.update(ns)
            code = "%s = %s(*%s,**%s)" % (resultname, fname, argname, kwargname)
            try:
                exec(code, shell.user_global_ns, shell.user_ns)
                result = working.get(resultname)
            finally:
                for key in ns:
                    working.pop(key)

            result_buf = serialize_object(result,
                buffer_threshold=self.session.buffer_threshold,
                item_threshold=self.session.item_threshold,
            )

        except:
            # invoke IPython traceback formatting
            shell.showtraceback()
            # FIXME - fish exception info out of shell, possibly left there by
            # run_code.  We'll need to clean up this logic later.
            reply_content = {}
            if shell._reply_content is not None:
                reply_content.update(shell._reply_content)
                # reset after use
                shell._reply_content = None

            self.send_response(self.iopub_socket, u'error', reply_content,
                                ident=self._topic('error'))
            self.log.info("Exception in apply request:\n%s", '\n'.join(reply_content['traceback']))
            result_buf = []
        else:
            reply_content = {'status' : 'ok'}

        return reply_content, result_buf
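
The body above works by injecting the function, arguments, and keyword arguments into the user namespace under collision-resistant prefixed names, exec-ing a one-line assignment, and cleaning the temporaries up afterwards. A standalone sketch of the same pattern (apply_in_namespace is a hypothetical helper, not part of the kernel API):

def apply_in_namespace(ns, f, args, kwargs, msg_id='abc-123'):
    """Sketch of the do_apply namespace-injection pattern."""
    prefix = "_" + str(msg_id).replace("-", "") + "_"
    names = {prefix + "f": f, prefix + "args": args, prefix + "kwargs": kwargs}
    ns.update(names)
    code = "{0}result = {0}f(*{0}args, **{0}kwargs)".format(prefix)
    try:
        exec(code, ns)                     # runs inside the user namespace
        return ns.get(prefix + "result")   # captured before the finally runs
    finally:
        for key in list(names) + [prefix + "result"]:
            ns.pop(key, None)              # leave no temporaries behind

assert apply_in_namespace({}, max, (3, 7), {}) == 7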
Example 19
def test_numpy_in_seq():
    from numpy.testing.utils import assert_array_equal
    for shape in SHAPES:
        for dtype in DTYPES:
            A = new_array(shape, dtype=dtype)
            bufs = serialize_object((A,1,2,b'hello'))
            canned = pickle.loads(bufs[0])
            nt.assert_is_instance(canned[0], CannedArray)
            tup, r = deserialize_object(bufs)
            B = tup[0]
            nt.assert_equal(r, [])
            nt.assert_equal(A.shape, B.shape)
            nt.assert_equal(A.dtype, B.dtype)
            assert_array_equal(A,B)
Example 20
def test_numpy_in_dict():
    from numpy.testing.utils import assert_array_equal
    for shape in SHAPES:
        for dtype in DTYPES:
            A = new_array(shape, dtype=dtype)
            bufs = serialize_object(dict(a=A,b=1,c=range(20)))
            canned = pickle.loads(bufs[0])
            nt.assert_is_instance(canned['a'], CannedArray)
            d, r = deserialize_object(bufs)
            B = d['a']
            nt.assert_equal(r, [])
            nt.assert_equal(A.shape, B.shape)
            nt.assert_equal(A.dtype, B.dtype)
            assert_array_equal(A,B)
Example 21
def test_numpy_in_dict():
    pytest.importorskip('numpy')
    from numpy.testing.utils import assert_array_equal
    for shape in SHAPES:
        for dtype in DTYPES:
            A = new_array(shape, dtype=dtype)
            bufs = serialize_object(dict(a=A,b=1,c=range(20)))
            canned = pickle.loads(bufs[0])
            assert isinstance(canned['a'], CannedArray)
            d, r = deserialize_object(bufs)
            B = d['a']
            assert r == []
            assert A.shape == B.shape
            assert A.dtype == B.dtype
            assert_array_equal(A,B)
Example 22
def test_numpy_in_seq():
    pytest.importorskip('numpy')
    from numpy.testing.utils import assert_array_equal
    for shape in SHAPES:
        for dtype in DTYPES:
            A = new_array(shape, dtype=dtype)
            bufs = serialize_object((A,1,2,b'hello'))
            canned = pickle.loads(bufs[0])
            assert isinstance(canned[0], CannedArray)
            tup, r = deserialize_object(bufs)
            B = tup[0]
            assert r == []
            assert A.shape == B.shape
            assert A.dtype == B.dtype
            assert_array_equal(A,B)
Example 23
def test_recarray():
    from numpy.testing.utils import assert_array_equal
    for shape in SHAPES:
        for dtype in [
            [('f', float), ('s', '|S10')],
            [('n', int), ('s', '|S1'), ('u', 'uint32')],
        ]:
            A = new_array(shape, dtype=dtype)

            bufs = serialize_object(A)
            B, r = deserialize_object(bufs)
            nt.assert_equal(r, [])
            nt.assert_equal(A.shape, B.shape)
            nt.assert_equal(A.dtype, B.dtype)
            assert_array_equal(A,B)
Example 24
def test_class_inheritance():
    @interactive
    class C(object):
        a=5

    @interactive
    class D(C):
        b=10

    bufs = serialize_object(dict(D=D))
    canned = pickle.loads(bufs[0])
    assert isinstance(canned['D'], CannedClass)
    d, r = deserialize_object(bufs)
    D2 = d['D']
    assert D2.a == D.a
    assert D2.b == D.b
Example 25
def test_recarray():
    pytest.importorskip('numpy')
    from numpy.testing.utils import assert_array_equal
    for shape in SHAPES:
        for dtype in [
            [('f', float), ('s', '|S10')],
            [('n', int), ('s', '|S1'), ('u', 'uint32')],
        ]:
            A = new_array(shape, dtype=dtype)

            bufs = serialize_object(A)
            B, r = deserialize_object(bufs)
            assert r == []
            assert A.shape == B.shape
            assert A.dtype == B.dtype
            assert_array_equal(A,B)
Example 26
def test_class_inheritance():
    @interactive
    class C(object):
        a=5

    @interactive
    class D(C):
        b=10

    bufs = serialize_object(dict(D=D))
    canned = pickle.loads(bufs[0])
    nt.assert_is_instance(canned['D'], CannedClass)
    d, r = deserialize_object(bufs)
    D2 = d['D']
    nt.assert_equal(D2.a, D.a)
    nt.assert_equal(D2.b, D.b)
Example 27
def test_recarray():
    pytest.importorskip('numpy')
    from numpy.testing import assert_array_equal

    for shape in SHAPES:
        for dtype in [
            [('f', float), ('s', '|S10')],
            [('n', int), ('s', '|S1'), ('u', 'uint32')],
        ]:
            A = new_array(shape, dtype=dtype)

            bufs = serialize_object(A)
            B, r = deserialize_object(bufs)
            assert r == []
            assert A.shape == B.shape
            assert A.dtype == B.dtype
            assert_array_equal(A, B)
Example 28
    def publish_data(self, data):
        """publish a data_message on the IOPub channel

        Parameters
        ----------
        data : dict
            The data to be published. Think of it as a namespace.
        """
        session = self.session
        buffers = serialize_object(data,
            buffer_threshold=session.buffer_threshold,
            item_threshold=session.item_threshold,
        )
        content = json_clean(dict(keys=list(data.keys())))
        session.send(self.pub_socket, 'data_message', content=content,
            parent=self.parent_header,
            buffers=buffers,
            ident=self.topic,
        )
Example 29
    def publish_data(self, data):
        """publish a data_message on the IOPub channel

        Parameters
        ----------
        data : dict
            The data to be published. Think of it as a namespace.
        """
        session = self.session
        buffers = serialize_object(
            data,
            buffer_threshold=session.buffer_threshold,
            item_threshold=session.item_threshold,
        )
        content = json_clean(dict(keys=list(data.keys())))
        session.send(
            self.pub_socket,
            'data_message',
            content=content,
            parent=self.parent_header,
            buffers=buffers,
            ident=self.topic,
        )
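
From user code running on an engine, the same mechanism is reachable through a module-level helper; a hedged usage sketch, assuming a recent ipyparallel where the helper lives at ipyparallel.datapub:

from ipyparallel.datapub import publish_data  # assumed import path

# inside code running on an engine: each value is serialized with
# serialize_object and travels as buffers on the IOPub data_message
publish_data({'iteration': 5, 'partial_sum': 42.0})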
Example 30
    def worker_watchdog(self, kill_event):
        """ Listens on the pending_result_queue and sends out results via 0mq

        Parameters:
        -----------
        kill_event : threading.Event
              Event to let the thread know when it is time to die.
        """

        logger.debug("[WORKER_WATCHDOG_THREAD] Starting thread")

        while not kill_event.is_set():
            for worker_id, p in self.procs.items():
                if not p.is_alive():
                    logger.info("[WORKER_WATCHDOG_THREAD] Worker {} has died".format(worker_id))
                    try:
                        task = self._tasks_in_progress.pop(worker_id)
                        logger.info("[WORKER_WATCHDOG_THREAD] Worker {} was busy when it died".format(worker_id))
                        try:
                            raise WorkerLost(worker_id, platform.node())
                        except Exception:
                            logger.info("[WORKER_WATCHDOG_THREAD] Putting exception for task {} in the pending result queue".format(task['task_id']))
                            result_package = {'task_id': task['task_id'], 'exception': serialize_object(RemoteExceptionWrapper(*sys.exc_info()))}
                            pkl_package = pickle.dumps(result_package)
                            self.pending_result_queue.put(pkl_package)
                    except KeyError:
                        logger.info("[WORKER_WATCHDOG_THREAD] Worker {} was not busy when it died".format(worker_id))

                    p = multiprocessing.Process(target=worker, args=(worker_id,
                                                                     self.uid,
                                                                     self.worker_count,
                                                                     self.pending_task_queue,
                                                                     self.pending_result_queue,
                                                                     self.ready_worker_queue,
                                                                     self._tasks_in_progress
                                                                 ), name="HTEX-Worker-{}".format(worker_id))
                    self.procs[worker_id] = p
                    logger.info("[WORKER_WATCHDOG_THREAD] Worker {} has been restarted".format(worker_id))
                time.sleep(self.poll_period)

        logger.critical("[WORKER_WATCHDOG_THREAD] Exiting")
Example 31
def worker(worker_id, pool_id, pool_size, task_queue, result_queue,
           worker_queue, tasks_in_progress):
    """

    Put request token into queue
    Get task from task_queue
    Pop request from queue
    Put result into result_queue
    """
    start_file_logger('{}/block-{}/{}/worker_{}.log'.format(
        args.logdir, args.block_id, pool_id, worker_id),
                      worker_id,
                      name="worker_log",
                      level=logging.DEBUG if args.debug else logging.INFO)

    # Store worker ID as an environment variable
    os.environ['PARSL_WORKER_RANK'] = str(worker_id)
    os.environ['PARSL_WORKER_COUNT'] = str(pool_size)
    os.environ['PARSL_WORKER_POOL_ID'] = str(pool_id)

    # Sync worker with master
    logger.info('Worker {} started'.format(worker_id))
    if args.debug:
        logger.debug("Debug logging enabled")

    while True:
        worker_queue.put(worker_id)

        # The worker will receive {'task_id':<tid>, 'buffer':<buf>}
        req = task_queue.get()
        tasks_in_progress[worker_id] = req
        tid = req['task_id']
        logger.info("Received task {}".format(tid))

        try:
            worker_queue.get()
        except queue.Empty:
            logger.warning(
                "Worker ID: {} failed to remove itself from ready_worker_queue"
                .format(worker_id))

        try:
            result = execute_task(req['buffer'])
            serialized_result = serialize_object(result)
        except Exception as e:
            logger.info('Caught an exception: {}'.format(e))
            result_package = {
                'task_id':
                tid,
                'exception':
                serialize_object(RemoteExceptionWrapper(*sys.exc_info()))
            }
        else:
            result_package = {'task_id': tid, 'result': serialized_result}
            # logger.debug("Result: {}".format(result))

        logger.info("Completed task {}".format(tid))
        pkl_package = pickle.dumps(result_package)

        result_queue.put(pkl_package)
        tasks_in_progress.pop(worker_id)
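
On the consuming side, the pickled result_package is unwrapped in reverse. A sketch of how a receiver might do it, assuming the ipyparallel-derived helpers are importable from ipyparallel.serialize and that RemoteExceptionWrapper exposes a reraise() method:

import pickle

from ipyparallel.serialize import deserialize_object  # assumed import path

def unwrap_result(pkl_package):
    """Sketch: invert the worker's result_package framing."""
    package = pickle.loads(pkl_package)
    if 'exception' in package:
        wrapper, _ = deserialize_object(package['exception'])
        wrapper.reraise()  # assumed API: re-raises the remote exception
    result, _ = deserialize_object(package['result'])
    return package['task_id'], result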
Example 32
    def do_apply(self, content, bufs, msg_id, reply_metadata):
        shell = self.shell
        try:
            working = shell.user_ns

            prefix = "_" + str(msg_id).replace("-", "") + "_"

            f, args, kwargs = unpack_apply_message(bufs, working, copy=False)

            fname = getattr(f, '__name__', 'f')

            fname = prefix + "f"
            argname = prefix + "args"
            kwargname = prefix + "kwargs"
            resultname = prefix + "result"

            ns = {fname: f, argname: args, kwargname: kwargs, resultname: None}
            # print ns
            working.update(ns)
            code = f"{resultname} = {fname}(*{argname},**{kwargname})"
            try:
                exec(code, shell.user_global_ns, shell.user_ns)
                result = working.get(resultname)
            finally:
                for key in ns:
                    working.pop(key)

            result_buf = serialize_object(
                result,
                buffer_threshold=self.session.buffer_threshold,
                item_threshold=self.session.item_threshold,
            )

        except BaseException as e:
            # invoke IPython traceback formatting
            # this sends the 'error' message
            shell.showtraceback()

            try:
                str_evalue = str(e)
            except Exception as str_error:
                str_evalue = f"Failed to cast exception to string: {str_error}"
            reply_content = {
                'traceback': [],
                'ename': str(type(e).__name__),
                'evalue': str_evalue,
            }
            # get formatted traceback, which ipykernel recorded
            if hasattr(shell, '_last_traceback'):
                # ipykernel 4.4
                reply_content['traceback'] = shell._last_traceback or []
            else:
                self.log.warning("Didn't find a traceback where I expected to")
            shell._last_traceback = None
            e_info = dict(engine_uuid=self.ident, engine_id=self.int_id, method='apply')
            reply_content['engine_info'] = e_info

            self.log.info(
                "Exception in apply request:\n%s", '\n'.join(reply_content['traceback'])
            )
            result_buf = []
            reply_content['status'] = 'error'
        else:
            reply_content = {'status': 'ok'}

        return reply_content, result_buf
Example 33
    def do_apply(self, content, bufs, msg_id, reply_metadata):
        shell = self.shell
        try:
            working = shell.user_ns

            prefix = "_" + str(msg_id).replace("-", "") + "_"

            f, args, kwargs = unpack_apply_message(bufs, working, copy=False)

            fname = getattr(f, '__name__', 'f')

            fname = prefix + "f"
            argname = prefix + "args"
            kwargname = prefix + "kwargs"
            resultname = prefix + "result"

            ns = {fname: f, argname: args, kwargname: kwargs, resultname: None}
            # print ns
            working.update(ns)
            code = "%s = %s(*%s,**%s)" % (resultname, fname, argname,
                                          kwargname)
            try:
                exec(code, shell.user_global_ns, shell.user_ns)
                result = working.get(resultname)
            finally:
                for key in ns:
                    working.pop(key)

            result_buf = serialize_object(
                result,
                buffer_threshold=self.session.buffer_threshold,
                item_threshold=self.session.item_threshold,
            )

        except BaseException as e:
            # invoke IPython traceback formatting
            shell.showtraceback()
            reply_content = {
                'traceback': [],
                'ename': unicode_type(type(e).__name__),
                'evalue': safe_unicode(e),
            }
            # get formatted traceback, which ipykernel recorded
            if hasattr(shell, '_last_traceback'):
                # ipykernel 4.4
                reply_content['traceback'] = shell._last_traceback or []
            elif hasattr(shell, '_reply_content'):
                # ipykernel <= 4.3
                if shell._reply_content and 'traceback' in shell._reply_content:
                    reply_content['traceback'] = shell._reply_content[
                        'traceback']
            else:
                self.log.warning("Didn't find a traceback where I expected to")
            shell._last_traceback = None
            e_info = dict(engine_uuid=self.ident,
                          engine_id=self.int_id,
                          method='apply')
            reply_content['engine_info'] = e_info

            self.send_response(self.iopub_socket,
                               u'error',
                               reply_content,
                               ident=self._topic('error'))
            self.log.info("Exception in apply request:\n%s",
                          '\n'.join(reply_content['traceback']))
            result_buf = []
            reply_content['status'] = 'error'
        else:
            reply_content = {'status': 'ok'}

        return reply_content, result_buf
Example 34
def runner(incoming_q, outgoing_q):
    ''' This is a function that mocks the Swift-T side. It listens on the
    incoming_q for tasks and posts returns on the outgoing_q

    Args:
         - incoming_q (Queue object) : The queue to listen on
         - outgoing_q (Queue object) : Queue to post results on

    The messages posted on the incoming_q will be of the form :

    .. code:: python

       {
          "task_id" : <uuid.uuid4 string>,
          "buffer"  : serialized buffer containing the fn, args and kwargs
       }

    If ``None`` is received, the runner will exit.

    Response messages should be of the form:

    .. code:: python

       {
          "task_id" : <uuid.uuid4 string>,
          "result"  : serialized buffer containing result
          "exception" : serialized exception object
       }

    On exiting, the runner will post ``None`` to the outgoing_q

    '''
    logger.debug("[RUNNER] Starting")

    def execute_task(bufs):
        ''' Deserialize the buf, and execute the task.
        Returns the serialized result/exception
        '''
        all_names = dir(__builtins__)
        user_ns = locals()
        user_ns.update(
            {'__builtins__': {k: getattr(__builtins__, k)
                              for k in all_names}})

        f, args, kwargs = unpack_apply_message(bufs, user_ns, copy=False)

        fname = getattr(f, '__name__', 'f')
        prefix = "parsl_"
        fname = prefix + "f"
        argname = prefix + "args"
        kwargname = prefix + "kwargs"
        resultname = prefix + "result"

        user_ns.update({
            fname: f,
            argname: args,
            kwargname: kwargs,
            resultname: resultname
        })

        code = "{0} = {1}(*{2}, **{3})".format(resultname, fname, argname,
                                               kwargname)

        try:

            print("[RUNNER] Executing : {0}".format(code))
            exec(code, user_ns, user_ns)

        except Exception as e:
            logger.warning("Caught an error but will not handle it: %s", e)
            raise

        else:
            # print("Done : {0}".format(locals()))
            print("[RUNNER] Result    : {0}".format(user_ns.get(resultname)))
            return user_ns.get(resultname)

    while True:
        try:
            # Blocking wait on the queue
            msg = incoming_q.get(block=True, timeout=10)
            # logger.debug("[RUNNER] Got message : %s", msg)

        except queue.Empty:
            # Handle case where no items were on queue
            logger.debug("[RUNNER] got nothing")

        except IOError as ioerror:
            logger.debug("[RUNNER] broken pipe, error: %s", ioerror)
            try:
                # Attempt to send a stop notification to the management thread
                outgoing_q.put(None)

            except Exception:
                pass

            break

        except Exception as e:
            logger.debug("[RUNNER] caught unknown exception : %s", e)

        else:
            # Handle received message
            if not msg:
                # Empty message is a die request
                logger.debug("[RUNNER] Received exit request")
                outgoing_q.put(None)
                break
            else:
                # Received a valid message, handle it
                logger.debug("[RUNNER] Got a valid task : %s", msg["task_id"])
                try:
                    response_obj = execute_task(msg['buffer'])
                    response = {
                        "task_id": msg["task_id"],
                        "result": serialize_object(response_obj)
                    }

                    logger.warning("[RUNNER] Returing result : %s",
                                   deserialize_object(response["result"]))

                except Exception as e:
                    logger.debug("[RUNNER] Caught task exception")
                    response = {
                        "task_id": msg["task_id"],
                        "exception": serialize_object(e)
                    }

                outgoing_q.put(response)

    logger.debug("[RUNNER] Terminating")
Example 35
    fname = prefix + "f"
    argname = prefix + "args"
    kwargname = prefix + "kwargs"
    resultname = prefix + "result"

    user_ns.update({fname: f,
                    argname: args,
                    kwargname: kwargs,
                    resultname: resultname})

    code = "{0} = {1}(*{2}, **{3})".format(resultname, fname,
                                           argname, kwargname)

    try:
        exec(code, user_ns, user_ns)
    except Exception as e:
        print(e)
        exec_info = sys.exc_info()
        result_package = {"failure": True, "result": serialize_object(exec_info)}
    else:
        result = user_ns.get(resultname)
        result_package = {"failure": False, "result": serialize_object(result)}
    try:
        with open(output_result_file, "wb") as f:
            pickle.dump(result_package, f)
        exit(0)
    except Exception as e:
        print(e)
        exit(4)
Example 36
    def start(self, poll_period=None):
        """ Start the NeedNameQeueu

        Parameters:
        ----------

        TODO: Move task receiving to a thread
        """
        logger.info("Incoming ports bound")

        if poll_period is None:
            poll_period = self.poll_period

        start = time.time()
        count = 0

        self._kill_event = threading.Event()
        self._task_puller_thread = threading.Thread(
            target=self.migrate_tasks_to_internal,
            args=(self._kill_event, ),
            name="Interchange-Task-Puller")
        self._task_puller_thread.start()

        self._command_thread = threading.Thread(target=self._command_server,
                                                args=(self._kill_event, ),
                                                name="Interchange-Command")
        self._command_thread.start()

        poller = zmq.Poller()
        # poller.register(self.task_incoming, zmq.POLLIN)
        poller.register(self.task_outgoing, zmq.POLLIN)
        poller.register(self.results_incoming, zmq.POLLIN)

        # These are managers which we should examine in an iteration
        # for scheduling a job (or maybe any other attention?).
        # Anything altering the state of the manager should add it
        # onto this list.
        interesting_managers = set()

        while not self._kill_event.is_set():
            self.socks = dict(poller.poll(timeout=poll_period))

            # Listen for requests for work
            if self.task_outgoing in self.socks and self.socks[
                    self.task_outgoing] == zmq.POLLIN:
                logger.debug("[MAIN] starting task_outgoing section")
                message = self.task_outgoing.recv_multipart()
                manager = message[0]

                if manager not in self._ready_manager_queue:
                    reg_flag = False

                    try:
                        msg = json.loads(message[1].decode('utf-8'))
                        msg['reg_time'] = datetime.datetime.strptime(
                            msg['reg_time'], "%Y-%m-%d %H:%M:%S")
                        reg_flag = True
                    except Exception:
                        logger.warning(
                            "[MAIN] Got Exception reading registration message from manager: {}"
                            .format(manager),
                            exc_info=True)
                        logger.debug("[MAIN] Message :\n{}\n".format(
                            message[0]))

                    # By default we set up to ignore bad nodes/registration messages.
                    self._ready_manager_queue[manager] = {
                        'last': time.time(),
                        'free_capacity': 0,
                        'block_id': None,
                        'max_capacity': 0,
                        'worker_count': 0,
                        'active': True,
                        'tasks': []
                    }
                    if reg_flag is True:
                        interesting_managers.add(manager)
                        logger.info(
                            "[MAIN] Adding manager: {} to ready queue".format(
                                manager))
                        self._ready_manager_queue[manager].update(msg)
                        logger.info(
                            "[MAIN] Registration info for manager {}: {}".
                            format(manager, msg))
                        if self.monitoring_enabled:
                            logger.info("Sending message {} to hub".format(
                                self._ready_manager_queue[manager]))
                            self.hub_channel.send_pyobj(
                                (MessageType.NODE_INFO,
                                 self._ready_manager_queue[manager]))

                        if (msg['python_v'].rsplit(".", 1)[0] !=
                                self.current_platform['python_v'].rsplit(
                                    ".", 1)[0] or msg['parsl_v'] !=
                                self.current_platform['parsl_v']):
                            logger.warning(
                                "[MAIN] Manager {} has incompatible version info with the interchange"
                                .format(manager))

                            if self.suppress_failure is False:
                                logger.debug("Setting kill event")
                                self._kill_event.set()
                                e = ManagerLost(
                                    manager, self._ready_manager_queue[manager]
                                    ['hostname'])
                                result_package = {
                                    'task_id': -1,
                                    'exception': serialize_object(e)
                                }
                                pkl_package = pickle.dumps(result_package)
                                self.results_outgoing.send(pkl_package)
                                logger.warning(
                                    "[MAIN] Sent failure reports, unregistering manager"
                                )
                            else:
                                logger.debug(
                                    "[MAIN] Suppressing shutdown due to version incompatibility"
                                )
                        else:
                            logger.info(
                                "[MAIN] Manager {} has compatible Parsl version {}"
                                .format(manager, msg['parsl_v']))
                            logger.info(
                                "[MAIN] Manager {} has compatible Python version {}"
                                .format(manager,
                                        msg['python_v'].rsplit(".", 1)[0]))
                    else:
                        # Registration has failed.
                        if self.suppress_failure is False:
                            self._kill_event.set()
                            e = BadRegistration(manager, critical=True)
                            result_package = {
                                'task_id': -1,
                                'exception': serialize_object(e)
                            }
                            pkl_package = pickle.dumps(result_package)
                            self.results_outgoing.send(pkl_package)
                        else:
                            logger.debug(
                                "[MAIN] Suppressing bad registration from manager:{}"
                                .format(manager))

                else:
                    tasks_requested = int.from_bytes(message[1], "little")
                    self._ready_manager_queue[manager]['last'] = time.time()
                    if tasks_requested == HEARTBEAT_CODE:
                        logger.debug(
                            "[MAIN] Manager {} sent heartbeat".format(manager))
                        self.task_outgoing.send_multipart(
                            [manager, b'', PKL_HEARTBEAT_CODE])
                    else:
                        logger.debug(
                            "[MAIN] Manager {} requested {} tasks".format(
                                manager, tasks_requested))
                        self._ready_manager_queue[manager][
                            'free_capacity'] = tasks_requested
                        interesting_managers.add(manager)
                logger.debug("[MAIN] leaving task_outgoing section")

            # If we had received any requests, check if there are tasks that could be passed

            logger.debug("Managers count (total/interesting): {}/{}".format(
                len(self._ready_manager_queue), len(interesting_managers)))

            if interesting_managers and not self.pending_task_queue.empty():
                shuffled_managers = list(interesting_managers)
                random.shuffle(shuffled_managers)

                # cf. the if statement above...
                while shuffled_managers and not self.pending_task_queue.empty():
                    manager = shuffled_managers.pop()
                    tasks_inflight = len(
                        self._ready_manager_queue[manager]['tasks'])
                    real_capacity = min(
                        self._ready_manager_queue[manager]['free_capacity'],
                        self._ready_manager_queue[manager]['max_capacity'] -
                        tasks_inflight)

                    if (real_capacity
                            and self._ready_manager_queue[manager]['active']):
                        tasks = self.get_tasks(real_capacity)
                        if tasks:
                            self.task_outgoing.send_multipart(
                                [manager, b'',
                                 pickle.dumps(tasks)])
                            task_count = len(tasks)
                            count += task_count
                            tids = [t['task_id'] for t in tasks]
                            self._ready_manager_queue[manager][
                                'free_capacity'] -= task_count
                            self._ready_manager_queue[manager]['tasks'].extend(
                                tids)
                            logger.debug(
                                "[MAIN] Sent tasks: {} to manager {}".format(
                                    tids, manager))
                            if self._ready_manager_queue[manager][
                                    'free_capacity'] > 0:
                                logger.debug(
                                    "[MAIN] Manager {} has free_capacity {}".
                                    format(
                                        manager,
                                        self._ready_manager_queue[manager]
                                        ['free_capacity']))
                                # ... so keep it in the interesting_managers list
                            else:
                                logger.debug(
                                    "[MAIN] Manager {} is now saturated".
                                    format(manager))
                                interesting_managers.remove(manager)
                    else:
                        interesting_managers.remove(manager)
                        # logger.debug("Nothing to send to manager {}".format(manager))
                logger.debug(
                    "[MAIN] leaving _ready_manager_queue section, with {} managers still interesting"
                    .format(len(interesting_managers)))
            else:
                logger.debug(
                    "[MAIN] either no interesting managers or no tasks, so skipping manager pass"
                )
            # Receive any results and forward to client
            if self.results_incoming in self.socks and self.socks[
                    self.results_incoming] == zmq.POLLIN:
                logger.debug("[MAIN] entering results_incoming section")
                manager, *b_messages = self.results_incoming.recv_multipart()
                if manager not in self._ready_manager_queue:
                    logger.warning(
                        "[MAIN] Received a result from a un-registered manager: {}"
                        .format(manager))
                else:
                    logger.debug("[MAIN] Got {} result items in batch".format(
                        len(b_messages)))
                    for b_message in b_messages:
                        r = pickle.loads(b_message)
                        # logger.debug("[MAIN] Received result for task {} from {}".format(r['task_id'], manager))
                        self._ready_manager_queue[manager]['tasks'].remove(
                            r['task_id'])
                    self.results_outgoing.send_multipart(b_messages)
                    logger.debug("[MAIN] Current tasks: {}".format(
                        self._ready_manager_queue[manager]['tasks']))
                logger.debug("[MAIN] leaving results_incoming section")

            bad_managers = [
                manager for manager in self._ready_manager_queue
                if time.time() - self._ready_manager_queue[manager]['last'] >
                self.heartbeat_threshold
            ]
            for manager in bad_managers:
                logger.debug("[MAIN] Last: {} Current: {}".format(
                    self._ready_manager_queue[manager]['last'], time.time()))
                logger.warning(
                    "[MAIN] Too many heartbeats missed for manager {}".format(
                        manager))

                for tid in self._ready_manager_queue[manager]['tasks']:
                    try:
                        raise ManagerLost(
                            manager,
                            self._ready_manager_queue[manager]['hostname'])
                    except Exception:
                        result_package = {
                            'task_id':
                            tid,
                            'exception':
                            serialize_object(
                                RemoteExceptionWrapper(*sys.exc_info()))
                        }
                        pkl_package = pickle.dumps(result_package)
                        self.results_outgoing.send(pkl_package)
                        logger.warning(
                            "[MAIN] Sent failure reports, unregistering manager"
                        )
                self._ready_manager_queue.pop(manager, None)
                if manager in interesting_managers:
                    interesting_managers.remove(manager)

        delta = time.time() - start
        logger.info("Processed {} tasks in {} seconds".format(count, delta))
        logger.warning("Exiting")
Example 37
    def start(self, poll_period=1):
        """ Start the NeedNameQeueu

        Parameters:
        ----------

        poll_period : int
              Poll period in milliseconds

        TODO: Move task receiving to a thread
        """
        logger.info("Incoming ports bound")

        start = time.time()
        count = 0

        self._kill_event = threading.Event()
        self._task_puller_thread = threading.Thread(
            target=self.migrate_tasks_to_internal, args=(self._kill_event, ))
        self._task_puller_thread.start()

        self._command_thread = threading.Thread(target=self._command_server,
                                                args=(self._kill_event, ))
        self._command_thread.start()

        poller = zmq.Poller()
        # poller.register(self.task_incoming, zmq.POLLIN)
        poller.register(self.task_outgoing, zmq.POLLIN)
        poller.register(self.results_incoming, zmq.POLLIN)

        while not self._kill_event.is_set():
            self.socks = dict(poller.poll(timeout=poll_period))

            # Listen for requests for work
            if self.task_outgoing in self.socks and self.socks[
                    self.task_outgoing] == zmq.POLLIN:
                message = self.task_outgoing.recv_multipart()
                manager = message[0]

                if manager not in self._ready_manager_queue:
                    msg = json.loads(message[1].decode('utf-8'))
                    logger.info(
                        "[MAIN] Adding manager: {} to ready queue".format(
                            manager))
                    self._ready_manager_queue[manager] = {
                        'last': time.time(),
                        'free_capacity': 0,
                        'active': True,
                        'tasks': []
                    }
                    self._ready_manager_queue[manager].update(msg)
                    logger.info("Registration info for manager {}: {}".format(
                        manager, msg))
                    if (msg['python_v'] != self.current_platform['python_v']
                            or msg['parsl_v'] !=
                            self.current_platform['parsl_v']):
                        logger.warning(
                            "Manager {} has incompatible version info with the interchange"
                            .format(manager))
                        logger.debug("Setting kill event")
                        self._kill_event.set()
                        e = ManagerLost(manager)
                        result_package = {
                            'task_id': -1,
                            'exception': serialize_object(e)
                        }
                        pkl_package = pickle.dumps(result_package)
                        self.results_outgoing.send(pkl_package)
                        logger.warning(
                            "[MAIN] Sent failure reports, unregistering manager"
                        )

                else:
                    tasks_requested = int.from_bytes(message[1], "little")
                    logger.debug("[MAIN] Manager {} requested {} tasks".format(
                        manager, tasks_requested))
                    self._ready_manager_queue[manager]['last'] = time.time()
                    if tasks_requested == HEARTBEAT_CODE:
                        logger.debug(
                            "[MAIN] Manager {} sends heartbeat".format(
                                manager))
                        self.task_outgoing.send_multipart(
                            [manager, b'', PKL_HEARTBEAT_CODE])
                    else:
                        self._ready_manager_queue[manager][
                            'free_capacity'] = tasks_requested

            # If we had received any requests, check if there are tasks that could be passed
            logger.debug("Managers: {}".format(self._ready_manager_queue))
            if self._ready_manager_queue:
                shuffled_managers = list(self._ready_manager_queue.keys())
                random.shuffle(shuffled_managers)
                logger.debug("Shuffled : {}".format(shuffled_managers))
                # for manager in self._ready_manager_queue:
                for manager in shuffled_managers:
                    if (self._ready_manager_queue[manager]['free_capacity']
                            and self._ready_manager_queue[manager]['active']):
                        tasks = self.get_tasks(
                            self._ready_manager_queue[manager]
                            ['free_capacity'])
                        if tasks:
                            self.task_outgoing.send_multipart(
                                [manager, b'',
                                 pickle.dumps(tasks)])
                            task_count = len(tasks)
                            count += task_count
                            tids = [t['task_id'] for t in tasks]
                            logger.debug("[MAIN] Sent tasks: {} to {}".format(
                                tids, manager))
                            self._ready_manager_queue[manager][
                                'free_capacity'] -= task_count
                            self._ready_manager_queue[manager]['tasks'].extend(
                                tids)
                    else:
                        logger.debug("Nothing to send")

            # Receive any results and forward to client
            if self.results_incoming in self.socks and self.socks[
                    self.results_incoming] == zmq.POLLIN:
                manager, *b_messages = self.results_incoming.recv_multipart()
                if manager not in self._ready_manager_queue:
                    logger.warning(
                        "[MAIN] Received a result from a un-registered manager: {}"
                        .format(manager))
                else:
                    logger.debug("[MAIN] Got {} result items in batch".format(
                        len(b_messages)))
                    for b_message in b_messages:
                        r = pickle.loads(b_message)
                        # logger.debug("[MAIN] Received result for task {} from {}".format(r['task_id'], manager))
                        self._ready_manager_queue[manager]['tasks'].remove(
                            r['task_id'])
                    self.results_outgoing.send_multipart(b_messages)
                    logger.debug("[MAIN] Current tasks: {}".format(
                        self._ready_manager_queue[manager]['tasks']))

            bad_managers = [
                manager for manager in self._ready_manager_queue
                if time.time() - self._ready_manager_queue[manager]['last'] >
                self.heartbeat_threshold
            ]
            for manager in bad_managers:
                logger.debug("[MAIN] Last: {} Current: {}".format(
                    self._ready_manager_queue[manager]['last'], time.time()))
                logger.warning(
                    "[MAIN] Too many heartbeats missed for manager {}".format(
                        manager))
                e = ManagerLost(manager)
                for tid in self._ready_manager_queue[manager]['tasks']:
                    result_package = {
                        'task_id': tid,
                        'exception': serialize_object(e)
                    }
                    pkl_package = pickle.dumps(result_package)
                    self.results_outgoing.send(pkl_package)
                    logger.warning(
                        "[MAIN] Sent failure reports, unregistering manager")
                self._ready_manager_queue.pop(manager, None)

        delta = time.time() - start
        logger.info("Processed {} tasks in {} seconds".format(count, delta))
        logger.warning("Exiting")
Example 38
    def do_apply(self, content, bufs, msg_id, reply_metadata):
        shell = self.shell
        try:
            working = shell.user_ns

            prefix = "_"+str(msg_id).replace("-","")+"_"

            f,args,kwargs = unpack_apply_message(bufs, working, copy=False)

            fname = getattr(f, '__name__', 'f')

            fname = prefix+"f"
            argname = prefix+"args"
            kwargname = prefix+"kwargs"
            resultname = prefix+"result"

            ns = { fname : f, argname : args, kwargname : kwargs , resultname : None }
            # print ns
            working.update(ns)
            code = "%s = %s(*%s,**%s)" % (resultname, fname, argname, kwargname)
            try:
                exec(code, shell.user_global_ns, shell.user_ns)
                result = working.get(resultname)
            finally:
                for key in ns:
                    working.pop(key)

            result_buf = serialize_object(result,
                buffer_threshold=self.session.buffer_threshold,
                item_threshold=self.session.item_threshold,
            )

        except BaseException as e:
            # invoke IPython traceback formatting
            shell.showtraceback()
            reply_content = {
                'traceback': [],
                'ename': unicode_type(type(e).__name__),
                'evalue': safe_unicode(e),
            }
            # get formatted traceback, which ipykernel recorded
            if hasattr(shell, '_last_traceback'):
                # ipykernel 4.4
                reply_content['traceback'] = shell._last_traceback or []
            elif hasattr(shell, '_reply_content'):
                # ipykernel <= 4.3
                if shell._reply_content and 'traceback' in shell._reply_content:
                    reply_content['traceback'] = shell._reply_content['traceback']
            else:
                self.log.warning("Didn't find a traceback where I expected to")
            shell._last_traceback = None
            e_info = dict(engine_uuid=self.ident, engine_id=self.int_id, method='apply')
            reply_content['engine_info'] = e_info

            self.send_response(self.iopub_socket, u'error', reply_content,
                                ident=self._topic('error'))
            self.log.info("Exception in apply request:\n%s", '\n'.join(reply_content['traceback']))
            result_buf = []
            reply_content['status'] = 'error'
        else:
            reply_content = {'status' : 'ok'}

        return reply_content, result_buf
Example 39
def roundtrip(obj):
    """roundtrip an object through serialization"""
    bufs = serialize_object(obj)
    obj2, remainder = deserialize_object(bufs)
    assert remainder == []
    return obj2
Example 40
def roundtrip(obj):
    """roundtrip an object through serialization"""
    bufs = serialize_object(obj)
    obj2, remainder = deserialize_object(bufs)
    nt.assert_equal(remainder, [])
    return obj2
Example 41
def roundtrip(obj):
    """roundtrip an object through serialization"""
    bufs = serialize_object(obj)
    obj2, remainder = deserialize_object(bufs)
    assert remainder == []
    return obj2
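
A sketch of how such a helper is typically exercised, pairing it with simple objects (this mirrors the buffered roundtrip tests above):

def test_roundtrip_simple():
    for obj in [
        'hello',
        dict(a='b', b=10),
        [1, 2, 'hi'],
        (b'123', 'hello'),
    ]:
        obj2 = roundtrip(obj)
        assert obj == obj2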