def test_stars_pickleable():
    """
    Verify that EPSFStars can be successfully pickled/unpickled
    for use with multiprocessing.
    """
    from multiprocessing.reduction import ForkingPickler
    from photutils.psf import EPSFStars
    # Doesn't need to actually contain anything useful
    stars = EPSFStars([1])
    # This should not blow up
    ForkingPickler.loads(ForkingPickler.dumps(stars))
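For context, the round trip above matters because multiprocessing serializes task arguments and results with ForkingPickler before handing them to worker processes. A minimal sketch of that usage, assuming photutils is installed; the roundtrip worker is illustrative and not part of the original test:

import multiprocessing
from photutils.psf import EPSFStars

def roundtrip(obj):
    # Both the argument and the return value travel through ForkingPickler.
    return obj

if __name__ == "__main__":
    stars = EPSFStars([1])
    with multiprocessing.Pool(2) as pool:
        result = pool.apply(roundtrip, (stars,))
    print(type(result))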
Example #2
def test_multiprocessing(self, device=None, dtype=None):
    """multiprocessing sharing with 'device' and 'dtype'"""
    # Relies on module-level imports in the original test suite:
    # io, MetaTensor, ForkingPickler and assert_allclose.
    buf = io.BytesIO()
    t = MetaTensor([0.0, 0.0], device=device, dtype=dtype)
    if t.is_cuda:
        # For CUDA tensors the dump is expected to raise NotImplementedError.
        with self.assertRaises(NotImplementedError):
            ForkingPickler(buf).dump(t)
        return
    ForkingPickler(buf).dump(t)
    obj = ForkingPickler.loads(buf.getvalue())
    self.assertIsInstance(obj, MetaTensor)
    assert_allclose(obj.as_tensor(), t)
Example #3
    def get(self, timeout: typing.Optional[float] = None) -> T:
        """
        Get one object from the queue, raising queue.Empty if none is available.

        Raises EOFError if the queue is exhausted and the sending half has hung up.
        """
        with self._read_lock:
            if self._reader.poll(timeout):
                recv = self._reader.recv_bytes()
            else:
                raise queue.Empty
        return ForkingPickler.loads(recv)
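For reference, a hedged sketch of what the sending half of such a queue could look like as a companion to the get() above; the _write_lock and _writer attribute names are assumptions mirroring the reader side, not code from the original project:

    def put(self, obj) -> None:
        # Serialize with ForkingPickler so the get() above can rebuild the
        # object with ForkingPickler.loads on the receiving side.
        payload = bytes(ForkingPickler.dumps(obj))
        with self._write_lock:
            self._writer.send_bytes(payload)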
Example #4
def data_received(self, data):  # I'm worried this will be slower...
    data = self._data + data
    while True:
        if len(data) < 4:
            # Not even a full length header yet; buffer and wait for more.
            self._data = data
            break
        size, = struct.unpack("!i", data[:4])
        if len(data) < 4 + size:
            # Payload incomplete (size excludes the 4-byte header); buffer it.
            self._data = data
            print('breaking')
            break
        node = ForkingPickler.loads(data[4:4 + size])  # ARE YOU SERIOUS
        if self.render_:
            # FIXME either of these cause panda related segfaults  # only on athena >_<
            # on the other hand, doesn't exit as fast on luz
            #node.reparentTo(self.collRoot)
            self.coll_add_queue.append(node)
        #self.nodes.append(node)
        self.cache[self.request_hash] = self.geom, node, self.ui
        data = data[4 + size:]
        if not data:
            self._data = data  # clear the buffer so processed bytes aren't re-read
            break
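A minimal sketch of the matching sender for this 4-byte length-prefixed framing; the function and transport names are illustrative, not taken from the original code:

import struct
from multiprocessing.reduction import ForkingPickler

def send_node(transport, node):
    # Pickle the object, then prefix it with its length packed as a
    # big-endian 4-byte integer ("!i"), matching the unpack in data_received.
    payload = bytes(ForkingPickler.dumps(node))
    transport.write(struct.pack("!i", len(payload)) + payload)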
Example #5
def get(self, block=True, timeout=None):
    if block and timeout is None:
        with self._rlock:
            res = self._recv_bytes()
        self._sem.release()
    else:
        if block:
            deadline = time.monotonic() + timeout
        if not self._rlock.acquire(block, timeout):
            raise Empty
        try:
            if block:
                timeout = deadline - time.monotonic()  # subtract the time spent acquiring the process lock _rlock
                if not self._poll(timeout):
                    raise Empty
            elif not self._poll():
                raise Empty
            res = self._recv_bytes()
            self._sem.release()
        finally:
            self._rlock.release()
    return ForkingPickler.loads(res)
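For orientation, a hedged sketch of the plumbing a get() like the one above typically sits on; the attribute names mirror multiprocessing.Queue internals, and everything here is an assumption rather than code from the original project:

import multiprocessing
from multiprocessing.reduction import ForkingPickler

class TinyQueue:
    """Illustrative only: the real class around the get() above is not shown."""
    def __init__(self, maxsize=10):
        reader, writer = multiprocessing.Pipe(duplex=False)
        self._recv_bytes = reader.recv_bytes   # read one framed message
        self._poll = reader.poll               # wait for data, with optional timeout
        self._send_bytes = writer.send_bytes
        self._rlock = multiprocessing.Lock()   # serializes concurrent readers
        self._sem = multiprocessing.BoundedSemaphore(maxsize)  # counts free slots

    def put(self, obj):
        self._sem.acquire()                    # released again by get()
        self._send_bytes(ForkingPickler.dumps(obj))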
Example #6
def get(self, block=True, timeout=None):
    if block and timeout is None:
        with self._rlock:
            res = self._recv_bytes()
        self._sem.release()
    else:
        if block:
            deadline = time.time() + timeout
        if not self._rlock.acquire(block, timeout):
            raise Empty
        try:
            if block:
                timeout = deadline - time.time()
                if timeout < 0 or not self._poll(timeout):
                    raise Empty
            elif not self._poll():
                raise Empty
            res = self._recv_bytes()
            self._sem.release()
        finally:
            self._rlock.release()
    # unserialize the data after having released the lock
    return LokyPickler.loads(res)
Example #7
def process_data(self):
    while True:
        if len(self.__data) < 4:
            # Not enough bytes for the length header yet; wait for more data.
            # (The loop= argument was removed from asyncio.sleep in Python 3.10;
            # this is legacy, pre-async/await generator-coroutine style.)
            yield from asyncio.sleep(1, loop=self.event_loop)
            continue
        size, = struct.unpack("!i", self.__data[:4])
        if len(self.__data) < 4 + size:
            # Payload incomplete (size excludes the 4-byte header).
            print('yielding')
            yield from asyncio.sleep(1, loop=self.event_loop)
            continue
        #yield self.__data[4:4+size]
        node = ForkingPickler.loads(self.__data[4:4 + size])
        self.cache[self.request_hash] = self.geom, node, self.ui
        print(node)
        if self.render_:
            # FIXME either of these cause panda related segfaults  # only on athena >_<
            # on the other hand, doesn't exit as fast on luz
            node.reparentTo(self.collRoot)
            #self.coll_add_queue.append(node)
        #self.nodes.append(node)
        self.__data = self.__data[4 + size:]
        if not self.__data:
            print('I only get here once!')
            break
Example #8
def get(self):
    with self._rlock:
        res = self._reader.recv_bytes()
    return ForkingPickler.loads(res)
Example #9
def get(self):
    with self._rlock:
        res = self._reader.recv_bytes()
    # unserialize the data after having released the lock
    return LokyPickler.loads(res)
import photutils
from multiprocessing.reduction import ForkingPickler

stars = photutils.psf.EPSFStars([1])

foo = ForkingPickler.loads(ForkingPickler.dumps(stars))
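
If the round trip succeeds, foo is a fresh EPSFStars instance rebuilt from the pickled bytes; a quick sanity check:

assert isinstance(foo, photutils.psf.EPSFStars)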