Ejemplo n.º 1
0
 def __init__(self):
     """Attach to the running IPython kernel and work around the old
     ipykernel (< 5.0) API so shell messages can be fetched from child
     threads.
     """
     from IPython import get_ipython
     # Kernel instance driving the current interactive session.
     self.__kernel = get_ipython().kernel
     # ipykernel < 5.0 uses the legacy message-handling API.
     self.__old_api = version(ipykernel.__version__) < version("5.0")
     if self.__old_api:
         logging.warning(
             "Pre/post kernel handler hooks must be disable for the "
             "old ipykernel API to enable fetching shell messages "
             "from child threads.")
         # Neutralize the pre/post handler hooks: with the old API they
         # interfere with reading shell messages from other threads.
         self.__kernel.post_handler_hook = lambda: None
         self.__kernel.pre_handler_hook = lambda: None
     # Queue size observed at the previous poll (starts at 0).
     self.qsize_old = 0
Ejemplo n.º 2
0
Archivo: base.py Proyecto: maydewd/stoq
 def max_version(self):
     """Check the installed stoq version against this plugin's declared
     maximum; always passes when no maximum is declared.
     """
     if not self.max_stoq_version:
         return True
     return version(__version__) < version(self.max_stoq_version)
Ejemplo n.º 3
0
Archivo: base.py Proyecto: maydewd/stoq
 def min_version(self):
     """Check the installed stoq version against this plugin's declared
     minimum; always passes when no minimum is declared.
     """
     if not self.min_stoq_version:
         return True
     return version(__version__) >= version(self.min_stoq_version)
Ejemplo n.º 4
0
        "executing 'jupyter notebook'.")

if interactive_mode():
    # Google Colab uses an old version of ipykernel (4.10), which is not
    # compatible with >= 5.0. The new API is more flexible: every incoming
    # message is first added to a priority queue while waiting to be
    # processed, so it is possible to process a subset of the messages
    # without altering the others. That is not possible with the old API,
    # where every incoming message must be processed immediately after
    # flushing, or discarded. Emulating or restoring the queue would be
    # possible in theory, but it is tricky to do properly, so instead every
    # message is processed without distinction.
    import ipykernel
    from pkg_resources import parse_version as version
    if version(ipykernel.__version__) >= version("5.0"):
        import tornado.gen
        from ipykernel.kernelbase import SHELL_PRIORITY
    else:
        logging.warning(
            "Old ipykernel version < 5.0 detected. Please do not schedule "
            "other cells for execution while the viewer is busy otherwise "
            "it will be not executed properly.\nUpdate to a newer version "
            "if possible to avoid such limitation.")

    class CommProcessor:
        """Re-implementation of ipykernel.kernelbase.do_one_iteration to only
        handle comm messages on the spot, and put back in the stack the other
        ones.

        Calling 'do_one_iteration' messes up with kernel `msg_queue`. Some
Ejemplo n.º 5
0
def npToTuple(M):
    """Convert a numpy vector to a flat tuple.

    Handles 1-D arrays as well as single-row and single-column 2-D
    matrices; any other 2-D matrix is delegated to npToTTuple.
    """
    if M.ndim == 1:
        return tuple(M.tolist())
    rows, cols = M.shape[0], M.shape[1]
    if rows == 1:
        # Row vector: take the single nested row.
        return tuple(M.tolist()[0])
    if cols == 1:
        # Column vector: transpose first, then take the single row.
        return tuple(M.T.tolist()[0])
    # General matrix: tuple of tuples.
    return npToTTuple(M)


# Monkey-patch pinocchio's rpy submodule with the converter defined above.
pin.rpy.npToTuple = npToTuple

# `__version__` attribute exists since 2.1.1, but not properly maintained (2.4.0 and 2.4.1 are missing it...).
# On the contrary, `printVersion` has always been available and maintained.
if version(pin.printVersion()) < version("2.3.0"):

    def rotate(axis, ang):
        """Rotation matrix about a world axis.

        E.g. T = rotate('x', pi / 4) rotates pi/4 rad about the x axis.

        :param axis: one of 'x', 'y' or 'z'.
        :param ang: rotation angle, in radians.
        :return: the 3x3 rotation matrix (numpy matrix).
        """
        axis_index = {'x': 0, 'y': 1, 'z': 2}[axis]
        direction = np.zeros((3, ), dtype=np.float64)
        direction[axis_index] = 1.0
        return np.asmatrix(pin.AngleAxis(ang, direction).matrix())

    def rpyToMatrix(rpy):
        """
        # Convert from Roll, Pitch, Yaw to transformation Matrix
        """
Ejemplo n.º 6
0
import os
import torch
from pkg_resources import parse_version as version

# torch.utils.ffi was dropped from newer torch releases; alias the modern
# cpp_extension builder to the same name on torch > 1.0.1.
# NOTE(review): BuildExtension may not be a drop-in replacement for the old
# create_extension API — verify the call site below still works on new torch.
if version(torch.__version__) > version('1.0.1'):
    from torch.utils.cpp_extension import BuildExtension as create_extension
else:
    from torch.utils.ffi import create_extension

# Directory containing this setup script.
this_file = os.path.dirname(__file__)

# Inputs passed to the extension builder below; populated conditionally.
sources = []
headers = []
defines = []
with_cuda = False

# Compile the CUDA FFT bindings only when a CUDA device is available.
if torch.cuda.is_available():
    print('Including CUDA code.')
    sources += ['pytorch_fft/src/th_fft_cuda.c']
    headers += ['pytorch_fft/src/th_fft_cuda.h']
    defines += [('WITH_CUDA', None)]
    with_cuda = True

ffi = create_extension(
    'pytorch_fft._ext.th_fft',
    package=True,
    headers=headers,
    sources=sources,
    define_macros=defines,
    relative_to=__file__,
    with_cuda=with_cuda,