Exemplo n.º 1
0
    def compile(self):
        """Lazily compile the forget-mult CUDA kernels and cache per-GPU handles.

        The PTX is compiled once per process and stored on the class
        (``GPUForgetMult.ptx``); module load, function lookups and the stream
        wrapper are cached per CUDA device in ``GPUForgetMult.configured_gpus``.
        """
        if self.ptx is None:
            from pynvrtc.compiler import Program
            # `kernel` is a module-level CUDA source string defined elsewhere
            # in this file.
            program = Program(kernel, 'recurrent_forget_mult.cu')
            GPUForgetMult.ptx = program.compile()

        if torch.cuda.current_device() not in GPUForgetMult.configured_gpus:
            from cupy.cuda import function
            m = function.Module()
            m.load(bytes(self.ptx.encode()))

            # Forward ForgetMult
            self.forget_mult = m.get_function('recurrent_forget_mult')
            self.forget_mult_bwd = m.get_function('recurrent_forget_mult_bwd')

            # Backward ForgetMult
            self.bwd_forget_mult = m.get_function('recurrent_bwd_forget_mult')
            self.bwd_forget_mult_bwd = m.get_function(
                'recurrent_bwd_forget_mult_bwd')

            # Minimal stream wrapper: cupy launches only need the raw pointer.
            Stream = namedtuple('Stream', ['ptr'])
            self.stream = Stream(ptr=torch.cuda.current_stream().cuda_stream)

            GPUForgetMult.configured_gpus[torch.cuda.current_device()] = (
                self.forget_mult, self.forget_mult_bwd, self.bwd_forget_mult,
                self.bwd_forget_mult_bwd, self.stream)

        self.forget_mult, self.forget_mult_bwd, self.bwd_forget_mult, self.bwd_forget_mult_bwd, self.stream = GPUForgetMult.configured_gpus[
            torch.cuda.current_device()]
Exemplo n.º 2
0
    def __call__(self, points, indices):
        """Build (col_ind, col_ptr) index tensors for `indices` on the GPU.

        Compiles a size-specialized CUDA kernel from
        ``utils/cuda/distance.cu`` (cached by ``points.size()``) and launches
        it on the current CUDA stream.
        """
        size = points.size()
        if size in self.cached_functions:
            func = self.cached_functions[size]
        else:
            # Use a context manager so the source file handle is not leaked.
            with open('utils/cuda/distance.cu', 'r') as kernel_file:
                kernel = kernel_file.read()
            kernel = Template(kernel).substitute(BATCH_SIZE=size[0], NUM_ROWS=size[1])

            program = Program(kernel, 'distance.cu')
            ptx = program.compile()

            m = function.Module()
            m.load(bytes(ptx.encode()))

            func = m.get_function('distance')
            self.cached_functions[size] = func

        indices = indices.contiguous()

        col_ind = indices.new(indices.size(1))
        col_ptr = indices.new(size[0], size[1] + 1)
        # NOTE(review): the original filled col_ptr with indices.size(1) and
        # then immediately refilled the whole tensor with 0, making the first
        # fill dead code; only the zero-fill is kept.
        col_ptr.fill_(0)

        # One thread per index entry, 1024 threads per block, rounded up.
        grid = ((indices.size(1) + 1024 - 1) // 1024, 1, 1)
        block = (1024, 1, 1)
        func(grid=grid, block=block,
             args=[indices.data_ptr(), col_ind.data_ptr(),
                   col_ptr.data_ptr(), indices.size(1)],
             stream=Stream(ptr=torch.cuda.current_stream().cuda_stream))

        return col_ind, col_ptr
Exemplo n.º 3
0
def compile_with_cache(source, options=(), arch=None, cache_dir=None):
    """Compile CUDA `source` with NVRTC, caching the linked cubin on disk.

    The cache key is an md5 of the preprocessed source combined with the
    arch, options and NVRTC version, so changing any of those recompiles.
    Cache files are self-validating: the first 32 bytes are the md5 hexdigest
    of the remaining cubin payload, so a partially written file is ignored.
    """
    global _empty_file_preprocess_cache
    if cache_dir is None:
        cache_dir = get_cache_dir()
    if arch is None:
        arch = _get_arch()

    options += ('-ftz=true',)

    env = (arch, options, _get_nvrtc_version())
    if '#include' in source:
        pp_src = '%s %s' % (env, _preprocess(source, options))
    else:
        # Preprocessing an empty file captures the implicit environment;
        # it is identical for every include-free source, so cache it per env.
        base = _empty_file_preprocess_cache.get(env, None)
        if base is None:
            base = _empty_file_preprocess_cache[env] = _preprocess('', options)
        pp_src = '%s %s %s' % (env, base, source)

    pp_src = pp_src.encode('utf-8')
    name = '%s_2.cubin' % hashlib.md5(pp_src).hexdigest()

    if not os.path.isdir(cache_dir):
        try:
            os.makedirs(cache_dir)
        except OSError:
            # A concurrent process may have created it; only re-raise if the
            # directory still does not exist.
            if not os.path.isdir(cache_dir):
                raise

    mod = function.Module()
    # To handle conflicts in concurrent situation, we adopt lock-free method
    # to avoid performance degradation.
    path = os.path.join(cache_dir, name)
    if os.path.exists(path):
        with open(path, 'rb') as cache_file:
            data = cache_file.read()
        if len(data) >= 32:
            # Renamed from `hash` to avoid shadowing the builtin.
            stored_hash = data[:32]
            cubin = data[32:]
            cubin_hash = six.b(hashlib.md5(cubin).hexdigest())
            if stored_hash == cubin_hash:
                mod.load(cubin)
                return mod

    ptx = compile_using_nvrtc(source, options, arch)
    ls = function.LinkState()
    ls.add_ptr_data(ptx, six.u('cupy.ptx'))
    cubin = ls.complete()
    cubin_hash = six.b(hashlib.md5(cubin).hexdigest())

    # shutil.move is not atomic operation, so it could result in a corrupted
    # file. We detect it by appending md5 hash at the beginning of each cache
    # file. If the file is corrupted, it will be ignored next time it is read.
    with tempfile.NamedTemporaryFile(dir=cache_dir, delete=False) as tf:
        tf.write(cubin_hash)
        tf.write(cubin)
        temp_path = tf.name
    shutil.move(temp_path, path)

    mod.load(cubin)
    return mod
Exemplo n.º 4
0
    def compile_functions(self):
        """Load the RRNN PTX and cache forward/backward kernels for this GPU.

        Returns (stream, fwd_func, bwd_func); the same tuple is stored in
        ``self._DEVICE2FUNC`` keyed by the current device.
        """
        device = torch.cuda.current_device()
        print("RRNN loaded for gpu {}".format(device))
        mod = function.Module()
        mod.load(bytes(self._RRNN_PTX.encode()))

        # The two branches of the original duplicated everything except the
        # kernel names; select the names and share the rest.
        if self.semiring.type == 0:
            fwd_name, bwd_name = "rrnn_fwd", "rrnn_bwd"
        else:
            fwd_name, bwd_name = "rrnn_semiring_fwd", "rrnn_semiring_bwd"
        fwd_func = mod.get_function(fwd_name)
        bwd_func = mod.get_function(bwd_name)

        Stream = namedtuple("Stream", ["ptr"])
        current_stream = Stream(ptr=torch.cuda.current_stream().cuda_stream)
        self._DEVICE2FUNC[device] = (current_stream, fwd_func, bwd_func)
        return current_stream, fwd_func, bwd_func
Exemplo n.º 5
0
    def compile(self):
        """Compile the forget-mult kernels once and cache per-GPU handles.

        PTX compilation happens once per process (class attribute); module
        load and function lookup are cached per device.
        """
        if self.ptx is None:

            # `kernel` is a module-level CUDA source string defined elsewhere.
            program = _NVRTCProgram(kernel.encode(),
                                    "recurrent_forget_mult.cu".encode())
            GPUForgetMult.ptx = program.compile()

        if torch.cuda.current_device() not in GPUForgetMult.configured_gpus:
            m = function.Module()
            m.load(bytes(self.ptx.encode()))

            self.forget_mult = m.get_function("recurrent_forget_mult")
            self.bwd_forget_mult = m.get_function("bwd_recurrent_forget_mult")

            # cupy kernel launches only need the raw stream pointer.
            Stream = namedtuple("Stream", ["ptr"])
            self.stream = Stream(ptr=torch.cuda.current_stream().cuda_stream)

            GPUForgetMult.configured_gpus[torch.cuda.current_device()] = (
                self.forget_mult,
                self.bwd_forget_mult,
                self.stream,
            )

        # Always read back from the per-device cache so repeated calls on an
        # already-configured device still populate the instance attributes.
        (
            self.forget_mult,
            self.bwd_forget_mult,
            self.stream,
        ) = GPUForgetMult.configured_gpus[torch.cuda.current_device()]
Exemplo n.º 6
0
def compile_kernel(kernel, filename, functioname):
    """Compile CUDA source `kernel` and return the function `functioname`."""
    ptx = Program(bytes(kernel, 'ascii'), filename).compile()

    module = function.Module()
    module.load(bytes(ptx.encode()))

    return module.get_function(functioname)
Exemplo n.º 7
0
    def compile_functions(self):
        """Load the UnICORNN PTX and cache its kernels for the current GPU."""
        gpu_id = torch.cuda.current_device()
        module = function.Module()
        module.load(bytes(self._UnICORNN_PTX.encode()))

        Stream = namedtuple('Stream', ['ptr'])
        entry = (
            Stream(ptr=torch.cuda.current_stream().cuda_stream),
            module.get_function('unicornn_fwd'),
            module.get_function('unicornn_bwd'),
        )
        self._DEVICE2FUNC[gpu_id] = entry
        return entry
Exemplo n.º 8
0
def compile_with_cache(source, options=(), arch=None, cache_dir=None):
    """Compile CUDA `source` with nvcc, caching the resulting cubin on disk.

    The cache key is an md5 of the preprocessed source plus arch, options and
    nvcc version. A file lock serializes cache access; the (slow) nvcc call
    itself runs outside the lock so other processes are not blocked on it.
    """
    global _empty_file_preprocess_cache
    if cache_dir is None:
        cache_dir = get_cache_dir()
    if arch is None:
        arch = _get_arch()

    if 'win32' == sys.platform:
        options += ('-Xcompiler', '/wd 4819')
        # Pick the pointer width matching this Python interpreter.
        if sys.maxsize == 9223372036854775807:
            options += '-m64',
        elif sys.maxsize == 2147483647:
            options += '-m32',

    env = (arch, options, _get_nvcc_version())
    if '#include' in source:
        pp_src = '%s %s' % (env, preprocess(source, options))
    else:
        # Preprocessing an empty file is identical for every include-free
        # source, so cache it per environment.
        base = _empty_file_preprocess_cache.get(env, None)
        if base is None:
            base = _empty_file_preprocess_cache[env] = preprocess('', options)
        pp_src = '%s %s %s' % (env, base, source)

    if isinstance(pp_src, six.text_type):
        pp_src = pp_src.encode('utf-8')
    name = '%s.cubin' % hashlib.md5(pp_src).hexdigest()

    mod = function.Module()

    if not os.path.isdir(cache_dir):
        try:
            os.makedirs(cache_dir)
        except OSError:
            # A concurrent process may have created it; only re-raise if the
            # directory still does not exist.
            if not os.path.isdir(cache_dir):
                raise

    lock_path = os.path.join(cache_dir, 'lock_file.lock')

    path = os.path.join(cache_dir, name)
    with filelock.FileLock(lock_path) as lock:
        if os.path.exists(path):
            with open(path, 'rb') as cubin_file:
                cubin = cubin_file.read()
        else:
            # Compile outside the lock so other processes are not blocked on
            # a potentially slow nvcc invocation.
            lock.release()
            cubin = nvcc(source, options, arch)
            lock.acquire()
            # BUG FIX: another process may have written the cache entry while
            # the lock was released; re-check before writing so we do not
            # clobber (or redundantly rewrite) its file.
            if not os.path.exists(path):
                with open(path, 'wb') as cubin_file:
                    cubin_file.write(cubin)

    mod.load(cubin)

    return mod
Exemplo n.º 9
0
def _initial_cupy(name="indices_slice"):
    """Compile the module-level `kernel` source and return (function, stream)."""
    ptx = Program(kernel.encode(), (name + '.cu').encode()).compile()

    module = function.Module()
    module.load(bytes(ptx.encode()))

    Stream = namedtuple('Stream', ['ptr'])
    return (module.get_function(name),
            Stream(ptr=torch.cuda.current_stream().cuda_stream))
Exemplo n.º 10
0
    def backward(ctx, grad_h):
        """Backward pass of ForgetMult: compute gradients w.r.t. f, x and
        (optionally) the initial hidden state via a custom CUDA kernel."""
        ###################
        # Inlined copy of the lazy compile/caching logic (see the matching
        # forward): compile PTX once per process, load + look up kernels once
        # per device.
        if ctx.ptx is None:
            program = Program(kernel.encode(),
                              'recurrent_forget_mult.cu'.encode())

            GPUForgetMult.ptx = program.compile()

        if torch.cuda.current_device() not in GPUForgetMult.configured_gpus:
            m = function.Module()
            m.load(bytes(ctx.ptx.encode()))

            ctx.forget_mult = m.get_function('recurrent_forget_mult')
            ctx.bwd_forget_mult = m.get_function('bwd_recurrent_forget_mult')

            Stream = namedtuple('Stream', ['ptr'])
            ctx.stream = Stream(ptr=torch.cuda.current_stream().cuda_stream)

            GPUForgetMult.configured_gpus[torch.cuda.current_device()] = (
                ctx.forget_mult, ctx.bwd_forget_mult, ctx.stream)

        ctx.forget_mult, ctx.bwd_forget_mult, ctx.stream = GPUForgetMult.configured_gpus[
            torch.cuda.current_device()]
        #################
        # Tensors saved by forward(); `h` is the full (seq+1)-step result.
        f, x, hidden_init = ctx.saved_tensors
        h = ctx.result
        ###
        seq_size, batch_size, hidden_size = f.size()
        # Zeroing is not necessary as these will be overwritten
        grad_f = f.new(*f.size())
        grad_x = f.new(*f.size())
        grad_h_init = f.new(batch_size, hidden_size)
        ###
        # Launch config: up to 512 threads over the hidden dim, one grid row
        # per batch element; the kernel argument order must match the CUDA
        # signature exactly.
        grid_hidden_size = min(hidden_size, 512)
        grid = (math.ceil(hidden_size / grid_hidden_size), batch_size)
        ctx.bwd_forget_mult(grid=grid,
                            block=(grid_hidden_size, 1),
                            args=[
                                h.data_ptr(),
                                f.data_ptr(),
                                x.data_ptr(),
                                grad_h.data_ptr(),
                                grad_f.data_ptr(),
                                grad_x.data_ptr(),
                                grad_h_init.data_ptr(), seq_size, batch_size,
                                hidden_size
                            ],
                            stream=ctx.stream)
        ###
        # grad_h_init is only a real gradient when a hidden_init was passed
        # to forward(); otherwise only two inputs need gradients.
        if hidden_init is not None:
            return grad_f, grad_x, grad_h_init
        return grad_f, grad_x
    def compile_functions():
        """Compile the IndRNN kernels and cache them for the current device."""
        gpu = torch.cuda.current_device()
        module = cuda_function.Module()
        module.load(bytes(IndRNN_Compute_GPU._IndRNN_PTX.encode()))

        Stream = namedtuple('Stream', ['ptr'])
        entry = (Stream(ptr=torch.cuda.current_stream().cuda_stream),
                 module.get_function('indrnn_fwd'),
                 module.get_function('indrnn_bwd'))
        IndRNN_Compute_GPU._DEVICE2FUNC[gpu] = entry
        return entry
    def compile_functions(self):
        """Load the IndRNN PTX and cache its kernels for the current GPU."""
        gpu = torch.cuda.current_device()
        print('IndRNN loaded for gpu {}'.format(gpu))
        module = function.Module()
        module.load(bytes(self._IndRNN_PTX.encode()))

        Stream = namedtuple('Stream', ['ptr'])
        entry = (Stream(ptr=torch.cuda.current_stream().cuda_stream),
                 module.get_function('indrnn_fwd'),
                 module.get_function('indrnn_bwd'))
        self._DEVICE2FUNC[gpu] = entry
        return entry
Exemplo n.º 13
0
    def compile():
        """Compile the forget-mult kernels and store the handles on the class."""
        ptx = Program(kernel.encode(),
                      'recurrent_forget_mult.cu'.encode()).compile()

        module = function.Module()
        module.load(bytes(ptx.encode()))

        GPUForgetMult.forget_mult = module.get_function(
            'recurrent_forget_mult')
        GPUForgetMult.bwd_forget_mult = module.get_function(
            'bwd_recurrent_forget_mult')

        Stream = namedtuple('Stream', ['ptr'])
        GPUForgetMult.stream = Stream(
            ptr=torch.cuda.current_stream().cuda_stream)
Exemplo n.º 14
0
    def compile_functions(self):
        """Load the SRU PTX and cache all four kernels for this GPU."""
        gpu = torch.cuda.current_device()
        logger.info('SRU loaded for gpu {}'.format(gpu))
        module = function.Module()
        module.load(bytes(SRU_PTX.encode()))

        # Order matters: (stream, fwd, bi_fwd, bwd, bi_bwd).
        entry = (Stream(ptr=torch.cuda.current_stream().cuda_stream),
                 module.get_function('sru_fwd'),
                 module.get_function('sru_bi_fwd'),
                 module.get_function('sru_bwd'),
                 module.get_function('sru_bi_bwd'))

        self._DEVICE2FUNC[gpu] = entry
        return entry
Exemplo n.º 15
0
    def compile(self):
        """Compile this kernel for the current compute architecture and
        resolve its function pointer."""
        prog = Program(self.kernel, self.title)

        # Target the GPU's own compute capability.
        arch_flag = "-arch={0}".format(cupyKernel.get_compute_arch())
        ptx = prog.compile([arch_flag])

        module = function.Module()
        module.load(bytes(ptx.encode()))

        self.func = module.get_function(self.func_name)
        self.compiled = True
Exemplo n.º 16
0
def smooth_local_affine(output_cpu, input_cpu, epsilon, patch, h, w, f_r, f_e):
    """Fit per-pixel local affine color models, bilaterally smooth them, and
    reconstruct the output image on the GPU.

    Inputs/outputs are numpy arrays; all heavy work runs in three chained
    CUDA kernels compiled from the module-level `src` string.
    """
    # program = Program(src.encode('utf-8'), 'best_local_affine_kernel.cu'.encode('utf-8'))
    # ptx = program.compile(['-I/usr/local/cuda/include'.encode('utf-8')])
    program = Program(src, 'best_local_affine_kernel.cu')
    # NOTE(review): hard-coded, machine-specific CUDA include path — will
    # break on any other machine; should come from an env var or config.
    ptx = program.compile(['-I/home/jsy/software/cuda/cuda-10.1/include'])
    # ptx = program.compile(['-I/usr/local/cuda/include'])
    m = function.Module()
    m.load(bytes(ptx.encode()))

    _reconstruction_best_kernel = m.get_function('reconstruction_best_kernel')
    _bilateral_smooth_kernel = m.get_function('bilateral_smooth_kernel')
    _best_local_affine_kernel = m.get_function('best_local_affine_kernel')
    Stream = namedtuple('Stream', ['ptr'])
    s = Stream(ptr=torch.cuda.current_stream().cuda_stream)

    # Bilateral-filter parameters derived from the caller's settings.
    filter_radius = f_r
    sigma1 = filter_radius / 3
    sigma2 = f_e
    radius = (patch - 1) / 2

    # 12 coefficients per pixel: a 3x4 affine color transform.
    filtered_best_output = torch.zeros(np.shape(input_cpu)).cuda()
    affine_model = torch.zeros((h * w, 12)).cuda()
    filtered_affine_model = torch.zeros((h * w, 12)).cuda()

    input_ = torch.from_numpy(input_cpu).cuda()
    output_ = torch.from_numpy(output_cpu).cuda()
    # Stage 1: fit a local affine model per pixel (256 threads/block, one
    # thread per pixel).
    _best_local_affine_kernel(
        grid=(int((h * w) / 256 + 1), 1),
        block=(256, 1, 1),
        args=[output_.data_ptr(), input_.data_ptr(), affine_model.data_ptr(),
              np.int32(h), np.int32(w), np.float32(epsilon), np.int32(radius)], stream=s
    )

    # Stage 2: bilaterally smooth the affine coefficients.
    _bilateral_smooth_kernel(
        grid=(int((h * w) / 256 + 1), 1),
        block=(256, 1, 1),
        args=[affine_model.data_ptr(), filtered_affine_model.data_ptr(), input_.data_ptr(), np.int32(h), np.int32(w),
              np.int32(f_r), np.float32(sigma1), np.float32(sigma2)], stream=s
    )

    # Stage 3: apply the smoothed models to reconstruct the final image.
    _reconstruction_best_kernel(
        grid=(int((h * w) / 256 + 1), 1),
        block=(256, 1, 1),
        args=[input_.data_ptr(), filtered_affine_model.data_ptr(), filtered_best_output.data_ptr(),
              np.int32(h), np.int32(w)], stream=s
    )
    numpy_filtered_best_output = filtered_best_output.cpu().numpy()
    return numpy_filtered_best_output
Exemplo n.º 17
0
    def forward(ctx, f, x, hidden_init=None):
        """Forward pass of ForgetMult: h[t] = f[t]*x[t] + (1-f[t])*h[t-1],
        computed by a custom CUDA kernel over (seq, batch, hidden) tensors."""
        ctx.ptx = GPUForgetMult.ptx
        ctx.configured_gpus = GPUForgetMult.configured_gpus
        # the self.compile function
        ##################
        # Lazy compile + per-device caching, inlined here because autograd
        # Function has no instance state.
        if ctx.ptx is None:
            program = Program(kernel.encode(),
                              'recurrent_forget_mult.cu'.encode())
            GPUForgetMult.ptx = program.compile()
            ctx.ptx = GPUForgetMult.ptx

        if torch.cuda.current_device() not in GPUForgetMult.configured_gpus:
            m = function.Module()
            m.load(bytes(ctx.ptx.encode()))

            ctx.forget_mult = m.get_function('recurrent_forget_mult')
            ctx.bwd_forget_mult = m.get_function('bwd_recurrent_forget_mult')

            Stream = namedtuple('Stream', ['ptr'])
            ctx.stream = Stream(ptr=torch.cuda.current_stream().cuda_stream)
            GPUForgetMult.configured_gpus[torch.cuda.current_device()] = (
                ctx.forget_mult, ctx.bwd_forget_mult, ctx.stream)

        ctx.forget_mult, ctx.bwd_forget_mult, ctx.stream = GPUForgetMult.configured_gpus[
            torch.cuda.current_device()]
        ###################
        seq_size, batch_size, hidden_size = f.size()
        # result[0] holds the initial hidden state; the kernel fills 1..seq.
        result = f.new(seq_size + 1, batch_size, hidden_size)
        # We only zero the result array (result[0]) if we don't set a hidden initial state
        # All other values (result[1:]) are overwritten by default
        if hidden_init is not None: result[0, :, :] = hidden_init
        else: result = result.zero_()
        ###
        # Up to 512 threads over the hidden dim, one grid row per batch item.
        grid_hidden_size = min(hidden_size, 512)
        grid = (math.ceil(hidden_size / grid_hidden_size), batch_size)
        ctx.forget_mult(grid=grid,
                        block=(grid_hidden_size, 1),
                        args=[
                            result.data_ptr(),
                            f.data_ptr(),
                            x.data_ptr(), seq_size, batch_size, hidden_size
                        ],
                        stream=ctx.stream)
        # Save inputs and the full result for backward().
        ctx.save_for_backward(f, x, hidden_init)
        ctx.result = result
        return result[1:, :, :]
Exemplo n.º 18
0
    def _cuda_get_module(self):
        """Return the class-cached cupy module holding the SRU kernels,
        creating and loading it on first use.

        Raises NotImplementedError for unsupported `cupy_version` values.
        """
        if SRUFunction._cuda_module is not None:
            return SRUFunction._cuda_module

        # BUG FIX: build and load into a local first, and only publish it to
        # the class after a successful load. The original assigned the empty
        # Module to SRUFunction._cuda_module *before* the version check, so
        # after a NotImplementedError a later call would silently return an
        # unloaded module.
        module = function.Module()

        if cupy_version == 1:
            module.load(CUDA_SRU_PTX)
        elif cupy_version == 2:
            ls = function.LinkState()
            ls.add_ptr_data(CUDA_SRU_PTX, u"cupy.ptx")
            module.load(ls.complete())
        else:
            raise NotImplementedError()

        SRUFunction._cuda_module = module
        return SRUFunction._cuda_module
Exemplo n.º 19
0
    def compile(cls):
        """Compiles forward and backward GPU kernels for uni- and bi-directional
        SRU. Assumes there is only one GPU.
        """
        # _STREAM doubles as the "already compiled" flag.
        if cls._STREAM is not None:
            return

        ptx = Program(SRU_CODE.encode(), 'sru_prog.cu'.encode()).compile()
        module = function.Module()
        module.load(bytes(ptx.encode()))
        for attr, kernel_name in (('_FWD_FUNC', 'sru_fwd'),
                                  ('_BWD_FUNC', 'sru_bwd'),
                                  ('_BiFWD_FUNC', 'sru_bi_fwd'),
                                  ('_BiBWD_FUNC', 'sru_bi_bwd')):
            setattr(cls, attr, module.get_function(kernel_name))

        Stream = namedtuple('Stream', ['ptr'])
        cls._STREAM = Stream(ptr=torch.cuda.current_stream().cuda_stream)
Exemplo n.º 20
0
    def compile(self):
        """Compile the reverse forget-mult kernels once and cache per GPU."""
        if self.ptx is None:
            prog = Program(kernel.encode(), 'recurrent_forget_mult.cu'.encode())
            GPUReverseForgetMult.ptx = prog.compile()

        device = torch.cuda.current_device()
        if device not in GPUReverseForgetMult.configured_gpus:
            module = function.Module()
            module.load(bytes(self.ptx.encode()))

            fwd = module.get_function('recurrent_reverse_forget_mult')
            bwd = module.get_function('bwd_recurrent_reverse_forget_mult')

            Stream = namedtuple('Stream', ['ptr'])
            cur_stream = Stream(ptr=torch.cuda.current_stream().cuda_stream)

            GPUReverseForgetMult.configured_gpus[device] = (fwd, bwd, cur_stream)

        (self.forget_mult, self.bwd_forget_mult,
         self.stream) = GPUReverseForgetMult.configured_gpus[device]
Exemplo n.º 21
0
    def compile(self):
        """Compile the forget-mult kernels (Windows NVRTC DLL) and cache
        the per-GPU function handles."""
        if self.ptx is None:
            prog = Program(kernel, 'recurrent_forget_mult.cu', lib_name="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v10.1\\bin\\nvrtc64_101_0.dll")
            GPUForgetMult.ptx = prog.compile()

        device = torch.cuda.current_device()
        if device not in GPUForgetMult.configured_gpus:
            module = function.Module()
            module.load(bytes(self.ptx.encode()))

            fwd = module.get_function('recurrent_forget_mult')
            bwd = module.get_function('bwd_recurrent_forget_mult')

            Stream = namedtuple('Stream', ['ptr'])
            cur_stream = Stream(ptr=torch.cuda.current_stream().cuda_stream)

            GPUForgetMult.configured_gpus[device] = (fwd, bwd, cur_stream)

        (self.forget_mult, self.bwd_forget_mult,
         self.stream) = GPUForgetMult.configured_gpus[device]
Exemplo n.º 22
0
    def compile(self):
        """Compile the layer-norm kernels once and cache per-GPU handles."""
        if GPULayerNorm.ptx is None:
            prog = Program(kernel.encode(), 'layer_norm.cu'.encode())
            GPULayerNorm.ptx = prog.compile()

        device = torch.cuda.current_device()
        if device not in GPULayerNorm.configured_gpus:
            module = function.Module()
            module.load(bytes(GPULayerNorm.ptx.encode()))

            fwd = module.get_function('layer_norm')
            bwd = module.get_function('bwd_layer_norm')
            bwd_mults = module.get_function('bwd_multiplications')

            Stream = namedtuple('Stream', ['ptr'])
            cur_stream = Stream(ptr=torch.cuda.current_stream().cuda_stream)

            GPULayerNorm.configured_gpus[device] = (fwd, bwd, bwd_mults,
                                                    cur_stream)

        (self.layer_norm, self.bwd_layer_norm, self.bwd_mults,
         self.stream) = GPULayerNorm.configured_gpus[device]
Exemplo n.º 23
0
    def compile():
        """Ensure the kernels are compiled for the current GPU and return
        the cached (forget_mult, bwd_forget_mult, stream) tuple."""
        if GPUForgetMult.ptx is None:
            GPUForgetMult.ptx = Program(
                kernel, 'recurrent_forget_mult.cu').compile()

        device = torch.cuda.current_device()
        if device not in GPUForgetMult.configured_gpus:
            module = function.Module()
            module.load(bytes(GPUForgetMult.ptx.encode()))

            Stream = namedtuple('Stream', ['ptr'])
            GPUForgetMult.configured_gpus[device] = (
                module.get_function('recurrent_forget_mult'),
                module.get_function('bwd_recurrent_forget_mult'),
                Stream(ptr=torch.cuda.current_stream().cuda_stream))

        return GPUForgetMult.configured_gpus[device]
Exemplo n.º 24
0
    def __call__(self, values, col_ind, col_ptr, size, dense):
        """Batched sparse (CSC) x dense matmul via a shape-specialized CUDA
        kernel; kernels are cached keyed on the full shape tuple."""
        func_id = (size[0], size[1], size[2], dense.size(1), dense.size(2))
        if func_id in self.cached_functions:
            func = self.cached_functions[func_id]
        else:
            # Use a context manager so the source file handle is not leaked.
            with open('utils/cuda/sparse_bmm.cu', 'r') as kernel_file:
                kernel = kernel_file.read()
            kernel = Template(kernel).substitute(BATCH_SIZE=size[0],
                                                 SPARSE_NUM_ROWS=size[1],
                                                 SPARSE_NUM_COLS=size[2],
                                                 DENSE_NUM_ROWS=dense.size(1),
                                                 DENSE_NUM_COLS=dense.size(2))

            program = Program(kernel, 'sparse_bmm.cu')
            ptx = program.compile()

            m = function.Module()
            m.load(bytes(ptx.encode()))

            func = m.get_function('sparse_bmm')
            self.cached_functions[func_id] = func

        values = values.contiguous()
        col_ind = col_ind.contiguous()
        col_ptr = col_ptr.contiguous()
        dense = dense.contiguous()
        result = values.new(size[0], size[1], dense.size(2))
        block = (8, 8, 8)

        # One thread per output element, rounded up per dimension.
        grid = tuple([(result.size(i) + block[i] - 1) // block[i]
                      for i in range(3)])
        func(grid=grid,
             block=block,
             args=[
                 result.data_ptr(),
                 values.data_ptr(),
                 col_ind.data_ptr(),
                 col_ptr.data_ptr(),
                 dense.data_ptr()
             ],
             stream=Stream(ptr=torch.cuda.current_stream().cuda_stream))

        return result
Exemplo n.º 25
0
    def compile(self):
        """Compile the ReLU kernels once and cache per-GPU function handles."""
        if self.ptx is None:
            program = Program(kernel, 'relu.cu')
            GPUReLUF.ptx = program.compile()

        if torch.cuda.current_device() not in GPUReLUF.configured_gpus:
            m = function.Module()
            # BUG FIX: the original called `bytes(self.ptx)`, which raises
            # TypeError for a str in Python 3 (an encoding is required).
            # Encode first, as every other kernel loader in this file does.
            m.load(bytes(self.ptx.encode()))

            self.relu_forward = m.get_function('relu_forward')
            self.relu_backward = m.get_function('relu_backward')

            Stream = namedtuple('Stream', ['ptr'])
            self.stream = Stream(ptr=torch.cuda.current_stream().cuda_stream)

            GPUReLUF.configured_gpus[torch.cuda.current_device()] = (
                self.relu_forward, self.relu_backward, self.stream)

        self.relu_forward, self.relu_backward, self.stream = GPUReLUF.configured_gpus[
            torch.cuda.current_device()]
Exemplo n.º 26
0
    def compile(self):
        """Compile the forget-mult kernels once and cache per-GPU handles."""
        if self.ptx is None:
            # Pass plain str sources (not encoded), per
            # https://github.com/salesforce/pytorch-qrnn/issues/26
            prog = Program(kernel, 'recurrent_forget_mult.cu')
            GPUForgetMult.ptx = prog.compile()

        device = torch.cuda.current_device()
        if device not in GPUForgetMult.configured_gpus:
            module = function.Module()
            module.load(bytes(self.ptx.encode()))

            fwd = module.get_function('recurrent_forget_mult')
            bwd = module.get_function('bwd_recurrent_forget_mult')

            Stream = namedtuple('Stream', ['ptr'])
            cur_stream = Stream(ptr=torch.cuda.current_stream().cuda_stream)

            GPUForgetMult.configured_gpus[device] = (fwd, bwd, cur_stream)

        (self.forget_mult, self.bwd_forget_mult,
         self.stream) = GPUForgetMult.configured_gpus[device]
Exemplo n.º 27
0
def _build_cuda_kernels():
    """Compile the disparity-warp CUDA kernels and bind them to the two
    module-level globals used by the callers."""
    global _apply_disparity_func_pos
    global _apply_disparity_func_neg

    # Both kernels shift each row of `src` horizontally by per-pixel `disp`;
    # the pos variant scans columns right-to-left, the neg variant
    # left-to-right, discarding targets that fall outside the image.
    _apply_disparity_pos_kernel = '''
    extern "C" {
        __global__ void apply_disparity_pos(
        float *dst, const float *src, const int *disp, int h, int w, int c, int total_l) {
            int i = blockIdx.x * blockDim.x + threadIdx.x;
            if (i >= total_l)
                return;
            int dbase = (i/h/c*h+i%h)*w;
            for (int j = w - 1; j >=0; j--) {
                int idx = j + disp[dbase+j];
                if (idx < w)
                    dst[i*w+idx] = src[i*w+j];
            }
        }
        __global__ void apply_disparity_neg(
        float *dst, const float *src, const int *disp, int h, int w, int c, int total_l) {
            int i = blockIdx.x * blockDim.x + threadIdx.x;
            if (i >= total_l)
                return;
            int dbase = (i/h/c*h+i%h)*w;
            for (int j = 0; j < w; j++) {
                int idx = j + disp[dbase+j];
                if (idx > -1)
                    dst[i*w+idx] = src[i*w+j];
            }
        }
    }
    '''
    # One source string holds both kernels; compile once, fetch both.
    program = Program(
        _apply_disparity_pos_kernel, 'apply_disparity.cu')
    m = function.Module()
    m.load(bytes(program.compile().encode()))
    _apply_disparity_func_pos = m.get_function('apply_disparity_pos')
    _apply_disparity_func_neg = m.get_function('apply_disparity_neg')
Exemplo n.º 28
0
def load_sru_mod():
    """Compile the SRU CUDA program and populate the module-level kernel
    globals. No-op when the SRU requirements are not met."""
    global SRU_FWD_FUNC, SRU_BWD_FUNC, SRU_BiFWD_FUNC, SRU_BiBWD_FUNC
    global SRU_STREAM
    if not check_sru_requirement():
        return

    from cupy.cuda import function
    from pynvrtc.compiler import Program

    # This sets up device to use.
    device = torch.device("cuda")
    tmp_ = torch.rand(1, 1).to(device)

    ptx = Program(SRU_CODE.encode('utf-8'),
                  'sru_prog.cu'.encode('utf-8')).compile()
    module = function.Module()
    module.load(bytes(ptx.encode()))

    SRU_FWD_FUNC = module.get_function('sru_fwd')
    SRU_BWD_FUNC = module.get_function('sru_bwd')
    SRU_BiFWD_FUNC = module.get_function('sru_bi_fwd')
    SRU_BiBWD_FUNC = module.get_function('sru_bi_bwd')

    StreamTuple = namedtuple('Stream', ['ptr'])
    SRU_STREAM = StreamTuple(ptr=torch.cuda.current_stream().cuda_stream)
Exemplo n.º 29
0
    }
}
"""


# Module-level, import-time compilation of the SRU kernels: only runs when
# CUDA + cupy + pynvrtc are available.
if check_sru_requirement():
    from cupy.cuda import function
    from pynvrtc.compiler import Program

    # This cuda() is important, it sets up device to use.
    tmp_ = torch.rand(1, 1).cuda()

    # Compile SRU_CODE (module-level CUDA source) to PTX and load it.
    sru_prog = Program(SRU_CODE.encode('utf-8'),
                       'sru_prog.cu'.encode('utf-8'))
    sru_ptx = sru_prog.compile()
    sru_mod = function.Module()
    sru_mod.load(bytes(sru_ptx.encode()))

    # Uni- and bi-directional forward/backward kernels.
    SRU_FWD_FUNC = sru_mod.get_function('sru_fwd')
    SRU_BWD_FUNC = sru_mod.get_function('sru_bwd')
    SRU_BiFWD_FUNC = sru_mod.get_function('sru_bi_fwd')
    SRU_BiBWD_FUNC = sru_mod.get_function('sru_bi_bwd')

    # cupy launches only need the raw stream pointer.
    stream = namedtuple('Stream', ['ptr'])
    SRU_STREAM = stream(ptr=torch.cuda.current_stream().cuda_stream)


class SRU_Compute(Function):

    def __init__(self, activation_type, d_out, bidirectional=False):
        super(SRU_Compute, self).__init__()
Exemplo n.º 30
0
            cp -= ncols_;
            gup -= ncols_u_;
            gxp -= ncols_x_;
            ghp -= ncols_;
        }

        *(grad_bias + col) = gbias1;
        *(grad_bias + col + ncols) = gbias2;
        *(grad_init +col) = cur;
    }
}
"""

# Module-level, import-time compilation of the SRU kernels from the
# SRU_CODE CUDA source defined above.
SRU_PROG = Program(SRU_CODE.encode('utf-8'), 'sru_prog.cu'.encode('utf-8'))
SRU_PTX = SRU_PROG.compile()
SRU_MOD = function.Module()
SRU_MOD.load(bytes(SRU_PTX.encode()))
# Uni- and bi-directional forward/backward kernels.
SRU_FWD_FUNC = SRU_MOD.get_function('sru_fwd')
SRU_BWD_FUNC = SRU_MOD.get_function('sru_bwd')
SRU_BiFWD_FUNC = SRU_MOD.get_function('sru_bi_fwd')
SRU_BiBWD_FUNC = SRU_MOD.get_function('sru_bi_bwd')

# cupy launches only need the raw stream pointer.
Stream = namedtuple('Stream', ['ptr'])
SRU_STREAM = Stream(ptr=torch.cuda.current_stream().cuda_stream)


class SRU_Compute(Function):
    def __init__(self, activation_type, d_out, bidirectional=False):
        super(SRU_Compute, self).__init__()
        self.activation_type = activation_type
        self.d_out = d_out