def _oper_gpu(cls, x, pz, ps, w, wr, b):
    if ps is None:
        # No previous step: start from zero cell state and zero hidden state.
        # w packs the four LSTM gates along axis 1, so the hidden size is
        # w.shape[1] // 4.
        tmp = GPUValue(shape=(x.shape[0], w.shape[1] // 4))
        s_p = tmp.zeros_like_me()
        z_p = tmp.zeros_like_me()
    else:
        s_p = ps
        z_p = get_gpu(pz)
    u = dot(x, w) + dot(z_p, wr)
    if b is not None:
        u += b
    z = get_gpu(z_p).empty_like_me()
    state = get_gpu(s_p).empty_like_me()
    cu.culstm_forward_activate(get_gpu(u))
    cu.culstm_forward(get_gpu(u), get_gpu(state), get_gpu(s_p), get_gpu(z))
    ret = cls._create_node(z)
    ret.attrs._x = x
    ret.attrs._w = w
    ret.attrs._wr = wr
    ret.attrs._b = b
    ret.attrs._pz = pz
    ret.attrs._u = u
    ret.attrs._pstate = s_p
    ret.attrs._state = state
    ret._state = state
    if isinstance(pz, Node):
        pz.attrs._pfgate = u
    return ret

def _oper_gpu(cls, x, pz, ps, w, wr, wc, b):
    if ps is None:
        # No previous step: start from zero cell state and zero hidden state.
        s_p = GPUValue(shape=(x.shape[0], w.shape[1] // 4)).zeros_like_me()
        z_p = s_p.zeros_like_me()
    else:
        s_p, z_p = map(get_gpu, (ps, pz))
    s = s_p.empty_like_me()
    u = op.dot(x, w) + op.dot(z_p, wr)
    if b is not None:
        u += b
    u = get_gpu(u)
    z = z_p.zeros_like_me()
    cu.cupeepholelstm_forward(u, get_gpu(wc), s_p, s, z)
    ret = cls._create_node(z)
    ret.attrs._x = x
    ret.attrs._w = w
    ret.attrs._wr = wr
    ret.attrs._wc = wc
    ret.attrs._b = b
    ret.attrs._u = u
    ret.attrs._pz = pz
    ret.attrs._pstate = ps
    ret.attrs._state = s
    if isinstance(pz, Node):
        pz.attrs._pfgate = u
    return ret

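# A minimal NumPy sketch of the peephole LSTM step that cupeepholelstm_forward
# computes above. This is the standard peephole formulation; the gate order
# inside u and the layout of wc are assumptions, not taken from the kernel.
import numpy as np

def _sigmoid(v):
    return 1.0 / (1.0 + np.exp(-v))

def peephole_lstm_step_sketch(x, z_p, s_p, w, wr, wc, b):
    m = w.shape[1] // 4                       # u packs 4 gates side by side
    u = x.dot(w) + z_p.dot(wr) + b
    g, i, f, o = (u[:, k * m:(k + 1) * m] for k in range(4))
    i = _sigmoid(i + wc[:m] * s_p)            # input gate peeks at prev cell
    f = _sigmoid(f + wc[m:2 * m] * s_p)       # forget gate peeks at prev cell
    s = np.tanh(g) * i + f * s_p              # new cell state
    o = _sigmoid(o + wc[2 * m:] * s)          # output gate peeks at new cell
    return np.tanh(s) * o, s                  # hidden output z, cell state s
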
def join_grads(self, grads, others):
    """Merge gradients from other models.

    ``others`` is a list of ``(model, grads)`` tuples to be merged.
    Each model listed in ``others`` must have the same structure as ``self``.
    """
    values = {name: params for name, params, attrs in self.flatten_values()}
    for model, _grads in others:
        o = model._get_grads(_grads)
        for (name, attrname), diff in o.items():
            obj = values[name][attrname]
            curdiff = grads.get(obj, None)
            if curdiff is not None:
                if not isinstance(curdiff, Node):
                    curdiff = Node(curdiff)
                if not isinstance(diff, Node):
                    diff = Node(diff)
                with use_device(curdiff.device_id):
                    if GPUValue is not None and diff.device_id != curdiff.device_id:
                        # Copy the gradient across devices before accumulating.
                        g = GPUValue(shape=diff.shape)
                        g.copy_from(diff.get_gpu())
                        diff = Node(g)
                    newdiff = curdiff + diff
                    grads.set(obj, newdiff)

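# A minimal pure-NumPy sketch of what join_grads accomplishes (names below are
# hypothetical, for illustration only): gradients computed for the same
# parameter by replicas of one model are summed into a single gradient table.
import numpy as np

def join_grads_sketch(grads, others):
    # grads and each entry of others map a parameter key to a gradient array.
    for other in others:
        for key, diff in other.items():
            grads[key] = grads[key] + diff
    return grads

merged = join_grads_sketch({"w": np.ones(3)}, [{"w": np.ones(3)}])
assert (merged["w"] == 2.0).all()
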
def _backward_gpu(self, context, dy, **kwargs):
    lhs = self.attrs._lhs
    rhs = self.attrs._rhs
    if isinstance(self.attrs._lhs, Node):
        new_shape = lhs.shape
        ldx = GPUValue(shape=new_shape)
        cublas_gemm(get_gpu(dy), 0, get_gpu(rhs), 1, get_gpu(ldx))
        self.attrs._lhs._update_diff(context, ldx, **kwargs)
    if isinstance(self.attrs._rhs, Node):
        new_shape = rhs.shape
        rdx = GPUValue(shape=new_shape)
        cublas_gemm(get_gpu(lhs), 1, get_gpu(dy), 0, get_gpu(rdx))
        self.attrs._rhs._update_diff(context, rdx, **kwargs)

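# Pure-NumPy check of the gradient shapes above, assuming the integer flags
# passed to cublas_gemm mean "transpose this operand" (1) or not (0):
# for y = lhs @ rhs, d(lhs) = dy @ rhs.T and d(rhs) = lhs.T @ dy.
import numpy as np
lhs, rhs = np.random.rand(2, 3), np.random.rand(3, 4)
dy = np.ones((2, 4))                    # same shape as y = lhs @ rhs
assert (dy @ rhs.T).shape == lhs.shape  # gemm(dy, 0, rhs, 1, ldx)
assert (lhs.T @ dy).shape == rhs.shape  # gemm(lhs, 1, dy, 0, rdx)
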
def _oper_gpu(cls, x, rois, ch, h, w, n_rois, outh, outw, spatial_scale):
    z = GPUValue(shape=(n_rois, ch, outh, outw))
    argmax_data = z.empty_like_me()
    rois = get_gpu(rois)
    cu.curoi_pool2d_forward(rois, get_gpu(x), spatial_scale, ch, h, w,
                            outh, outw, z, argmax_data)
    ret = cls._create_node(z)
    ret.attrs._index = argmax_data
    ret.attrs._x = x
    ret.attrs._rois = rois
    ret.attrs._outh = outh
    ret.attrs._outw = outw
    ret.attrs._spatial_scale = spatial_scale
    return ret

def _oper_gpu(cls, x, w, b, in_shape, out_shape, kernel, stride, padding, dilation):
    N = x.shape[0]
    conv_desc = cu.ConvolutionDescriptor(padding, stride, dilation, precision)
    filter_desc = cu.FilterDescriptor(w.shape, precision)
    y = GPUValue(shape=tuple([N] + list(out_shape)))
    with cu.cudnn_handler() as handle:
        cu.cuConvolutionForward(handle, conv_desc, filter_desc,
                                get_gpu(x), get_gpu(w), y)
        if b is not None:
            cu.cu_add_bias(get_gpu(b), y)
    ret = cls._create_node(y)
    ret.attrs._conv_desc = conv_desc
    ret.attrs._filter_desc = filter_desc
    ret.attrs._x = x
    ret.attrs._w = w
    ret.attrs._b = b
    ret.attrs._in_shape = in_shape
    ret.attrs._out_shape = out_shape
    ret.attrs._kernel = kernel
    ret.attrs._stride = stride
    ret.attrs._padding = padding
    ret.attrs._dilation = dilation
    return ret

def _oper_gpu(cls, x, w):
    z = GPUValue(shape=(len(x), len(w[0])))
    cu.cuembedding_forward(get_gpu(x), get_gpu(w), z)
    ret = cls._create_node(z)
    ret.attrs._x = x
    ret.attrs._w = w
    return ret

def _oper_gpu(cls, x, w, b, in_shape, kernel, stride, padding):
    conv_desc = cu.ConvolutionNDescriptor(padding, stride, precision)
    filter_desc = cu.NdFilterDescriptor(w.shape, precision)
    # Output spatial size per axis: (size + 2 * padding - kernel) // stride + 1.
    output_shape = [x.shape[0], w.shape[0]]
    for i in range(len(x.shape[2:])):
        output_shape.append(
            (x.shape[i + 2] + padding[i] * 2 - kernel[i]) // stride[i] + 1)
    y = GPUValue(shape=tuple(output_shape))
    with cu.cudnn_handler() as handle:
        cu.cuConvolutionForward(handle, conv_desc, filter_desc,
                                get_gpu(x), get_gpu(w), y)
        if b is not None:
            cu.cu_add_bias(get_gpu(b), y)
    ret = cls._create_node(y)
    ret.attrs._conv_desc = conv_desc
    ret.attrs._filter_desc = filter_desc
    ret.attrs._x = x
    ret.attrs._w = w
    ret.attrs._b = b
    return ret

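# Worked example of the output-shape formula used above for one spatial axis:
# a 32-pixel axis with padding 1, kernel 3, stride 2 gives
# (32 + 2 * 1 - 3) // 2 + 1 = 31 // 2 + 1 = 16 output positions.
assert (32 + 2 * 1 - 3) // 2 + 1 == 16
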
def _oper_gpu(cls, arg, axis=None, keepdims=False):
    if isinstance(axis, (int, tuple, type(None))):
        # Count the elements reduced away, so the sum can be divided
        # into a mean.
        if isinstance(axis, tuple):
            size = 1
            for r in range(len(arg.shape)):
                if r in axis:
                    size *= arg.shape[r]
        else:
            size = np.size(arg, axis)
        if not keepdims:
            # Reduced axes are removed from the output shape.
            if axis is None:
                newshape = ()
            elif isinstance(axis, tuple):
                newshape = tuple(arg.shape[r] for r in range(len(arg.shape))
                                 if r not in axis)
            else:
                newshape = arg.shape[:axis] + arg.shape[axis + 1:]
        else:
            # Reduced axes are kept with length 1.
            axis_list = list(arg.shape)
            if axis is None:
                newshape = tuple(1 for _ in axis_list)
            elif isinstance(axis, tuple):
                for e in axis:
                    axis_list[e] = 1
                newshape = tuple(axis_list)
            else:
                axis_list[axis] = 1
                newshape = tuple(axis_list)
    ret = GPUValue(shape=newshape)
    cudiv(cusum(get_gpu(arg), axis=axis, keepdims=keepdims), size, ret)
    return ret

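# Sanity check (pure NumPy): the size and shape bookkeeping above mirrors
# np.mean over an int, tuple, or None axis.
import numpy as np
x = np.arange(24.0).reshape(2, 3, 4)
assert np.mean(x, axis=(0, 2)).shape == (3,)
assert np.mean(x, axis=(0, 2), keepdims=True).shape == (1, 3, 1)
assert np.mean(x, axis=None, keepdims=True).shape == (1, 1, 1)
# With axis=(0, 2), size = 2 * 4 = 8 elements are averaged per output entry.
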
def _oper_gpu(cls, lhs, rhs):
    new_shape = (lhs.shape[0], rhs.shape[1])
    ret = GPUValue(shape=new_shape)
    cublas_gemm(get_gpu(lhs), 0, get_gpu(rhs), 0, get_gpu(ret))
    return ret

def _oper_gpu(cls, args, axis):
    newshape = args[0].shape[:axis] + \
        (np.sum([a.shape[axis] for a in args]),) + args[0].shape[axis + 1:]
    ret = GPUValue(shape=newshape)
    cuconcat([get_gpu(a) for a in args], ret, axis)
    return ret

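# Pure-NumPy check of the concatenation shape above: only the joined axis
# changes, becoming the sum of the inputs' lengths on that axis.
import numpy as np
a, b = np.zeros((2, 3, 4)), np.zeros((2, 5, 4))
assert np.concatenate([a, b], axis=1).shape == (2, 8, 4)
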
def _backward_gpu(self, context, dy, **kwargs):
    if isinstance(self.attrs._x, Node):
        ch, h, w = self.attrs._x.shape[1:]
        dx = GPUValue(shape=self.attrs._x.shape)
        cu.curoi_pool2d_backward(get_gpu(dy), self.attrs._index,
                                 self.attrs._rois, self.attrs._spatial_scale,
                                 ch, h, w, self.attrs._outh,
                                 self.attrs._outw, dx)
        self.attrs._x._update_diff(context, dx, **kwargs)

def _oper_gpu(cls, x, drop_out_ratio):
    # One mask value per (sample, channel); it is broadcast over the two
    # spatial dimensions, so whole feature maps are dropped at once.
    shape = (x.shape[0], x.shape[1], 1, 1)
    mask = GPUValue(shape=shape)
    curand_generator().rand_bernoulli(mask, 1 - drop_out_ratio)
    mask = mask / drop_out_ratio
    mask = mask * get_gpu(x).ones_like_me()
    value = get_gpu(x) * get_gpu(mask)
    ret = cls._create_node(value)
    ret.attrs._x = x
    ret.attrs._mask = mask
    return ret

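# A minimal NumPy sketch of channel-wise inverted dropout, mirroring the
# (N, C, 1, 1) mask above. The keep_prob naming is an assumption; rescaling
# by the keep probability leaves the expected activation unchanged.
import numpy as np

def spatial_dropout_sketch(x, keep_prob, rng=np.random):
    mask = (rng.random_sample((x.shape[0], x.shape[1], 1, 1)) < keep_prob)
    return x * mask / keep_prob  # mask broadcasts over the spatial axes

y = spatial_dropout_sketch(np.ones((8, 4, 5, 5)), keep_prob=0.8)
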
def _oper_gpu(cls, x, prev_pool):
    dx = GPUValue(shape=prev_pool.attrs._x.shape)
    with cu.cudnn_handler() as handle:
        cu.cuPoolingBackward(handle, prev_pool.attrs._pool_desc,
                             get_gpu(prev_pool.attrs._x), get_gpu(prev_pool),
                             get_gpu(x), dx)
    ret = cls._create_node(dx)
    ret.attrs._x = x
    ret.attrs._original_x = prev_pool.attrs._x
    ret.attrs._kernel = prev_pool.attrs._kernel
    ret.attrs._stride = prev_pool.attrs._stride
    ret.attrs._padding = prev_pool.attrs._padding
    return ret

def _oper_gpu(cls, x, in_shape, out_shape, kernel, stride, padding):
    N = x.shape[0]
    pool_desc = cu.PoolingDescriptor(kernel, padding, stride, pool_mode=1)
    y = GPUValue(shape=tuple([N] + list(out_shape)))
    with cu.cudnn_handler() as handle:
        cu.cuPoolingForward(handle, pool_desc, get_gpu(x), y)
    ret = cls._create_node(y)
    ret.attrs._pool_desc = pool_desc
    ret.attrs._kernel = kernel
    ret.attrs._stride = stride
    ret.attrs._padding = padding
    ret.attrs._x = x
    return ret

def _oper_gpu(cls, x, kernel, stride, padding):
    pool_desc = cu.PoolingNDescriptor(kernel, padding, stride, pool_mode=1)
    # Same per-axis output-size formula as the N-dimensional convolution.
    output_shape = [x.shape[0], x.shape[1]]
    for i in range(len(x.shape[2:])):
        output_shape.append(
            (x.shape[i + 2] + padding[i] * 2 - kernel[i]) // stride[i] + 1)
    y = GPUValue(shape=tuple(output_shape))
    with cu.cudnn_handler() as handle:
        cu.cuPoolingForward(handle, pool_desc, get_gpu(x), get_gpu(y))
    ret = cls._create_node(y)
    ret.attrs._pool_desc = pool_desc
    ret.attrs._kernel = kernel
    ret.attrs._stride = stride
    ret.attrs._padding = padding
    ret.attrs._x = x
    return ret

def _backward_gpu(self, context, dy, **kwargs):
    n, m = dy.shape
    w = self.attrs._w
    wr = self.attrs._wr
    wc = self.attrs._wc
    b = self.attrs._b
    u = self.attrs._u
    s = self.attrs._state
    if self.attrs._pstate is None:
        ps = get_gpu(s).zeros_like_me()
    else:
        ps = self.attrs._pstate
    # Deltas restored from the next time step (zeros at the last step).
    dot = context.restore(w, get_gpu(dy).zeros_like_me())
    drt = context.restore(wr, get_gpu(u).zeros_like_me())
    pfg = self.attrs.get("_pfgate", get_gpu(u).zeros_like_me())
    dr = get_gpu(drt).empty_like_me()
    dwc = GPUValue(shape=(n, m * 3))    # three peephole weight vectors
    dou = get_gpu(dot).empty_like_me()
    cu.cupeepholelstm_backward(
        *map(get_gpu, (u, ps, s, pfg, wc, dy, drt, dot, dr, dou, dwc)))
    context.store(wr, dr)
    context.store(w, dou)
    if isinstance(self.attrs._x, Node):
        dx = op.dot(dr, w.T)
        self.attrs._x._update_diff(context, dx)
    if isinstance(w, Node):
        w._update_diff(context, op.dot(self.attrs._x.T, dr))
    if isinstance(wr, Node):
        wr._update_diff(context, op.dot(self.T, drt))
    if isinstance(wc, Node):
        wc._update_diff(context, op.sum(dwc, axis=0))
    if isinstance(b, Node):
        b._update_diff(context, op.sum(dr, axis=0))
    if isinstance(self.attrs._pz, Node):
        self.attrs._pz._update_diff(context, op.dot(dr, wr.T))

def _oper_gpu(cls, arg, axis=None, keepdims=False):
    if isinstance(axis, (int, type(None))):
        size = np.size(arg, axis)
    if not keepdims:
        if axis is None:
            newshape = ()
        else:
            newshape = arg.shape[:axis] + arg.shape[axis + 1:]
    else:
        axis_list = list(arg.shape)
        if axis is None:
            newshape = tuple(1 for _ in axis_list)
        else:
            axis_list[axis] = 1
            newshape = tuple(axis_list)
    ret = GPUValue(shape=newshape)
    cudiv(cusum(get_gpu(arg), axis=axis, keepdims=keepdims), size, ret)
    return ret

def _oper_gpu(cls, x, w, b, in_shape, out_shape, kernel, stride, padding, dilation):
    conv_desc = cu.ConvolutionDescriptor(padding, stride, dilation, precision)
    filter_desc = cu.FilterDescriptor(w.shape, precision)
    N = x.shape[0]
    z = GPUValue(shape=tuple([N] + list(out_shape)))
    with cu.cudnn_handler() as handle:
        # Transposed convolution: the forward pass reuses cuDNN's
        # backward-data kernel, feeding x in the gradient slot.
        cu.cuConvolutionBackwardData(handle, conv_desc, filter_desc,
                                     get_gpu(w), get_gpu(x), z)
        if b is not None:
            cu.cu_add_bias(get_gpu(b), z)
    ret = cls._create_node(z)
    ret.attrs._conv_desc = conv_desc
    ret.attrs._filter_desc = filter_desc
    ret.attrs._x = x
    ret.attrs._w = w
    ret.attrs._b = b
    return ret

def _oper_gpu(cls, arg):
    ret = GPUValue(shape=arg.shape)
    cupow(get_gpu(arg), 2, ret)
    return ret

def _oper_gpu(cls, condition, a, b):
    # Pull GPU operands back to host arrays (plain scalars pass through),
    # evaluate the selection on the CPU, then re-upload the result.
    # Note: the original getattr(get_gpu(a), "new_array()", a) always returned
    # the default, since "new_array()" is not a valid attribute name.
    a_cpu = get_gpu(a).new_array() if hasattr(get_gpu(a), "new_array") else a
    b_cpu = get_gpu(b).new_array() if hasattr(get_gpu(b), "new_array") else b
    ret = GPUValue(np.where(condition, a_cpu, b_cpu))
    return ret
