def forward(self, x):
    xp = cuda.get_array_module(x)
    # self.axis is used here, so it must be defined in __init__
    # subtracting the max along axis guards against overflow in exp
    y = x - x.max(axis=self.axis, keepdims=True)
    y = xp.exp(y)
    y /= y.sum(axis=self.axis, keepdims=True)
    return y
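# A minimal standalone NumPy sketch (not part of the class above) of why the
# max is subtracted before exponentiating: softmax is shift-invariant, so the
# subtraction changes nothing mathematically but keeps exp() from overflowing.
import numpy as np

x = np.array([[1000.0, 1001.0, 1002.0]])      # exp(1000) would overflow
shifted = x - x.max(axis=1, keepdims=True)    # largest entry becomes 0
y = np.exp(shifted)
y /= y.sum(axis=1, keepdims=True)
print(y)  # [[0.09003057 0.24472847 0.66524096]] -- finite, no overflow
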
def forward(self, gy):
    xp = cuda.get_array_module(gy)
    gx = xp.zeros(self.in_shape, dtype=gy.dtype)
    if xp is np:
        # unbuffered in-place add: repeated indices accumulate correctly
        np.add.at(gx, self.slices, gy)
    else:
        # CuPy equivalent of np.add.at
        xp.scatter_add(gx, self.slices, gy)
    return gx
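# A small standalone sketch (plain NumPy) of why add.at is used instead of
# plain fancy-index assignment gx[self.slices] = gy: when the same index was
# selected more than once in the forward pass, its gradients must accumulate,
# and fancy-index assignment would silently keep only one contribution.
import numpy as np

gx = np.zeros(4)
idx = np.array([0, 2, 0])          # index 0 picked twice in forward
gy = np.array([1.0, 1.0, 1.0])
np.add.at(gx, idx, gy)
print(gx)  # [2. 0. 1. 0.] -- both gradients for index 0 are summed
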
def logsumexp(x, axis=1):
    xp = cuda.get_array_module(x)
    m = x.max(axis=axis, keepdims=True)
    y = x - m
    xp.exp(y, out=y)
    s = y.sum(axis=axis, keepdims=True)
    xp.log(s, out=s)
    m += s
    return m
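# A quick standalone check (plain NumPy, independent of the function above)
# that the max-shift trick matters: the naive log(sum(exp(x))) overflows,
# while the shifted form m + log(sum(exp(x - m))) stays finite.
import numpy as np

x = np.array([[1000.0, 1000.0]])
naive = np.log(np.exp(x).sum(axis=1))    # exp(1000) -> inf -> log(inf) = inf
m = x.max(axis=1, keepdims=True)
stable = m + np.log(np.exp(x - m).sum(axis=1, keepdims=True))
print(naive)   # [inf] (with an overflow RuntimeWarning)
print(stable)  # [[1000.69314718]] = 1000 + log(2)
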
def backward(self, gy):
    x, t = self.inputs
    N, CLS_NUM = x.shape
    gy *= 1 / N
    y = softmax(x)
    # convert to one-hot
    xp = cuda.get_array_module(t.data)
    t_onehot = xp.eye(CLS_NUM, dtype=t.dtype)[t.data]
    y = (y - t_onehot) * gy
    return y
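# A hedged standalone sketch (plain NumPy, hypothetical values) verifying the
# gradient used above: for the mean cross-entropy loss
# L = -(1/N) * sum(log softmax(x)[t]), the gradient is
# dL/dx = (softmax(x) - onehot(t)) / N. Compared against a finite difference.
import numpy as np

def loss(x, t):
    y = np.exp(x - x.max(axis=1, keepdims=True))
    y /= y.sum(axis=1, keepdims=True)
    return -np.log(y[np.arange(len(t)), t]).mean()

x = np.array([[0.3, -1.2, 0.8]]); t = np.array([2]); eps = 1e-6
e0 = np.array([[eps, 0.0, 0.0]])
num = (loss(x + e0, t) - loss(x - e0, t)) / (2 * eps)
y = np.exp(x - x.max(axis=1, keepdims=True)); y /= y.sum(axis=1, keepdims=True)
ana = (y - np.eye(3)[t]) / len(t)
print(num, ana[0, 0])  # numeric and analytic gradients agree
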
def forward(self, x):
    xp = cuda.get_array_module(x)
    y = xp.sin(x)
    return y
def forward(self, x):
    xp = cuda.get_array_module(x)
    y = xp.exp(x)
    return y
def forward(self, x):
    xp = cuda.get_array_module(x)
    y = xp.clip(x, self.x_min, self.x_max)
    return y
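# For context, a standalone sketch (an assumption about the matching backward
# pass, not shown in this excerpt) of how clip's gradient is typically formed:
# gradients flow only where the input lay inside [x_min, x_max], and are
# zeroed where the value was clipped.
import numpy as np

x = np.array([-2.0, 0.5, 3.0]); x_min, x_max = -1.0, 1.0
gy = np.ones_like(x)
mask = (x >= x_min) * (x <= x_max)
gx = gy * mask
print(gx)  # [0. 1. 0.] -- only the unclipped element passes gradient
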
def forward(self, x):
    xp = cuda.get_array_module(x)
    # numerically stable sigmoid via the identity sigmoid(x) = 0.5 * tanh(0.5 * x) + 0.5
    y = xp.tanh(x * 0.5) * 0.5 + 0.5
    return y
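# A small standalone check (plain NumPy) of the identity behind that comment:
# sigmoid(x) = 0.5 * tanh(0.5 * x) + 0.5. The tanh form never evaluates exp()
# on a huge positive argument, so it avoids the overflow warning the naive
# 1 / (1 + exp(-x)) triggers for very negative x.
import numpy as np

x = np.array([-1000.0, 0.0, 1000.0])
naive = 1.0 / (1.0 + np.exp(-x))        # exp(1000) overflows for x = -1000
stable = np.tanh(x * 0.5) * 0.5 + 0.5   # stays finite everywhere, no warning
print(stable)  # [0.  0.5 1. ]
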
def forward(self, x):
    self.x_shape = x.shape  # shape of the original input x
    xp = cuda.get_array_module(x)
    y = xp.broadcast_to(x, self.shape)
    return y
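# A standalone sketch (assumption: the matching backward reduces gy back to
# the saved self.x_shape) of why the original shape is recorded: the gradient
# of broadcasting is a sum over the broadcast axes.
import numpy as np

x = np.array([[1.0, 2.0]])                 # shape (1, 2)
y = np.broadcast_to(x, (3, 2))             # forward: repeat rows
gy = np.ones((3, 2))
gx = gy.sum(axis=0, keepdims=True)         # backward: sum over broadcast axis
print(gx)  # [[3. 3.]] -- same shape as the original x
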
def forward(self, x):
    xp = cuda.get_array_module(x)
    y = xp.transpose(x)
    return y