def dropout(x, dropout_ratio=0.5):
    x = as_variable(x)

    if dezero.Config.train:
        xp = cuda.get_array_module(x)
        mask = xp.random.rand(*x.shape) > dropout_ratio
        scale = xp.array(1.0 - dropout_ratio).astype(x.dtype)
        y = x * mask / scale
        return y
    else:
        return x
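# A minimal usage sketch for dropout, assuming the dezero package and its
# test_mode() helper (which temporarily sets Config.train to False). With
# inverted dropout, survivors are scaled by 1 / (1 - ratio) at train time,
# so the test-time pass is a plain identity.
import numpy as np
import dezero
import dezero.functions as F

x = np.ones(5, dtype=np.float32)
print(F.dropout(x))      # train mode: e.g. variable([2. 0. 2. 2. 0.])

with dezero.test_mode():
    print(F.dropout(x))  # test mode: variable([1. 1. 1. 1. 1.])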
def backward(self, gy):
    x, t = self.inputs
    N, CLS_NUM = x.shape

    gy *= 1 / N
    y = softmax(x)
    # Convert to one-hot
    xp = cuda.get_array_module(t.data)
    t_onehot = xp.eye(CLS_NUM, dtype=t.dtype)[t.data]
    y = (y - t_onehot) * gy
    return y
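# A NumPy-only sanity check of the backward above: for the loss
# L = -(1/N) * sum_i log softmax(x)[i, t_i], the gradient with respect to x
# is (softmax(x) - onehot(t)) / N. Central differences confirm the formula.
import numpy as np

def softmax_np(x):
    e = np.exp(x - x.max(axis=1, keepdims=True))
    return e / e.sum(axis=1, keepdims=True)

def loss(x, t):
    p = softmax_np(x)
    return -np.log(p[np.arange(len(t)), t]).mean()

rng = np.random.default_rng(0)
x = rng.normal(size=(4, 3))
t = np.array([0, 2, 1, 2])

analytic = (softmax_np(x) - np.eye(3)[t]) / len(t)

eps = 1e-6
numeric = np.zeros_like(x)
for idx in np.ndindex(*x.shape):
    x[idx] += eps
    hi = loss(x, t)
    x[idx] -= 2 * eps
    lo = loss(x, t)
    x[idx] += eps
    numeric[idx] = (hi - lo) / (2 * eps)

assert np.allclose(analytic, numeric, atol=1e-5)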
def pooling_simple(x, kernel_size, stride=1, pad=0):
    x = as_variable(x)

    N, C, H, W = x.shape
    KH, KW = pair(kernel_size)
    PH, PW = pair(pad)
    SH, SW = pair(stride)
    OH = get_conv_outsize(H, KH, SH, PH)
    OW = get_conv_outsize(W, KW, SW, PW)

    col = im2col(x, kernel_size, stride, pad, to_matrix=True)
    col = col.reshape(-1, KH * KW)
    y = col.max(axis=1)
    y = y.reshape(N, OH, OW, C).transpose(0, 3, 1, 2)
    return y
def pooling_simple(x, kernel_size, stride=1, pad=0):
    x = as_variable(x)

    n, c, h, w = x.shape
    kh, kw = _pair(kernel_size)
    ph, pw = _pair(pad)
    sh, sw = _pair(stride)
    out_h = (h + ph * 2 - kh) // sh + 1
    out_w = (w + pw * 2 - kw) // sw + 1

    col = im2col(x, kernel_size, stride, pad)
    col = col.transpose((0, 4, 5, 1, 2, 3)).reshape((-1, kh * kw))
    y = col.max(axis=1)
    y = y.reshape((n, out_h, out_w, c))
    y = y.transpose((0, 3, 1, 2))
    return y
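# A small usage sketch, assuming dezero.functions exports pooling_simple:
# 2x2 max pooling with stride 2 keeps the maximum of each block and halves
# both spatial dimensions.
import numpy as np
import dezero.functions as F

x = np.arange(16, dtype=np.float32).reshape(1, 1, 4, 4)
y = F.pooling_simple(x, kernel_size=2, stride=2)
print(y.shape)  # (1, 1, 2, 2)
print(y.data)   # [[[[ 5.  7.]
                #    [13. 15.]]]]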
def sigmoid_simple(x):
    x = as_variable(x)
    y = 1 / (1 + exp(-x))
    return y
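# Quick numeric check, assuming dezero.functions exports sigmoid:
# sigmoid(0) = 0.5, and outputs stay strictly inside (0, 1).
import numpy as np
import dezero.functions as F

y = F.sigmoid(np.array([-1.0, 0.0, 1.0]))
print(y.data)  # approx [0.2689 0.5 0.7311]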
def broadcast_to(x, shape):
    if x.shape == shape:
        return as_variable(x)
    return BroadcastTo(shape)(x)

def sum_to(x, shape):
    if x.shape == shape:
        return as_variable(x)
    return SumTo(shape)(x)

def reshape(x, shape):
    if x.shape == shape:
        return as_variable(x)
    return Reshape(shape)(x)
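# broadcast_to and sum_to are each other's backward: broadcasting copies an
# array out to a larger shape, and summing back to the original shape
# accumulates the gradient from every copy. A NumPy-only illustration
# (sum_to_np mirrors the reduction that SumTo performs):
import numpy as np

def sum_to_np(x, shape):
    ndim = len(shape)
    lead = x.ndim - ndim
    lead_axes = tuple(range(lead))
    axes = tuple(i + lead for i, s in enumerate(shape) if s == 1)
    y = x.sum(lead_axes + axes, keepdims=True)
    if lead > 0:
        y = y.squeeze(lead_axes)
    return y

x = np.array([[1.0, 2.0, 3.0]])
big = np.broadcast_to(x, (4, 3))  # (1, 3) -> (4, 3)
back = sum_to_np(big, x.shape)    # (4, 3) -> (1, 3): [[4. 8. 12.]]
print(big.shape, back)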
def average(x, axis=None, keepdims=False):
    x = as_variable(x)
    y = sum(x, axis, keepdims)
    return y * (y.data.size / x.data.size)
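# Sanity check for the size-ratio trick above: y.data.size / x.data.size is
# exactly 1 / (number of elements averaged over), so the result matches
# np.mean. Assumes dezero.functions exports average.
import numpy as np
import dezero.functions as F

x = np.arange(6, dtype=np.float32).reshape(2, 3)
print(F.average(x, axis=0).data)  # [1.5 2.5 3.5] == x.mean(axis=0)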
def expand_dims(x, axis):
    x = as_variable(x)
    shape = list(x.shape)
    shape.insert(axis, 1)
    return reshape(x, tuple(shape))
def forward(self, x):
    xp = cuda.get_array_module(x)
    y = 1 / (1 + xp.exp(-x))
    return y
def accuracy(y, t):
    y, t = as_variable(y), as_variable(t)

    pred = y.data.argmax(axis=1).reshape(t.shape)
    result = (pred == t.data)
    acc = result.mean()
    return Variable(as_array(acc))
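# Usage sketch, assuming dezero.functions exports accuracy. The argmax of
# each row is compared against the integer label; the result is built from
# raw .data, so no computational graph is created and accuracy is not
# differentiable.
import numpy as np
import dezero.functions as F

y = np.array([[0.2, 0.8, 0.0],
              [0.1, 0.3, 0.6],
              [0.9, 0.1, 0.0]])
t = np.array([1, 2, 1])
print(F.accuracy(y, t))  # variable(0.6666...) -- two of three rows correct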
def test_forward(self):
    a = as_variable(np.array(6.0))
    b = as_variable(np.array(4.0))
    c = div(a, b)
    self.assertEqual(c.data, np.array(1.5))
def forward(self, x):
    xp = cuda.get_array_module(x)
    y = xp.clip(x, self.x_min, self.x_max)
    return y
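# Usage sketch for clip, assuming dezero.functions exports it. In the
# backward pass, gradients flow only where the input lies inside
# [x_min, x_max]; clipped positions get zero gradient.
import numpy as np
from dezero import Variable
import dezero.functions as F

x = Variable(np.array([-2.0, 0.5, 3.0]))
y = F.clip(x, 0.0, 1.0)
y.backward()
print(y.data)   # [0.  0.5 1. ]
print(x.grad)   # zero gradient at the clipped ends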
def forward(self, x):
    xp = cuda.get_array_module(x)
    y = x - x.max(axis=self.axis, keepdims=True)
    y = xp.exp(y)
    y /= y.sum(axis=self.axis, keepdims=True)
    return y
def softmax_simple(x, axis=1):
    x = as_variable(x)
    y = exp(x)
    sum_y = sum(y, axis=axis, keepdims=True)
    return y / sum_y
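# softmax_simple exponentiates x directly, while Softmax.forward above first
# subtracts the row maximum. Softmax is shift-invariant, so both agree
# mathematically, but the naive version overflows for large inputs.
# NumPy-only illustration:
import numpy as np

x = np.array([[1000.0, 1001.0, 1002.0]])
with np.errstate(over='ignore', invalid='ignore'):
    naive = np.exp(x) / np.exp(x).sum(axis=1, keepdims=True)
shifted = np.exp(x - x.max(axis=1, keepdims=True))
stable = shifted / shifted.sum(axis=1, keepdims=True)
print(naive)   # [[nan nan nan]]
print(stable)  # [[0.0900 0.2447 0.6652]]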
def mean_squared_error_simple(x0, x1):
    x0, x1 = as_variable(x0), as_variable(x1)
    diff = x0 - x1
    y = sum(diff ** 2) / len(diff)
    return y

def mean_squared_error_simple(x0, x1):
    x0, x1 = as_variable(x0), as_variable(x1)
    diff = x0 - x1
    return sum(diff ** 2) / diff.size
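# The two variants above differ only in the normalizer: len(diff) divides by
# the batch size (the first dimension), while diff.size divides by the total
# element count. A quick NumPy check of the difference:
import numpy as np

x0 = np.zeros((2, 2))
x1 = np.ones((2, 2))
sq = ((x0 - x1) ** 2).sum()
print(sq / len(x0), sq / x0.size)  # 2.0 (per sample) vs 1.0 (per element)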