def predict(x):
    y = F.linear(x, W1, b1)
    y = F.sigmoid(y)
    y = F.linear(y, W2, b2)
    # y = F.matmul(x, W1) + b1
    # y = F.sigmoid_simple(y)
    # y = F.matmul(y, W2) + b2
    return y
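The W1, b1, W2, b2 referenced by predict are module-level parameters created elsewhere in the script. A minimal training-loop sketch of how they might be set up and updated, assuming a DeZero-style framework (dezero.Variable, dezero.functions as F); the layer sizes, learning rate, and toy data are illustrative:

import numpy as np
from dezero import Variable
import dezero.functions as F

np.random.seed(0)
x = Variable(np.random.rand(100, 1))                                 # toy inputs
t = Variable(np.sin(2 * np.pi * x.data) + np.random.rand(100, 1))    # noisy targets

I, H, O = 1, 10, 1                                                   # illustrative sizes
W1 = Variable(0.01 * np.random.randn(I, H))
b1 = Variable(np.zeros(H))
W2 = Variable(0.01 * np.random.randn(H, O))
b2 = Variable(np.zeros(O))

def predict(x):
    # same two-layer network as the snippet above
    y = F.linear(x, W1, b1)
    y = F.sigmoid(y)
    y = F.linear(y, W2, b2)
    return y

lr, iters = 0.2, 10000
for i in range(iters):
    y_pred = predict(x)
    loss = F.mean_squared_error(t, y_pred)
    for p in (W1, b1, W2, b2):
        p.cleargrad()
    loss.backward()
    for p in (W1, b1, W2, b2):
        # plain SGD update; p.grad is itself a Variable in recent DeZero versions
        p.data -= lr * p.grad.data
print(loss)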
def forward(self, x):
    # Initialize the weights at the moment data first flows through
    if self.W.data is None:
        self.in_size = x.shape[1]
        self._init_W()
    y = F.linear(x, self.W, self.b)
    return y
def forward(self, x):
    if self.W.data is None:
        self.in_size = x.shape[1]
        xp = cuda.get_array_module(x)
        self._init_W(xp)
    y = F.linear(x, self.W, self.b)
    return y
def forward(self, *xs: np.ndarray):
    x, = xs
    if self.W.data is None:
        self.in_size = x.shape[1]
        self._init_W()
    y = F.linear(x, self.W, self.b)
    return y,
def forward(self, x):
    if self.W.data is None:
        self.in_size = x.shape[1]
        self._init_W()
    y = F.linear(x, self.W, self.b)
    return y
def test_forward2(self):
    x = np.array([[1, 2, 3], [4, 5, 6]]).astype('f')
    W = x.T
    b = None
    y = F.linear(x, W, b)
    cy = chainer.functions.linear(x, W.T)
    self.assertTrue(array_allclose(y.data, cy.data))
def test_backward_WO_bias(self):
    x = Variable(np.array([[1, 2, 3], [6, 7, 8]]))
    W = Variable(np.arange(1, 7).reshape(3, 2))
    y = linear(x, W)
    y.backward()
    assert_equal(x.grad.data, np.ones((2, 2)) @ W.data.T)
    assert_equal(W.grad.data, x.data.T @ np.ones((2, 2)))
    self.assertIsNone(y.creator.inputs[2].grad)
def forward(self, x):
    # Initialize the weights when data flows through
    if self.W.data is None:
        self.in_size = x.shape[1]
        self._init_W()
    y = F.linear(x, self.W, self.b)
    return y
def forward(self, x):
    # initialize weights when data is injected
    if self.W.data is None:
        self.in_size = x.shape[1]
        xp = cuda.get_array_module(x)
        self._init_W(xp)
    y = F.linear(x, self.W, self.b)
    return y
def test_backward_W_bias(self):
    x = Variable(np.array([[1, 2, 3], [6, 7, 8]]))
    W = Variable(np.arange(1, 7).reshape(3, 2))
    b = Variable(np.array(5))
    y = linear(x, W, b)
    y.backward()
    assert_equal(x.grad.data, np.ones((2, 2)) @ W.data.T)
    assert_equal(W.grad.data, x.data.T @ np.ones((2, 2)))
    assert_equal(b.grad.data, 4)
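The expected gradients in these two backward tests follow directly from y = xW + b with an all-ones upstream gradient (the seed backward() uses for a non-scalar output): gx = gy W^T, gW = x^T gy, and gb is gy summed down to b's shape, which for the scalar bias here is 1 + 1 + 1 + 1 = 4. A plain-NumPy sketch of that backward rule (hypothetical helper, not the framework's Function class):

import numpy as np

def linear_backward(x, W, gy, b_shape=None):
    """Gradients of y = x @ W + b given the upstream gradient gy (NumPy sketch)."""
    gx = gy @ W.T                       # dL/dx, same shape as x
    gW = x.T @ gy                       # dL/dW, same shape as W
    if b_shape is None:
        gb = None                       # no bias term
    elif b_shape == ():
        gb = gy.sum()                   # scalar bias: sum over every element of gy
    else:
        gb = gy.sum(axis=0)             # vector bias: sum over the batch axis
    return gx, gW, gb

x = np.array([[1, 2, 3], [6, 7, 8]])
W = np.arange(1, 7).reshape(3, 2)
gy = np.ones((2, 2))                    # backward() seeds an all-ones upstream gradient
gx, gW, gb = linear_backward(x, W, gy, b_shape=())
print(np.array_equal(gx, np.ones((2, 2)) @ W.T))   # True, matches the test above
print(np.array_equal(gW, x.T @ np.ones((2, 2))))   # True
print(gb)                                          # 4.0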
def test_forward1(self):
    x = Variable(np.array([[1, 2, 3], [4, 5, 6]]))
    w = Variable(x.data.T)
    b = None
    y = F.linear(x, w, b)
    res = y.data
    expected = np.array([[14, 32], [32, 77]])
    self.assertTrue(np.array_equal(res, expected))
def test_forward3(self):
    layer = chainer.links.Linear(3, 2)
    x = np.array([[1, 2, 3], [4, 5, 6]]).astype('f')
    W = layer.W.data.T
    b = layer.b.data
    y = F.linear(x, W, b)
    cy = layer(x)
    self.assertTrue(array_allclose(y.data, cy.data))
def __call__(self, x):
    if self.W.data is None:
        self.in_size = x.shape[1]
        xp = cuda.get_array_module(x)
        I, O = self.in_size, self.out_size
        W_data = xp.random.randn(I, O).astype(np.float32) * np.sqrt(1 / I)
        self.W.data = W_data
    y = F.linear(x, self.W, self.b)
    return y
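This layer creates W lazily: only out_size is fixed at construction, in_size is read from the first batch that arrives, and the initial weights are scaled by sqrt(1/I). A short usage sketch, assuming a DeZero-style dezero.layers.Linear with this lazy behaviour (shapes illustrative):

import numpy as np
from dezero import Variable
import dezero.layers as L

layer = L.Linear(100)                          # only out_size is given up front
x = Variable(np.random.rand(5, 32).astype(np.float32))
y = layer(x)                                   # first call: W is created with shape (32, 100)
print(layer.W.shape, y.shape)                  # (32, 100) (5, 100)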
def conv2d_simple(x, W, b=None, stride=1, pad=0):
    x, W = as_variable(x), as_variable(W)
    Weight = W                      # keep a handle to the kernel; W is reused for the image width below
    N, C, H, W = x.shape
    OC, C, KH, KW = Weight.shape
    SH, SW = pair(stride)
    PH, PW = pair(pad)
    OH = get_conv_outsize(H, KH, SH, PH)
    OW = get_conv_outsize(W, KW, SW, PW)
    col = im2col(x, (KH, KW), stride, pad, to_matrix=True)
    Weight = Weight.reshape(OC, -1).transpose()
    t = linear(col, Weight, b)
    y = t.reshape(N, OH, OW, OC).transpose(0, 3, 1, 2)
    return y
def conv2d_simple(x, W, b=None, stride=1, pad=0):
    x, W = as_variable(x), as_variable(W)
    n, c, h, w = x.shape
    out_c, c, kh, kw = W.shape
    sh, sw = _pair(stride)
    ph, pw = _pair(pad)
    out_h = utils.get_conv_outsize(h, kh, sh, ph)
    out_w = utils.get_conv_outsize(w, kw, sw, pw)
    col = im2col(x, (kh, kw), stride, pad)
    col = col.transpose((0, 4, 5, 1, 2, 3)).reshape((n * out_h * out_w, -1))
    W = W.reshape((out_c, -1)).transpose()
    t = linear(col, W, b)
    y = t.reshape((n, out_h, out_w, -1)).transpose((0, 3, 1, 2))
    return y
def conv2d_simple(x, K: Variable, b: Optional[Variable] = None, stride: int = 1, pad: int = 0):
    x = as_variable(x)
    N, C, H, W = x.shape
    OC, C, KH, KW = K.shape
    SH, SW = pair(stride)
    PH, PW = pair(pad)
    OH = get_conv_outsize(H, KH, SH, PH)
    OW = get_conv_outsize(W, KW, SW, PW)
    col = im2col(x, (KH, KW), stride, pad, to_matrix=True)
    K = K.reshape((OC, -1)).transpose()
    t = F.linear(col, K, b)
    y = t.reshape((N, OH, OW, OC)).transpose((0, 3, 1, 2))
    return y
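All three conv2d_simple variants reduce convolution to F.linear: im2col unrolls every receptive field into one row of col, the kernel is flattened into a (C*KH*KW, OC) matrix, and the matrix product is reshaped back into an image. A usage sketch, assuming one of the conv2d_simple definitions above is in scope and a DeZero-style Variable (shapes illustrative):

import numpy as np
from dezero import Variable

x = Variable(np.random.randn(1, 3, 7, 7).astype(np.float32))   # N, C, H, W
K = Variable(np.random.randn(5, 3, 3, 3).astype(np.float32))   # OC, C, KH, KW
b = Variable(np.zeros(5, dtype=np.float32))

y = conv2d_simple(x, K, b, stride=1, pad=1)
print(y.shape)   # (1, 5, 7, 7): OH = OW = (7 + 2*1 - 3) // 1 + 1 = 7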
def __call__(self, x):
    y = F.linear(x, self.W, self.b)
    return y
def predict(x):
    x2 = F.sigmoid(F.linear(x, W1, b1))
    x2 = F.linear(x2, W2, b2)
    return x2
def predict(x):
    y = F.linear(x, W1, b1)
    y = F.sigmoid(y)
    y = F.linear(y, W2, b2)
    return y
def forward(self, x):
    y = F.linear(x, self.W1, self.b1)
    y = F.sigmoid_simple(y)
    y = F.linear(y, self.W2, self.b2)
    return y
def test_forward_WO_bias(self):
    x = Variable(np.array([[1, 2, 3], [6, 7, 8]]))
    W = Variable(np.arange(1, 7).reshape(3, 2))
    y = linear(x, W)
    assert_equal(np.array([[22, 28], [67, 88]]), y.data)
def test_forward_W_bias(self):
    x = Variable(np.array([[1, 2, 3], [6, 7, 8]]))
    W = Variable(np.arange(1, 7).reshape(3, 2))
    b = Variable(np.array(5))
    y = linear(x, W, b)
    assert_equal(np.array([[27, 33], [72, 93]]), y.data)
def __call__(self, x):
    if self.W.data is None:
        self._init_W(x)
    y = F.linear(x, self.W, self.b)
    return y
def predict(x):
    y = F.linear_simple(x, W1, b1)
    y = F.sigmoid_simple(y)
    y = F.linear(y, W2, b2)
    return y
def test_backward2(self):
    x = np.random.randn(100, 200)
    W = np.random.randn(200, 300)
    b = None
    f = lambda x: F.linear(x, W, b)
    self.assertTrue(check_backward(f, x))
def test_backward1(self):
    x = np.random.randn(3, 2)
    W = np.random.randn(2, 3)
    b = np.random.randn(3)
    f = lambda x: F.linear(x, W, b)
    self.assertTrue(check_backward(f, x))
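check_backward in these two tests compares the analytic gradient produced by backward() against a numerical estimate. A self-contained sketch of that idea using central differences in plain NumPy (hypothetical helper; the framework's own checker also handles Variables and tolerances):

import numpy as np

def numerical_grad(f, x, eps=1e-4):
    """Central-difference gradient of a scalar-valued f at x (NumPy sketch)."""
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        idx = it.multi_index
        orig = x[idx]
        x[idx] = orig + eps
        y1 = f(x)
        x[idx] = orig - eps
        y2 = f(x)
        x[idx] = orig                   # restore the perturbed entry
        grad[idx] = (y1 - y2) / (2 * eps)
        it.iternext()
    return grad

# Check d/dx of sum(x @ W + b) against the analytic gradient ones @ W.T.
x = np.random.randn(3, 2)
W = np.random.randn(2, 3)
b = np.random.randn(3)
f = lambda x: (x @ W + b).sum()
print(np.allclose(numerical_grad(f, x), np.ones((3, 3)) @ W.T, atol=1e-3))   # True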