def __call__(self, x):
    h = F.crelu(self.qlin0(x))
    h = F.crelu(self.qlin1(h))
    qmu = self.qlin_mu(h)
    qln_var = self.qlin_ln_var(h)
    return qmu, qln_var
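Note that CReLU doubles the feature dimension: F.crelu concatenates relu(x) and relu(-x) along axis 1, so every Linear link that follows a CReLU must accept twice the preceding layer's output width. A minimal constructor sketch under that constraint (the class name and layer sizes here are illustrative assumptions, not taken from the snippet above):

import chainer
import chainer.links as L

class Encoder(chainer.Chain):
    # Hypothetical sizes: n_in -> n_hidden, with CReLU doubling each width.
    def __init__(self, n_in=784, n_hidden=500, n_latent=20):
        super(Encoder, self).__init__(
            qlin0=L.Linear(n_in, n_hidden),
            # crelu(qlin0(x)) has 2 * n_hidden features
            qlin1=L.Linear(2 * n_hidden, n_hidden),
            qlin_mu=L.Linear(2 * n_hidden, n_latent),
            qlin_ln_var=L.Linear(2 * n_hidden, n_latent),
        )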
def decode(self, z):
    a = self.a_enc
    # If this call comes from sampling, the batch sizes of z and a won't
    # match; slice a down to the batch size of z. (volatile is a legacy
    # Chainer v1 flag that disables backprop through a.)
    if a.shape[0] != z.shape[0]:
        a.volatile = 'ON'
        batch_size = z.shape[0]
        a.data = a.data[0:batch_size, :]
    net_input = F.concat((z, a), axis=1)
    h = self.plinx0(net_input)
    h = self.plinx_batch_norm_0(h)
    h = F.crelu(h)
    for i in range(self.num_layers - 1):
        layer_name = 'plinx' + str(i + 1)
        h = self[layer_name](h)
        layer_name = 'plinx_batch_norm_' + str(i + 1)
        h = self[layer_name](h)
        h = F.crelu(h)
    self.p_ber_prob_logit = self.plinx_ber_prob(h)
    return self.p_ber_prob_logit
def decode(self, z):
    h = F.crelu(self.plin0(z))
    for i in range(self.num_layers - 1):
        layer_name = 'plin' + str(i + 1)
        h = F.crelu(self[layer_name](h))
    self.p_ber_prob_logit = self.plin_ber_prob(h)
def encode(self, x):
    h = F.crelu(self.qlin0(x))
    for i in range(self.num_layers - 1):
        layer_name = 'qlin' + str(i + 1)
        h = F.crelu(self[layer_name](h))
    self.qmu = self.qlin_mu(h)
    self.qln_var = self.qlin_ln_var(h)
def decode(self, z):
    h = F.crelu(self.plin0(z))
    for i in range(self.num_layers - 1):
        layer_name = 'plin' + str(i + 1)
        h = F.crelu(self[layer_name](h))
    self.pmu = self.plin_mu(h)
    self.pln_var = self.plin_ln_var(h)
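For a decoder like this that emits a Gaussian mean and log-variance, the reconstruction term of the variational bound can be computed with Chainer's built-in F.gaussian_nll. A usage sketch (model, x, z, and batch_size are assumed names, not from the snippet above):

import chainer.functions as F

# Hypothetical usage: decode, then score x under the Gaussian p(x | z).
model.decode(z)
rec_loss = F.gaussian_nll(x, model.pmu, model.pln_var) / batch_size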
def encode_a(self, x):
    a_params = F.crelu(self.qlina0(x))
    for i in range(self.num_layers - 1):
        layer_name = 'qlina' + str(i + 1)
        a_params = F.crelu(self[layer_name](a_params))
    self.qmu_a = self.qlina_mu(a_params)
    self.qln_var_a = self.qlina_ln_var(a_params)
    return self.qmu_a, self.qln_var_a
def encode_z(self, x, a):
    # The auxiliary sample a = F.gaussian(self.qmu_a, self.qln_var_a) is
    # drawn outside this function and passed in as an argument.
    net_input = F.concat((x, a), axis=1)
    h = F.crelu(self.qlinz0(net_input))
    for i in range(self.num_layers - 1):
        layer_name = 'qlinz' + str(i + 1)
        h = F.crelu(self[layer_name](h))
    self.qmu_z = self.qlinz_mu(h)
    self.qln_var_z = self.qlinz_ln_var(h)
    return self.qmu_z, self.qln_var_z
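As the comment notes, the auxiliary sample is drawn between the two encoders. A sketch of the intended call order (model is an assumed name; F.gaussian takes a mean and a log-variance and performs the reparameterized draw):

import chainer.functions as F

qmu_a, qln_var_a = model.encode_a(x)     # q(a | x)
a = F.gaussian(qmu_a, qln_var_a)         # reparameterized sample of a
qmu_z, qln_var_z = model.encode_z(x, a)  # q(z | x, a)
z = F.gaussian(qmu_z, qln_var_z)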
def decode_a(self, z, x):
    net_input = F.concat((x, z), axis=1)
    h = F.crelu(self.plina0(net_input))
    for i in range(self.num_layers - 1):
        layer_name = 'plina' + str(i + 1)
        h = F.crelu(self[layer_name](h))
    self.pmu_a = self.plina_mu(h)
    self.pln_var_a = self.plina_ln_var(h)
    return self.pmu_a, self.pln_var_a
def decode(self, z):
    h = self.plinx0(z)
    h = self.plinx_batch_norm_0(h)
    h = F.crelu(h)
    for i in range(self.num_layers - 1):
        layer_name = 'plinx' + str(i + 1)
        h = self[layer_name](h)
        layer_name = 'plinx_batch_norm_' + str(i + 1)
        h = self[layer_name](h)
        h = F.crelu(h)
    self.p_ber_prob_logit = self.plinx_ber_prob(h)
    return self.p_ber_prob_logit
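Since this decoder returns logits rather than probabilities, the reconstruction loss can use F.bernoulli_nll directly, which applies the sigmoid internally. A sketch assuming model, x, z, and batch_size exist:

import chainer.functions as F

logit = model.decode(z)
# bernoulli_nll interprets its second argument as a logit.
rec_loss = F.bernoulli_nll(x, logit) / batch_size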
def iaf(self, z, h, lin1, lin2):
    # One inverse autoregressive flow step: predict (m, s) from (z, h),
    # gate with a sigmoid, and apply an elementwise affine update.
    ms = F.crelu(lin1(F.concat((z, h), axis=1)))
    ms = lin2(ms)
    m, s = F.split_axis(ms, 2, axis=1)
    s = F.sigmoid(s)
    z = s * z + (1 - s) * m
    # The Jacobian is diagonal with entries s, so the log-density of z
    # changes by -sum(log s) per sample.
    return z, -F.sum(F.log(s), axis=1)
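Each step applies the gated update z' = s * z + (1 - s) * m, whose Jacobian is diagonal with entries s, so log q changes by -sum(log s); the returned correction is accumulated across steps. A hypothetical chaining sketch (iaf_pairs, model, h, qmu, and qln_var are assumed names):

import chainer.functions as F

z = F.gaussian(qmu, qln_var)        # z_0 ~ q(z | x), reparameterized
logdet = 0
for lin1, lin2 in iaf_pairs:        # assumed list of (Linear, Linear) pairs
    z, delta = model.iaf(z, h, lin1, lin2)
    logdet += delta                 # per-sample -sum(log s) corrections
# log q(z_T | x) = log q(z_0 | x) + logdet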
def encode(self, x):
    h = self.qlin0(x)
    h = self.qlin_batch_norm_0(h)
    h = F.crelu(h)
    for i in range(self.num_layers - 1):
        layer_name = 'qlin' + str(i + 1)
        h = self[layer_name](h)
        layer_name = 'qlin_batch_norm_' + str(i + 1)
        h = self[layer_name](h)
        h = F.crelu(h)
    self.qmu = self.qlin_mu(h)
    self.qln_var = self.qlin_ln_var(h)
    self.qh_vec_0 = self.qlin_h_vec_0(h)
    return self.qmu, self.qln_var, self.qh_vec_0
def decode_a(self, z):
    h = self.plina0(z)
    h = self.plina_batch_norm_0(h)
    h = F.crelu(h)
    for i in range(self.num_layers - 1):
        layer_name = 'plina' + str(i + 1)
        h = self[layer_name](h)
        layer_name = 'plina_batch_norm_' + str(i + 1)
        h = self[layer_name](h)
        h = F.crelu(h)
    self.pmu_a = self.plina_mu(h)
    self.pln_var_a = self.plina_ln_var(h)
    return self.pmu_a, self.pln_var_a
def check_forward(self, x_data):
    x = chainer.Variable(x_data)
    y = functions.crelu(x, axis=self.axis)
    self.assertEqual(y.data.dtype, self.dtype)
    self.assertEqual(y.data.shape, self.y_shape)
    expected_former = numpy.maximum(self.x, 0)
    expected_latter = numpy.maximum(-self.x, 0)
    expected = numpy.concatenate(
        (expected_former, expected_latter), axis=self.axis)
    testing.assert_allclose(expected, y.data)
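For reference, the behavior these tests verify: CReLU stacks the positive and negative parts of the input, so a tensor of shape (N, C) becomes (N, 2C) and every output element is non-negative. A quick NumPy illustration:

import numpy

x = numpy.array([[1.0, -2.0], [-0.5, 3.0]], dtype=numpy.float32)
crelu = numpy.concatenate(
    (numpy.maximum(x, 0), numpy.maximum(-x, 0)), axis=1)
# crelu == [[1., 0., 0., 2.],
#           [0., 3., 0.5, 0.]]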
def check_forward(self, x_data):
    x = chainer.Variable(x_data)
    y = functions.crelu(x, axis=self.axis)
    self.assertEqual(y.data.dtype, numpy.float32)
    self.assertEqual(y.data.shape, self.y_shape)
    expected_former = self.x.copy()
    expected_latter = self.x.copy()
    for i in numpy.ndindex(self.x.shape):
        expected_former[i] = max(0, self.x[i])
        expected_latter[i] = max(0, -self.x[i])
    expected = numpy.concatenate(
        (expected_former, expected_latter), axis=self.axis)
    testing.assert_allclose(expected, y.data)
def check_forward(self, x_data):
    x = chainer.Variable(x_data)
    y = functions.crelu(x, axis=self.axis)
    self.assertEqual(y.data.dtype, numpy.float32)
    self.assertEqual(y.data.shape, self.y_shape)
    expected_former = self.x.copy()
    expected_latter = self.x.copy()
    for i in numpy.ndindex(self.x.shape):
        expected_former[i] = max(0, self.x[i])
        expected_latter[i] = max(0, -self.x[i])
    expected = numpy.concatenate(
        (expected_former, expected_latter), axis=self.axis)
    gradient_check.assert_allclose(expected, y.data)
def __call__(self, x):
    return F.crelu(x, self.axis)
def forward(self, inputs, device):
    x, = inputs
    return functions.crelu(x, axis=self.axis),
def __call__(self, z):
    h = F.crelu(self.plin0(z))
    h = F.crelu(self.plin1(h))
    ph = self.plin2(h)
    return MNISTLikelihood(ph)
def __call__(self, x):
    return functions.crelu(x, self.axis)