Example #1
    def generate(self, xs):
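        # Conditional generator: embed xs, append a noise vector, reshape to a small
        # feature map, then alternate residual conv blocks with deconvolution
        # upsampling; the tanh output is returned as a NumPy array on the CPU.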
        zs = self.make_noise(len(xs))

        hs_0 = F.leaky_relu(self.l0_dense(xs), slope=0.2)
        hs_0 = F.concat([hs_0, zs], axis=1)
        hs_0 = self.l1_bn(self.l1_dense(hs_0)).reshape(
            (len(xs), -1, self.s16, self.s16))

        hs_1 = F.relu(self.l2_bn(self.l2_conv(hs_0)))
        hs_1 = F.relu(self.l3_bn(self.l3_conv(hs_1)))
        hs_1 = self.l4_bn(self.l4_conv(hs_1))

        hs_2 = F.relu(F.add(hs_0, hs_1))
        hs_2 = self.l5_deconv(hs_2)
        hs_2 = self.l5_bn(self.l5_conv(hs_2))

        hs_3 = F.relu(self.l6_bn(self.l6_conv(hs_2)))
        hs_3 = F.relu(self.l7_bn(self.l7_conv(hs_3)))
        hs_3 = self.l8_bn(self.l8_conv(hs_3))

        hs_4 = F.relu(F.add(hs_2, hs_3))
        hs_4 = self.l9_deconv(hs_4)
        hs_4 = F.relu(self.l9_bn(self.l9_conv(hs_4)))

        hs_5 = self.l10_deconv(hs_4)
        hs_5 = F.relu(self.l10_bn(self.l10_conv(hs_5)))

        hs_6 = self.l11_deconv(hs_5)
        hs_6 = F.tanh(self.l11_conv(hs_6))

        return cuda.to_cpu(hs_6.data)
Example #2
    def forward(self, xs):
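        # Same generator architecture as above, but sized by self.batch_size and
        # returning the output Variable directly instead of a NumPy array.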
        zs = self.make_noise()

        hs_0 = F.leaky_relu(self.l0_dense(xs), slope=0.2)
        hs_0 = F.concat([hs_0, zs], axis=1)
        hs_0 = self.l1_bn(self.l1_dense(hs_0)).reshape(
            (self.batch_size, -1, self.s16, self.s16))

        hs_1 = F.relu(self.l2_bn(self.l2_conv(hs_0)))
        hs_1 = F.relu(self.l3_bn(self.l3_conv(hs_1)))
        hs_1 = self.l4_bn(self.l4_conv(hs_1))

        hs_2 = F.relu(F.add(hs_0, hs_1))
        hs_2 = self.l5_deconv(hs_2)
        hs_2 = self.l5_bn(self.l5_conv(hs_2))

        hs_3 = F.relu(self.l6_bn(self.l6_conv(hs_2)))
        hs_3 = F.relu(self.l7_bn(self.l7_conv(hs_3)))
        hs_3 = self.l8_bn(self.l8_conv(hs_3))

        hs_4 = F.relu(F.add(hs_2, hs_3))
        hs_4 = self.l9_deconv(hs_4)
        hs_4 = F.relu(self.l9_bn(self.l9_conv(hs_4)))

        hs_5 = self.l10_deconv(hs_4)
        hs_5 = F.relu(self.l10_bn(self.l10_conv(hs_5)))

        hs_6 = self.l11_deconv(hs_5)
        hs_6 = F.tanh(self.l11_conv(hs_6))

        return hs_6
Example #3
    def __call__(self, atoms_feat, pair_feat, global_feat, atom_idx, pair_idx,
                 start_idx, end_idx):
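        # One message-passing step that jointly updates edge (pair), node (atom),
        # and global feature vectors, with skip connections and optional dropout.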
        # 1) Pass the Dense layer
        a_f_d = self.dense_for_atom(atoms_feat)
        p_f_d = self.dense_for_pair(pair_feat)
        g_f_d = self.dense_for_global(global_feat)

        # 2) Update the edge vector
        start_node = a_f_d[start_idx]
        end_node = a_f_d[end_idx]
        g_f_extend_with_pair_idx = g_f_d[pair_idx]
        concat_p_v = functions.concat(
            (p_f_d, start_node, end_node, g_f_extend_with_pair_idx))
        update_p = self.update_for_atom(concat_p_v)

        # 3) Update the node vector
        # 1. get sum edge feature of all nodes using scatter_add method
        zero = self.xp.zeros(a_f_d.shape, dtype=self.xp.float32)
        sum_edge_vec = functions.scatter_add(zero, start_idx, update_p) + \
            functions.scatter_add(zero, end_idx, update_p)
        # 2. get degree of all nodes using scatter_add method
        one = self.xp.ones(p_f_d.shape, dtype=self.xp.float32)
        degree = functions.scatter_add(zero, start_idx, one) + \
            functions.scatter_add(zero, end_idx, one)
        # 3. get mean edge feature of all nodes
        mean_edge_vec = sum_edge_vec / degree
        # 4. concatenate
        g_f_extend_with_atom_idx = g_f_d[atom_idx]
        concat_a_v = functions.concat(
            (a_f_d, mean_edge_vec, g_f_extend_with_atom_idx))
        update_a = self.update_for_pair(concat_a_v)

        # 4) Update the global vector
        out_shape = g_f_d.shape
        ave_p = get_mean_feat(update_p, pair_idx, out_shape, self.xp)
        ave_a = get_mean_feat(update_a, atom_idx, out_shape, self.xp)
        concat_g_v = functions.concat((ave_a, ave_p, g_f_d), axis=1)
        update_g = self.update_for_global(concat_g_v)

        # 5) Skip connection
        new_a_f = functions.add(a_f_d, update_a)
        new_p_f = functions.add(p_f_d, update_p)
        new_g_f = functions.add(g_f_d, update_g)

        # 6) dropout
        if self.dropout_ratio > 0.0:
            new_a_f = functions.dropout(new_a_f, ratio=self.dropout_ratio)
            new_p_f = functions.dropout(new_p_f, ratio=self.dropout_ratio)
            new_g_f = functions.dropout(new_g_f, ratio=self.dropout_ratio)

        return new_a_f, new_p_f, new_g_f
Example #4
    def __call__(self, atoms_1, g_1, atoms_2, g_2):
        """
        :param atoms_1: atomic representation of molecule 1, with shape of (mb, N_1, hidden_dim)
        :param g_1: molecular representation of molecule 1, with shape of (mb, out_dim)
        :param atoms_2: atomic representation of molecule 2, with shape of (mb, N_2, hidden_dim)
        :param g_2: molecular representation of molecule 2, with shape of (mb, out_dim)
        :return:
        """
        # C: (mb, N_2, N_1)
        C = self.compute_attention(query=atoms_2, key=atoms_1)

        # L_2: (mb, N_2, N_1)
        L_2 = functions.softmax(C, axis=1)
        # L_1: (mb, N_1, N_2)
        L_1 = functions.softmax(functions.transpose(C, (0, 2, 1)), axis=1)

        # lt_atoms_1: (mb, N_1, hidden_dim) -> (mb, N_1, head)
        for layer in self.prev_lt_layers_1:
            atoms_1 = layer(atoms_1)
        # atoms_1 = self.prev_lt_layer_1(atoms_1)
        lt_atoms_1 = self.lt_layer_1(atoms_1)
        # lt_atoms_2: (mb, N_2, hidden_dim) -> (mb, N_2, head)
        for layer in self.prev_lt_layers_2:
            atoms_2 = layer(atoms_2)
        # atoms_2 = self.prev_lt_layer_2(atoms_2)
        lt_atoms_2 = self.lt_layer_2(atoms_2)
        # L_1: (mb, N_1, N_2), lt_atoms_2: (mb, N_2, head) -> (mb, N_1, head)
        lt_atoms_2_C = functions.matmul(L_1, lt_atoms_2)
        # H_1: (mb, N_1, head)
        H_1 = functions.tanh(functions.add(lt_atoms_1, lt_atoms_2_C))
        # L_2: (mb, N_2, N_1), lt_atoms_1: (mb, N_1, head)
        # lt_atoms_1_C: (mb, N_2, head)
        lt_atoms_1_C = functions.matmul(L_2, lt_atoms_1)
        H_2 = functions.tanh(functions.add(lt_atoms_2, lt_atoms_1_C))
        # attn_1: (mb, N_1, 1)
        attn_1 = functions.softmax(self.attention_layer_1(H_1))
        # attn_2: (mb, N_2, 1)
        attn_2 = functions.softmax(self.attention_layer_2(H_2))

        compact_1 = functions.sum(
            functions.tile(attn_1, reps=(1, 1, self.out_dim)) *
            self.j_layer(atoms_1),
            axis=1)
        compact_2 = functions.sum(
            functions.tile(attn_2, reps=(1, 1, self.out_dim)) *
            self.j_layer(atoms_2),
            axis=1)
        return compact_1, compact_2
Example #5
 def __call__(self, x):
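     # Basic residual block: two conv-BN-ReLU stages and a conv-BN stage, with an
     # identity skip connection from the input and a final ReLU.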
     h = self.conv0(x)
     h = self.bn0(h)
     h = F.relu(h)
     h = self.conv1(h)
     h = self.bn1(h)
     h = F.relu(h)
     h = self.conv2(h)
     h = self.bn2(h)
     h = F.add(h, x)
     h = F.relu(h)
     return h
Example #6
    def set2vec(self, input_set, num_timesteps, mprev=None, cprev=None, inner_prod='default', name='lstm'):
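        # Attention-based LSTM readout over a set: at each timestep the LSTM state
        # queries the input set, the attention-weighted read is appended to the
        # state, and the per-step states are collected in logit_att.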
        batch_size = input_set.shape[0]
        node_dim = input_set.shape[3]
        assert self.node_dim == node_dim

        if mprev is None:
            mprev = chainer.Variable(self.xp.zeros(shape=(batch_size, node_dim), dtype=self.xp.float32))
        # (batch_size, node_dim * 2)
        mprev = functions.concat(
            [mprev, chainer.Variable(self.xp.zeros(shape=(batch_size, node_dim), dtype=self.xp.float32))], axis=1)
        if cprev is None:
            cprev = chainer.Variable(self.xp.zeros(shape=(batch_size, node_dim), dtype=self.xp.float32))

        logit_att = []
        attention_w2_data = self.xp.zeros(shape=(node_dim, node_dim), dtype=self.xp.float32)
        init = chainer.initializers.GlorotUniform(dtype=self.xp.float32)
        init(attention_w2_data)
        attention_w2 = chainer.Variable(data=attention_w2_data, name=name + "att_W_2")
        attention_v_data = self.xp.zeros(shape=(node_dim, 1), dtype=self.xp.float32)
        attention_v = chainer.Variable(data=attention_v_data, name=name + "att_V")

        for i in range(num_timesteps):
            m, c = self.lstm_block(mprev, cprev)
            query = functions.matmul(m, attention_w2)
            query = functions.reshape(query, shape=(-1, 1, 1, node_dim))
            if inner_prod == 'default':
                energies = functions.reshape(
                    functions.matmul(
                        functions.reshape(functions.tanh(
                            functions.add(functions.broadcast_to(query, shape=input_set.shape), input_set)),
                            shape=(-1, node_dim)),
                        attention_v
                    ), shape=(batch_size, -1)
                )
            elif inner_prod == 'dot':
                att_mem_reshape = functions.reshape(input_set, shape=(batch_size, -1, node_dim))
                query = functions.reshape(query, shape=(-1, node_dim, 1))
                energies = functions.reshape(functions.matmul(att_mem_reshape, query), shape=(batch_size, -1))
            else:
                raise ValueError("Invalid inner_prod type: {}".format(inner_prod))

            att = functions.softmax(energies)

            att = functions.reshape(att, shape=(batch_size, -1, 1, 1))

            read = functions.sum(functions.broadcast_to(att, shape=input_set.shape) * input_set, axis=(1, 2))
            m = functions.concat([m, read], axis=1)

            logit_att.append(m)
            mprev = m
            cprev = c

        return logit_att, c, m
Example #7
 def __call__(self, x):
     # x:input
     h = self.conv0(x)  # Convolution layer
     h = self.bn0(h)  # BatchNormalization layer
     h = F.relu(h)  # ReLU layer
     h0 = self.conv1(h)  # Convolution_2 layer
     h0 = self.bn1(h0)  # BatchNormalization_2 layer
     h0 = F.relu(h0)  # ReLU_2 layer
     h0 = self.conv2(h0)  # Convolution_3 layer
     h0 = self.bn2(h0)  # BatchNormalization_3 layer
     h0 = F.relu(h0)  # ReLU_3 layer
     h0 = self.conv3(h0)  # Convolution_4 layer
     h0 = self.bn3(h0)  # BatchNormalization_4 layer
     h = F.add(h, h0)  # Add2 layer
     h = F.relu(h)  # ReLU_4 layer
     h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)  # MaxPooling layer
     # 1st repeat
     for _, f in self.res1:
         h = f(h)
     h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)  # MaxPooling_2 layer
     # 2nd repeat
     for _, f in self.res2:
         h = f(h)
     h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)  # MaxPooling_3 layer
     h1 = self.conv4(h)  # Convolution_11 layer
     h1 = self.bn4(h1)  # BatchNormalization_11 layer
     h1 = F.relu(h1)  # ReLU_11 layer
     h1 = self.conv5(h1)  # Convolution_12 layer
     h1 = self.bn5(h1)  # BatchNormalization_12 layer
     h1 = F.relu(h1)  # ReLU_12 layer
     h1 = self.conv6(h1)  # Convolution_13 layer
     h1 = self.bn6(h1)  # BatchNormalization_13 layer
     h = F.add(h, h1)  # Add2_4 layer
     h = F.relu(h)  # ReLU_13 layer
     h = F.average_pooling_2d(h, ksize=4, stride=4,
                              pad=0)  # AveragePooling layer
     h = self.fc0(h)  # Affine layer
     return h
Example #8
 def forward(self, x):
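     # Build random negative triples (corrupted head/tail entities, same relation),
     # combine the positive loss with F.relu(loss_ne - self.Tau), and report the
     # summed loss together with the hit-based accuracy.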
     # negative samples
     h = np.random.randint(0, self.mes.nodeCnt, (x.shape[0], 1))
     t = np.random.randint(0, self.mes.nodeCnt, (x.shape[0], 1))
     r = x[:, 1].reshape(-1, 1)
     x_negative = np.concatenate((h, r, t), axis=1)
     # loss
     loss_x = self.transloss(x)
     loss_ne = self.transloss(x_negative)
     loss = F.add(loss_x, F.relu(loss_ne - self.Tau))
     self.loss = F.sum(loss, axis=0).reshape(1)  # the loss value must be a list of Variables
     reporter.report({'loss': self.loss[0]}, self)
     accuracy = self.accuracy_func(x, self.settings['hit'])
     reporter.report({'accuracy': accuracy[0]}, self)
     return self.loss
Example #9
    def __call__(self, x):
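        # Residual unit: optionally pass the shortcut through fit_conv/fit_bn to
        # match dimensions, downsample it when stride == 2, then add it to the
        # block output and apply ReLU.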
        res_x = self.block(x)

        if self.fit_dim:
            x = self.fit_conv(x)
            x = self.fit_bn(x)
            x = F.relu(x)

        if self.stride == 2:
            x = F.max_pooling_2d(x, ksize=2, pad=0, stride=self.stride)

        x = F.add(res_x, x)
        x = F.relu(x)

        return x
Example #10
	def __call__(self, x, a, mode, is_nasp_upd_w=False):
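		# NAS-style cell: node i (i = 1..3) sums the outputs of one edge applied to
		# every earlier node j < i; the last node is returned.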

		node = [x]
		edges = self.children()
		pos = 0
		for i in range(1,4):
			node.append(None)
			for j in range(0,i):
				edge = edges.__next__()
				res = edge(x=node[j], a=a[pos], mode=mode, is_nasp_upd_w=is_nasp_upd_w)
				pos += 1

				if node[i] is None:
					node[i] = res
				else:
					node[i] = F.add(node[i],res)

		return node[3]
Example #11
 def forward(self, st, act, r, st_dash, ep_end, ISWeights):
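     # Double DQN target: actions are picked by the online network's argmax but
     # evaluated with the target network; the squared TD error is weighted by the
     # importance-sampling weights, and the raw TD error is also returned.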
     s = Variable(st)
     s_dash = Variable(st_dash)
     Q = self.model.Q_func(s)
     Q_dash = self.model.Q_func(s_dash)
     Q_dash_target = self.target_model.Q_func(s_dash).data
     Q_dash_idmax = np.asanyarray(list(map(np.argmax, Q_dash.data)))
     max_Q_dash = np.asanyarray([
         Q_dash_target[i][Q_dash_idmax[i]] for i in range(len(Q_dash_idmax))
     ])
     target = np.asanyarray(copy.deepcopy(Q.data), dtype=np.float32)
     for i in range(self.batch_size):
         target[
             i,
             act[i]] = r[i] + (self.gamma * max_Q_dash[i]) * (not ep_end[i])
     squared_error = F.squared_error(Q, Variable(target))
     loss = F.mean(squared_error * ISWeights)
     td_error = F.add(Q, Variable(-1 * target))
     self.loss = loss.data
     return loss, td_error
Example #12
    def forward(self, xs, sent_vec):
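        # Score images conditioned on a sentence vector: downsample xs through conv
        # blocks with a residual connection, tile the projected sent_vec to 4x4,
        # concatenate, and flatten the final conv output.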
        hs_0 = F.leaky_relu(self.l0_conv(xs), slope=0.2)

        hs_1 = F.leaky_relu(self.l1_bn(self.l1_conv(hs_0)), slope=0.2)
        hs_1 = F.leaky_relu(self.l2_bn(self.l2_conv(hs_1)), slope=0.2)
        hs_1 = self.l3_bn(self.l3_conv(hs_1))

        hs_2 = F.leaky_relu(self.l4_bn(self.l4_conv(hs_1)), slope=0.2)
        hs_2 = F.leaky_relu(self.l5_bn(self.l5_conv(hs_2)), slope=0.2)
        hs_2 = self.l6_bn(self.l6_conv(hs_2))
        hs_3 = F.leaky_relu(F.add(hs_1, hs_2), slope=0.2)

        sent_vec = F.leaky_relu(self.l7_dense(sent_vec), slope=0.2)
        sent_vec = F.expand_dims(F.expand_dims(sent_vec, axis=2), axis=3)
        sent_vec = F.tile(sent_vec, (1,1,4,4))

        hs_4 = F.concat([hs_3, sent_vec], axis=1)
        hs_4 = F.leaky_relu(self.l8_bn(self.l8_conv(hs_4)), slope=0.2)
        hs_4 = self.l9_conv(hs_4)

        return hs_4.reshape(-1)
Example #13
        def forward(self, x):
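            # Add a learned perturbation to x: tanh(b) modulated by softplus(m)
            # (repeated 16 times along axes 1 and 2), clip to [-0.5, 0.5], and
            # encode the result with self.encoder.forward_step.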
            b_ = cf.tanh(self.b)

            # Not sure if implementation is wrong
            m_ = cf.softplus(self.m)
            # m = cf.repeat(m, 8, axis=2)
            # m = cf.repeat(m, 8, axis=1)
            m_ = cf.repeat(m_, 16, axis=2)
            m_ = cf.repeat(m_, 16, axis=1)

            b_ = b_ * m_
            x_ = cf.add(x, b_)
            x_ = cf.clip(x_, -0.5, 0.5)

            z = []
            zs, logdet = self.encoder.forward_step(x_)
            for (zi, mean, ln_var) in zs:
                z.append(zi)

            z = merge_factorized_z(z)

            return z, zs, logdet, xp.sum(xp.abs(b_.data)), xp.tanh(
                self.b.data * 1), m_, x_
Example #14
        def forward(self, x):
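            # Same perturbation-and-encode pattern as above, but b is added directly
            # and the mask is handled by self.modify_mask() instead.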
            # b_ = cf.tanh(self.b)
            b_ = self.b
            # Not sure if implementation is wrong
            self.modify_mask()
            # m = cf.repeat(m, 8, axis=2)
            # m = cf.repeat(m, 8, axis=1)
            # m = cf.repeat(m, 16, axis=2)
            # m = cf.repeat(m, 16, axis=1)
            # b = b * m
            x_ = cf.add(x, b_)
            x_ = cf.clip(x_, -0.5, 0.5)

            z = []
            zs, logdet = self.encoder.forward_step(x_)
            for (zi, mean, ln_var) in zs:
                z.append(zi)

            z = merge_factorized_z(z)

            # return z, zs, logdet, cf.batch_l2_norm_squared(b), xp.tanh(self.b.data*1), cur_x, m
            return z, zs, logdet, xp.sum(xp.abs(b_.data)), xp.tanh(
                self.b.data * 1), self.m, x_
Example #15
 def __call__(self, *xs):
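     # Thin wrapper that sums all input arrays/Variables elementwise via F.add.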
     return F.add(*xs)