Code Example #1
    def forward(self, x):
        x = F.leaky_relu_(self.conv_channels_up(x))
        x = self.convList0(x)
        x = hollowSampleFunction.hollowUpSampleTensor(x)
        x = self.convList1(x)
        x = F.leaky_relu_(self.conv_channels_down(x))
        return x
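A quick note before the rest of the examples: F.leaky_relu_ (trailing underscore) is the in-place variant of F.leaky_relu, following PyTorch's naming convention for in-place ops. It mutates its argument and returns that same tensor. A minimal sketch:

import torch
import torch.nn.functional as F

t = torch.tensor([-1.0, 2.0])
out = F.leaky_relu_(t)  # default negative_slope=0.01
assert out is t         # same object: the input was mutated in place
print(t)                # tensor([-0.0100,  2.0000])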
Code Example #2
    def forward(self, x):
        out = F.leaky_relu_(self.bn1(self.conv1(x)))
        out = F.leaky_relu_(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)
        out = F.leaky_relu_(out)
        return out
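Code Example #2 is the forward pass of a ResNet-style bottleneck block with the usual ReLU swapped for its leaky, in-place variant. A hypothetical scaffold for the module it could belong to (channel widths and the expansion factor of 4 are assumptions, not taken from the source project):

import torch.nn as nn

class Bottleneck(nn.Module):
    expansion = 4  # assumed, as in standard ResNet bottlenecks

    def __init__(self, in_planes, planes, stride=1):
        super().__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        # projection shortcut when the shape changes, identity otherwise
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes * self.expansion:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes * self.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * self.expansion))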
Code Example #3
    def forward(self, x):
        x = F.leaky_relu_(self.conv1(x))
        x = self.conv1_bn(self.pool1(x))
        x = self.conv2_bn(F.leaky_relu_(self.conv2(x)))
        x = self.conv3(x)
        x = x.reshape(x.shape[0], -1)
        return x
Code Example #4
    def _forward(self, x1_pyramid, x2_pyramid, neg=False):
        flows = []
        for i, (x1, x2) in enumerate(zip(x1_pyramid, x2_pyramid)):
            if i == 0:
                corr = self.corr(x1, x2)
                feat, flow = self.flow_estimators[i](corr)
                if neg:
                    flow = -F.relu(-flow)
                else:
                    flow = F.relu(flow)
            else:
                # predict the normalized disparity to keep consistent with MonoDepth
                # for reusing the hyper-parameters
                up_flow = F.interpolate(flow,
                                        scale_factor=2,
                                        mode='bilinear',
                                        align_corners=True)

                zeros = torch.zeros_like(up_flow)
                x2_warp = flow_warp(
                    x2,
                    torch.cat([up_flow, zeros], dim=1),
                )

                corr = self.corr(x1, x2_warp)
                F.leaky_relu_(corr)

                feat, flow = self.flow_estimators[i](torch.cat(
                    [corr, x1, up_flow], dim=1))

                flow = flow + up_flow

                if neg:
                    flow = -F.relu(-flow)
                else:
                    flow = F.relu(flow)

                if self.context_networks[i]:
                    flow_fine = self.context_networks[i](torch.cat(
                        [flow, feat], dim=1))
                    flow = flow + flow_fine

                    if neg:
                        flow = -F.relu(-flow)
                    else:
                        flow = F.relu(flow)

            if neg:
                flows.append(-flow)
            else:
                flows.append(flow)
            if len(flows) == self.n_out:
                break
        flows = [
            F.interpolate(flow * 4,
                          scale_factor=4,
                          mode='bilinear',
                          align_corners=True) for flow in flows
        ]
        return flows[::-1]
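Code Example #4 (a coarse-to-fine flow/disparity estimator) repeatedly uses -F.relu(-flow) in its neg branches. That expression is just a clamp to non-positive values, the mirror image of F.relu, and is how the sign of the predicted disparity gets enforced. A quick check:

import torch
import torch.nn.functional as F

t = torch.tensor([-1.5, 0.0, 2.0])
assert torch.equal(-F.relu(-t), t.clamp(max=0.0))  # keep only values <= 0
assert torch.equal(F.relu(t), t.clamp(min=0.0))    # keep only values >= 0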
Code Example #5
    def forward(self, inputs: Tensor, targets: Tensor = None, batch_seen: int = None) -> Tensor:
        """
        dynamic convolutional recurrent neural network
        :param inputs: [B, n_hist, N, input_dim]
        :param supports: list of tensors, each tensor is with shape [N, N]
        :param targets: exists for training, tensor, [B, n_pred, N, output_dim]
        :param batch_seen: int, the number of batches the model has seen
        :return: [B, n_pred, N, output_dim],[]
        """
        if self.method == 'big':
            graph = list()
            nodevec1 = self.nodevec1
            nodevec2 = self.nodevec2
            n = nodevec1.size(0)
            self.graph0 = F.leaky_relu_(torch.mm(nodevec1, nodevec2))
            graph.append(self.graph0)
            nodevec1 = nodevec1.mm(self.w1) + self.b1.repeat(n, 1)
            nodevec2 = (nodevec2.T.mm(self.w1) + self.b1.repeat(n, 1)).T
            self.graph1 = F.leaky_relu_(torch.mm(nodevec1, nodevec2))
            graph.append(self.graph1)
            nodevec1 = nodevec1.mm(self.w2) + self.b2.repeat(n, 1)
            nodevec2 = (nodevec2.T.mm(self.w2) + self.b2.repeat(n, 1)).T
            self.graph2 = F.leaky_relu_(torch.mm(nodevec1, nodevec2))
            graph.append(self.graph2)
        else:
            graph = self._mahalanobis_distance_cal()
        states = self.encoder(inputs, graph)
        outputs = self.decoder(graph, states, targets, self._compute_sampling_threshold(batch_seen))
        return outputs, graph
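The 'big' branch of Code Example #5 builds each graph as the activated product of two learnable node-embedding tables, in the spirit of Graph WaveNet's adaptive adjacency. A stripped-down sketch of the idea (sizes are illustrative):

import torch
import torch.nn.functional as F

n, d = 6, 4                              # n nodes, embedding dim d
nodevec1 = torch.randn(n, d, requires_grad=True)
nodevec2 = torch.randn(d, n, requires_grad=True)
adj = F.leaky_relu(nodevec1 @ nodevec2)  # (n, n) learned adjacency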
Code Example #6
    def forward(self, x):
        x = self.fc1_bn(F.leaky_relu_(self.fc1(x)))
        x = self.fc2_bn(F.leaky_relu_(self.fc2(x)))
        x = x.reshape(x.shape[0], 32, 16, 16)
        x = self.conv1_bn(F.leaky_relu_(self.conv1(x)))
        x = torch.tanh(self.conv2(x))  # equivalent to nn.Tanh()(...) without building a module per call

        return x
Code Example #7
    def forward(self, input):
        x = self.pool_and_inject(input)
        left_side = F.leaky_relu_(
            self.left_conv2(F.leaky_relu_(self.left_conv1(x))))
        right_side = F.leaky_relu_(
            self.right_conv2(F.leaky_relu_(self.right_conv1(x))))
        x = torch.cat((left_side, right_side), 1)
        x = F.leaky_relu_(self.conv3(x))
        return x + input
Code Example #8
    def forward(self, style_embeddings, class_embeddings):
        style_embeddings = F.leaky_relu_(self.style_input(style_embeddings), negative_slope=0.2)
        class_embeddings = F.leaky_relu_(self.class_input(class_embeddings), negative_slope=0.2)

        x = torch.cat((style_embeddings, class_embeddings), dim=1)
        x = x.view(x.size(0), 128, 2, 2)
        x = self.deconv_model(x)

        return x
Code Example #9
    def forward(self, x):
        y = F.leaky_relu_(self.hc(x))
        # batchSize * 1 * 128 * 128 -> batchSize * 512 * 128 * 128
        y = F.leaky_relu_(self.conv(y))
        # batchSize * 512 * 128 * 128 -> batchSize * 256 * 128 * 128
        y = y.view(y.shape[0], 1, 256, y.shape[2], y.shape[3])
        # batchSize * 256 * 128 * 128 -> batchSize * 1 * 256 * 128 * 128
        y = y.permute(0, 1, 3, 4, 2)
        # batchSize * 1 * 256 * 128 * 128 -> batchSize * 1 * 128 * 128 * 256
        return y
Code Example #10
    def forward(self, inputs):
        x = self.layer_1(inputs)
        x = F.leaky_relu_(x)
        x = self.conv_1(x)
        x = self.layer_2(x)
        x = F.leaky_relu_(x)
        x = self.conv_2(x)
        x = x + inputs

        return x
Code Example #11
    def forward(self, input):
        if self.downSample:
            return self.k * F.leaky_relu_(
                self.convDown(input)) + (1 - self.k) * F.leaky_relu_(
                    self.pixConv(pixelShuffleFunction.pixel_unshuffle(
                        input, 2)))
        else:
            return self.k * F.leaky_relu_(
                self.convUp(input)) + (1 - self.k) * F.leaky_relu_(
                    F.pixel_shuffle(self.pixConv(input), 2))
Code Example #12
    def forward(self, x):
        z = F.linear(x, self.W[0])
        F.leaky_relu_(z, negative_slope=self.negative_slope)  # in-place: mutates z, so the return value is not needed

        for W, U in zip(self.W[1:-1], self.U[:-1]):
            z = F.linear(x,
                         W) + F.linear(z, F.softplus(U)) * self.negative_slope
            z = F.leaky_relu_(z, negative_slope=self.negative_slope)

        z = F.linear(x, self.W[-1]) + F.linear(z, F.softplus(self.U[-1]))
        return F.relu(z) + self.eps * (x**2).sum(1)[:, None]
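Code Example #12 has the shape of an input-convex network: the hidden-to-hidden weights U pass through F.softplus, which reparametrizes them to be strictly positive, and positive weights combined with convex, non-decreasing activations keep the output convex in x. The reparametrization itself is one line:

import torch
import torch.nn.functional as F

U = torch.randn(4, 4)             # unconstrained parameter
assert (F.softplus(U) > 0).all()  # softplus(u) = log(1 + exp(u)) > 0 elementwise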
Code Example #13
    def encode(self, X):
        """
        Encode the image X.
        """
        res = X
        for ii in range(self.n_conv):
            res = F.leaky_relu_(self.conv_bn[ii](self.conv[ii](res)))
        res = res.view(-1, self.d_fc)
        for M in self.encoders:
            res = F.leaky_relu_(M(res))
        return self.mu_layer(res), self.var_layer(res)
Code Example #14
    def forward(self,
                x,
                return_latents=False,
                return_rgb=True,
                randomize_noise=True):
        """Forward function for GFPGANv1Clean.

        Args:
            x (Tensor): Input images.
            return_latents (bool): Whether to return style latents. Default: False.
            return_rgb (bool): Whether to return intermediate rgb images. Default: True.
            randomize_noise (bool): Randomize noise, used when 'noise' is False. Default: True.
        """
        conditions = []
        unet_skips = []
        out_rgbs = []

        # encoder
        feat = F.leaky_relu_(self.conv_body_first(x), negative_slope=0.2)
        for i in range(self.log_size - 2):
            feat = self.conv_body_down[i](feat)
            unet_skips.insert(0, feat)
        feat = F.leaky_relu_(self.final_conv(feat), negative_slope=0.2)

        # style code
        style_code = self.final_linear(feat.view(feat.size(0), -1))
        if self.different_w:
            style_code = style_code.view(style_code.size(0), -1,
                                         self.num_style_feat)

        # decode
        for i in range(self.log_size - 2):
            # add unet skip
            feat = feat + unet_skips[i]
            # ResUpLayer
            feat = self.conv_body_up[i](feat)
            # generate scale and shift for SFT layers
            scale = self.condition_scale[i](feat)
            conditions.append(scale.clone())
            shift = self.condition_shift[i](feat)
            conditions.append(shift.clone())
            # generate rgb images
            if return_rgb:
                out_rgbs.append(self.toRGB[i](feat))

        # decoder
        image, _ = self.stylegan_decoder([style_code],
                                         conditions,
                                         return_latents=return_latents,
                                         input_is_latent=self.input_is_latent,
                                         randomize_noise=randomize_noise)

        return image, out_rgbs
Code Example #15
    def message(self, x_j: Tensor, x_i: Tensor, edge_attr: Tensor,
                index: Tensor, ptr: OptTensor,
                size_i: Optional[int]) -> Tensor:

        x_j = F.leaky_relu_(self.lin1(torch.cat([x_j, edge_attr], dim=-1)))
        alpha_j = (x_j * self.att_l).sum(dim=-1)
        alpha_i = (x_i * self.att_r).sum(dim=-1)
        alpha = alpha_j + alpha_i
        alpha = F.leaky_relu_(alpha)
        alpha = softmax(alpha, index, ptr, size_i)
        alpha = F.dropout(alpha, p=self.dropout, training=self.training)
        return self.lin2(x_j) * alpha.unsqueeze(-1)
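Code Example #15 is a PyTorch Geometric message function, so it runs once per edge. The softmax here is presumably torch_geometric.utils.softmax, which normalizes attention logits over all edges sharing a destination node rather than over a fixed tensor dimension:

import torch
from torch_geometric.utils import softmax

alpha = torch.tensor([1.0, 2.0, 3.0])
index = torch.tensor([0, 0, 1])  # destination node of each edge
print(softmax(alpha, index))     # edges 0 and 1 normalized together; edge 2 alone -> 1.0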
Code Example #16
    def forward(self, x):
        x = self.conv1(x)
        x = F.leaky_relu_(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = F.leaky_relu_(x)
        x = self.conv3(x)
        x = self.bn3(x)
        x = F.leaky_relu_(x)
        x = self.conv4(x)
        x = self.bn4(x)
        return torch.sigmoid(x)  # F.sigmoid is deprecated in favor of torch.sigmoid
Code Example #17
    def forward(self, x, y, z):
        x = F.leaky_relu_(self.conv1(x))
        x = self.conv1_bn(self.pool1(x))
        x = self.conv2_bn(F.leaky_relu_(self.conv2(x)))
        x = self.conv3(x)
        x = x.reshape(x.shape[0], -1)
        y = y.reshape(y.shape[0], -1)
        z = z.reshape(z.shape[0], -1)
        x = torch.cat([x, y, z], dim=1)
        output = F.relu_(self.input_linear(x))
        output = F.relu(self.output_1(output))
        output = self.output_2(output)
        return output
Code Example #18
    def forward(self, user_nodes, item_nodes):
        x = F.normalize(self.id_embedding).cuda()
        x = F.leaky_relu_(self.conv_embed_1(x, self.edge_index))
        x1 = F.leaky_relu_(self.conv_embed_2(x, self.edge_index))
        x2 = F.leaky_relu(self.conv_embed_3(x1, self.edge_index))
        x = torch.cat((x, x1, x2), dim=1)
        self.result_embed = x
        user_tensor = x[user_nodes]
        self.t_feat = torch.tensor(
            scatter_('mean', self.word_embedding(self.words_tensor[1]),
                     self.words_tensor[0])).cuda()
        item_feat = torch.cat((self.v_feat[item_nodes - self.num_user],
                               self.a_feat[item_nodes - self.num_user],
                               self.t_feat[item_nodes - self.num_user]), dim=1)
        item_tensor = x[item_nodes] + F.leaky_relu_(self.linear_layer1(item_feat))
        scores = torch.sum(user_tensor * item_tensor, dim=1)
        return scores
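The scatter_('mean', ...) call in Code Example #18 (an older torch_geometric helper) mean-pools word embeddings into one feature vector per item. Plain PyTorch can express the same pooling; a sketch with made-up sizes:

import torch

emb = torch.randn(5, 8)               # one row per word occurrence
item = torch.tensor([0, 0, 1, 1, 1])  # which item each word belongs to
pooled = torch.zeros(2, 8).index_reduce_(0, item, emb, 'mean',
                                         include_self=False)  # (num_items, 8)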
Code Example #19
    def forward(self, x):
        x = self.conv1(x)
        x = F.leaky_relu_(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = F.leaky_relu_(x)
        x = self.conv3(x)
        x = self.bn3(x)
        x = F.leaky_relu_(x)
        x = self.conv4(x)
        x = self.bn4(x)
        # x_sigmod = F.sigmoid(x)

        # return x_sigmod, x
        return x
Code Example #20
    def accuracy(self, dataset, topk=10, neg_num=1000):
        all_set = set(list(np.arange(neg_num)))
        sum_pre = 0.0
        sum_recall = 0.0
        sum_ndcg = 0.0
        sum_item = 0
        bar = tqdm(total=len(dataset))

        for data in dataset:
            bar.update(1)
            if len(data) < 1002:
                continue

            sum_item += 1
            user = data[0]
            neg_items = data[1:1001]
            pos_items = data[1001:]

            batch_user_tensor = torch.tensor(user).cuda() 
            batch_pos_tensor = torch.tensor(pos_items).cuda()
            batch_neg_tensor = torch.tensor(neg_items).cuda()

            pos_item_feat = torch.cat((self.v_feat[batch_pos_tensor-self.num_user], self.a_feat[batch_pos_tensor-self.num_user], self.t_feat[batch_pos_tensor-self.num_user]), dim=1)
            neg_item_feat = torch.cat((self.v_feat[batch_neg_tensor-self.num_user], self.a_feat[batch_neg_tensor-self.num_user], self.t_feat[batch_neg_tensor-self.num_user]), dim=1)
    
            user_embed = self.result_embed[batch_user_tensor]
            pos_v_embed = self.result_embed[batch_pos_tensor] + F.leaky_relu_(self.linear_layer1(pos_item_feat))
            neg_v_embed = self.result_embed[batch_neg_tensor] + F.leaky_relu_(self.linear_layer1(neg_item_feat))

            num_pos = len(pos_items)
            pos_score = torch.sum(pos_v_embed*user_embed, dim=1)
            neg_score = torch.sum(neg_v_embed*user_embed, dim=1)

            _, index_of_rank_list = torch.topk(torch.cat((neg_score, pos_score)), topk)
            index_set = set([iofr.cpu().item() for iofr in index_of_rank_list])
            num_hit = len(index_set.difference(all_set))
            sum_pre += float(num_hit/topk)
            sum_recall += float(num_hit/num_pos)
            ndcg_score = 0.0
            for i in range(num_pos):
                label_pos = neg_num + i
                if label_pos in index_of_rank_list:
                    index = list(index_of_rank_list.cpu().numpy()).index(label_pos)
                    ndcg_score = ndcg_score + math.log(2) / math.log(index + 2)
            sum_ndcg += ndcg_score/num_pos
        bar.close()

        return sum_pre/sum_item, sum_recall/sum_item, sum_ndcg/sum_item
Code Example #21
    def forward(self, style_emb, class_label):
        # Get class dim
        class_emb = torch.index_select(self.word_dict,
                                       dim=0,
                                       index=class_label)
        # 1. FC
        style_emb = F.leaky_relu_(self._fc_style(style_emb),
                                  negative_slope=0.2)
        class_emb = F.leaky_relu_(self._fc_class(class_emb),
                                  negative_slope=0.2)
        # 2. Convolution
        x = torch.cat((style_emb, class_emb), dim=1)
        x = x.view(x.size(0), 128, 2, 2)
        x = self._deconv_blocks(x)
        # Return
        return x
Code Example #22
    def forward(self, input):
        '''input: (batch_size, 784)
        '''

        batch_size = input.shape[0]
        x = input.view(batch_size, 1, 28, 28)

        x = F.leaky_relu_(self.conv1(x), 0.2)
        x = F.leaky_relu_(self.bn2(self.conv2(x)), 0.2)
        x = F.leaky_relu_(self.bn3(self.conv3(x)), 0.2)

        x = x.view(-1, x.shape[1] * x.shape[2] * x.shape[3])
        x = self.fc_final(x)
        x = torch.sigmoid(x)

        return x
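Code Example #22 is a DCGAN-style discriminator over flattened 28x28 images. A hypothetical layer definition that makes its forward shape-consistent (channel counts, kernel sizes, and strides are assumptions):

import torch.nn as nn

class Discriminator(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 64, 4, stride=2, padding=1)     # 28 -> 14
        self.conv2 = nn.Conv2d(64, 128, 4, stride=2, padding=1)   # 14 -> 7
        self.bn2 = nn.BatchNorm2d(128)
        self.conv3 = nn.Conv2d(128, 256, 3, stride=2, padding=1)  # 7 -> 4
        self.bn3 = nn.BatchNorm2d(256)
        self.fc_final = nn.Linear(256 * 4 * 4, 1)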
Code Example #23
    def forward(self, edge_index):

        if self.is_word:
            features = torch.tensor(
                scatter_('mean', self.features(self.word_tensor[1]),
                         self.word_tensor[0])).cuda()
        else:
            features = F.leaky_relu(self.MLP(self.features))

        if self.has_norm:
            preference = F.normalize(self.preference)
            features = F.normalize(features)

        for i in range(self.num_routing):
            x = torch.cat((preference, features), dim=0)
            x_hat_1 = self.conv_embed_1(x, edge_index)
            preference = preference + x_hat_1[:self.num_user]

            if self.has_norm:
                preference = F.normalize(preference)

        x = torch.cat((preference, features), dim=0)
        edge_index = torch.cat((edge_index, edge_index[[1, 0]]), dim=1)

        x_hat_1 = self.conv_embed_1(x, edge_index)

        if self.has_act:
            x_hat_1 = F.leaky_relu_(x_hat_1)

        return x + x_hat_1, self.conv_embed_1.alpha.view(-1, 1)
Code Example #24
    def forward(self, x):
        out = F.leaky_relu_(self.conv1(x), negative_slope=0.2)
        # upsample/downsample
        out = F.interpolate(out,
                            scale_factor=self.scale_factor,
                            mode='bilinear',
                            align_corners=False)
        out = F.leaky_relu_(self.conv2(out), negative_slope=0.2)
        # skip
        x = F.interpolate(x,
                          scale_factor=self.scale_factor,
                          mode='bilinear',
                          align_corners=False)
        skip = self.skip(x)
        out = out + skip
        return out
Code Example #25
    def forward(self, x, edge_index, edge_attr, batch):
        """"""
        # Atom Embedding:
        x = F.leaky_relu_(self.lin1(x))

        h = F.elu_(self.atom_convs[0](x, edge_index, edge_attr))
        h = F.dropout(h, p=self.dropout, training=self.training)
        x = self.atom_grus[0](h, x).relu_()

        for conv, gru in zip(self.atom_convs[1:], self.atom_grus[1:]):
            h = F.elu_(conv(x, edge_index))
            h = F.dropout(h, p=self.dropout, training=self.training)
            x = gru(h, x).relu_()

        # Molecule Embedding:
        row = torch.arange(batch.size(0), device=batch.device)
        edge_index = torch.stack([row, batch], dim=0)

        out = global_add_pool(x, batch).relu_()
        for t in range(self.num_timesteps):
            h = F.elu_(self.mol_conv((x, out), edge_index))
            h = F.dropout(h, p=self.dropout, training=self.training)
            out = self.mol_gru(h, out).relu_()

        # Predictor:
        out = F.dropout(out, p=self.dropout, training=self.training)
        return self.lin2(out)
Code Example #26
    def forward(self, batched_data):
        """"""
        x, edge_index, edge_attr, batch = (batched_data.x,
                                           batched_data.edge_index,
                                           batched_data.edge_attr,
                                           batched_data.batch)
        # Atom Embedding:
        x = F.leaky_relu_(self.atom_encoder(x))
        edge_attr = self.bond_encoder(edge_attr)

        h = F.elu_(self.atom_convs[0](x, edge_index, edge_attr))
        h = F.dropout(h, p=self.drop_ratio, training=self.training)
        x = self.atom_grus[0](h, x).relu_()

        for conv, gru in zip(self.atom_convs[1:], self.atom_grus[1:]):
            h = F.elu_(conv(x, edge_index))
            h = F.dropout(h, p=self.drop_ratio, training=self.training)
            x = gru(h, x).relu_()

        # Molecule Embedding:
        row = torch.arange(batch.size(0), device=batch.device)
        edge_index = torch.stack([row, batch], dim=0)

        out = global_add_pool(x, batch).relu_()
        for t in range(self.num_timesteps):
            h = F.elu_(self.mol_conv((x, out), edge_index))
            h = F.dropout(h, p=self.drop_ratio, training=self.training)
            out = self.mol_gru(h, out).relu_()

        # Predictor:
        out = F.dropout(out, p=self.drop_ratio, training=self.training)
        return self.graph_pred_linear(out)
Code Example #27
File: utils.py, Project: alebeck/tracking_wo_bnw
def correlate(input1, input2, args):
    out_corr = spatial_correlation_sample(input1, input2, **args)
    # collate dimensions 1 and 2 in order to be treated as a
    # regular 4D tensor
    b, ph, pw, h, w = out_corr.size()
    out_corr = out_corr.view(b, ph * pw, h, w) / input1.size(1)
    return F.leaky_relu_(out_corr, 0.1)
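A hedged usage sketch for correlate in Code Example #27: spatial_correlation_sample comes from the third-party spatial_correlation_sampler package, and the keyword arguments below are illustrative assumptions, not values from the source project.

import torch
from spatial_correlation_sampler import spatial_correlation_sample

feat1 = torch.randn(1, 64, 48, 64)
feat2 = torch.randn(1, 64, 48, 64)
args = {'kernel_size': 1, 'patch_size': 9, 'stride': 1, 'padding': 0}
corr = correlate(feat1, feat2, args)  # (1, 81, 48, 64): 9x9 patch scores per pixel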
Code Example #28
    def forward(self, edge_index, weight_vector):
        x = self.id_embedding
        edge_index = torch.cat((edge_index, edge_index[[1, 0]]), dim=1)

        if self.has_norm:
            x = F.normalize(x)

        x_hat_1 = self.conv_embed_1(x, edge_index, weight_vector)

        if self.has_act:
            x_hat_1 = F.leaky_relu_(x_hat_1)

        x_hat_2 = self.conv_embed_2(x_hat_1, edge_index, weight_vector)
        if self.has_act:
            x_hat_2 = F.leaky_relu_(x_hat_2)

        return x + x_hat_1 + x_hat_2
Code Example #29
    def decode(self, Z):
        """
        Decode the latent representation Z.
        """
        res = Z
        for M in self.decoders:
            res = F.leaky_relu_(M(res))

        res = res.view(-1, self.n_last_channels, self.d_last_image,
                       self.d_last_image)

        for ii in range(self.n_conv - 1):
            res = F.leaky_relu_(self.deconv[ii](self.deconv_bn[ii](res)))

        res = torch.sigmoid(self.deconv[-1](self.deconv_bn[-1](res)))

        return res
Code Example #30
    def forward(self, input, flip_feat=None):
        # Encoder
        # No norm on the first layer
        e1 = self.e1_c(input)
        e2 = self.e2_norm(self.e2_c(F.leaky_relu_(e1, negative_slope=0.2)))
        e3 = self.e3_norm(self.e3_c(F.leaky_relu_(e2, negative_slope=0.2)))
        e4 = self.e4_norm(self.e4_c(F.leaky_relu_(e3, negative_slope=0.2)))
        e5 = self.e5_norm(self.e5_c(F.leaky_relu_(e4, negative_slope=0.2)))
        e6 = self.e6_norm(self.e6_c(F.leaky_relu_(e5, negative_slope=0.2)))

        e7 = self.e7_norm(self.e7_c(F.leaky_relu_(e6, negative_slope=0.2)))
        # No norm in the inner_most layer
        e8 = self.e8_c(F.leaky_relu_(e7, negative_slope=0.2))

        # Decoder
        d1 = self.d1_norm(self.d1_dc(F.relu_(e8)))
        d2 = self.d2_norm(self.d2_dc(F.relu_(self.cat_feat(d1, e7))))
        d3 = self.d3_norm(self.d3_dc(F.relu_(self.cat_feat(d2, e6))))
        d4 = self.d4_norm(self.d4_dc(F.relu_(self.cat_feat(d3, e5))))
        d5 = self.d5_norm(self.d5_dc(F.relu_(self.cat_feat(d4, e4))))
        tmp, innerFeat = self.shift(
            self.innerCos(F.relu_(self.cat_feat(d5, e3))), flip_feat)
        d6 = self.d6_norm(self.d6_dc(tmp))
        d7 = self.d7_norm(self.d7_dc(F.relu_(self.cat_feat(d6, e2))))
        # No norm on the last layer
        d8 = self.d8_dc(F.relu_(self.cat_feat(d7, e1)))

        d8 = torch.tanh(d8)

        return d8, innerFeat
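A closing caveat on the in-place activations used throughout these examples: autograd raises a RuntimeError when an in-place op overwrites a tensor that an earlier op saved for its backward pass. sigmoid saves its output, so the sketch below trips the check, while the out-of-place F.leaky_relu would not:

import torch
import torch.nn.functional as F

w = torch.randn(3, 3, requires_grad=True)
z = torch.sigmoid(torch.randn(2, 3) @ w)  # sigmoid saves its output z for backward
F.leaky_relu_(z)                          # in-place: bumps z's version counter
try:
    z.sum().backward()
except RuntimeError as err:               # autograd detects the stale saved tensor
    print('in-place activation broke autograd:', err)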