Example #1
    def forward(self, x):
        task = config_task.task
        y = self.conv(x)
        if self.second == 0:
            if config_task.isdropout1:
                x = F.dropout2d(x, p=0.5, training = self.training)
        else:
            if config_task.isdropout2:
                x = F.dropout2d(x, p=0.5, training = self.training)
        if config_task.mode == 'parallel_adapters' and self.is_proj:
            y = y + self.parallel_conv[task](x)
        y = self.bns[task](y)

        return y
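Example #1 applies channel dropout before a per-task parallel adapter and per-task batch norm. Since every snippet on this page leans on the same F.dropout2d semantics, here is a minimal standalone sketch (shapes are made up) of what the call does: during training it zeroes entire channels and rescales the survivors by 1/(1-p); with training=False it is the identity.

import torch
import torch.nn.functional as F

x = torch.ones(2, 4, 3, 3)                     # (batch, channels, H, W)
y = F.dropout2d(x, p=0.5, training=True)       # whole channels become 0, survivors become 1/(1-p) = 2.0
print(y[0, :, 0, 0])                           # e.g. tensor([2., 0., 2., 0.])
print(torch.equal(F.dropout2d(x, p=0.5, training=False), x))  # True: no-op in eval mode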
Example #2
    def forward(self, x):
        conv1 = self.conv1(x)     #1/4
        conv2 = self.conv2(conv1) #1/4
        conv3 = self.conv3(conv2) #1/8
        conv4 = self.conv4(conv3) #1/16
        conv5 = self.conv5(conv4) #1/32

        center_512 = self.center_global_pool(conv5)
        center_64 = self.center_conv1x1(center_512)
        center_64_flatten = center_64.view(center_64.size(0), -1)
        center_fc = self.center_fc(center_64_flatten)

        f = self.center(conv5)
        d5 = self.decoder5(f, conv5)
        d4 = self.decoder4(d5, conv4)
        d3 = self.decoder3(d4, conv3)
        d2 = self.decoder2(d3, conv2)
        d1 = self.decoder1(d2)

        hypercol = torch.cat((
            d1,
            F.upsample(d2, scale_factor=2,mode='bilinear'),
            F.upsample(d3, scale_factor=4, mode='bilinear'),
            F.upsample(d4, scale_factor=8, mode='bilinear'),
            F.upsample(d5, scale_factor=16, mode='bilinear')),1)
        hypercol = F.dropout2d(hypercol, p = 0.50)

        x_no_empty = self.logits_no_empty(hypercol)
        hypercol_add_center = torch.cat((
            hypercol,
            F.upsample(center_64, scale_factor=128,mode='bilinear')),1)

        x_final = self.logits_final( hypercol_add_center)
        return center_fc, x_no_empty, x_final
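Example #2 builds a "hypercolumn" by upsampling every decoder output to d1's resolution and concatenating along the channel dimension before applying channel dropout (F.upsample is the older, now-deprecated name for F.interpolate). A shape-only sketch of that pattern with made-up channel counts:

import torch
import torch.nn.functional as F

d1 = torch.randn(1, 64, 128, 128)   # full-resolution decoder output
d2 = torch.randn(1, 64, 64, 64)     # 1/2 resolution
d5 = torch.randn(1, 64, 8, 8)       # 1/16 resolution

hypercol = torch.cat((
    d1,
    F.interpolate(d2, scale_factor=2, mode='bilinear', align_corners=False),
    F.interpolate(d5, scale_factor=16, mode='bilinear', align_corners=False),
), 1)
print(hypercol.shape)               # torch.Size([1, 192, 128, 128])

# dropping whole channels now removes entire feature maps from the column
hypercol = F.dropout2d(hypercol, p=0.5, training=True)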
Example #3
    def forward(self, x, e = None):
        x = F.upsample(x, scale_factor=2, mode='bilinear')
        if e is not None:
            x = torch.cat([x,e],1)
            x = F.dropout2d(x, p = 0.50)

        x = self.conv1(x)
        x = self.conv2(x)
        x = self.SCSE(x)
        return x
Example #4
 def forward(self, x):
     input_adjust = self.input_adjust(x)
     conv1 = self.conv1(input_adjust)
     conv2 = self.conv2(conv1)
     conv3 = self.conv3(conv2)
     center = self.conv4(conv3)
     dec4 = self.dec4(center)
     dec3 = self.dec3(torch.cat([dec4, conv3], 1))
     dec2 = self.dec2(torch.cat([dec3, conv2], 1))
     dec1 = F.dropout2d(self.dec1(torch.cat([dec2, conv1], 1)), p=self.dropout_2d)
     print('input_adjust ', input_adjust.shape, '\ncenter ' , center.shape, '\ndec1: ', dec1.shape, self.final(dec1).shape)
     return self.final(dec1)
Example #5
 def forward(self, x):
         features = self.encoder.features(x)
         relued_features = self.encoder.relu(features)
         avg_pooled_features = self.encoder.avg_pool(relued_features)
         center = self.center(avg_pooled_features)
         dec5 = self.dec5(torch.cat([center, avg_pooled_features], 1))
         dec4 = self.dec4(torch.cat([dec5, relued_features], 1))
         dec3 = self.dec3(torch.cat([dec4, features], 1))
         dec2 = self.dec2(dec3)
         dec1 = self.dec1(dec2)
         dec0 = self.dec0(dec1)
         return self.final(F.dropout2d(dec0, p=self.dropout_2d))
Example #6
 def forward(self, x):
     input_adjust = self.input_adjust(x)
     conv1 = self.conv1(input_adjust)
     conv2 = self.conv2(conv1)
     conv3 = self.conv3(conv2)
     conv4 = self.conv4(conv3)
     center = self.center(conv4)
     dec5 = self.dec5(torch.cat([center, conv4], 1))
     dec4 = self.dec4(torch.cat([dec5, conv3], 1))
     dec3 = self.dec3(torch.cat([dec4, conv2], 1))
     dec2 = self.dec2(torch.cat([dec3, conv1], 1))
     dec1 = self.dec1(dec2)
     dec0 = self.dec0(dec1)
     #print('input_adjust ', input_adjust.shape, '\ncenter ' , center.shape, '\ndec1: ', dec1.shape, self.final(F.dropout2d(dec0, p=self.dropout_2d).shape))
     return self.final(F.dropout2d(dec0, p=self.dropout_2d))
Example #7
    def forward(self, x):
        conv1 = self.conv1(x)  #1/4
        conv2 = self.conv2(conv1)  #1/4
        conv3 = self.conv3(conv2)  #1/8
        conv4 = self.conv4(conv3)  #1/16
        conv5 = self.conv5(conv4)  #1/32

        center_512 = self.center_global_pool(conv5)
        center_64 = self.center_conv1x1(center_512)
        center_64_flatten = center_64.view(center_64.size(0), -1)
        center_fc = self.center_fc(center_64_flatten)

        f = self.center(conv5)

        conv5 = self.dec5_1x1(conv5)
        d5 = self.decoder5(f, conv5)

        conv4 = self.dec4_1x1(conv4)
        d4 = self.decoder4(d5, conv4)

        conv3 = self.dec3_1x1(conv3)
        d3 = self.decoder3(d4, conv3)

        conv2 = self.dec2_1x1(conv2)
        d2 = self.decoder2(d3, conv2)

        d1 = self.decoder1(d2)

        hypercol = torch.cat(
            (d1, F.upsample(d2, scale_factor=2, mode='bilinear'),
             F.upsample(d3, scale_factor=4, mode='bilinear'),
             F.upsample(d4, scale_factor=8, mode='bilinear'),
             F.upsample(d5, scale_factor=16, mode='bilinear')), 1)

        hypercol = F.dropout2d(hypercol, p=0.50)

        x_no_empty = self.logits_no_empty(hypercol)

        hypercol_add_center = torch.cat(
            (hypercol,
             F.upsample(
                 center_64, scale_factor=hypercol.shape[2], mode='bilinear')),
            1)

        x_final = self.logits_final(hypercol_add_center)

        return center_fc, x_no_empty, x_final
Example #8
    def forward(self, x):
        # (x has shape (batch_size, 1, h, w))
        mean = [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]

        x = torch.cat([
                (x-mean[2])/std[2],
                (x-mean[1])/std[1],
                (x-mean[0])/std[0],
                ], 1)

        e2,e3,e4,e5 = self.resnet(x)
        # e2.shape: [2, 64, 32, 32]
        # e3.shape: [2, 128, 16, 16]
        # e4.shape: [2, 256, 16, 16]
        # e5.shape: [2, 512, 16, 16]
        dlab = self.aspp(e5)   #[2, 256, 16, 16]

        c = self.center(e5)         #[2, 256, 8, 8]
        d5 = self.decoder5(c, e5)   #[2, 64, 16, 16]
        d4 = self.decoder4(d5, e4)  #[2, 64, 16, 16]
        d3 = self.decoder3(d4, e3)  #[2, 64, 16, 16]
        d2 = self.decoder2(d3, e2)  #[2, 64, 32, 32]
        d1 = self.decoder1(d2)      #[2, 64, 64, 64]


        output = torch.cat((
                F.upsample(dlab, scale_factor=8, mode="bilinear", align_corners=False),
                F.upsample(d1, scale_factor=2, mode='bilinear', align_corners=False),
                F.upsample(d2, scale_factor=4, mode='bilinear', align_corners=False),
                F.upsample(d3, scale_factor=8, mode='bilinear', align_corners=False),
                F.upsample(d4, scale_factor=8, mode='bilinear', align_corners=False),
                F.upsample(d5, scale_factor=8, mode='bilinear', align_corners=False),
                ), 1)               #320, 128, 128
        output = self.se_f(output)

        output = F.dropout2d(output, p=0.5)
        output = self.outc(output)          #1, 101,101


        # crop prediction to the same shape as the input in eval mode.
        if not self.training:
            crop_start = (x.shape[-1]-101)//2
            crop_end = crop_start + 101
            output = output[:,:,crop_start:crop_end,crop_start:crop_end]

        return output.squeeze()
Example #9
 def forward(self, x):
     x = self.conv1a(x)
     x = self.batchnorm1a(x)
     x = self.conv1b(x)
     x = self.batchnorm1b(x)
     x = self.conv2(x)
     x = F.elu(self.batchnorm2(x))
     x = F.dropout2d(x,0.5)
     s = x.shape
     a = s[1]*s[2]*s[3] #s[1]*s[2]*s[3] = 32*1*42
     x = x.view(-1,a)
     x = F.elu(self.fc1(x))
     x = F.elu(self.fc2(x))
     x = F.dropout(x,0.5)
     x = F.elu(self.fc3(x))
     x = F.sigmoid(self.fc4(x))
     return x
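Example #9 derives the input size of the first fully connected layer from the runtime shape (s[1]*s[2]*s[3]) before reshaping. The same bookkeeping, written as a quick standalone check (the sizes are the ones mentioned in the comment, the batch size is made up):

import torch

x = torch.randn(5, 32, 1, 42)                  # (batch, C, H, W) after the conv stack
a = x.shape[1] * x.shape[2] * x.shape[3]       # 32 * 1 * 42 = 1344
assert torch.flatten(x, 1).shape == (5, a)     # equivalent to x.view(-1, a) here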
Example #10
    def forward(self, x):
        x = self.conv1(x)
        #x = F.dropout2d(x, p=0.5)

        x = self.conv2(x)
        x = F.dropout2d(x, p=self.dropout_rate, training=self.training)
        x = F.elu(self.conv2_bn(x))
        #x = F.relu(self.conv2_bn(x))

        x = x.view((-1, self.n_channels, self.post_conv_width))
        x = self.max_pool(x)

        x = x.view((x.size(0), -1))
        x = self.linear(x)

        x = F.log_softmax(x, dim=1)
        return x
Example #11
    def forward(self, x):
        fs = []
        for i in range(len(self.feats)):
            x = self.feats[i](x)
            #print(x.size())
            d = F.max_pool2d(x, kernel_size=2, stride=2)
            e = F.max_pool2d(x, kernel_size=2, stride=1)
            x = d
            f = self.score_feats[i](F.dropout2d(e))
            k = 1 << i
            if k > 1:
                f = F.interpolate(f, scale_factor=k)
            #print(f.size())
            fs.append(f)
            #print("f{}: ".format(i),fs[-1].size())

        return self.final(torch.cat(fs[2:], 1))
Example #12
    def forward(self, x):
        conv1 = self.conv1(x)
        conv2 = self.conv2(conv1)
        conv3 = self.conv3(conv2)
        conv4 = self.conv4(conv3)
        conv5 = self.conv5(conv4)
        #pool = self.pool(conv5) # deleted pooling
        #center = self.center(pool)
        center = self.center(conv5)
        dec5 = self.dec5(torch.cat([center, conv5], 1))
        dec4 = self.dec4(torch.cat([dec5, conv4], 1))
        dec3 = self.dec3(torch.cat([dec4, conv3], 1))
        dec2 = self.dec2(torch.cat([dec3, conv2], 1))
        dec1 = self.dec1(dec2)
        dec0 = self.dec0(dec1)

        return self.final(F.dropout2d(dec0, p=self.dropout_2d))
Example #13
    def forward(self, x, z=None):
        # batch_size,C,H,W = x.shape
        mean = [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]
        x[:, 0, :, :] = (x[:, 0, :, :] - mean[0]) / std[0]
        x[:, 1, :, :] = (x[:, 1, :, :] - mean[1]) / std[1]
        x[:, 2, :, :] = (x[:, 2, :, :] - mean[2]) / std[2]

        x = self.conv1(x)  # 128
        p = F.max_pool2d(x, kernel_size=2, stride=2)  # 64

        e1 = self.encoder1(p)  # 64
        e2 = self.encoder2(e1)  # 32
        e3 = self.encoder3(e2)  # 16
        e4 = self.encoder4(e3)  # 8

        c = self.center(e4)  # 4

        d5 = self.decoder5(c, e4)  # 8
        d4 = self.decoder4(d5, e3)  # 16
        d3 = self.decoder3(d4, e2)  # 32
        d2 = self.decoder2(d3, e1)  # 64
        d1 = self.decoder1(d2, x)  # 128

        f = torch.cat([
            d1,
            F.upsample(self.reducer2(d2),
                       scale_factor=2,
                       mode='bilinear',
                       align_corners=False),
            F.upsample(self.reducer3(d3),
                       scale_factor=4,
                       mode='bilinear',
                       align_corners=False),
            F.upsample(self.reducer4(d4),
                       scale_factor=8,
                       mode='bilinear',
                       align_corners=False),
            F.upsample(self.reducer5(d5),
                       scale_factor=16,
                       mode='bilinear',
                       align_corners=False),
        ], 1)
        f = F.dropout2d(f, p=0.20)
        logit = self.logit(f)
        return logit
Example #14
 def forward(self, user, clicked_news_length, candidate_news, clicked_news):
     """
     Args:
         user: batch_size,
         clicked_news_length: batch_size,
         candidate_news:
             [
                 {
                     "category": Tensor(batch_size),
                     "subcategory": Tensor(batch_size),
                     "title": Tensor(batch_size) * num_words_title
                 } * (1 + K)
             ]
         clicked_news:
             [
                 {
                     "category": Tensor(batch_size),
                     "subcategory": Tensor(batch_size),
                     "title": Tensor(batch_size) * num_words_title
                 } * num_clicked_news_a_user
             ]
     Returns:
         click_probability: batch_size
     """
     # 1 + K, batch_size, num_filters * 3
     candidate_news_vector = torch.stack(
         [self.news_encoder(x) for x in candidate_news])
     # ini: batch_size, num_filters * 3
     # con: batch_size, num_filters * 1.5
     # TODO what if not drop
     user = F.dropout2d(self.user_embedding(
         user.to(device)).unsqueeze(dim=0),
                        p=self.config.masking_probability,
                        training=self.training).squeeze(dim=0)
     # batch_size, num_clicked_news_a_user, num_filters * 3
     clicked_news_vector = torch.stack(
         [self.news_encoder(x) for x in clicked_news], dim=1)
     # batch_size, num_filters * 3
     user_vector = self.user_encoder(user, clicked_news_length,
                                     clicked_news_vector)
     # batch_size, 1 + K
     click_probability = torch.stack([
         self.click_predictor(x, user_vector) for x in candidate_news_vector
     ],
                                     dim=1)
     return click_probability
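In Example #14 the user embedding of shape (batch_size, dim) is unsqueezed before F.dropout2d so that each user's whole vector acts as one "channel" and is kept or zeroed in its entirety with probability masking_probability. A sketch of that trick; it uses an explicit 4-D shape because how dropout2d handles 3-D inputs has changed across PyTorch releases:

import torch
import torch.nn.functional as F

batch_size, emb_dim = 4, 8
user = torch.ones(batch_size, emb_dim)

# (batch, dim) -> (1, batch, dim, 1): each "channel" is one user's embedding
masked = F.dropout2d(user.unsqueeze(0).unsqueeze(-1), p=0.5,
                     training=True).squeeze(-1).squeeze(0)
print((masked == 0).all(dim=1))   # True for users whose whole vector was dropped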
Example #15
    def forward(self, inputs_mean, inputs_variance):
        if self.training:
            binary_mask = torch.ones_like(inputs_mean)
            binary_mask = F.dropout2d(binary_mask, self.p, self.training,
                                      self.inplace)

            outputs_mean = inputs_mean * binary_mask
            outputs_variance = inputs_variance * binary_mask**2

            if self._keep_variance_fn is not None:
                outputs_variance = self._keep_variance_fn(outputs_variance)
            return outputs_mean, outputs_variance

        outputs_variance = inputs_variance
        if self._keep_variance_fn is not None:
            outputs_variance = self._keep_variance_fn(outputs_variance)
        return inputs_mean, outputs_variance
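Example #15 pushes a (mean, variance) pair through dropout: the same channel mask multiplies the mean, and the variance is multiplied by the squared mask, since Var(m·X) = m²·Var(X) for a fixed mask m. A standalone functional sketch of that rule (names are illustrative, not the class above):

import torch
import torch.nn.functional as F

def dropout2d_mean_var(mean, var, p=0.5, training=True):
    if not training:
        return mean, var
    mask = F.dropout2d(torch.ones_like(mean), p=p, training=True)
    return mean * mask, var * mask ** 2

m = torch.randn(2, 4, 8, 8)
v = torch.full_like(m, 0.1)
out_mean, out_var = dropout2d_mean_var(m, v)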
Example #16
    def forward(self, x):
        conv1 = self.conv1(x)
        conv2 = self.conv2(self.pool(conv1))
        conv3 = self.conv3(self.pool(conv2))
        conv4 = self.conv4(self.pool(conv3))
        conv5 = self.conv5(self.pool(conv4))

        center = self.center(self.pool(conv5))

        dec5 = self.dec5(torch.cat([center, conv5], 1))

        dec4 = self.dec4(torch.cat([dec5, conv4], 1))
        dec3 = self.dec3(torch.cat([dec4, conv3], 1))
        dec2 = self.dec2(torch.cat([dec3, conv2], 1))
        dec1 = self.dec1(torch.cat([dec2, conv1], 1))

        return self.final(F.dropout2d(dec1, p=self.dropout_2d))
Example #17
    def forward(self, x):
        conv1 = self.conv1(x)
        conv2 = self.conv2(conv1)
        conv3 = self.conv3(conv2)
        conv4 = self.conv4(conv3)
        conv5 = self.conv5(conv4)
        #pool = self.pool(conv5) # deleted pooling
        #center = self.center(pool)
        center = self.center(conv5)
        dec5 = self.dec5(torch.cat([center, conv5], 1))
        dec4 = self.dec4(torch.cat([dec5, conv4], 1))
        dec3 = self.dec3(torch.cat([dec4, conv3], 1))
        dec2 = self.dec2(torch.cat([dec3, conv2], 1))
        dec1 = self.dec1(dec2)
        dec0 = self.dec0(dec1)

        return self.final(F.dropout2d(dec0, p=self.dropout_2d))
Example #18
    def forward(self, x):
        conv1 = self.conv1(x)
        conv2 = self.conv2(self.pool(conv1))
        conv3 = self.conv3(self.pool(conv2))
        conv4 = self.conv4(self.pool(conv3))
        conv5 = self.conv5(self.pool(conv4))

        center = self.center(self.pool(conv5))

        dec5 = self.dec5(torch.cat([center, conv5], 1))

        dec4 = self.dec4(torch.cat([dec5, conv4], 1))
        dec3 = self.dec3(torch.cat([dec4, conv3], 1))
        dec2 = self.dec2(torch.cat([dec3, conv2], 1))
        dec1 = self.dec1(torch.cat([dec2, conv1], 1))

        return self.final(F.dropout2d(dec1, p=self.dropout_2d))
Example #19
    def forward(self, x):
        conv1 = self.conv1(x)  # 8, 64, 56, 56
        conv2 = self.conv2(conv1)  # 8, 256, 56, 56
        conv3 = self.conv3(conv2)  # 8, 512, 28, 28
        conv4 = self.conv4(conv3)  # 8, 1024, 14, 14
        conv5 = self.conv5(conv4)  # 8, 2048, 7, 7

        center = self.center(self.pool(conv5))  # 8, 256, 6, 6

        dec5 = self.dec5(torch.cat([center, conv5], 1))
        dec4 = self.dec4(torch.cat([dec5, conv4], 1))
        dec3 = self.dec3(torch.cat([dec4, conv3], 1))
        dec2 = self.dec2(torch.cat([dec3, conv2], 1))
        dec1 = self.dec1(dec2)
        dec0 = self.dec0(dec1)

        return self.final(F.dropout2d(dec0, p=self.dropout_2d))
Example #20
 def forward(self, x):
     x = self.conv1(x)
     x = F.relu(x)
     x = self.conv2(x)
     x = F.max_pool2d(x, 2)
     x = F.dropout2d(x, 0.1)
     x = self.conv3(x)
     x = F.relu(x)
     x = torch.flatten(x, 1)
     x = self.fc1(x)
     x = F.relu(x)
     # x = self.fc2(x)
     # x = F.relu(x)
     x = self.fc3(x)
     # output = F.softmax(x, dim=1)
     output = F.log_softmax(x, dim=1)
     return output
Example #21
 def forward(self, input, att, word):
     ## FC q
     word_W = F.dropout(word, self.dropout, training = self.training)
     weight = F.tanh(self.fcq_w(word_W)).view(-1,self.num_features,1,1)
     ## FC v
     v = F.dropout2d(input, self.dropout, training = self.training)
     v = v * F.relu(1-att).unsqueeze(1).expand_as(input)
     v = F.tanh(self.conv1(v))
     ## attMap
     inputAttShift = F.tanh(self.fcShift1(torch.cat((att.view(-1,self.num_outputs*14*14),word),1)))
     inputAttShift = F.tanh(self.fcShift2(inputAttShift)).view(-1,self.num_features,1,1)
     ## v * q_tile
     v = v * weight.expand_as(v) * inputAttShift.expand_as(v) # v = self.cbn1(F.tanh(v),word) #apply non-linear before cbn equal to MLB
      # no tanh should be here
     v = self.conv2(v)
     # Normalize to single area
     return F.softmax(v.view(-1,14*14), dim=1).view(-1,self.num_outputs,14,14)
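The final line of Example #21 turns the logits into an attention map that sums to one over the 14×14 spatial grid by flattening, applying softmax, and reshaping back. A small sketch of just that normalization (batch size and a single output map assumed):

import torch
import torch.nn.functional as F

v = torch.randn(2, 1, 14, 14)                                   # attention logits
att = F.softmax(v.view(-1, 14 * 14), dim=1).view(-1, 1, 14, 14)
print(att.sum(dim=(2, 3)))                                      # each 14x14 map sums to 1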
Example #22
    def forward(self, x):
        # mean=[0.485, 0.456, 0.406]
        # std=[0.229,0.224,0.225]
        # x=torch.cat([
        #    (x-mean[2])/std[2],
        #    (x-mean[1])/std[1],
        #    (x-mean[0])/std[0],
        # ],1)

        e1 = self.conv1(x)
        #print(e1.size())
        e2 = self.encoder2(e1)
        #print('e2',e2.size())
        e3 = self.encoder3(e2)
        #print('e3',e3.size())
        e4 = self.encoder4(e3)
        #print('e4',e4.size())
        e5 = self.encoder5(e4)
        #print('e5',e5.size())

        f = self.center(e5)
        #print('f',f.size())
        d5 = self.decoder5(f, e5)
        d4 = self.decoder4(d5, e4)
        d3 = self.decoder3(d4, e3)
        d2 = self.decoder2(d3, e2)
        d1 = self.decoder1(d2)
        #print('d1',d1.size())

        f = torch.cat((
            F.upsample(
                e1, scale_factor=2, mode='bilinear', align_corners=False),
            d1,
            F.upsample(
                d2, scale_factor=2, mode='bilinear', align_corners=False),
            F.upsample(
                d3, scale_factor=4, mode='bilinear', align_corners=False),
            F.upsample(
                d4, scale_factor=8, mode='bilinear', align_corners=False),
            F.upsample(
                d5, scale_factor=16, mode='bilinear', align_corners=False),
        ), 1)

        f = F.dropout2d(f, p=0.50)
        logit = self.logit(f)
        return logit
Example #23
    def forward(self, seq_id_list, statistics_input, statistics_seq_input_list,
                seq_lengths):
        batch_size = seq_id_list[0].shape[0]

        # sequence ID embeddings
        seq_feature_list = []
        for i, seq_id in enumerate(seq_id_list):
            feature_name = list(self.seq_embedding_features.keys())[i]
            embeddings = self.embeds[feature_name](seq_id.to(self.device))
            seq_feature_list.append(embeddings)

        seq_input = torch.cat(seq_feature_list, 2)
        seq_input = F.dropout2d(seq_input, 0.1, training=self.training)

        # LSTM
        seq_output, _ = self.lstm(seq_input)
        # mask padding
        mask = torch.zeros(seq_output.shape).to(self.device)
        for idx, seqlen in enumerate(seq_lengths):
            mask[idx, :seqlen] = 1

        seq_output = seq_output * mask
        lstm_output_max, _ = torch.max(seq_output, dim=1)

        # Attention
        Q = self.Q_weight(seq_output)
        K = self.K_weight(seq_output)
        V = self.V_weight(seq_output)

        tmp = torch.bmm(Q, K.transpose(1, 2))
        tmp = tmp / np.sqrt(self.attention_output_size)
        w = torch.softmax(tmp, 2)
        att_output = torch.bmm(w, V)
        att_output = att_output * mask
        att_max_output, _ = torch.max(att_output, dim=1)

        # concatenate the statistical features
        cat_output = torch.cat(
            [att_max_output, lstm_output_max, statistics_input], 1)

        # DNN
        dnn_output = self.linears(cat_output)
        age_output = self.age_output(dnn_output)

        return age_output
Example #24
    def generate_init_samples(self, im: torch.Tensor) -> TensorList:
        # Do data augmentation
        transforms = [augmentation.Identity()]
        if 'shift' in self.params.augmentation:
            transforms.extend([
                augmentation.Translation(shift)
                for shift in self.params.augmentation['shift']
            ])
        if 'fliplr' in self.params.augmentation and self.params.augmentation[
                'fliplr']:
            transforms.append(augmentation.FlipHorizontal())
        if 'rotate' in self.params.augmentation:
            transforms.extend([
                augmentation.Rotate(angle)
                for angle in self.params.augmentation['rotate']
            ])
        if 'blur' in self.params.augmentation:
            transforms.extend([
                augmentation.Blur(sigma)
                for sigma in self.params.augmentation['blur']
            ])

        init_samples = self.params.features.extract_transformed(
            im, self.pos, self.target_scale, self.img_sample_sz, transforms)

        # Remove augmented samples for those that shall not have
        for i, use_aug in enumerate(
                self.fparams.attribute('use_augmentation')):
            if not use_aug:
                init_samples[i] = init_samples[i][0:1, ...]

        if 'dropout' in self.params.augmentation:
            num, prob = self.params.augmentation['dropout']
            for i, use_aug in enumerate(
                    self.fparams.attribute('use_augmentation')):
                if use_aug:
                    init_samples[i] = torch.cat([
                        init_samples[i],
                        F.dropout2d(init_samples[i][0:1, ...].expand(
                            num, -1, -1, -1),
                                    p=prob,
                                    training=True)
                    ])

        return init_samples
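The 'dropout' branch of Example #24 is feature-level augmentation rather than regularization: the first extracted sample is replicated num times and F.dropout2d is called with training=True so that random channels are zeroed in each copy. A minimal sketch of the same trick (tensor and parameter values are made up):

import torch
import torch.nn.functional as F

feat = torch.randn(1, 16, 32, 32)        # one extracted feature sample
num, prob = 3, 0.2

augmented = torch.cat([
    feat,
    F.dropout2d(feat.expand(num, -1, -1, -1), p=prob, training=True),
])
print(augmented.shape)                    # torch.Size([4, 16, 32, 32])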
Example #25
    def forward(self, x):
        x_crop_start = (x.shape[-1] - 101) // 2
        x_crop_end = x_crop_start + 101
        x = x[:, :, x_crop_start:x_crop_end, x_crop_start:x_crop_end]
        #resize image to 256*256
        x = F.upsample(x, size=(256, 256), mode="bilinear")
        mean = [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]

        x = torch.cat([
            (x - mean[2]) / std[2],
            (x - mean[1]) / std[1],
            (x - mean[0]) / std[0],
        ], 1)

        x = self.conv1(x)  #64, 64, 64
        e2 = self.encoder2(x)  #64, 64, 64
        e3 = self.encoder3(e2)  #128, 32, 32
        e4 = self.encoder4(e3)  #256, 16, 16
        e5 = self.encoder5(e4)  #512, 8, 8

        f = self.center(e5)  #256, 4, 4
        d5 = self.decoder5(f, e5)  #64, 8, 8
        d4 = self.decoder4(d5, e4)  #64, 16, 16
        d3 = self.decoder3(d4, e3)  #64, 32, 32
        d2 = self.decoder2(d3, e2)  #64, 64, 64
        d1 = self.decoder1(d2)  #64, 128, 128

        f = torch.cat((
            d1,
            F.upsample(
                d2, scale_factor=2, mode='bilinear', align_corners=False),
            F.upsample(
                d3, scale_factor=4, mode='bilinear', align_corners=False),
            F.upsample(
                d4, scale_factor=8, mode='bilinear', align_corners=False),
            F.upsample(
                d5, scale_factor=16, mode='bilinear', align_corners=False),
        ), 1)  #320, 128, 128
        f = self.se_f(f)
        f = F.dropout2d(f, p=0.5)
        out = F.upsample(f, size=(101, 101), mode="bilinear").squeeze()
        out = self.outc(out)  #1, 101,101

        return out
Example #26
    def learn_from_batch_ae(self, data, device):
        """
        Generate loss value.

        Args:
          data (arr): input data
          device (str): choice of gpu or cpu for running model

        Returns:
          loss (float): loss value
        """
        seq = data["sequence"].clone()
        seq[:, :, :14] = F.dropout2d(seq[:, :, :14], p=0.3)
        target = data["sequence"][:, :, :14]
        out = self.model(seq.to(device), data["bpp"].to(device))
        loss = F.binary_cross_entropy(out, target.to(device))

        return loss
Example #27
    def forward(self, x):
        encoder2, encoder3, encoder4, encoder5 = self.encoder(x)
        encoder5 = F.dropout2d(encoder5, p=self.dropout_2d)

        gcn2 = self.enc_br2(self.gcn2(encoder2))
        gcn3 = self.enc_br3(self.gcn3(encoder3))
        gcn4 = self.enc_br4(self.gcn4(encoder4))
        gcn5 = self.enc_br5(self.gcn5(encoder5))

        decoder5 = self.deconv5(gcn5)
        decoder4 = self.deconv4(self.dec_br4(decoder5 + gcn4))
        decoder3 = self.deconv3(self.dec_br3(decoder4 + gcn3))
        decoder2 = self.dec_br1(self.deconv2(self.dec_br2(decoder3 + gcn2)))

        if self.pool0:
            decoder2 = self.dec_br0_2(self.deconv1(self.dec_br0_1(decoder2)))

        return self.final(decoder2)
Example #28
    def forward(self, x1, x2):
        x1 = self.up(x1)

        # input is CHW
        diffY = x2.size()[2] - x1.size()[2]
        diffX = x2.size()[3] - x1.size()[3]

        x1 = F.pad(
            x1,
            (diffX // 2, diffX - diffX // 2, diffY // 2, diffY - diffY // 2))

        # for padding issues, see
        # https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
        # https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd

        x = torch.cat([x2, x1], dim=1)
        x = F.dropout2d(self.conv(x), p=0.25, training=True)
        return x
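Example #28 calls F.dropout2d with training=True unconditionally, so the dropout stays stochastic even when the module is in eval mode. One common use of that pattern is Monte-Carlo dropout, where repeated forward passes are averaged to get a prediction and a spread; whether that is the intent here isn't stated, so the following is only a sketch of that usage (module and sizes are made up):

import torch
import torch.nn as nn
import torch.nn.functional as F

class MCBlock(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 8, 3, padding=1)

    def forward(self, x):
        # training=True keeps channel dropout active even under model.eval()
        return F.dropout2d(self.conv(x), p=0.25, training=True)

net = MCBlock().eval()
x = torch.randn(1, 3, 16, 16)
with torch.no_grad():
    samples = torch.stack([net(x) for _ in range(10)])
mean_pred, spread = samples.mean(0), samples.var(0)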
Example #29
    def forward(self, x):
        x = self.exp(x)
        out = self.relu(self.conv_input(x))
        residual = out
        out = self.residual(out)

        clf_out = torch.flatten(self.avgpool(self.conv_clf(out)), 1)
        clf_out = F.dropout2d(clf_out, training=self.training)
        clf_out = self.fc(clf_out)

        out = self.bn_mid(self.conv_mid(out))
        out = torch.add(out,residual)
        shape = out.shape
        out = out.view(shape[0], shape[1]//(self.duration//self.upscale), self.duration//self.upscale, *shape[2:])
        out = self.upscale1x(out)
        out = self.residual2(out)
        out = self.conv_output(out)
        return self.activation(out), F.log_softmax(clf_out, dim=1)
Example #30
 def forward(self, x):
     x = self.first(x)
     e1 = self.enc1(x)
     e2 = self.enc2(e1)
     e3 = self.enc3(e2)
     e4 = self.enc4(e3)
     d1 = torch.cat([self.dec1(e4), e3], 1)
     d2 = torch.cat([self.dec2(d1), e2], 1)
     d3 = torch.cat([self.dec3(d2), e1], 1)
     d4 = self.dec4(d3)
     p1 = self.fpn1(d4)
     p2 = self.fpn2(d3)
     p3 = self.fpn3(d2)
     p4 = self.fpn4(d1)
     out = torch.cat([p1, p2, p3, p4], 1)
     out = F.dropout2d(out, 0.3, training=self.training)
     out = self.final(out)
     return activation(out)
Example #31
 def forward(self, x):
     e1, e2, e3, e4, e5 = self.encoder(x)
     c = self.center(e5)
     d5, d4, d3, d2, d1 = self.decoder(c, e5, e4, e3, e2)
     f = torch.cat(
         (d1,
          F.interpolate(
              d2, scale_factor=2, mode='bilinear', align_corners=True),
          F.interpolate(
              d3, scale_factor=4, mode='bilinear', align_corners=True),
          F.interpolate(
              d4, scale_factor=8, mode='bilinear', align_corners=True),
          F.interpolate(
              d5, scale_factor=16, mode='bilinear', align_corners=True)),
         1)  # 320, 128, 128
     f = F.dropout2d(f, p=0.2)
     logit = self.logit(f)  # 1, 128, 128
     return logit
Example #32
    def forward(self, x):
        if self.dist == 'bernoulli' and not self.linear:
            out = F.dropout2d(x, self.p, self.training)
        elif self.dist == 'bernoulli' and self.linear:
            out = F.dropout(x, self.p, self.training)
        elif self.dist == 'gaussian':
            if self.training:
                with torch.no_grad():
                    soft_mask = x.new_empty(x.size()).normal_(self.mu, self.sigma).clamp_(0., 1.)

                scale = 1. / self.mu
                out = scale * soft_mask * x
            else:
                out = x
        else:
            out = x

        return out
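In the 'gaussian' branch of Example #32 the soft mask is drawn from N(mu, sigma) and clamped to [0, 1], and the output is rescaled by 1/mu so the expected activation stays roughly unchanged (exactly unchanged only when the clamp rarely triggers). A quick numerical check of that scaling with made-up values:

import torch

mu, sigma = 0.5, 0.1
x = torch.ones(100_000)
soft_mask = x.new_empty(x.size()).normal_(mu, sigma).clamp_(0., 1.)
out = (1. / mu) * soft_mask * x
print(out.mean())   # close to 1.0, because E[soft_mask] is approximately mu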
Example #33
    def init(self, tgt_sents, tgt_masks, src_enc, src_masks, init_scale=1.0, init_mu=True, init_var=True):
        with torch.no_grad():
            x = self.embed_scale * self.tgt_embed(tgt_sents)
            x = F.dropout2d(x, p=self.dropword, training=self.training)
            x += self.pos_enc(tgt_sents)
            x = F.dropout(x, p=0.2, training=self.training)

            mask = tgt_masks.eq(0)
            key_mask = src_masks.eq(0)
            for layer in self.layers:
                x = layer.init(x, mask, src_enc, key_mask, init_scale=init_scale)

            x = x * tgt_masks.unsqueeze(2)
            mu = self.mu.init(x, init_scale=0.05 * init_scale) if init_mu else self.mu(x)
            logvar = self.logvar.init(x, init_scale=0.05 * init_scale) if init_var else self.logvar(x)
            mu = mu * tgt_masks.unsqueeze(2)
            logvar = logvar * tgt_masks.unsqueeze(2)
            return mu, logvar
Example #34
    def forward(self, x):


        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2, 2)

        x = self.inception1(x)
        x = F.max_pool2d(x, 2, 2)
        x = self.inception2(x)
        x = self.conv2(x)

        #x = F.max_pool2d(x, 2, 2)
        x = F.dropout2d(x, 0.5)
        x = x.view(-1, 50* 4 * 4)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        x = F.softmax(x, dim=1)
        x = torch.exp(x)
        return x
Example #35
    def forward(self, input_tensor):
        """Forward propagation of network model with additional layers
          Args:
            input_tensor - input tensor
          Returns:
            logits - calculated output logits
        """

        x = self.base_model.features(input_tensor)
        x = F.dropout2d(x, p=self.keep_prob, training=self.training, inplace=self.inplace)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc1(x)
        x = self.bn_f1(x)
        x = F.relu(x, inplace=self.inplace)
        x = F.dropout(x, p=self.keep_dense_prob, training=self.training, inplace=self.inplace)
        logits = self.fc_logits(x)

        return logits
Example #36
    def forward(self, x):

        e1 = self.conv1(x)  #64, 128, 800
        #print('e1',e1.size())
        e2 = self.encoder2(e1)  #64, 128, 800
        #print('e2',e2.size())
        e3 = self.encoder3(e2)  #128, 64, 400
        #print('e3',e3.size())
        e4 = self.encoder4(e3)  #256, 32, 200
        #print('e4',e4.size())
        e5 = self.encoder5(e4)  #512, 16, 100
        #print('e5',e5.size())

        f = self.center(e5)  #256, 8, 50
        #print('f',f.size())
        d5 = self.decoder5(f, e5)  #64, 16, 100
        #print('d5',d5.size())
        d4 = self.decoder4(d5, e4)  #64, 32, 200
        #print('d4', d4.size())
        d3 = self.decoder3(d4, e3)  #64, 64, 400
        #print('d3', d3.size())
        d2 = self.decoder2(d3, e2)  #64, 128, 800
        #print('d2', d2.size())
        d1 = self.decoder1(d2)  #64, 256, 1600
        #print('d1',d1.size())

        f = torch.cat((
            F.upsample(
                e1, scale_factor=2, mode='bilinear', align_corners=False),
            d1,
            F.upsample(
                d2, scale_factor=2, mode='bilinear', align_corners=False),
            F.upsample(
                d3, scale_factor=4, mode='bilinear', align_corners=False),
            F.upsample(
                d4, scale_factor=8, mode='bilinear', align_corners=False),
            F.upsample(
                d5, scale_factor=16, mode='bilinear', align_corners=False),
        ), 1)

        f = F.dropout2d(f, p=0.50)
        logit = self.logit(f)
        return logit
Example #37
    def forward(self, inputs, hidden_state):
        seq_len, batch, channel, height, width = inputs.size()
        i2h = self.i2h(torch.reshape(inputs, (-1, channel, height, width)))
        i2h = torch.reshape(
            i2h, (seq_len, batch, i2h.size(1), i2h.size(2), i2h.size(3)))
        i2h_slice = torch.split(i2h, self.hidden_dim, dim=2)

        previous_hidden = hidden_state
        outputs = []
        for i in range(seq_len):
            flows = self._flow_generator(inputs[i], previous_hidden)
            wrapped_data = []
            for j in range(len(flows)):
                flow = flows[j]
                wrapped_data.append(
                    self.wrap(previous_hidden, -flow, self.device))
            wrapped_data = torch.cat(wrapped_data, dim=1)
            h2h = self.ret(wrapped_data)
            h2h_slice = torch.split(h2h, self.hidden_dim, dim=1)
            reset_gate = torch.sigmoid(i2h_slice[0][i, ...] + h2h_slice[0])
            update_gate = torch.sigmoid(i2h_slice[1][i, ...] + h2h_slice[1])
            new_mem = self.activation(i2h_slice[2][i, ...] +
                                      reset_gate * h2h_slice[2])
            next_hidden = update_gate * previous_hidden + (
                1 - update_gate) * new_mem

            if self.debug:
                self.log("reset gate at t={}".format(i),
                         self.get_size(reset_gate))
                self.log("update gate at t={}".format(i),
                         self.get_size(update_gate))
                self.log("next hidden at t={}".format(i),
                         self.get_size(next_hidden))

            if self.zoneout > 0.0:
                # zoneout: keep whole channels of the previous hidden state
                # with probability self.zoneout (boolean keep-mask, since
                # torch.where expects a bool condition)
                keep = F.dropout2d(torch.ones_like(previous_hidden),
                                   p=self.zoneout) > 0
                next_hidden = torch.where(keep, next_hidden, previous_hidden)

            outputs.append(next_hidden)
            previous_hidden = next_hidden

        return torch.stack(outputs), next_hidden
Example #38
    def forward(self, x):
        x0, x1, x2, x3, x4 = self.encoder(x)

        d = self.center(x4)
        d5 = self.decoder5(d, x4)
        d4 = self.decoder4(d5, x3)
        d3 = self.decoder3(d4, x2)
        d2 = self.decoder2(d3, x1)
        d1 = self.decoder1(d2)

        out = torch.cat([F.interpolate(x0, scale_factor=2, mode='bilinear', align_corners=True),
                         d1,
                         F.interpolate(d2, scale_factor=2, mode='bilinear', align_corners=True),
                         F.interpolate(d3, scale_factor=4, mode='bilinear', align_corners=True),
                         F.interpolate(d4, scale_factor=8, mode='bilinear', align_corners=True),
                         F.interpolate(d5, scale_factor=16, mode='bilinear', align_corners=True)], 1)
        out = F.dropout2d(out, .5)
        out = self.cbr_last(out)
        return out