def forward(self, x, alpha):
        # (x has shape (batch_size, 3, h, w))

        h = x.size()[2]
        w = x.size()[3]

        feature_map = self.network.get_feature(
            x
        )  # (shape: (batch_size, 512, h/16, w/16) when the backbone is ResNet18_OS16 or ResNet34_OS16; (batch_size, 512, h/8, w/8) for ResNet18_OS8 or ResNet34_OS8; (batch_size, 4*512, h/16, w/16) for ResNet50-152)

        output = self.network(
            x)  # (shape: (batch_size, num_classes, h/16, w/16))

        # Domain head: reverse the gradient of the backbone features, then
        # conv -> max-pool -> flatten -> fc -> log-softmax.
        reverse_feature = ReverseLayerF.apply(feature_map, alpha)
        temp = self.conv1(reverse_feature)
        temp = F.max_pool2d(temp, (7, 7))
        temp = temp.view(temp.size(0), -1)
        temp = self.fc(temp)
        temp = F.log_softmax(temp, dim=1)
        return output, temp
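
All of these snippets rely on a ReverseLayerF autograd function that is not shown on this page. A minimal sketch of the standard gradient reversal layer they usually refer to (an assumption here): the forward pass is the identity, and the backward pass multiplies the incoming gradient by -alpha.

from torch.autograd import Function


class ReverseLayerF(Function):
    @staticmethod
    def forward(ctx, x, alpha):
        # Identity in the forward direction; remember the scaling factor.
        ctx.alpha = alpha
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        # Flip the sign of the gradient and scale it by alpha; alpha itself
        # receives no gradient (hence the trailing None).
        return grad_output.neg() * ctx.alpha, None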
Example #2
 def forward(self, noise, att, alpha):
     h = torch.cat((noise, att), 1)
     h1 = self.lrelu(self.fc1(h))
     h2 = self.relu(self.fc2(h1))
     feature = h2
     reverse_feature = ReverseLayerF.apply(feature, alpha)
     return feature, reverse_feature
Example #3
    def forward(self, x, alpha):
        # print("x before model shape:"+str(x.shape))
        out16 = self.in_tr(x)
        out32 = self.down_tr32(out16)
        out64 = self.down_tr64(out32)
        out128 = self.down_tr128(out64)
        # print("out16(in_tr) shape:"+str(out16.shape))
        # print("out32(down_tr32) shape:"+str(out32.shape))
        # print("out64(down_tr64) shape:"+str(out64.shape))
        # print("out128(down_tr128) shape:"+str(out128.shape))
        out256 = self.down_tr256(out128)
        # print("out256(down_tr256) shape:"+str(out256.shape))

        feature = out256.view(-1, 256 * 4 * 4 * 8)
        reverse_feature = ReverseLayerF.apply(feature, alpha)
        domain_output = self.domain_classifier(reverse_feature)

        out = self.up_tr256(out256, out128)
        # print("up_tr256 shape:"+str(out.shape))
        out = self.up_tr128(out, out64)
        # print("up_tr128 shape:"+str(out.shape))
        out = self.up_tr64(out, out32)
        # print("up_tr64 shape:"+str(out.shape))
        out = self.up_tr32(out, out16)
        # print("up_tr32 shape:"+str(out.shape))
        out = self.out_tr(out)
        # print("out_tr shape:"+str(out.shape))
        return out, domain_output
Example #4
 def forward(self, x, alpha):
     classify = self.fc1(x)
     rev = ReverseLayerF.apply(x, alpha)
     d = self.fc2(rev)
     d = self.bn1(d)
     d = F.relu(d)
     discriminate = self.fc3(d)
     return classify, discriminate
Example #5
    def forward(self, input_data, alpha):
        input_data = input_data.expand(input_data.data.shape[0], 3, 28, 28)
        feature = self.feature(input_data)
        feature = feature.view(-1, 50 * 4 * 4)
        reverse_feature = ReverseLayerF.apply(feature, alpha)
        domain_output = self.domain_classifier(reverse_feature)

        return domain_output
Example #6
    def dann(self, input_data, alpha):
        feature = self.feature(input_data)
        feature = feature.view(feature.size(0), -1)
        reverse_feature = ReverseLayerF.apply(feature, alpha)
        class_output = self.class_classifier(feature)
        domain_output = self.domain_classifier(reverse_feature)

        return class_output, domain_output, feature
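
For context, a minimal training-step sketch of how the two outputs of a `dann`-style forward are typically used. All names here (model, src_batch, tgt_batch, optimizer, p) are assumptions, and both heads are assumed to return raw logits; if they end in log_softmax, use F.nll_loss instead of F.cross_entropy.

import numpy as np
import torch
import torch.nn.functional as F


def train_step(model, src_batch, tgt_batch, optimizer, p):
    # p in [0, 1] is the training progress; alpha ramps smoothly from 0 to 1,
    # as in the original DANN schedule.
    alpha = 2.0 / (1.0 + np.exp(-10 * p)) - 1.0

    src_x, src_y = src_batch
    tgt_x, _ = tgt_batch

    class_out, src_dom_out, _ = model.dann(src_x, alpha)
    _, tgt_dom_out, _ = model.dann(tgt_x, alpha)

    # Domain labels: 0 for source, 1 for target.
    src_dom_y = torch.zeros(src_x.size(0), dtype=torch.long)
    tgt_dom_y = torch.ones(tgt_x.size(0), dtype=torch.long)

    loss = (F.cross_entropy(class_out, src_y)
            + F.cross_entropy(src_dom_out, src_dom_y)
            + F.cross_entropy(tgt_dom_out, tgt_dom_y))

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()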
Example #7
    def forward(self, input_data, alpha):
        feature = self.feature(input_data)
        feature = feature.view(-1, 120)
        reverse_feature = ReverseLayerF.apply(feature, alpha)
        class_output = self.class_classifier(feature)
        domain_output = self.domain_classifier(reverse_feature)

        return class_output, domain_output
Example #8
    def forward(self, input_data, alpha):
        input_data = input_data.expand(input_data.data.shape[0], 3, 28, 28)
        feature = self.feature(input_data)
        reverse_feature = ReverseLayerF.apply(feature, alpha)
        class_output = self.class_classifier(feature)
        domain_output = self.domain_classifier(reverse_feature)

        return class_output, domain_output
Example #9
    def forward(self, input_data, alpha):
        input_data = input_data.float()
        feature = self.feature(input_data)
        reverse_feature = ReverseLayerF.apply(feature, alpha)
        class_output = self.class_classifier(feature)
        domain_output = self.domain_classifier(reverse_feature)

        return class_output, domain_output
Example #10
    def forward(self, input_data, mode, rec_scheme, p=0.0):

        result = []

        if mode == 'source':

            # source private encoder
            private_feat = self.source_encoder_conv(input_data)
            #print('source private feature shape before fc:', private_feat.shape)
            private_feat = private_feat.view(-1, 64 * 7 * 7)
            private_code = self.source_encoder_fc(private_feat)
            #print('source private feature shape:', private_code.shape)

        elif mode == 'target':

            # target private encoder
            private_feat = self.target_encoder_conv(input_data)
            private_feat = private_feat.view(-1, 64 * 7 * 7)
            private_code = self.target_encoder_fc(private_feat)

        result.append(private_code)

        # shared encoder
        shared_feat = self.shared_encoder_conv(input_data)
        #print('shared encoder feature shape before fc', shared_feat.shape)
        shared_feat = shared_feat.view(-1, 48 * 7 * 7)
        shared_code = self.shared_encoder_fc(shared_feat)
        #print('shared encoder feature shape', shared_code.shape)
        result.append(shared_code)

        reversed_shared_code = ReverseLayerF.apply(shared_code, p)
        domain_label = self.shared_encoder_pred_domain(reversed_shared_code)
        result.append(domain_label)

        if mode == 'source':
            class_label = self.shared_encoder_pred_class(shared_code)
            #print('source class label', class_label.shape)
            result.append(class_label)

        # shared decoder

        if rec_scheme == 'share':
            union_code = shared_code
        elif rec_scheme == 'all':
            union_code = private_code + shared_code
        elif rec_scheme == 'private':
            union_code = private_code

        rec_vec = self.shared_decoder_fc(union_code)
        #print('rec decoder feature shape before cov', rec_vec.shape)
        rec_vec = rec_vec.view(-1, 3, 14, 14)

        rec_code = self.shared_decoder_conv(rec_vec)
        #print('rec decoder feature shape', rec_code.shape)
        result.append(rec_code)

        return result
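
In DSN-style setups like the one above, the private and shared codes are usually also pushed apart by an orthogonality ("difference") loss. A minimal sketch of one common formulation (an assumption here, not part of the example):

import torch
import torch.nn.functional as F


def difference_loss(private_code, shared_code):
    # Center and L2-normalize each code, then penalize the squared entries of
    # their cross-correlation matrix so the private and shared subspaces stay
    # close to orthogonal.
    p = private_code - private_code.mean(dim=0, keepdim=True)
    s = shared_code - shared_code.mean(dim=0, keepdim=True)
    p = F.normalize(p, p=2, dim=1)
    s = F.normalize(s, p=2, dim=1)
    return torch.mean((p.t() @ s) ** 2)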
Example #11
    def forward(self, input, cst):
        input = input.expand(input.data.shape[0], 3, 28, 28)
        attr = self.attr(input)
        attr = attr.view(-1, 50 * 4 * 4)
        reverse_attr = ReverseLayerF.apply(attr, cst)
        class_output = self.cclass(attr)
        domain_output = self.domain(reverse_attr)

        # return class_output, domain_output
        return class_output, domain_output, attr
Example #12
    def forward(self, x, alpha):
        # import pdb; pdb.set_trace()
        batch_size = len(x)
        fe_out = self.fe(x)
        fe_out = fe_out.view(batch_size, -1)
        reverse_fe = ReverseLayerF.apply(fe_out, alpha)
        label_out = self.label_classifier(fe_out)
        domain_out = self.domain_classifier(reverse_fe)

        return label_out, domain_out
Example #13
    def forward(self, input_data, alpha):
        input_data = input_data.view(input_data.data.shape[0], 1,
                                     self.max_length, self.input_dim)
        feature = self.feature(input_data)
        feature = feature.view(-1, 30 * 5 * 5)
        reverse_feature = ReverseLayerF.apply(feature, alpha)
        class_output = self.class_classifier(feature)
        domain_output = self.domain_classifier(reverse_feature)

        return class_output, domain_output
Example #14
    def forward(self, input_data, mode, rec_scheme, p=0.0):

        result = []

        if mode == 'source':

            # source private encoder
            private_feat = self.source_encoder_conv(input_data)
            private_feat = private_feat.view(-1, 64 * 7 * 7)
            private_code = self.source_encoder_fc(private_feat)

        elif mode == 'target':

            # target private encoder
            private_feat = self.target_encoder_conv(input_data)
            private_feat = private_feat.view(-1, 64 * 7 * 7)
            private_code = self.target_encoder_fc(private_feat)

        result.append(private_code)

        # shared encoder
        shared_feat = self.shared_encoder_conv(input_data)
        shared_feat = shared_feat.view(-1, 48 * 7 * 7)
        shared_code = self.shared_encoder_fc(shared_feat)
        result.append(shared_code)

        reversed_shared_code = ReverseLayerF.apply(shared_code, p)
        domain_label = self.shared_encoder_pred_domain(reversed_shared_code)
        result.append(domain_label)

        if mode == 'source':
            class_label = self.shared_encoder_pred_class(shared_code)
            result.append(class_label)
        # if we add some target labeled data in training dataset
        if mode == 'target':
            class_label = self.shared_encoder_pred_class(shared_code)
            result.append(class_label)

        # shared decoder

        if rec_scheme == 'share':
            union_code = shared_code
        elif rec_scheme == 'all':
            union_code = private_code + shared_code
        elif rec_scheme == 'private':
            union_code = private_code

        rec_vec = self.shared_decoder_fc(union_code)
        rec_vec = rec_vec.view(-1, 3, 14, 14)

        rec_code = self.shared_decoder_conv(rec_vec)
        result.append(rec_code)

        return result
Example #15
 def forward(self, x, alpha):
     x = ReverseLayerF.apply(x, alpha)
     x = self.ad_layer1(x)
     x = self.relu1(x)
     x = self.dropout1(x)
     x = self.ad_layer2(x)
     x = self.relu2(x)
     x = self.dropout2(x)
     x = self.ad_layer3(x)
     x = self.sigmoid(x)
     return x
Example #16
    def forward(self, input_data, alpha):
        feature4096 = self.feature(input_data)

        feature = self.bottleneck(feature4096)
        reverse_feature = ReverseLayerF.apply(feature, alpha)
        class_output = self.class_classifier(feature)
        domain_output_st = self.dc_ip3_st(
            self.domain_classifier(reverse_feature))
        domain_output_mt = self.dc_ip3_mt(
            self.domain_classifier(reverse_feature))

        return class_output, domain_output_st, domain_output_mt, feature4096
Example #17
    def forward(self, x, alpha):

        x = x.to(self.device)
        x = x.squeeze().transpose(0, 1)
        (kld_loss, nll_loss, (all_enc_mean, all_enc_std),
         (all_dec_mean, all_dec_std), x1, h) = self.vrnn(x)
        reverse_feature = ReverseLayerF.apply(h.squeeze(), alpha)
        regressor_output = self.regressor(reverse_feature)
        domain_class_output = self.domain_classifier(reverse_feature)

        return regressor_output, domain_class_output, kld_loss, nll_loss
Example #18
    def forward(self, input_data, alpha, phase, class_out):
        if phase == 'train' and class_out:
            # input_data = input_data.expand(input_data.data.shape[0], 3 ,400, 400)
            # feature = self.feature(input_data)
            # # print(feature.shape)

            input_data = input_data.view(-1, 512)

            # # print(feature.shape)
            reverse_feature = ReverseLayerF.apply(input_data, alpha)
            # print(reverse_feature.shape)
            class_output = self.class_classifier(input_data)
            # print(class_output.shape)
            domain_output = self.domain_classifier(reverse_feature)

            domain_output_class = self.domain_classifier_1(reverse_feature)
            # print(class_output.shape)
            # print(domain_output)
            return class_output, domain_output, domain_output_class

        if phase == 'train' and not class_out:
            input_data = input_data.view(-1, 512)

            # # print(feature.shape)
            reverse_feature = ReverseLayerF.apply(input_data, alpha)
            # print(reverse_feature.shape)
            class_output = self.class_classifier(input_data)
            # print(class_output.shape)
            domain_output = self.domain_classifier(reverse_feature)
            domain_output_class = self.domain_classifier_0(reverse_feature)
            # print(class_output.shape)
            # print(domain_output)
            return class_output, domain_output, domain_output_class

        else:
            input_data = input_data.view(-1, 512 * 1 * 1)
            class_output = self.class_classifier(input_data)
            return class_output
Example #19
    def forward(self, input_data1, input_data2, alpha):
        
        z1 = self.encode1(input_data1)
        z2 = self.encode2(input_data2)
        
        reconstructed = self.decode(z2)

        z = torch.cat([z1, z2], dim=1)

        reverse_z = ReverseLayerF.apply(z, alpha)
        class_output = self.sentiment_classifier(z)
        domain_output = self.domain_classifier(reverse_z)

        return reconstructed, class_output, domain_output
Example #20
    def forward(self, input_data, alpha):
        input_data = input_data.expand(input_data.data.shape[0], 3, 28, 28)
        feature = self.feature(input_data)
        feature = feature.view(-1, 50 * 4 * 4)
        reverse_feature = ReverseLayerF.apply(feature, alpha)
        class_output = self.class_classifier(feature)
        domain_output = self.domain_classifier(reverse_feature)

        # return class_output, domain_output
        return class_output, domain_output, feature


# dann = CNNModel()
# print(dann)
Example #21
    def forward(self, input_data, alpha):
        #print ("Input data before", input_data.shape)
        input_data = input_data.expand(input_data.data.shape[0], 1,
                                       input_data.data.shape[3],
                                       input_data.data.shape[3])
        #print ("After expand", input_data.shape)
        feature = self.feature(input_data)
        #print ("After feature", feature.shape)
        feature = feature.view(-1, 50 * 13 * 13)
        reverse_feature = ReverseLayerF.apply(feature, alpha)
        class_output = self.class_classifier(feature)
        domain_output = self.domain_classifier(reverse_feature)

        return class_output, domain_output
Example #22
    def forward(self, source, target, s_label, DEV, alpha=0.0):
        source_share = self.sharedNet(source)
        source_share = self.bottleneck(source_share)
        source = self.source_fc(source_share)
        p_source = self.softmax(source)

        target = self.sharedNet(target)
        target = self.bottleneck(target)
        t_label = self.source_fc(target)
        p_target = self.softmax(t_label)
        t_label = t_label.data.max(1)[1]
        s_out = []
        t_out = []
        if self.training:
            # RevGrad
            s_reverse_feature = ReverseLayerF.apply(source_share, alpha)
            t_reverse_feature = ReverseLayerF.apply(target, alpha)
            s_domain_output = self.domain_classifier(s_reverse_feature)
            t_domain_output = self.domain_classifier(t_reverse_feature)

            # p*feature-> classifier_i ->loss_i
            for i in range(self.classes):
                ps = p_source[:, i].reshape((target.shape[0],1))
                fs = ps * s_reverse_feature
                pt = p_target[:, i].reshape((target.shape[0],1))
                ft = pt * t_reverse_feature

                outsi = self.domain_classifiers[i](fs.cpu()).to(DEV)
                s_out.append(outsi)
                outti = self.domain_classifiers[i](ft.cpu()).to(DEV)
                t_out.append(outti)
        else:
            s_domain_output = 0
            t_domain_output = 0
            s_out = [0]*self.classes
            t_out = [0]*self.classes
        return source, s_domain_output, t_domain_output, s_out, t_out
Example #23
 def forward(self, input_data, alpha, return_ddc_features=None):
     if return_ddc_features is not None:
         assert return_ddc_features in self.class_classifier._modules
     feature = self.feature(input_data)
     reverse_feature = ReverseLayerF.apply(feature, alpha)
     class_output = feature
     ddc_features = None
     for k, v in self.class_classifier._modules.items():
         class_output = v(class_output)
         if k == return_ddc_features:
             ddc_features = class_output
     domain_output = self.domain_classifier(reverse_feature)
     if return_ddc_features:
         return class_output, domain_output, ddc_features
     else:
         return class_output, domain_output
Example #24
    def forward(self, input_data, alpha):
        # input_data = input_data.expand(input_data.data.shape[0], 3 ,400, 400)
        # input_data=self.main(input_data)
        # # print(feature.shape)

        input_data = input_data.view(-1, 512 * 7 * 7)

        # # print(feature.shape)
        reverse_feature = ReverseLayerF.apply(input_data, alpha)
        # print(reverse_feature.shape)
        class_output = self.class_classifier(input_data)
        # print(class_output.shape)
        domain_output = self.domain_classifier(reverse_feature)

        # print(class_output.shape)
        # print(domain_output)
        return class_output, domain_output
Example #25
 def forward(self, input_data, alpha, return_ddc_features=None):
     if return_ddc_features is not None:
         assert return_ddc_features in self.class_classifier._modules
     input_data = input_data.expand(input_data.data.shape[0], 3, 28, 28)
     feature = self.feature(input_data)
     feature = feature.view(-1, 50 * 4 * 4)
     reverse_feature = ReverseLayerF.apply(feature, alpha)
     # class_output = self.class_classifier(feature)
     class_output = feature
     ddc_features = None
     for k, v in self.class_classifier._modules.items():
         class_output = v(class_output)
         if k == return_ddc_features:
             ddc_features = class_output
     domain_output = self.domain_classifier(reverse_feature)
     if return_ddc_features:
         return class_output, domain_output, ddc_features
     else:
         return class_output, domain_output
Example #26
    def forward(self, input_data, alpha=1.0, return_ddc_features=None):
        if return_ddc_features is not None:
            assert return_ddc_features in self.class_classifier._modules or return_ddc_features == "tabnet_features"
        # feature = self.embedder(input_data)
        feature, M_loss = self.tabnet(input_data)

        reverse_feature = ReverseLayerF.apply(feature, alpha)
        class_output = feature
        ddc_features = feature
        for k, v in self.class_classifier._modules.items():
            class_output = v(class_output)
            if k == return_ddc_features:
                ddc_features = class_output
        domain_output = self.domain_classifier(reverse_feature)

        if return_ddc_features:
            return class_output, domain_output, ddc_features, M_loss
        else:
            return class_output, domain_output, M_loss
Example #27
    def forward(self, input_data, mode, Rec_scheme, alpha):

        if mode == 'source':
            private_enc = self.private_enc_src
            pri_enc_fc = self.private_fc_src
        else:
            input_data = input_data.expand(input_data.data.shape[0],
                                           self.channels, 32, 32)
            private_enc = self.private_enc_tgt
            pri_enc_fc = self.private_fc_src

        #private encoder
        private_feat = private_enc(input_data)
        private_feat = private_feat.view(-1, 64 * 8 * 8)
        private_feat_code = pri_enc_fc(private_feat)

        #shared encoder
        shared_feat = self.shared_enc(input_data)
        shared_feat = shared_feat.view(-1, 64 * 8 * 8)
        shared_feat_code = self.shd_enc_fc(shared_feat)

        #label classifier
        pred_label = self.classifier(shared_feat_code)

        #domain classifier
        reverse_feature = ReverseLayerF.apply(shared_feat_code, alpha)
        pred_domain = self.domain_dis(reverse_feature)

        #shared decoder
        if Rec_scheme == 'private':
            rec_feat_code = private_feat_code
        if Rec_scheme == 'shared':
            rec_feat_code = shared_feat_code
        if Rec_scheme == 'all':
            rec_feat_code = private_feat_code + shared_feat_code

        feat_encode = self.shd_dec_fc(rec_feat_code)
        feat_encode = feat_encode.view(-1, 3, 10, 10)
        img_rec = self.shared_dec(feat_encode)

        return private_feat_code, shared_feat_code, pred_label, pred_domain, img_rec
Example #28
    def forward(self, input_data, alpha, training):
        # input_data = input_data.expand(input_data.data.shape[0], 3, 28, 28)
        input_data = input_data.expand(input_data.data.shape[0], 3, 32, 32)
        # feature = self.feature(input_data)
        feature = self.feature1(input_data)
        if training:
            feature = self.dropout1(feature)
            feature += torch.zeros(feature.size()).normal_(0, 1).cuda()
        feature = self.feature2(feature)
        if training:
            feature = self.dropout2(feature)
            feature += torch.zeros(feature.size()).normal_(0, 1).cuda()
        class_output = self.class_classifier(feature)

        # feature = feature.view(-1, 50 * 4 * 4)
        feature = feature.view(feature.shape[0], -1)
        reverse_feature = ReverseLayerF.apply(feature, alpha)
        # class_output = self.class_classifier(feature)
        domain_output = self.domain_classifier(reverse_feature)

        return class_output, domain_output
Example #29
 def forward(self, x, iter_num):
     coefficient = self.calc_coeff(iter_num)
     reversed_x = ReverseLayerF.apply(x, coefficient)
     return self.forward_pass(reversed_x)
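
self.calc_coeff is not shown above; a common choice (an assumption here) is the smooth ramp used in DANN-style training, which moves the gradient-reversal coefficient from low toward high as iter_num grows:

import numpy as np


def calc_coeff(iter_num, high=1.0, low=0.0, alpha=10.0, max_iter=10000.0):
    # Equals `low` at iter_num = 0 and approaches `high` as iter_num grows
    # past max_iter.
    return float(2.0 * (high - low) / (1.0 + np.exp(-alpha * iter_num / max_iter))
                 - (high - low) + low)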
Example #30
    def forward(self,
                im_data,
                im_info,
                gt_boxes,
                num_boxes,
                domain=None,
                l=0,
                loss_start=False):
        batch_size = im_data.size(0)

        im_info = im_info.data
        gt_boxes = gt_boxes.data
        num_boxes = num_boxes.data

        # feed image data to base model to obtain base feature map
        base_feat = self.RCNN_base(im_data)

        # feed base feature map to the RPN to obtain rois
        rois, rpn_loss_cls, rpn_loss_bbox = self.RCNN_rpn(
            base_feat, im_info, gt_boxes, num_boxes)

        # if it is the training phase, use ground-truth bboxes for refining
        if self.training:
            roi_data = self.RCNN_proposal_target(rois, gt_boxes, num_boxes,
                                                 domain, self.transfer)
            rois, rois_label, rois_target, rois_inside_ws, rois_outside_ws, domain_label = roi_data

            rois_label = Variable(rois_label.view(-1).long())
            rois_target = Variable(rois_target.view(-1, rois_target.size(2)))
            rois_inside_ws = Variable(
                rois_inside_ws.view(-1, rois_inside_ws.size(2)))
            rois_outside_ws = Variable(
                rois_outside_ws.view(-1, rois_outside_ws.size(2)))
        else:
            rois_label = None
            rois_target = None
            rois_inside_ws = None
            rois_outside_ws = None
            rpn_loss_cls = 0
            rpn_loss_bbox = 0

        rois = Variable(rois)
        # do roi pooling based on predicted rois

        if cfg.POOLING_MODE == 'crop':
            # pdb.set_trace()
            # pooled_feat_anchor = _crop_pool_layer(base_feat, rois.view(-1, 5))
            grid_xy = _affine_grid_gen(rois.view(-1, 5),
                                       base_feat.size()[2:], self.grid_size)
            grid_yx = torch.stack(
                [grid_xy.data[:, :, :, 1], grid_xy.data[:, :, :, 0]],
                3).contiguous()
            pooled_feat = self.RCNN_roi_crop(base_feat,
                                             Variable(grid_yx).detach())
            if cfg.CROP_RESIZE_WITH_MAX_POOL:
                pooled_feat = F.max_pool2d(pooled_feat, 2, 2)
        elif cfg.POOLING_MODE == 'align':
            pooled_feat = self.RCNN_roi_align(base_feat, rois.view(-1, 5))
        elif cfg.POOLING_MODE == 'pool':
            pooled_feat = self.RCNN_roi_pool(base_feat, rois.view(-1, 5))

        #-----------------------transfer learning----------------------------#
        #print(domain)
        dom_loss = 0

        #base line: transfer == False
        if self.training and self.transfer:
            if self.grl:
                dom_input = ReverseLayerF.apply(pooled_feat, l)
            else:
                dom_input = pooled_feat

            dom_pred = self._domain_classify(dom_input)
            domain_label = Variable(domain_label.cpu().cuda().view(-1).long())

            ############Process Tranfer Loss Weight#########
            if loss_start:
                p_target = F.softmax(dom_pred * self.transfer_gamma, dim=1)[:, 0]
                domain_label.data = domain_label.data.type(
                    torch.FloatTensor).cuda()
                l_target = domain_label

                self.weight = p_target**l_target
            ###############################################

            ##############DOMAIN LOSS SELECTION##########

            else:
                ids = torch.LongTensor(1).cuda()

                # random select
                if self.transfer_select == 'RANDOM':
                    perm = torch.randperm(rois.size(1))
                    ids = perm[:rois.size(1) // 8].cuda()

                # select positive samples and predicted positive samples
                elif self.transfer_select == 'CONDITION':
                    ids = torch.arange(0, rois.size(1) // 8)
                    ids = ids.long().cuda()

                # select all positive samples
                elif self.transfer_select == 'POSITIVE':
                    ids = torch.nonzero(rois_label.data)
                    ids = torch.squeeze(ids).cuda()

                # select a balanced set of positive and negative samples
                elif self.transfer_select == 'BALANCE':
                    ids_p = torch.nonzero(rois_label.data)
                    ids_p = torch.squeeze(ids_p).cuda()

                    ids_n = (rois_label.data == 0).nonzero()
                    ids_n = torch.squeeze(ids_n).cuda()
                    ids_n = ids_n[:ids_p.size(0)]

                    ids = torch.cat((ids_p, ids_n), 0).cuda()

                # select all sample
                if self.transfer_select == 'ALL':
                    dom_pred_loss = dom_pred
                    dom_label_loss = domain_label
                else:
                    dom_pred_loss = dom_pred[ids]
                    dom_label_loss = domain_label[ids]

                ##########DOMAIN LOSS SELECTION DONE##########

                dom_loss = F.cross_entropy(dom_pred_loss, dom_label_loss)

                dom_loss = dom_loss * (
                    self.transfer_weight.expand_as(dom_loss))
        #---------------------transfer learning done-------------------------#

        # feed pooled features to top model
        pooled_feat = self._head_to_tail(pooled_feat)

        # compute bbox offset
        bbox_pred = self.RCNN_bbox_pred(pooled_feat)
        if self.training and not self.class_agnostic:
            # select the corresponding columns according to roi labels
            bbox_pred_view = bbox_pred.view(bbox_pred.size(0),
                                            int(bbox_pred.size(1) / 4), 4)
            bbox_pred_select = torch.gather(
                bbox_pred_view, 1,
                rois_label.view(rois_label.size(0), 1,
                                1).expand(rois_label.size(0), 1, 4))
            bbox_pred = bbox_pred_select.squeeze(1)

        # compute object classification probability
        cls_score = self.RCNN_cls_score(pooled_feat)
        cls_prob = F.softmax(cls_score, dim=1)

        RCNN_loss_cls = 0
        RCNN_loss_bbox = 0

        if self.training:
            # classification loss
            if self.transfer and loss_start:
                rois_label_loss = torch.eye(
                    self.n_classes)[rois_label.data.cpu()].type(
                        torch.FloatTensor)
                rois_label_loss = Variable(rois_label_loss.cuda())
                weight_loss_cls = self.weight.view(rois_label.size(0),
                                                   1).repeat(
                                                       1, self.n_classes)

                RCNN_loss_cls = F.binary_cross_entropy_with_logits(
                    cls_score, rois_label_loss, weight_loss_cls)

                # bounding box regression L1 loss
                RCNN_loss_bbox = _smooth_l1_loss(bbox_pred, rois_target,
                                                 rois_inside_ws,
                                                 rois_outside_ws, True, True,
                                                 self.weight)

            else:
                RCNN_loss_cls = F.cross_entropy(cls_score, rois_label)

                # bounding box regression L1 loss
                RCNN_loss_bbox = _smooth_l1_loss(bbox_pred, rois_target,
                                                 rois_inside_ws,
                                                 rois_outside_ws)

        cls_prob = cls_prob.view(batch_size, rois.size(1), -1)
        bbox_pred = bbox_pred.view(batch_size, rois.size(1), -1)

        return rois, cls_prob, bbox_pred, rpn_loss_cls, rpn_loss_bbox, RCNN_loss_cls, RCNN_loss_bbox, rois_label, dom_loss