Code example #1
File: VGG_inflated.py Project: praveena2j/WSDAOR
    def forward(self, x, alpha):
        B, C, L, H, W = x.size()
        if self.is_3d:
            feature = self.features(x)        # B x C x L x H x W feature maps
            features = self.avgpool(feature)  # spatially pooled features
            # Temporal max-pool, then drop the singleton spatial dims -> B x C
            features_domain = torch.max(features,
                                        dim=2)[0].squeeze(2).squeeze(2)
        else:
            # 2D path: fold time into the batch so the 2D backbone sees
            # (L*B) x C x H x W frames. The original left `features` and
            # `features_domain` undefined on this path, which would crash
            # below; this follows the reshaping sketched in the source.
            x = x.permute(2, 0, 1, 3, 4).contiguous().view(L * B, C, H, W)
            feature = self.features(x)        # per-frame C x H x W features
            features = self.avgpool(feature)
            features_domain = features.view(L * B, -1)

        # Gradient reversal (DANN): the domain head trains adversarially
        reverse_feature = ReverseLayerF.apply(features_domain, alpha)
        class_output = self.class_predictions(features)
        domain_output = self.domain_predictions(reverse_feature)

        return features, class_output, domain_output
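Every example here calls ReverseLayerF.apply without showing its definition. For reference, a minimal sketch of the standard DANN gradient-reversal Function (identity on the forward pass, gradient negated and scaled by alpha on the backward pass); the individual projects' versions may differ in detail:

from torch.autograd import Function

class ReverseLayerF(Function):
    # Gradient reversal layer: identity in the forward direction,
    # multiplies incoming gradients by -alpha in the backward direction.
    @staticmethod
    def forward(ctx, x, alpha):
        ctx.alpha = alpha
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        # No gradient with respect to alpha, hence the trailing None
        return grad_output.neg() * ctx.alpha, None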
Code example #2
    def dann(input_data, alpha):
        # extractor, classifier and critic come from the enclosing scope:
        # a shared feature extractor, a label classifier and a domain critic
        feature = extractor(input_data)
        feature = feature.view(feature.size(0), -1)  # flatten to (batch, -1)
        reverse_feature = ReverseLayerF.apply(feature, alpha)
        class_output = classifier(feature)
        domain_output = critic(reverse_feature)

        return class_output, domain_output, feature
Code example #3
    def dann(self, input_data, alpha):
        feature = self.feature(input_data)
        feature = feature.view(feature.size(0), -1)  # flatten to (batch, -1)
        # Gradients flowing back into the extractor are reversed here
        reverse_feature = ReverseLayerF.apply(feature, alpha)
        class_output = self.class_classifier(feature)
        domain_output = self.domain_classifier(reverse_feature)

        return class_output, domain_output, feature
Code example #4
File: model.py Project: bnabis93/DANN_pytorch
    def forward(self, input_data, alpha):
        '''
        On view() (reshape): https://stackoverflow.com/questions/42479902/how-does-the-view-method-work-in-pytorch
        '''
        # MNIST digits are 28x28; replicate the single channel to 3 channels
        input_data = input_data.expand(input_data.data.shape[0], 3, 28, 28)
        feature = self.feature(input_data)
        feature = feature.view(-1, 50 * 4 * 4)  # flatten conv features
        reverse_feature = ReverseLayerF.apply(feature, alpha)
        class_output = self.class_classifier(feature)
        domain_output = self.domain_classifier(reverse_feature)

        return class_output, domain_output
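The forward passes above return a label prediction and a domain prediction. A minimal training-step sketch showing how both are typically combined in DANN, with the usual alpha ramp-up schedule; net, the criteria, and the batch variables are assumptions, not code from these projects:

import math
import torch
import torch.nn as nn

class_criterion = nn.CrossEntropyLoss()
domain_criterion = nn.CrossEntropyLoss()

def dann_step(net, optimizer, source_batch, target_batch, p):
    # DANN schedule: alpha ramps from 0 to 1 as training progress p goes 0 -> 1
    alpha = 2.0 / (1.0 + math.exp(-10.0 * p)) - 1.0

    x_s, y_s = source_batch   # labeled source data
    x_t, _ = target_batch     # unlabeled target data

    class_out_s, domain_out_s = net(x_s, alpha)
    _, domain_out_t = net(x_t, alpha)

    # Convention: source is domain 0, target is domain 1
    d_s = torch.zeros(x_s.size(0), dtype=torch.long)
    d_t = torch.ones(x_t.size(0), dtype=torch.long)

    loss = (class_criterion(class_out_s, y_s)
            + domain_criterion(domain_out_s, d_s)
            + domain_criterion(domain_out_t, d_t))

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()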
Code example #5
	def forward(self, x, alpha, flag, neutralframes, subids, domain):
		# I3D/Inception input layout: batch, channels, time, height, width
		batch_size, C, timesteps, H, W = x.size()
		feature = self.i3d_WSDDA.extract_features(x)
		# Collapse (batch, time) into one axis of per-frame feature vectors
		features = feature.view(feature.shape[0] * feature.shape[2], -1)
		# Frame-level domain-adversarial branch via gradient reversal
		reverse_feature = ReverseLayerF.apply(features, alpha)
		new_feature = self.dropout(feature)
		# Separate prediction heads per domain; `flag`, `neutralframes` and
		# `subids` are unused on the live code path
		if domain == 'source':
			class_output = self.source_predictions(new_feature)
		else:
			class_output = self.target_predictions(new_feature)
		domain_output = self.domain_predictions(reverse_feature)
		return feature, class_output, domain_output
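Because this forward pass flattens batch and time before the domain head, the discriminator emits one prediction per frame, so domain labels must be repeated per frame. A hedged sketch of the corresponding loss; model, domain_criterion, and the source-is-0 convention are assumptions:

import torch
import torch.nn as nn

domain_criterion = nn.CrossEntropyLoss()

# domain_output has shape (batch_size * timesteps, num_domains)
feature, class_output, domain_output = model(x, alpha, flag,
                                             neutralframes, subids, 'source')
d_labels = torch.zeros(domain_output.size(0), dtype=torch.long)  # source = 0
domain_loss = domain_criterion(domain_output, d_labels)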
Code example #6
File: train_mcd.py Project: ustcjerry/cada
    def train_onestep(G, C1, C2, config, epoch):
        # G is the shared feature generator; C1 and C2 are the two MCD
        # classifiers. opt_g, opt_c1, opt_c2, set_requires_grad and
        # discrepancy are defined elsewhere in train_mcd.py.
        criterion = nn.CrossEntropyLoss().cuda()
        G.train()
        C1.train()
        C2.train()

        # Same ramp-up schedule as the DANN alpha: 0 -> 1 over training
        gamma = 2 / (1 + math.exp(-10 * epoch / config['n_epochs'])) - 1

        iter_source = iter(config['source_train_loader'])
        iter_target = iter(config['target_train_loader'])
        len_source_loader = len(config['source_train_loader'])
        len_target_loader = len(config['target_train_loader'])
        num_iter = len_source_loader
        for i in range(1, num_iter + 1):
            data_source, label_source = next(iter_source)
            data_target, _ = next(iter_target)
            # Restart the (possibly shorter) target loader once exhausted
            if i % len_target_loader == 0:
                iter_target = iter(config['target_train_loader'])
            if torch.cuda.is_available():
                data_source, label_source = data_source.cuda(), label_source.cuda()
                data_target = data_target.cuda()

            opt_g.zero_grad()
            opt_c1.zero_grad()
            opt_c2.zero_grad()

            # Supervised loss on source data through both classifiers
            set_requires_grad(G, requires_grad=True)
            set_requires_grad(C1, requires_grad=True)
            set_requires_grad(C2, requires_grad=True)
            feat_s = G(data_source)
            output_s1 = C1(feat_s)
            output_s2 = C2(feat_s)
            loss_s1 = criterion(output_s1, label_source)
            loss_s2 = criterion(output_s2, label_source)
            loss_s = loss_s1 + loss_s2

            # Freeze G; route target features through the gradient-reversal
            # layer before both classifiers
            set_requires_grad(G, requires_grad=False)
            set_requires_grad(C1, requires_grad=True)
            set_requires_grad(C2, requires_grad=True)
            with torch.no_grad():
                feat_t = G(data_target)
            reverse_feature_t = ReverseLayerF.apply(feat_t, gamma)
            output_t1 = C1(reverse_feature_t)
            output_t2 = C2(reverse_feature_t)

            # Maximize the discrepancy between the classifiers on target data
            loss_dis = -discrepancy(output_t1, output_t2)
            loss = loss_s + loss_dis
            loss.backward()
            opt_c1.step()
            opt_c2.step()
            opt_g.step()

            if i % 20 == 0:
                print(
                    'Train Epoch: {}, Loss1: {:.6f}\t Loss2: {:.6f}\t  Discrepancy: {:.6f}'
                    .format(epoch, loss_s1.item(), loss_s2.item(),
                            loss_dis.item()))
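This example relies on discrepancy and set_requires_grad defined elsewhere in train_mcd.py. Minimal sketches consistent with the Maximum Classifier Discrepancy (MCD) formulation, offered as assumptions rather than the project's exact code:

import torch
import torch.nn.functional as F

def discrepancy(out1, out2):
    # L1 distance between the two classifiers' softmax outputs (MCD)
    return torch.mean(torch.abs(F.softmax(out1, dim=1) -
                                F.softmax(out2, dim=1)))

def set_requires_grad(model, requires_grad=True):
    # Toggle gradient computation for every parameter of a module
    for p in model.parameters():
        p.requires_grad = requires_grad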