Code example #1
    def forward_n_layers(self,
                         x,
                         target=None,
                         mixup=False,
                         mixup_hidden=False,
                         mixup_alpha=None,
                         layer_num=None):
        if self.per_img_std:
            x = per_image_standardization(x)

        out = x

        # Default target_reweighted to None so the early returns below do
        # not raise a NameError when no target is supplied.
        target_reweighted = None
        if target is not None:
            target_reweighted = to_one_hot(target, self.num_classes)
        out = self.conv1(out)
        out = self.layer1(out)
        if layer_num == 1:
            return out, target_reweighted
        out = self.layer2(out)
        if layer_num == 2:
            return out, target_reweighted
        out = self.layer3(out)
        if layer_num == 3:
            return out, target_reweighted
        out = act(self.bn1(out))  # 'act' is a module-level activation defined elsewhere
        out = F.avg_pool2d(out, 8)
        out = out.view(out.size(0), -1)
        out = self.linear(out)

        return out, target_reweighted
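These forwards rely on helpers that are not shown (per_image_standardization, to_one_hot, get_lambda, mixup_process). Below is a minimal sketch of the first two, assuming the conventions of the manifold_mixup reference code; the exact definitions vary by project.

import torch

def per_image_standardization(x):
    # Normalize each image in the batch to zero mean and unit variance,
    # in the spirit of tf.image.per_image_standardization.
    flat = x.view(x.size(0), -1)
    mean = flat.mean(dim=1, keepdim=True)
    std = flat.std(dim=1, keepdim=True).clamp_min(1e-6)
    return ((flat - mean) / std).view_as(x)

def to_one_hot(target, num_classes):
    # Integer class indices -> one-hot float matrix of shape [N, num_classes].
    y = torch.zeros(target.size(0), num_classes, device=target.device)
    return y.scatter_(1, target.unsqueeze(1), 1.0)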
Code example #2
    def forward(self,
                x,
                target=None,
                mixup=False,
                mixup_hidden=False,
                mixup_alpha=None):
        if self.per_img_std:
            x = per_image_standardization(x)

        # Pick which layer's activations to mix: a random hidden layer for
        # manifold mixup, the input (layer 0) for plain input mixup. Note
        # that randint(0, 2) is inclusive, so the layer_mix == 3 branch
        # below is unreachable on the mixup_hidden path.
        if mixup_hidden:
            layer_mix = random.randint(0, 2)
        elif mixup:
            layer_mix = 0
        else:
            layer_mix = None

        out = x

        # The mixup branches below assume mixup_alpha and target are both
        # supplied whenever mixup or mixup_hidden is True.
        if mixup_alpha is not None:
            lam = get_lambda(mixup_alpha)
            lam = torch.from_numpy(np.array([lam]).astype('float32')).cuda()
            lam = Variable(lam)  # Variable is a no-op wrapper in PyTorch >= 0.4

        if target is not None:
            target_reweighted = to_one_hot(target, self.num_classes)

        if layer_mix == 0:
            out, target_reweighted = mixup_process(out,
                                                   target_reweighted,
                                                   lam=lam)

        out = self.conv1(out)
        out = self.layer1(out)

        if layer_mix == 1:
            out, target_reweighted = mixup_process(out,
                                                   target_reweighted,
                                                   lam=lam)

        out = self.layer2(out)

        if layer_mix == 2:
            out, target_reweighted = mixup_process(out,
                                                   target_reweighted,
                                                   lam=lam)

        out = self.layer3(out)
        if layer_mix == 3:
            out, target_reweighted = mixup_process(out,
                                                   target_reweighted,
                                                   lam=lam)

        out = self.layer4(out)
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)

        if target is not None:
            return out, target_reweighted
        else:
            return out
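get_lambda and mixup_process are also assumed. A plausible sketch, consistent with how the forwards above use them (lam drawn from Beta(alpha, alpha); activations and one-hot targets mixed against a random permutation of the batch):

import numpy as np
import torch

def get_lambda(alpha=1.0):
    # Sample the mixup coefficient from Beta(alpha, alpha).
    return np.random.beta(alpha, alpha) if alpha > 0.0 else 1.0

def mixup_process(out, target_reweighted, lam):
    # Blend each example with a randomly chosen partner from the batch;
    # the one-hot targets are blended with the same coefficient.
    indices = torch.randperm(out.size(0), device=out.device)
    out = out * lam + out[indices] * (1 - lam)
    target_reweighted = target_reweighted * lam + target_reweighted[indices] * (1 - lam)
    return out, target_reweighted

Since forward returns soft targets, training typically uses a soft-label loss rather than plain cross-entropy, e.g. (hypothetical call, matching the signatures above):

out, target_reweighted = model(x, target, mixup_hidden=True, mixup_alpha=2.0)
loss = F.binary_cross_entropy(F.softmax(out, dim=1), target_reweighted)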
Code example #3
    def forward(self,
                x,
                target=None,
                mixup=False,
                mixup_hidden=False,
                mixup_alpha=None,
                layer_num_out=None):

        if self.per_img_std:
            x = per_image_standardization(x)

        if mixup_hidden:
            layer_mix = random.randint(0, 2)
        elif mixup:
            layer_mix = 0
        else:
            layer_mix = None

        out = x

        if mixup_alpha is not None:
            lam = get_lambda(mixup_alpha)
            lam = torch.from_numpy(np.array([lam]).astype('float32')).cuda()
            lam = Variable(lam)

        if target is not None:
            target_reweighted = to_one_hot(target, self.num_classes)

        if layer_mix == 0:
            out, target_reweighted = mixup_process(out,
                                                   target_reweighted,
                                                   lam=lam)

        out = self.conv1(out)
        out = self.layer1(out)

        if layer_mix == 1:
            out, target_reweighted = mixup_process(out,
                                                   target_reweighted,
                                                   lam=lam)
        if layer_num_out == 1:
            # Keep a detached copy of the requested intermediate activation;
            # out.detach() alone is equivalent in PyTorch >= 0.4.
            out_tmp = Variable(out.detach().data, requires_grad=False)

        out = self.layer2(out)

        if layer_mix == 2:
            out, target_reweighted = mixup_process(out,
                                                   target_reweighted,
                                                   lam=lam)
        if layer_num_out == 2:
            out_tmp = Variable(out.detach().data, requires_grad=False)

        out = self.layer3(out)
        if layer_mix == 3:
            out, target_reweighted = mixup_process(out,
                                                   target_reweighted,
                                                   lam=lam)
        if layer_num_out == 3:
            out_tmp = Variable(out.detach().data, requires_grad=False)

        out = act(self.bn1(out))
        out = F.avg_pool2d(out, 8)
        out = out.view(out.size(0), -1)
        out = self.linear(out)

        if layer_num_out == 4:
            out_tmp = Variable(out.detach().data, requires_grad=False)

        # layer_num_out must be one of 1-4; otherwise out_tmp was never
        # assigned and this return raises a NameError.
        if layer_num_out is not None:
            return out, target_reweighted, out_tmp

        if target is not None:
            return out, target_reweighted
        else:
            return out
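A hypothetical calling sketch for the layer_num_out variant above: the third return value is a detached copy of the requested intermediate activation, so it can be logged or fed to an auxiliary loss without backpropagating through it.

out, target_reweighted, feat2 = model(x, target, mixup_hidden=True,
                                      mixup_alpha=2.0, layer_num_out=2)
assert not feat2.requires_grad  # the copy is cut off from the graph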
Code example #4
    def forward(self,
                x,
                target=None,
                mixup=False,
                mixup_hidden=False,
                mixup_alpha=None):

        if self.per_img_std:
            x = per_image_standardization(x)

        if mixup_hidden:
            layer_mix = random.randint(0, 2)
        elif mixup:
            layer_mix = 0
        else:
            layer_mix = None

        out = x

        if mixup_alpha is not None:
            lam = get_lambda(mixup_alpha)
            lam = torch.from_numpy(np.array([lam]).astype('float32')).cuda()
            lam = Variable(lam)

        if target is not None:
            target_reweighted = to_one_hot(target, self.num_classes)

        if layer_mix == 0:
            out, target_reweighted = mixup_process(out,
                                                   target_reweighted,
                                                   lam=lam)

        out = self.conv_1_3x3(out)
        out = F.relu(self.bn_1(out), inplace=True)
        out = self.stage_1(out)

        if layer_mix == 1:
            out, target_reweighted = mixup_process(out,
                                                   target_reweighted,
                                                   lam=lam)

        out = self.stage_2(out)

        if layer_mix == 2:
            out, target_reweighted = mixup_process(out,
                                                   target_reweighted,
                                                   lam=lam)

        out = self.stage_3(out)
        if layer_mix == 3:
            out, target_reweighted = mixup_process(out,
                                                   target_reweighted,
                                                   lam=lam)

        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        if self.dropout:
            out = F.dropout(out, p=0.5, training=self.training)
        out = self.classifier(out)

        if target is not None:
            return out, target_reweighted
        else:
            return out
Code example #5
    def forward(self,
                x,
                target=None,
                mixup=False,
                mixup_hidden=False,
                mixup_alpha=None,
                MyMixup=False,
                MyMixup_num_layer=4):
        if self.per_img_std:
            x = per_image_standardization(x)
        
        if mixup_hidden:
            layer_mix = random.randint(0, 2)
        elif mixup:
            layer_mix = 0
        else:
            layer_mix = None
        
        out = x
        
        if mixup_alpha is not None:
            lam = get_lambda(mixup_alpha)
            lam = torch.from_numpy(np.array([lam]).astype('float32')).cuda()
            lam = Variable(lam)

        # lam is a scalar tensor, e.g. tensor([0.7119], device='cuda:0')

        # --- obtain the output and the reweighted target at each layer ---

        if target is not None:
            target_reweighted = to_one_hot(target, self.num_classes)
        
        if MyMixup:
            if MyMixup_num_layer >= 0:
                # shape: torch.Size([100, 3, 32, 32])
                mixup_out, target_reweighted = mixup_process(out, target_reweighted, lam=lam)

            if MyMixup_num_layer >= 1:
                mixup_out = self.conv1(mixup_out)
                mixup_out = self.layer1(mixup_out)    # shape: torch.Size([100, 64, 32, 32])

            if MyMixup_num_layer >= 2:
                mixup_out = self.layer2(mixup_out)    # shape: torch.Size([100, 128, 16, 16])

            if MyMixup_num_layer >= 3:
                mixup_out = self.layer3(mixup_out)    # shape: torch.Size([100, 256, 8, 8])

            # Run the clean input through the same layers without tracking
            # gradients, so only the mixed path contributes to training.
            with torch.no_grad():
                if MyMixup_num_layer >= 1:
                    out = self.conv1(out)
                    out = self.layer1(out)    # shape: torch.Size([100, 64, 32, 32])
                if MyMixup_num_layer >= 2:
                    out = self.layer2(out)    # shape: torch.Size([100, 128, 16, 16])
                if MyMixup_num_layer >= 3:
                    out = self.layer3(out)    # shape: torch.Size([100, 256, 8, 8])
            return mixup_out, out
                
        else:
            if layer_mix == 0:
                # Mix the raw inputs; out.shape: torch.Size([100, 3, 32, 32])
                out, target_reweighted = mixup_process(out, target_reweighted, lam=lam)

            out = self.conv1(out)
            out = self.layer1(out)
            # out.shape: torch.Size([100, 64, 32, 32])

            if layer_mix == 1:
                # Mix the output of layer 1
                out, target_reweighted = mixup_process(out, target_reweighted, lam=lam)

            out = self.layer2(out)
            # out.shape: torch.Size([100, 128, 16, 16])

            if layer_mix == 2:
                # Mix the output of layer 2
                out, target_reweighted = mixup_process(out, target_reweighted, lam=lam)

            out = self.layer3(out)
            # out.shape: torch.Size([100, 256, 8, 8])
            if layer_mix == 3:
                # Mix the output of layer 3
                out, target_reweighted = mixup_process(out, target_reweighted, lam=lam)
            out = self.layer4(out)
            out = F.avg_pool2d(out, 4)
            out = out.view(out.size(0), -1)
            out = self.linear(out)
            # target_reweighted.shape: torch.Size([100, 10])
            # out.shape (after linear): torch.Size([100, 10])

        if target is not None:
            return out, target_reweighted
        else: 
            return out
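In the MyMixup branch above, forward returns the features of the mixed input (with gradients) and of the clean input (computed under no_grad). One illustrative use, not taken from the source project, is a feature-level consistency regularizer:

mixup_feat, clean_feat = model(x, target, mixup_alpha=2.0,
                               MyMixup=True, MyMixup_num_layer=3)
consistency = F.mse_loss(mixup_feat, clean_feat)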
Code example #6
    def forward(self,
                x,
                target=None,
                mixup=False,
                mixup_hidden=False,
                mixup_alpha=None,
                MyMixup=False,
                MyMixup_num_layer=1):
        if self.per_img_std:
            x = per_image_standardization(x)

        if mixup_hidden:
            layer_mix = random.randint(0, 2)
        elif mixup:
            layer_mix = 0
        else:
            layer_mix = None

        out = x

        if mixup_alpha is not None:
            lam = get_lambda(mixup_alpha)
            lam = torch.from_numpy(np.array([lam]).astype('float32')).cuda()
            lam = Variable(lam)

        if target is not None:
            target_reweighted = to_one_hot(target, self.num_classes)

        if MyMixup:
            # out: input images; target_reweighted: one-hot labels.
            # 'transform' is an external augmentation not defined in this snippet.
            Aug_out = transform(out)

            out = self.conv1(out)
            out = self.layer1(out)

            if MyMixup_num_layer == 1:
                middle_original_output = out

            out = self.layer2(out)

            if MyMixup_num_layer == 2:
                middle_original_output = out

            out = self.layer3(out)
            if MyMixup_num_layer == 3:
                middle_original_output = out

            out = act(self.bn1(out))
            out = F.avg_pool2d(out, 8)
            out = out.view(out.size(0), -1)
            out = self.linear(out)

            # Run the augmented view through the same layers without
            # tracking gradients.
            with torch.no_grad():
                Aug_out = self.conv1(Aug_out)
                Aug_out = self.layer1(Aug_out)

                if MyMixup_num_layer == 1:
                    middle_Aug_output = Aug_out

                Aug_out = self.layer2(Aug_out)

                if MyMixup_num_layer == 2:
                    middle_Aug_output = Aug_out

                Aug_out = self.layer3(Aug_out)
                if MyMixup_num_layer == 3:
                    middle_Aug_output = Aug_out

                Aug_out = act(self.bn1(Aug_out))
                Aug_out = F.avg_pool2d(Aug_out, 8)
                Aug_out = Aug_out.view(Aug_out.size(0), -1)

                Aug_out = self.linear(Aug_out)

            return out, middle_original_output, middle_Aug_output, target_reweighted

        else:
            if layer_mix == 0:
                out, target_reweighted = mixup_process(out,
                                                       target_reweighted,
                                                       lam=lam)

            out = self.conv1(out)
            out = self.layer1(out)

            if layer_mix == 1:
                out, target_reweighted = mixup_process(out,
                                                       target_reweighted,
                                                       lam=lam)

            out = self.layer2(out)

            if layer_mix == 2:
                out, target_reweighted = mixup_process(out,
                                                       target_reweighted,
                                                       lam=lam)

            out = self.layer3(out)
            if layer_mix == 3:
                out, target_reweighted = mixup_process(out,
                                                       target_reweighted,
                                                       lam=lam)

            out = act(self.bn1(out))
            out = F.avg_pool2d(out, 8)
            out = out.view(out.size(0), -1)
            out = self.linear(out)

            if target is not None:
                return out, target_reweighted
            else:
                return out
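Example #6's MyMixup branch returns the clean logits, a mid-layer feature of the clean input, the same-layer feature of the augmented view (computed without gradients), and the one-hot target. A hypothetical training step combining classification with feature matching against the augmented view:

logits, feat, aug_feat, target_1h = model(x, target, MyMixup=True,
                                          MyMixup_num_layer=2)
loss = (F.binary_cross_entropy(F.softmax(logits, dim=1), target_1h)
        + 0.1 * F.mse_loss(feat, aug_feat))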
Code example #7
    def forward(self,
                x,
                target=None,
                mixup_hidden=False,
                mixup_alpha=0.1,
                layer_mix=None):

        if self.per_img_std:
            x = per_image_standardization(x)

        if mixup_hidden:
            if layer_mix is None:
                layer_mix = random.randint(0, 2)

            out = x

            if layer_mix == 0:
                out, y_a, y_b, lam = mixup_data(out, target, mixup_alpha)

            # Use the (possibly mixed) activations rather than the raw x,
            # otherwise the layer_mix == 0 mixing is silently discarded.
            out = self.conv1(out)

            out = self.layer1(out)

            if layer_mix == 1:
                out, y_a, y_b, lam = mixup_data(out, target, mixup_alpha)

            out = self.layer2(out)

            if layer_mix == 2:
                out, y_a, y_b, lam = mixup_data(out, target, mixup_alpha)

            out = self.layer3(out)

            if layer_mix == 3:
                out, y_a, y_b, lam = mixup_data(out, target, mixup_alpha)

            out = self.layer4(out)

            if layer_mix == 4:
                out, y_a, y_b, lam = mixup_data(out, target, mixup_alpha)

            out = F.avg_pool2d(out, 4)
            out = out.view(out.size(0), -1)
            out = self.linear(out)

            lam = torch.tensor(lam).cuda()
            lam = lam.repeat(y_a.size())
            return out, y_a, y_b, lam

        else:
            out = self.conv1(x)
            out = self.layer1(out)
            out = self.layer2(out)
            out = self.layer3(out)
            out = self.layer4(out)
            out = F.avg_pool2d(out, 4)
            out = out.view(out.size(0), -1)
            out = self.linear(out)
            return out
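Examples #7 and #8 use mixup_data instead of mixup_process. A sketch of the standard definition in the style of the original mixup code (facebookresearch/mixup-cifar10), which these forwards appear to assume:

import numpy as np
import torch

def mixup_data(x, y, alpha=1.0):
    # Blend each example with a random partner and return both partners'
    # labels together with the sampled mixing coefficient.
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1.0
    index = torch.randperm(x.size(0), device=x.device)
    mixed_x = lam * x + (1 - lam) * x[index]
    y_a, y_b = y, y[index]
    return mixed_x, y_a, y_b, lam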
        """
Code example #8
File: resnet.py / Project: youtang1993/manifold_mixup
    def forward(self,
                x,
                target=None,
                mixup_hidden=False,
                mixup_alpha=0.1,
                layer_mix=None):
        if self.per_img_std:
            x = per_image_standardization(x)

        if mixup_hidden:
            if layer_mix is None:
                layer_mix = random.randint(0, 2)

            out = x

            if layer_mix == 0:
                out, y_a, y_b, lam = mixup_data(out, target, mixup_alpha)

            # Use the (possibly mixed) activations rather than the raw x,
            # otherwise the layer_mix == 0 mixing is silently discarded.
            out = F.relu(self.bn1(self.conv1(out)))

            out = self.layer1(out)

            if layer_mix == 1:
                out, y_a, y_b, lam = mixup_data(out, target, mixup_alpha)

            out = self.layer2(out)

            if layer_mix == 2:
                out, y_a, y_b, lam = mixup_data(out, target, mixup_alpha)

            out = self.layer3(out)

            if layer_mix == 3:
                out, y_a, y_b, lam = mixup_data(out, target, mixup_alpha)

            out = self.layer4(out)

            if layer_mix == 4:
                out, y_a, y_b, lam = mixup_data(out, target, mixup_alpha)

            out = F.avg_pool2d(out, 4)
            out = out.view(out.size(0), -1)
            out = self.linear(out)

            if layer_mix == 5:
                out, y_a, y_b, lam = mixup_data(out, target, mixup_alpha)

            lam = torch.tensor(lam).cuda()
            lam = lam.repeat(y_a.size())
            return out, y_a, y_b, lam

        else:
            out = x
            out = F.relu(self.bn1(self.conv1(x)))
            out = self.layer1(out)
            out = self.layer2(out)
            out = self.layer3(out)
            out = self.layer4(out)
            out = F.avg_pool2d(out, 4)
            out = out.view(out.size(0), -1)
            out = self.linear(out)
            return out
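With the (out, y_a, y_b, lam) convention of examples #7 and #8, the loss is the usual lam-weighted pair of cross-entropies. Note both forwards repeat lam to a per-sample tensor before returning, so a scalar must be recovered first:

criterion = torch.nn.CrossEntropyLoss()
out, y_a, y_b, lam = model(x, target, mixup_hidden=True, mixup_alpha=0.2)
lam = lam[0].item() if torch.is_tensor(lam) else lam
loss = lam * criterion(out, y_a) + (1 - lam) * criterion(out, y_b)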