def compute_y(self, x, targets, mixup=False, visible_mixup=False):

    target_soft = to_one_hot(targets, 10)

    # Mix in input space ("visible" mixup) before the first hidden layer.
    if visible_mixup:
        lam = Variable(torch.from_numpy(np.array([np.random.beta(0.5, 0.5)]).astype('float32')).cuda())
        x, target_soft = mixup_process(x, target_soft, lam=lam)

    h = self.h1(x)

    # Mix in hidden space (manifold mixup) after the first hidden layer.
    if mixup:
        lam = Variable(torch.from_numpy(np.array([np.random.beta(0.5, 0.5)]).astype('float32')).cuda())
        h, target_soft = mixup_process(h, target_soft, lam=lam)

    y = self.sm(self.h2(h))

    return y, target_soft
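Every snippet on this page calls `to_one_hot` and `mixup_process` without defining them. A minimal sketch consistent with how they are called here (and with the manifold-mixup reference helpers) might look like:

import torch

def to_one_hot(inp, num_classes):
    # inp: LongTensor of class indices, shape (batch,).
    onehot = torch.zeros(inp.size(0), num_classes, device=inp.device)
    return onehot.scatter_(1, inp.unsqueeze(1), 1.0)

def mixup_process(out, target_reweighted, lam):
    # Mix each sample with a random partner from the same batch, and mix the
    # soft targets with the same coefficient lam (a one-element tensor).
    indices = torch.randperm(out.size(0), device=out.device)
    out = out * lam + out[indices] * (1 - lam)
    target_reweighted = target_reweighted * lam + target_reweighted[indices] * (1 - lam)
    return out, target_reweighted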
Example #2
    def forward(self, x, lam=None, target=None, target_reweighted=None, layer_mix='rand'):

        # Pick which layer to mix at: a random one of {0, 1, 2} for manifold
        # mixup, or the input (layer 0) otherwise.
        if layer_mix == 'rand':
            if self.mixup_hidden:
                layer_mix = random.randint(0, 2)
            else:
                layer_mix = 0

        out = x

        if lam is not None:
            # Exactly one of `target` / `target_reweighted` must be supplied.
            if target_reweighted is None:
                target_reweighted = to_one_hot(target, self.num_classes)
            else:
                assert target is None
            if layer_mix == 0:
                out, target_reweighted = mixup_process(out, target_reweighted, lam=lam)

        out = self.conv1(out)
        out = self.layer1(out)

        if lam is not None and layer_mix == 1:
            out, target_reweighted = mixup_process(out, target_reweighted, lam=lam)

        out = self.layer2(out)

        if lam is not None and layer_mix == 2:
            out, target_reweighted = mixup_process(out, target_reweighted, lam=lam)

        out = self.layer3(out)
        out = self.layer4(out)
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)

        if lam is None:
            return out
        else:
            return out, target_reweighted
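A hedged usage sketch for the forward above; `net`, `images`, and `labels` are illustrative names, and the soft-target cross-entropy is one common choice, not taken from this page:

import numpy as np
import torch
import torch.nn.functional as F

lam = torch.tensor([np.random.beta(2.0, 2.0)], device='cuda')  # mixing coefficient
logits, soft_targets = net(images, lam=lam, target=labels, layer_mix='rand')
loss = -(soft_targets * F.log_softmax(logits, dim=1)).sum(dim=1).mean()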
    def forward(self, x, y=None, mixup=False):
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        x = F.max_pool2d(x, 2)
        x = F.relu(self.conv4(x))
        x = F.relu(self.conv5(x))
        x = F.relu(self.conv6(x))
        x = F.max_pool2d(x, 2)
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        if mixup:
            # Mix the penultimate-layer features and the targets with the same
            # Beta(0.5, 0.5) coefficient.
            lam = Variable(torch.from_numpy(np.array([np.random.beta(0.5, 0.5)]).astype('float32')).cuda())
            x, y = utils.mixup_process(x, y, lam=lam)

        x = F.relu(self.fc3(x))
        x = self.fc4(x)
        x = F.log_softmax(x, dim=1)
        if mixup:
            return x, y
        else:
            return x
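Since this forward already returns log-probabilities, the mixed soft targets pair naturally with a soft-target negative log-likelihood. A minimal sketch, assuming `model`, `images`, `labels`, and the `to_one_hot` helper sketched earlier:

y = to_one_hot(labels, 10)                       # soft/one-hot targets expected by mixup_process
log_probs, y_mixed = model(images, y=y, mixup=True)
loss = -(y_mixed * log_probs).sum(dim=1).mean()  # soft-target NLL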
Example #4
    def forward(self,
                x,
                target=None,
                mixup=False,
                mixup_hidden=False,
                mixup_alpha=None,
                layer_num_out=None):

        if self.per_img_std:
            x = per_image_standardization(x)

        if mixup_hidden:
            layer_mix = random.randint(0, 2)
        elif mixup:
            layer_mix = 0
        else:
            layer_mix = None

        out = x

        # `lam` and `target_reweighted` are only defined when `mixup_alpha`
        # and `target` are supplied; callers enabling mixup must pass both.
        if mixup_alpha is not None:
            lam = get_lambda(mixup_alpha)
            lam = torch.from_numpy(np.array([lam]).astype('float32')).cuda()
            lam = Variable(lam)

        if target is not None:
            target_reweighted = to_one_hot(target, self.num_classes)

        if layer_mix == 0:
            out, target_reweighted = mixup_process(out,
                                                   target_reweighted,
                                                   lam=lam)

        out = self.conv1(out)
        out = self.layer1(out)

        if layer_mix == 1:
            out, target_reweighted = mixup_process(out,
                                                   target_reweighted,
                                                   lam=lam)
        if layer_num_out == 1:
            out_tmp = out.detach()  # gradient-free snapshot of the stage-1 features

        out = self.layer2(out)

        if layer_mix == 2:
            out, target_reweighted = mixup_process(out,
                                                   target_reweighted,
                                                   lam=lam)
        if layer_num_out == 2:
            out_tmp = out.detach()

        out = self.layer3(out)
        if layer_mix == 3:
            out, target_reweighted = mixup_process(out,
                                                   target_reweighted,
                                                   lam=lam)
        if layer_num_out == 3:
            out_tmp = out.detach()

        out = act(self.bn1(out))  # `act` is a module-level activation defined elsewhere
        out = F.avg_pool2d(out, 8)
        out = out.view(out.size(0), -1)
        out = self.linear(out)

        if layer_num_out == 4:
            out_tmp = out.detach()

        if layer_num_out is not None:
            return out, target_reweighted, out_tmp

        if target is not None:
            return out, target_reweighted
        else:
            return out
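`get_lambda`, used above and in the remaining snippets, is not shown either. Judging by its call sites (and the manifold-mixup reference code), a sketch could be:

import numpy as np

def get_lambda(alpha=1.0):
    # Sample the mixing coefficient from Beta(alpha, alpha); a non-positive
    # alpha disables mixing (lam = 1 keeps the first sample unchanged).
    return np.random.beta(alpha, alpha) if alpha > 0 else 1.0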
    def forward(self,
                x,
                target=None,
                mixup=False,
                mixup_hidden=False,
                mixup_alpha=None):
        if self.per_img_std:
            x = per_image_standardization(x)

        if mixup_hidden:
            layer_mix = random.randint(0, 2)
        elif mixup:
            layer_mix = 0
        else:
            layer_mix = None

        out = x

        if mixup_alpha is not None:
            lam = get_lambda(mixup_alpha)
            lam = torch.from_numpy(np.array([lam]).astype('float32')).cuda()
            lam = Variable(lam)

        if target is not None:
            target_reweighted = to_one_hot(target, self.num_classes)

        if layer_mix == 0:
            out, target_reweighted = mixup_process(out,
                                                   target_reweighted,
                                                   lam=lam)

        out = self.conv1(out)
        out = self.layer1(out)

        if layer_mix == 1:
            out, target_reweighted = mixup_process(out,
                                                   target_reweighted,
                                                   lam=lam)

        out = self.layer2(out)

        if layer_mix == 2:
            out, target_reweighted = mixup_process(out,
                                                   target_reweighted,
                                                   lam=lam)

        out = self.layer3(out)
        if layer_mix == 3:
            out, target_reweighted = mixup_process(out,
                                                   target_reweighted,
                                                   lam=lam)

        out = self.layer4(out)
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)

        if target is not None:
            return out, target_reweighted
        else:
            return out
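`per_image_standardization` is also undefined on this page. A sketch consistent with the name, assuming it mirrors tf.image.per_image_standardization:

import numpy as np
import torch

def per_image_standardization(x):
    # Normalize each image in the batch to zero mean and unit variance,
    # lower-bounding the std to avoid dividing by zero on constant images.
    n = x[0].numel()
    flat = x.view(x.size(0), -1)
    mean = flat.mean(dim=1, keepdim=True)
    std = flat.std(dim=1, keepdim=True).clamp(min=1.0 / np.sqrt(n))
    return ((flat - mean) / std).view_as(x)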
Example #6
    def forward(self,
                x,
                target=None,
                mixup=False,
                mixup_hidden=False,
                mixup_alpha=None):

        if self.per_img_std:
            x = per_image_standardization(x)

        if mixup_hidden:
            layer_mix = random.randint(0, 2)
        elif mixup:
            layer_mix = 0
        else:
            layer_mix = None

        out = x

        if mixup_alpha is not None:
            lam = get_lambda(mixup_alpha)
            lam = torch.from_numpy(np.array([lam]).astype('float32')).cuda()
            lam = Variable(lam)

        if target is not None:
            target_reweighted = to_one_hot(target, self.num_classes)

        if layer_mix == 0:
            out, target_reweighted = mixup_process(out,
                                                   target_reweighted,
                                                   lam=lam)

        out = self.conv_1_3x3(out)
        out = F.relu(self.bn_1(out), inplace=True)
        out = self.stage_1(out)

        if layer_mix == 1:
            out, target_reweighted = mixup_process(out,
                                                   target_reweighted,
                                                   lam=lam)

        out = self.stage_2(out)

        if layer_mix == 2:
            out, target_reweighted = mixup_process(out,
                                                   target_reweighted,
                                                   lam=lam)

        out = self.stage_3(out)
        if layer_mix == 3:
            out, target_reweighted = mixup_process(out,
                                                   target_reweighted,
                                                   lam=lam)

        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        if self.dropout:
            out = F.dropout(out, p=0.5, training=self.training)
        out = self.classifier(out)

        if target is not None:
            return out, target_reweighted
        else:
            return out
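A hedged training-step sketch for the `mixup`/`mixup_hidden` forwards above; the BCE-on-softmax loss mirrors the manifold-mixup reference training loop these snippets appear to be drawn from:

import torch.nn as nn

bce_loss = nn.BCELoss().cuda()
softmax = nn.Softmax(dim=1).cuda()

output, reweighted_target = net(images, target=labels,
                                mixup_hidden=True, mixup_alpha=2.0)
loss = bce_loss(softmax(output), reweighted_target)
loss.backward()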
    def forward(self, x, target=None, mixup=False, mixup_hidden=False, mixup_alpha=None, MyMixup=False, MyMixup_num_layer=4):
        if self.per_img_std:
            x = per_image_standardization(x)
        
        if mixup_hidden:
            layer_mix = random.randint(0, 2)
        elif mixup:
            layer_mix = 0
        else:
            layer_mix = None
        
        out = x
        
        if mixup_alpha is not None:
            lam = get_lambda(mixup_alpha)
            lam = torch.from_numpy(np.array([lam]).astype('float32')).cuda()
            lam = Variable(lam)

        # `lam` is a one-element float tensor,
        # e.g. tensor([0.7119], device='cuda:0')

        # Obtain the output and the target at the chosen layer.

        if target is not None:
            target_reweighted = to_one_hot(target, self.num_classes)
        
        if MyMixup:
            if MyMixup_num_layer >= 0:
                mixup_out, target_reweighted = mixup_process(out, target_reweighted, lam=lam)  # shape: torch.Size([100, 3, 32, 32])

            if MyMixup_num_layer >= 1:
                mixup_out = self.conv1(mixup_out)
                mixup_out = self.layer1(mixup_out)    # shape: torch.Size([100, 64, 32, 32])

            if MyMixup_num_layer >= 2:
                mixup_out = self.layer2(mixup_out)    # shape: torch.Size([100, 128, 16, 16])

            if MyMixup_num_layer >= 3:
                mixup_out = self.layer3(mixup_out)    # shape: torch.Size([100, 256, 8, 8])

            # Clean (un-mixed) path through the same layers, without gradients.
            with torch.no_grad():
                if MyMixup_num_layer >= 1:
                    out = self.conv1(out)
                    out = self.layer1(out)    # shape: torch.Size([100, 64, 32, 32])
                if MyMixup_num_layer >= 2:
                    out = self.layer2(out)    # shape: torch.Size([100, 128, 16, 16])
                if MyMixup_num_layer >= 3:
                    out = self.layer3(out)    # shape: torch.Size([100, 256, 8, 8])
            return mixup_out, out
                
        else:
            if layer_mix == 0:
                out, target_reweighted = mixup_process(out, target_reweighted, lam=lam)  # mix the input
                # out.shape: torch.Size([100, 3, 32, 32])

            out = self.conv1(out)
            out = self.layer1(out)
            # out.shape: torch.Size([100, 64, 32, 32])

            if layer_mix == 1:
                out, target_reweighted = mixup_process(out, target_reweighted, lam=lam)  # mix the output of layer 1

            out = self.layer2(out)
            # out.shape: torch.Size([100, 128, 16, 16])

            if layer_mix == 2:
                out, target_reweighted = mixup_process(out, target_reweighted, lam=lam)  # mix the output of layer 2

            out = self.layer3(out)
            # out.shape: torch.Size([100, 256, 8, 8])
            if layer_mix == 3:
                out, target_reweighted = mixup_process(out, target_reweighted, lam=lam)  # mix the output of layer 3

            out = self.layer4(out)
            out = F.avg_pool2d(out, 4)
            out = out.view(out.size(0), -1)
            out = self.linear(out)
            # out.shape (after linear): torch.Size([100, 10])
            # target_reweighted.shape:  torch.Size([100, 10])

        if target is not None:
            return out, target_reweighted
        else: 
            return out
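The `MyMixup` branch above returns the mixed-path features alongside the gradient-free clean-path features; how they are combined is not shown on this page. A purely hypothetical consistency objective, with `net`, `images`, and `labels` as illustrative names:

import torch.nn.functional as F

mixup_feat, clean_feat = net(images, target=labels, mixup_alpha=1.0,
                             MyMixup=True, MyMixup_num_layer=3)
# Hypothetical: pull the mixed-path features toward the detached clean-path ones.
consistency = F.mse_loss(mixup_feat, clean_feat)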
Example #8
    def forward(self,
                x,
                target=None,
                mixup=False,
                mixup_hidden=False,
                mixup_alpha=None,
                MyMixup=False,
                MyMixup_num_layer=1):
        if self.per_img_std:
            x = per_image_standardization(x)

        if mixup_hidden:
            layer_mix = random.randint(0, 2)
        elif mixup:
            layer_mix = 0
        else:
            layer_mix = None

        out = x

        if mixup_alpha is not None:
            lam = get_lambda(mixup_alpha)
            lam = torch.from_numpy(np.array([lam]).astype('float32')).cuda()
            lam = Variable(lam)

        if target is not None:
            target_reweighted = to_one_hot(target, self.num_classes)

        if MyMixup:
            """
            
                out: image
                target_reweigted = label
            """

            Aug_out = transform(out)

            out = self.conv1(out)
            out = self.layer1(out)

            if MyMixup_num_layer == 1:
                middle_original_output = out

            out = self.layer2(out)

            if MyMixup_num_layer == 2:
                middle_original_output = out

            out = self.layer3(out)
            if MyMixup_num_layer == 3:
                middle_original_output = out

            out = act(self.bn1(out))
            out = F.avg_pool2d(out, 8)
            out = out.view(out.size(0), -1)
            out = self.linear(out)

            # Forward the augmented batch through the same layers, without gradients.
            with torch.no_grad():
                Aug_out = self.conv1(Aug_out)
                Aug_out = self.layer1(Aug_out)

                if MyMixup_num_layer == 1:
                    middle_Aug_output = Aug_out

                Aug_out = self.layer2(Aug_out)

                if MyMixup_num_layer == 2:
                    middle_Aug_output = Aug_out

                Aug_out = self.layer3(Aug_out)
                if MyMixup_num_layer == 3:
                    middle_Aug_output = Aug_out

                Aug_out = act(self.bn1(Aug_out))
                Aug_out = F.avg_pool2d(Aug_out, 8)
                Aug_out = Aug_out.view(Aug_out.size(0), -1)

                Aug_out = self.linear(Aug_out)

            return out, middle_original_output, middle_Aug_output, target_reweighted

        else:
            if layer_mix == 0:
                out, target_reweighted = mixup_process(out,
                                                       target_reweighted,
                                                       lam=lam)

            out = self.conv1(out)
            out = self.layer1(out)

            if layer_mix == 1:
                out, target_reweighted = mixup_process(out,
                                                       target_reweighted,
                                                       lam=lam)

            out = self.layer2(out)

            if layer_mix == 2:
                out, target_reweighted = mixup_process(out,
                                                       target_reweighted,
                                                       lam=lam)

            out = self.layer3(out)
            if layer_mix == 3:
                out, target_reweighted = mixup_process(out,
                                                       target_reweighted,
                                                       lam=lam)

            out = act(self.bn1(out))
            out = F.avg_pool2d(out, 8)
            out = out.view(out.size(0), -1)
            out = self.linear(out)

            if target is not None:
                return out, target_reweighted
            else:
                return out
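A hedged usage sketch for the `MyMixup` branch above; the split into a classification term plus a feature-consistency term is an assumption, not shown on this page:

import torch.nn.functional as F

logits, mid_clean, mid_aug, soft_targets = net(
    images, target=labels, MyMixup=True, MyMixup_num_layer=2)
cls_loss = -(soft_targets * F.log_softmax(logits, dim=1)).sum(dim=1).mean()
# Hypothetical consistency between clean and augmented intermediate features.
loss = cls_loss + F.mse_loss(mid_clean, mid_aug)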