Exemplo n.º 1
0
def main():
    """Load a pruned VGG-16 checkpoint, validate it, and report its sparsity."""
    model = VGG(depth=16, init_weights=True, cfg=None)
    model.load_state_dict(torch.load("./model_pruned/2019-04-09 11:14:52.016169/column-filter-fivelabels-masked_retrain/cifar10_vgg16_retrained_acc_93.960_4rhos_config_vgg16_v2.yaml.pt"))
    model.cuda()

    criterion = F.cross_entropy
    validate(test_loader, criterion, model)
    print("\n------------------------------\n")


    print('here')
    # List every 4-D (convolutional) weight tensor outside shortcut branches.
    for name, weight in model.named_parameters():
        if weight.dim() == 4 and "shortcut" not in name:
            print(name, weight.size())


    print('here now')
    test_column_sparsity(model)
    test_filter_sparsity(model)
Exemplo n.º 2
0
def get_model(path='pretrained/ckpt.t7'):
    """Build a VGG16 classifier and load weights from a checkpoint.

    The checkpoint at *path* stores the weights under the 'net' key and was
    saved from a ``nn.DataParallel``-wrapped model, so every key carries a
    'module.' prefix.  On GPU we load into a DataParallel wrapper directly;
    on CPU we strip the prefix and move each tensor to the CPU first.

    Relies on a module-level ``use_gpu`` flag.
    """
    state_dict = torch.load(path)['net']

    if use_gpu:
        model = nn.DataParallel(VGG('VGG16'))
    else:
        model = VGG('VGG16')
        new_dict = OrderedDict()
        for key, val in state_dict.items():
            # 'module.' has len 7 — drop the DataParallel prefix.
            new_dict[key[7:]] = val.to('cpu')
        state_dict = new_dict

    model.load_state_dict(state_dict)
    return model
def get_vgg_net(model_folder, out_keys=None):
    """Load a pretrained VGG feature extractor with frozen weights.

    Args:
        model_folder: directory (with trailing separator) containing
            'vgg_conv.pth'.
        out_keys: names of the feature maps the network should return;
            defaults to one activation per conv stage.

    Returns:
        The VGG module on CUDA with gradients disabled.
    """
    # Avoid a mutable default argument: build the default list per call.
    if out_keys is None:
        out_keys = ['r11', 'r21', 'r31', 'r41', 'r51']

    vgg_net = VGG(pool='avg', out_keys=out_keys)
    vgg_net.load_state_dict(torch.load(model_folder + 'vgg_conv.pth'))
    vgg_net.cuda()
    # Freeze: the net is used as a fixed feature extractor, never trained.
    for param in vgg_net.parameters():
        param.requires_grad = False
    return vgg_net
Exemplo n.º 4
0
def test(test_model_dir,
         batch_size,
         mean=(0.5, 0.5, 0.5),
         std=(0.5, 0.5, 0.5),
         model=None):
    """Evaluate a VGG classifier on the CIFAR-100 test split.

    Args:
        test_model_dir: checkpoint path; only read when *model* is None.
        batch_size: evaluation batch size.
        mean, std: per-channel normalization statistics.
        model: optional pre-built model; when None the checkpoint is loaded
            (and moved to CUDA).

    Returns:
        Top-1 accuracy in percent.
    """
    transform = transforms.Compose([
        transforms.Resize(32),
        transforms.ToTensor(),
        transforms.Normalize(mean, std)
    ])
    test_dataset = torchvision.datasets.CIFAR100("../../datasets/",
                                                 download=True,
                                                 train=False,
                                                 transform=transform)
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=batch_size,
                                              shuffle=True)

    if model is None:
        state_dict = torch.load(test_model_dir)
        model = VGG(state_dict['model_type'], True).cuda()
        model.load_state_dict(state_dict["model_state_dict"])

    accuracy = 0.0
    with torch.no_grad():
        tot = 0
        correct = 0

        for idx, (x, y) in enumerate(test_loader):
            x = x.cuda()
            y = y.cuda()

            out = model(x)
            _, predicted = torch.max(out.data, 1)

            # Vectorized count — the original compared element-by-element in
            # Python, paying a y.cpu() device transfer per comparison.
            tot += y.size(0)
            correct += (predicted == y).sum().item()

            print("Test Percent Finished: {}%.".format(idx * 100 /
                                                       len(test_loader)))

        print(
            "{} th Picture Predicted: Correct Predict: {}. Correct Percentage: {:.2f}%"
            .format(tot, correct, correct * 100 / tot))
        accuracy = correct / tot * 100
    return accuracy
def run():
    """Streamlit app: classify the facial expression in an uploaded image."""
    classes = ('Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise',
               'Neutral')
    crop_size = 44
    trained_model = torch.load("C:/Users/Admin/Downloads/model_state.pth.tar")
    model = VGG("VGG19")
    model.load_state_dict(trained_model["model_weights"])
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    model.eval()

    st.title("Facial expression recognition")
    img_file = st.file_uploader("Upload an image", type=["png", "jpg", "jpeg"])

    if img_file is None:
        st.write('** Please upload an image **')
        # Bug fix: bail out — the original fell through to Image.open(None).
        return
    original_image = Image.open(img_file, mode='r')
    st.image(original_image, use_column_width=True)
    # Bug fix: removed the leftover `model = 1`, which clobbered the loaded
    # VGG right before it was handed to detect().
    if st.button('Predict'):
        predict_image = detect(model, original_image)
        image = Image.fromarray(cv2.cvtColor(predict_image, cv2.COLOR_BGR2RGB))
        st.image(image, use_column_width=True)
            teacher = VGG(depth=16, init_weights=True, cfg=None)
        elif args.teachdepth == 19:
            teacher = VGG(depth=19, init_weights=True, cfg=None)
        else:
            sys.exit("vgg doesn't have those depth!")
    elif args.teacharch == "resnet":
        if args.teachdepth == 18:
            teacher = ResNet18()
        elif args.teachdepth == 50:
            teacher = ResNet50()
        else:
            sys.exit("resnet doesn't implement those depth!")
    else:
        sys.exit("unknown network")
    # teacher = ResNet18()
    teacher.load_state_dict(torch.load(args.teacher_path))
    teacher.cuda()
    if args.multi_gpu:
        teacher = torch.nn.DataParallel(teacher)

#############

criterion = CrossEntropyLossMaybeSmooth(smooth_eps=args.smooth_eps).cuda()
# args.smooth = args.smooth_eps > 0.0
# args.mixup = config.alpha > 0.0

optimizer_init_lr = args.warmup_lr if args.warmup else args.lr

optimizer = None
if (args.optmzr == 'sgd'):
    optimizer = torch.optim.SGD(model.parameters(),
ap.add_argument("--output", type=str, help="Output path to save")
ap.add_argument("--mode", type=str, help="mtcnn or haarcascade")
args = ap.parse_args()

# Face-detector backend must be one of the two supported options.
mode = args.mode
assert mode in {"mtcnn", "haarcascade"}

classes = ('Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral')
crop_size = 44
image_path = args.input
# Load the expression-classification model from the saved checkpoint.
trained_model = torch.load(args.trained_model)
print("Load weight model with {} epoch".format(trained_model["epoch"]))

model = VGG(args.model_name)
model.load_state_dict(trained_model["model_weights"])
model.to(device)
model.eval()

# Ten-crop test-time augmentation: 4 corners + center, each plus its
# mirror, stacked into one tensor per input image.
transform_test = transforms.Compose([
    transforms.TenCrop(crop_size),
    transforms.Lambda(lambda crops: torch.stack(
        [transforms.ToTensor()(crop) for crop in crops]))
])


def detect():
    original_image = cv2.imread(image_path)
    if mode == "haarcascade":
        gray_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)
        faces = haarcascade_detect.face_detect(gray_image)
Exemplo n.º 8
0
def main():
    """Train or evaluate an attention-VGG on CIFAR-10 per command-line args.

    Three modes driven by ``args``: one-shot evaluation (--evaluate),
    resuming from a checkpoint (--resume), and training from scratch.
    Metrics are appended to a Logger per epoch and the best checkpoint is
    kept via save_checkpoint().
    """
    best_acc = 0
    start_epoch = args.start_epoch

    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    trainloader = getdata(args, train=True)
    testloader = getdata(args, train=False)

    model = VGG(args.attention, args.nclass)

    if args.gpu:
        if torch.cuda.is_available():
            model = model.cuda()
            cudnn.benchmark = True
        else:
            print(
                'There is no cuda available on this machine use cpu instead.')
            args.gpu = False

    criterion = nn.CrossEntropyLoss()
    # None (not '') is the "unset" sentinel for the optimizer.
    optimizer = None
    if args.optimizer == 'sgd':
        optimizer = optim.SGD(model.parameters(),
                              lr=args.lr,
                              momentum=args.momentum,
                              weight_decay=args.weight_decay)
    elif args.optimizer == 'adam':
        optimizer = optim.Adam(model.parameters(),
                               lr=args.lr,
                               weight_decay=args.weight_decay)
    else:
        print(args.optimizer, 'is not correct')
        return

    title = 'cifar-10-' + args.attention

    if args.evaluate:
        # Evaluation-only path: load weights, report test metrics, exit.
        print('\nEvaluation only')
        assert os.path.isfile(
            args.evaluate), 'Error: no checkpoint directory found!'
        checkpoint = torch.load(args.evaluate)
        model.load_state_dict(checkpoint['state_dict'])
        test_loss, test_acc = test(model, testloader, criterion, args.gpu)
        print(' Test Loss:  %.8f, Test Acc:  %.2f' % (test_loss, test_acc))
        return

    if args.resume:
        # Load checkpoint and append to the existing log file.
        print('==> Resuming from checkpoint..')
        assert os.path.isfile(
            args.resume), 'Error: no checkpoint directory found!'
        args.checkpoint = os.path.dirname(args.resume)
        checkpoint = torch.load(args.resume)
        best_acc = checkpoint['best_acc']
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        logger = Logger(os.path.join(args.checkpoint,
                                     state['attention'] + '-' + 'log.txt'),
                        title=title,
                        resume=True)
    else:
        logger = Logger(os.path.join(args.checkpoint,
                                     state['attention'] + '-' + 'log.txt'),
                        title=title)
        logger.set_names([
            'Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.',
            'Valid Acc.'
        ])

    for epoch in range(start_epoch, args.epochs):
        start_time = time.time()
        adjust_learning_rate(optimizer, epoch)

        train_loss, train_acc = train(model, trainloader, criterion, optimizer,
                                      epoch, args.gpu)
        test_loss, test_acc = test(model, testloader, criterion, args.gpu)
        # On Python 3 the accuracies come back as tensors; unwrap to plain
        # floats for logging.  sys.version_info is the robust check — the
        # original sys.version[0] string compare misreads e.g. Python 10.
        if sys.version_info[0] == 3:
            train_acc = train_acc.cpu().numpy().tolist()[0]
            test_acc = test_acc.cpu().numpy().tolist()[0]
        logger.append(
            [state['lr'], train_loss, test_loss, train_acc, test_acc])

        is_best = test_acc > best_acc
        best_acc = max(test_acc, best_acc)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'acc': test_acc,
                'best_acc': best_acc,
                'optimizer': optimizer.state_dict(),
                'attention': state['attention'],
            },
            is_best,
            checkpoint=args.checkpoint)
        print(time.time() - start_time)
        print(
            "epoch: {:3d}, lr: {:.8f}, train-loss: {:.3f}, test-loss: {:.3f}, train-acc: {:2.3f}, test_acc:, {:2.3f}"
            .format(epoch, state['lr'], train_loss, test_loss, train_acc,
                    test_acc))

    logger.close()
    logger.plot()
    savefig(os.path.join(args.checkpoint,
                         state['attention'] + '-' + 'log.eps'))

    print('Best acc:', best_acc)
Exemplo n.º 9
0
    return


# Neural-style-transfer setup: load the style/content images and a frozen
# pretrained VGG feature extractor, everything on CUDA.
style_img = "./QiBashi.jpg"
content_img = "./2.jpg"
styleImg = load_img(style_img)
contentImg = load_img(content_img)

# for running on cuda
styleImg = styleImg.cuda()
contentImg = contentImg.cuda()

vgg_directory = "./vgg_conv.pth"  # path to the pretrained VGG conv weights
vgg = VGG()
#print(vgg.state_dict())
vgg.load_state_dict(torch.load(vgg_directory))
# Freeze the extractor — only the generated image is optimized.
for param in vgg.parameters():
    param.requires_grad = False

vgg.cuda()  # Putting model on cuda


class GramMatrix(nn.Module):
    """Compute per-sample Gram matrices of a batch of feature maps.

    For input of shape (b, c, h, w) the output has shape (b, c, c); every
    entry is divided (in place) by h*w.
    """

    def forward(self, input):
        batch, channels, height, width = input.size()
        # Flatten each feature map so every channel becomes a row vector.
        flat = input.view(batch, channels, height * width)
        # (b, c, hw) @ (b, hw, c) -> (b, c, c)
        gram = torch.bmm(flat, flat.transpose(1, 2))
        return gram.div_(height * width)
Exemplo n.º 10
0
                                       train=False,
                                       download=True,
                                       transform=transforms.Compose([
                                           transforms.ToTensor(),
                                           normalize,
                                       ]))
# Test loader: batch size 1 in deterministic order for the deconv demo.
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=1,
                                         shuffle=False,
                                         num_workers=2)

# Load the conv model
net = VGG('VGG16')
net = net.cuda()
criterion_no_constrain = nn.CrossEntropyLoss()
net.load_state_dict(torch.load(model_path))

net.eval()
store_feat_maps(net)  # store all feature maps and max pooling locations
#
# Build the deconv net
net_decocnv = _vgg16Deconv()
# Mirror each conv layer's weights (and biases, where mapped) into the
# corresponding deconv layer so the deconv net inverts this exact model.
for idx, layer in enumerate(net.features):
    if isinstance(layer, nn.Conv2d):
        net_decocnv.features[net_decocnv.conv2deconv_indices[
            idx]].weight.data = layer.weight.data
        if idx in net_decocnv.conv2deconv_bias_indices:
            net_decocnv.features[net_decocnv.conv2deconv_bias_indices[
                idx]].bias.data = layer.bias.data
net_decocnv = net_decocnv.cuda()
net_decocnv.eval()
class counterGAN():
    """GAN that perturbs adversarial images so a target VGG16 classifier
    labels them correctly again, while a discriminator is trained to tell
    real images from perturbed ones.
    """

    def __init__(self, device):
        self.nz = 100            # generator latent vector size
        self.beta1 = 0.5         # Adam beta1
        self.real_label = 1
        self.fake_label = 0
        self.L = 100
        self.device = device
        self.iters = 0

        self.netG = Generator(self.nz).to(self.device)
        self.netD = Discriminator().to(self.device)
        self.netTarget = VGG('VGG16').to(self.device)
        self.netTarget.load_state_dict(torch.load('BestClassifierModel.pth',map_location=self.device))

        # fixed_noise -> stores fixed generator seed for inference
        self.fixed_noise = torch.randn(64, self.nz, 1, 1, device=self.device)

        self.netG.apply(self.weights_init)
        self.netD.apply(self.weights_init)

        self.optimizerG = optim.Adam(self.netG.parameters(), lr=2e-4, betas=(self.beta1,0.999))
        self.optimizerD = optim.Adam(self.netD.parameters(), lr=2e-4, betas=(self.beta1,0.999))
        self.criterion = nn.BCELoss()
        self.criterionTarget = nn.CrossEntropyLoss()

        # criterionPerturbation -> norm of the generated noise
        self.criterionPerturbation = nn.MSELoss()

    def weights_init(self, m):
        """DCGAN-style init: N(0, 0.02) for every Conv* layer's weights."""
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            nn.init.normal_(m.weight.data, 0.0, 0.02)

    def save_state_dicts(self, path):
        """Save generator/discriminator weights and optimizer state to *path*."""
        torch.save({
            'netG_state_dict':self.netG.state_dict(),
            'netD_state_dict':self.netD.state_dict(),
            'optimizerG_state_dict':self.optimizerG.state_dict(),
            'optimizerD_state_dict':self.optimizerD.state_dict()
        },path)

    def load_state_dicts(self, path):
        """Restore generator/discriminator weights and optimizer state from *path*."""
        self.netG.load_state_dict(torch.load(path)['netG_state_dict'])
        self.netD.load_state_dict(torch.load(path)['netD_state_dict'])
        self.optimizerG.load_state_dict(torch.load(path)['optimizerG_state_dict'])
        self.optimizerD.load_state_dict(torch.load(path)['optimizerD_state_dict'])

        print('Loaded state dicts')

    def train(self, init_epoch, num_epochs, dataloaders, dataloaders_adv):
        """Joint training loop over paired real/adversarial dataloaders.

        Returns (D_losses, G_losses, img_list_adv, img_list_real).  The
        generator is stepped every batch; the discriminator only every 5th
        batch.  Periodically snapshots fixed-noise outputs for inspection
        and checkpoints after every epoch.
        """
        G_losses = []
        D_losses = []
        T_losses = []
        P_losses = []
        Losses = []
        img_list_adv = []
        img_list_real = []
        self.iters = 0

        for epoch in range(init_epoch,num_epochs):
            for idx, (D, D_adv) in enumerate(zip(dataloaders['train'], dataloaders_adv['train'])):
                self.netG.train()
                self.netD.train()

                # Train Discriminator on adversarial image
                self.netD.zero_grad()

                # Adversarial batch and its target (correct) labels.
                image_adv = D_adv[0].to(self.device)
                target_labels_adv = D_adv[1].to(self.device)
                batch_size_adv = image_adv.size()[0]
                # label_adv -> target label to use for adversarial images while training discriminator
                label_adv = torch.full((batch_size_adv,),self.fake_label,dtype=torch.float,device=self.device)

                # Real batch, truncated to the adversarial batch size.
                image = D[0][:batch_size_adv,...].to(self.device)
                target_labels = D[1].to(self.device)
                batch_size = image.size()[0]
                # label -> target label to use for real images while training discriminator
                label = torch.full((batch_size,),self.real_label,dtype=torch.float,device=self.device)

                out_real= self.netD(image).view(-1)
                loss_d_real = self.criterion(out_real,label)

                # D_x -> output of the discriminator for real images. Between (0,1)
                D_x = out_real.mean().item()

                # Train Discriminator on generated (perturbed) images.
                noise = torch.randn(batch_size,self.nz,1,1,device=self.device)
                generated = self.netG(noise)+image_adv
                label_adv = torch.full((batch_size,),self.fake_label,dtype=torch.float,device=self.device)

                out_generated = self.netD(generated.clone().detach()).view(-1)
                loss_d_generated = self.criterion(out_generated,label_adv.detach())

                # D_G_z1 -> output of the discriminator for generated images. Between (0,1)
                D_G_z1 = out_generated.mean().item()

                # loss_d -> total loss of discriminator
                loss_d = loss_d_generated+loss_d_real

                # Train Generator (also propagates through the target classifier).
                self.netG.zero_grad()
                self.netTarget.zero_grad()

                label_adv = torch.full((batch_size,),self.real_label,dtype=torch.float,device=self.device)

                out_generated_2 = self.netD(generated.clone()).view(-1)
                loss_g = self.criterion(out_generated_2,label_adv)

                # Classifier loss: the perturbed image should get its true label.
                out_classifier = self.netTarget(generated.clone())
                loss_c = self.criterionTarget(out_classifier,target_labels_adv)

                # Perturbation magnitude (MSE between real and generated).
                loss_p = self.criterionPerturbation(image,generated)

                # loss -> final combined objective
                loss = loss_d+(10*loss_c)+loss_g-loss_p
                loss.backward()
                self.optimizerG.step()
                if idx%5==0 and idx!=0:
                    # update the Discriminator every 5th step
                    self.optimizerD.step()

                # D_G_z2 -> output of the discriminator for generated images. Between (0,1)
                D_G_z2 = out_generated_2.mean().item()
                if idx%50==0:
                    print('[%d/%d][%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\tLoss_C: %.4f\tLoss_P: %.4f\tOverall loss: %.4f\tD(x): %.4f\tD(G(z)): %.4f / %.4f'
                  % (epoch, num_epochs, idx, len(dataloaders['train']),
                     loss_d.item(), loss_g.item(), loss_c.item(), loss_p.item(), loss.item(), D_x, D_G_z1, D_G_z2))

                G_losses.append(loss_g.item())
                D_losses.append(loss_d.item())
                T_losses.append(loss_c.item())
                P_losses.append(loss_p.item())
                Losses.append(loss.item())

                # Check how the generator is doing by saving G's output on fixed_noise
                if (self.iters % 50 == 0) or ((epoch == num_epochs-1) and (idx == len(dataloaders['train'])-1)):
                    with torch.no_grad():
                        generated = (self.netG(self.fixed_noise)+image_adv[:64]).detach().cpu()
                        img_list_adv.append(vutils.make_grid(generated, padding=2, normalize=True))
                    with torch.no_grad():
                        generated = (self.netG(self.fixed_noise)+image[:64]).detach().cpu()
                        img_list_real.append(vutils.make_grid(generated, padding=2, normalize=True))
                self.iters += 1

            if epoch%5==0 or epoch==num_epochs-1:
                self.visualize_images(img_list_adv,epoch,img_type='Adversarial')
                self.visualize_images(img_list_real,epoch,img_type='Real')
            self.save_state_dicts(f'BestcounterGAN_{epoch}.pth')
        return D_losses,G_losses, img_list_adv, img_list_real

    def visualize_images(self, img_list, epoch=None, img_type='Real'):
        """Save the most recent image grid in *img_list* to ./images/."""
        if epoch is None:
            epoch = ''

        fig = plt.figure(figsize=(8,8))
        plt.axis("off")
        plt.title("Generated Images {}".format(img_type))
        plt.imshow(np.transpose(img_list[-1],(1,2,0)))
        plt.savefig('./images/Image_{}_{}.png'.format(epoch,img_type))

        return

    def inference(self, images, fixed_noise=None):
        """Add the generator's perturbation to *images* and return a list.

        Bug fix: the original signature used ``fixed_noise=self.fixed_noise``,
        which is evaluated at class-definition time where ``self`` does not
        exist (NameError).  We use a None sentinel and fall back to the
        stored seed instead.
        """
        if fixed_noise is None:
            fixed_noise = self.fixed_noise

        generated = (self.netG(fixed_noise)+images).detach().cpu().tolist()

        return generated
Exemplo n.º 12
0
# Loader over the poisoned (backdoored) CIFAR test set, fixed order.
poisoned_testloader = torch.utils.data.DataLoader(poisoned_testset, batch_size=50, shuffle=False, num_workers=2)


classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

# Model
print('==> Building model..')
net = VGG('VGG11')

net = net.to(device)

# Load the clean, pre-trained checkpoint into the model.
with open("trained_checkpoint_vanilla.pt", "rb") as ckpt_file:
    trained_stat_dict = torch.load(ckpt_file, map_location='cuda')

net.load_state_dict(trained_stat_dict)

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4)
# Learning rate decays 10x at epochs 151 and 251.
scheduler_multi_step = lr_scheduler.MultiStepLR(optimizer, milestones=[e for e in [151, 251]], gamma=0.1)

# Training
def train(epoch, trainloader):
    print('\nEpoch: %d' % epoch)
    net.train()
    train_loss = 0
    correct = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
Exemplo n.º 13
0
        [transforms.ToTensor()(crop) for crop in crops]))
])

# Fresh run vs. resume: build the model/optimizer from scratch when no
# checkpoint is given, otherwise restore both from the checkpoint file.
if args.checkpoint is None:
    start_epoch = 0
    model = VGG(args.model_name)
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=0.9,
                          weight_decay=5e-4)
else:
    checkpoint = torch.load(args.checkpoint)
    start_epoch = checkpoint['epoch'] + 1
    print('\nLoaded checkpoint from epoch %d.\n' % start_epoch)
    model = VGG(args.model_name)
    model.load_state_dict(checkpoint["model_weights"])
    # NOTE(review): the checkpoint appears to store the optimizer object
    # itself (not a state_dict); it is reused as-is unless --adjust_optim
    # requests a fresh SGD instance — confirm against the save path.
    optimizer = checkpoint["optimizer"]
    if args.adjust_optim is not None:
        print("Adjust optimizer....")
        optimizer = optim.SGD(model.parameters(),
                              lr=args.lr,
                              momentum=0.9,
                              weight_decay=5e-4)

data = FER2013(args.dataset_root, split="TRAIN", transform=transform_train)
valid_data = FER2013(args.dataset_root,
                     split="PUBLIC_TEST",
                     transform=transform_test)
train_loader = torch.utils.data.DataLoader(data,
                                           batch_size=args.bs,
                                           shuffle=True,
Exemplo n.º 14
0
    def train(self):
        """Train a VGG on CIFAR-100, logging to TensorBoard and saving a
        checkpoint every 50 epochs plus a final save.

        When ``self.load_model_dir`` points at an earlier checkpoint, the
        epoch window is shifted and the stored loss history is replayed so
        the TensorBoard curve stays continuous.
        """
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        model = VGG(self.model_type, True).to(device)

        loss_function = torch.nn.CrossEntropyLoss()
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=self.learning_rate,
                                    weight_decay=5e-4)

        start_epoch = 0
        end_epoch = self.epochs
        average_loss_list = []
        writer = SummaryWriter('logs')

        if self.load_model_dir is not None:
            checkpoint = torch.load(self.load_model_dir)
            model.load_state_dict(checkpoint['model_state_dict'])
            start_epoch += checkpoint['epoch']
            end_epoch += checkpoint['epoch']
            average_loss_list = checkpoint['average_loss_list']
            # Replay the previous loss curve into TensorBoard.
            for idx, loss in enumerate(average_loss_list):
                writer.add_scalar("Training Loss Average", loss, idx + 1)

        mean, std = self.compute_mean_std(
            datasets.CIFAR100(self.dataset_dir, train=True, download=True))

        milestone = [60, 120, 160, 180, 200, 220]

        transform_ops = transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.RandomResizedCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean, std)
        ])

        train_dataset = datasets.CIFAR100(self.dataset_dir,
                                          train=True,
                                          download=True,
                                          transform=transform_ops)
        train_loader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size=self.batch_size,
                                                   shuffle=True,
                                                   num_workers=self.num_worker)
        train_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer, milestones=milestone, gamma=0.2)

        model.train()

        running_loss = 0
        running_idx = 0
        running_temp_idx = 0

        for epoch in range(start_epoch, end_epoch):
            total_loss = 0
            for batch_idx, (x, y) in enumerate(train_loader):
                # Bug fix: use the selected device rather than the original
                # Variable(x.cuda()), which crashed on the CPU fallback path
                # this method itself sets up (Variable is a no-op wrapper in
                # modern torch anyway).
                x = x.to(device)
                y = y.to(device)
                optimizer.zero_grad()

                predicted = model(x)
                loss = loss_function(predicted, y)
                loss.backward()
                optimizer.step()
                total_loss += loss.data.item()

                running_loss += loss.data.item()
                running_temp_idx += 1
                if batch_idx % 100 == 0:
                    print(
                        'Train Epoch: {}  {:.2f}% Percent Finished. Current Loss: {:.6f}'
                        .format(epoch + 1, 100 * batch_idx / len(train_loader),
                                total_loss))

                if running_temp_idx % 50 == 0:
                    running_idx += 1
                    # Bug fix: the window is 50 batches, so average over 50
                    # (was /100), and reset the correctly-spelled counter
                    # (the original assigned to a typo, `runninng_temp_idx`).
                    writer.add_scalar("Running Loss", running_loss / 50,
                                      running_idx)
                    running_temp_idx = 0
                    running_loss = 0

            writer.add_scalar("Training Loss Average",
                              total_loss / len(train_loader), epoch + 1)
            print('Epoch {} Finished! Total Loss: {:.2f}'.format(
                epoch + 1, total_loss))

            print("---------------Test Initalized!------------------")
            accuracy = test("", 128, model=model)
            writer.add_scalar("Test Accuracy", accuracy, epoch + 1)

            train_scheduler.step()

            average_loss_list.append(total_loss / len(train_loader))
            if (epoch + 1) % 50 == 0:
                torch.save(
                    {
                        'epoch': epoch + 1,
                        'model_state_dict': model.state_dict(),
                        'average_loss_list': average_loss_list,
                        'model_type': self.model_type
                    }, self.model_save_dir +
                    "vgg-checkpoint-{}.pth".format(epoch + 1))

        # Final save ("vgg.pth" — the original called .format() on it with
        # no placeholder, a harmless leftover now removed).
        torch.save(
            {
                'epoch': epoch + 1,
                'model_state_dict': model.state_dict(),
                'average_loss_list': average_loss_list,
                'model_type': self.model_type
            }, self.model_save_dir + "vgg.pth")
Exemplo n.º 15
0
    styleImg = styleImg.unsqueeze(0)
    contentImg = contentImg.unsqueeze(0)
    styleImg,contentImg,content_iq = util.luminance_transfer(styleImg.numpy(),contentImg.numpy())
    styleImg = Variable(torch.from_numpy(styleImg))
    contentImg = Variable(torch.from_numpy(contentImg))
else:
    styleImg = load_image(opt.style_image) # 1x3x512x512
    contentImg = load_image(opt.content_image) # 1x3x512x512

if(opt.cuda):
    styleImg = styleImg.cuda()
    contentImg = contentImg.cuda()

###############   MODEL   ####################
vgg = VGG()
vgg.load_state_dict(torch.load(opt.vgg_dir))
for param in vgg.parameters():
    param.requires_grad = False
if(opt.cuda):
    vgg.cuda()
###########   LOSS & OPTIMIZER   ##########
class GramMatrix(nn.Module):
    """Batch Gram-matrix layer: (b, c, h, w) -> (b, c, c), scaled by 1/(h*w)."""

    def forward(self, input):
        b, c, h, w = input.size()
        # Collapse spatial dims, then correlate channels pairwise.
        feats = input.view(b, c, h * w)
        gram = feats.bmm(feats.transpose(1, 2))
        # In-place division keeps the original contract.
        return gram.div_(h * w)

class styleLoss(nn.Module):
Exemplo n.º 16
0
# Build the network selected on the command line and restore its weights.
print('==> Building model..')
if args.net == 'vgg':
    net = VGG('VGG19')

# NOTE(review): `net` is only bound when args.net == 'vgg'; any other value
# raises NameError on the next line — confirm whether more branches existed.
net = net.to(device)
if device == 'cuda':
    net = torch.nn.DataParallel(net)
    cudnn.benchmark = True

# Load weights from checkpoint.
print('==> Resuming from checkpoint..')
assert os.path.isfile(args.loadfile), 'Error: no checkpoint directory found!'
checkpoint = torch.load(args.loadfile)
net.load_state_dict(checkpoint['net'])

classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
           'ship', 'truck')


def prune_weights(torchweights):
    weights = np.abs(torchweights.cpu().numpy())
    weightshape = weights.shape
    rankedweights = weights.reshape(
        weights.size).argsort()  #.reshape(weightshape)
    #print(weightshape)
    #print(rankedweights)
    max_weight = np.amax(weights)
    print("max weight", max_weight)
    num = weights.size
Exemplo n.º 17
0
def train():
    """Train a VGG network on CIFAR-10 and keep the best checkpoint.

    All configuration comes from the module-level ``args`` namespace
    (batch_size, lrate, epochs, decay, decay_period, use_cuda, eval,
    model_dir).  In ``--eval`` mode the saved model is loaded, evaluated
    on the test set, and the process exits.
    """
    # Standard CIFAR-10 augmentation for training, plain normalization for test.
    transform_train = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomCrop(size=32),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])

    trainset = torchvision.datasets.CIFAR10(root='./data',
                                            train=True,
                                            download=False,
                                            transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              num_workers=2)

    testset = torchvision.datasets.CIFAR10(root='./data',
                                           train=False,
                                           download=False,
                                           transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=args.batch_size,
                                             shuffle=True,
                                             num_workers=2)

    classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
               'ship', 'truck')

    # The model computes its own loss: model(image, label) -> loss tensor.
    model = VGG(vars(args))
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.lrate,
                                momentum=0.9,
                                weight_decay=5e-4)

    if args.use_cuda:
        model = model.cuda()

    if args.eval:
        model.load_state_dict(torch.load(args.model_dir))
        model.eval()
        accuracy = model.evaluate(testloader)
        exit()

    total_size = len(trainloader)
    lrate = args.lrate
    best_score = 0.0
    scores = []
    for epoch in range(1, args.epochs + 1):
        model.train()
        for i, (image, label) in enumerate(trainloader):
            # BUG FIX: the model is moved to the GPU when use_cuda is set,
            # but the batches were left on the CPU, which causes a device
            # mismatch in the forward pass.
            if args.use_cuda:
                image, label = image.cuda(), label.cuda()

            loss = model(image, label)
            # optimizer.zero_grad() already clears every parameter the
            # optimizer owns; the redundant model.zero_grad() was dropped.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if i % 100 == 0:
                # .item() extracts the Python scalar for %-formatting.
                print('Epoch = %d, step = %d / %d, loss = %.5f lrate = %.5f' %
                      (epoch, i, total_size, loss.item(), lrate))

        # Evaluate on the test set after every epoch and persist the history.
        model.eval()
        accuracy = model.evaluate(testloader)
        scores.append(accuracy)

        with open(args.model_dir + "_scores.pkl", "wb") as f:
            pkl.dump(scores, f)

        # Keep only the best-performing checkpoint.
        if best_score < accuracy:
            best_score = accuracy
            print('saving %s ...' % args.model_dir)
            torch.save(model.state_dict(), args.model_dir)

        # Step-decay the learning rate every decay_period epochs.
        if epoch % args.decay_period == 0:
            lrate *= args.decay
            for param_group in optimizer.param_groups:
                param_group['lr'] = lrate
Exemplo n.º 18
0
        ]),
    }

    image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in [train, val]}

    dataloders = {x: torch.utils.data.DataLoader(image_datasets[x],
                                                 batch_size=8,
                                                 shuffle=True,
                                                 num_workers=4) for x in [train, val]}

    dataset_sizes = {x: len(image_datasets[x]) for x in ['train', val]}

    use_gpu = torch.cuda.is_available()

    model = VGG(2)

    if os.path.exists(save_path):
        model.load_state_dict(torch.load(save_path))

    if use_gpu:
        model = model.cuda()

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=0.95)

    scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)

    print ('*' * 10)
    print ('start training')
    trainval(dataloders, model, optimizer, scheduler, criterion, dataset_sizes, phase='train')
Exemplo n.º 19
0
    contentImg = contentImg.unsqueeze(0)
    styleImg, contentImg, content_iq = util.luminance_transfer(
        styleImg.numpy(), contentImg.numpy())
    styleImg = Variable(torch.from_numpy(styleImg))
    contentImg = Variable(torch.from_numpy(contentImg))
else:
    styleImg = load_image(opt.style_image)  # 1x3x512x512
    contentImg = load_image(opt.content_image)  # 1x3x512x512

if (opt.cuda):
    styleImg = styleImg.cuda()
    contentImg = contentImg.cuda()

###############   MODEL   ####################
# Build the VGG feature extractor and load its pretrained weights.
vgg = VGG()
vgg.load_state_dict(torch.load(opt.vgg_dir))
# Freeze the network: it serves only as a fixed feature extractor, so no
# gradients should flow into its parameters.
for param in vgg.parameters():
    param.requires_grad = False
if (opt.cuda):
    vgg.cuda()


###########   LOSS & OPTIMIZER   ##########
class GramMatrix(nn.Module):
    # NOTE(review): this definition is truncated in this chunk — the final
    # `return` of the normalized Gram matrix is cut off after these lines.
    def forward(self, input):
        b, c, h, w = input.size()
        # Flatten the spatial dims so each channel becomes a length-(h*w) vector.
        f = input.view(b, c, h * w)  # bxcx(hxw)
        # torch.bmm(batch1, batch2, out=None)   #
        # batch1: bxmxp, batch2: bxpxn -> bxmxn #
        G = torch.bmm(f, f.transpose(
            1, 2))  # f: bxcx(hxw), f.transpose: bx(hxw)xc -> bxcxc
Exemplo n.º 20
0
class counterGAN():
    """GAN that learns additive perturbations against a fixed VGG16 classifier.

    Wires together a Generator (noise -> perturbation), a Discriminator
    (real vs. perturbed image), and a pretrained target classifier, along
    with their optimizers and loss functions.
    """

    def __init__(self,device):
        
        # Hyper-parameters / bookkeeping.
        self.nz = 100          # generator latent-vector length
        self.beta1 = 0.5       # Adam beta1 (DCGAN-style value)
        self.real_label = 1
        self.fake_label = 0
        self.L = 100           # NOTE(review): unused in the visible code — confirm before removing
        self.device = device
        self.iters = 0         # global step counter (reset inside train())

        self.netG = Generator(self.nz).to(self.device)
        self.netD = Discriminator().to(self.device)
        # Target classifier loaded from a checkpoint; its weights are never
        # stepped by any optimizer here (only zero_grad is called on it).
        self.netTarget = VGG('VGG16').to(self.device)
        self.netTarget.load_state_dict(torch.load('BestClassifierModel.pth'))
        # Fixed noise batch used to visualize generator progress over time.
        self.fixed_noise = torch.randn(64, self.nz, 1, 1, device=self.device)

        self.netG.apply(self.weights_init)
        self.netD.apply(self.weights_init)

        self.optimizerG = optim.Adam(self.netG.parameters(), lr=2e-4, betas=(self.beta1,0.999))
        self.optimizerD = optim.Adam(self.netD.parameters(), lr=2e-4, betas=(self.beta1,0.999))
        self.criterion = nn.BCELoss()                  # real/fake adversarial loss
        self.criterionTarget = nn.CrossEntropyLoss()   # classifier loss on perturbed images
        self.criterionPerturbation = nn.MSELoss()      # perturbation magnitude

    def weights_init(self,m):
        # DCGAN-style initialization: N(0, 0.02) on every Conv layer's weights.
        # NOTE(review): other layer types (e.g. BatchNorm) are left at defaults.
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            nn.init.normal_(m.weight.data, 0.0, 0.02)

    def save_state_dicts(self,path):
        """Save generator/discriminator weights and optimizer states to `path`."""
        
        torch.save({
            'netG_state_dict':self.netG.state_dict(),
            'netD_state_dict':self.netD.state_dict(),
            'optimizerG_state_dict':self.optimizerG.state_dict(),
            'optimizerD_state_dict':self.optimizerD.state_dict()
        },path)
        
        # print('Saved state dicts')

    def load_state_dicts(self,path):
        """Restore generator/discriminator weights and optimizer states from `path`."""
        
        # NOTE(review): torch.load is called four times on the same file;
        # loading once into a local would avoid re-reading it.
        self.netG.load_state_dict(torch.load(path)['netG_state_dict'])
        self.netD.load_state_dict(torch.load(path)['netD_state_dict'])
        self.optimizerG.load_state_dict(torch.load(path)['optimizerG_state_dict'])
        self.optimizerD.load_state_dict(torch.load(path)['optimizerD_state_dict'])

        print('Loaded state dicts')

    def train(self,init_epoch, num_epochs,dataloader):
        """Train from init_epoch to num_epochs-1 over dataloader['train'].

        Returns (D_losses, G_losses, img_list): per-iteration loss logs and
        the image grids generated from self.fixed_noise for visualization.
        """

        G_losses = []
        D_losses = []
        T_losses = []
        P_losses = []
        Losses = []
        img_list = []
        self.iters = 0

        for epoch in range(init_epoch,num_epochs):
            for idx,D in enumerate(dataloader['train']):
                self.netG.train()
                self.netD.train()
                
                #Train Discriminator on real image
                self.netD.zero_grad()

                image = D[0].to(self.device)
                target_labels = D[1].to(self.device)
                batch_size = image.size()[0]
                label = torch.full((batch_size,),self.real_label,dtype=torch.float,device=self.device)

                out_real = self.netD(image).view(-1)
                loss_d_real = self.criterion(out_real,label)
                # loss_d_real.backward()

                # Mean discriminator score on real images (logging only).
                D_x = out_real.mean().item()

                #Train Discriminator on generated images
                noise = torch.randn(batch_size,self.nz,1,1,device=self.device)

                # The "fake" sample is the clean image plus a generated
                # additive perturbation.
                generated = self.netG(noise)+image
                label = torch.full((batch_size,),self.fake_label,dtype=torch.float,device=self.device)

                # detach() stops the D loss from backpropagating into G.
                out_generated = self.netD(generated.clone().detach()).view(-1)
                loss_d_generated = self.criterion(out_generated,label.detach())
                # loss_d_generated.backward()

                D_G_z1 = out_generated.mean().item()

                loss_d = loss_d_generated+loss_d_real

                # self.optimizerD.step()

                #Train Generator
                self.netG.zero_grad()
                self.netTarget.zero_grad()

                label = torch.full((batch_size,),self.real_label,dtype=torch.float,device=self.device)

                # Generator objective: make D label the perturbed image "real".
                out_generated_2 = self.netD(generated.clone()).view(-1)
                loss_g = self.criterion(out_generated_2,label)
                # loss_g.backward(retain_graph=True)

                out_classifier = self.netTarget(generated.clone())
                loss_c = self.criterionTarget(out_classifier,target_labels)
                # loss_c.backward()

                D_G_z2 = out_generated_2.mean().item()

                loss_p = self.criterionPerturbation(image,generated)

                # Single combined objective: one backward pass, then BOTH
                # optimizers step.  NOTE(review): minimizing -loss_p rewards a
                # LARGER perturbation, and minimizing loss_c rewards CORRECT
                # classification of the perturbed image — confirm both signs
                # match the intended adversarial objective.
                loss = loss_d+loss_c+loss_g-loss_p
                loss.backward()
                self.optimizerD.step()
                self.optimizerG.step()

                # self.optimizerG.step()

                if idx%50==0:
                    print('[%d/%d][%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\tLoss_C: %.4f\tLoss_P: %.4f\tOverall loss: %.4f\tD(x): %.4f\tD(G(z)): %.4f / %.4f'
                  % (epoch, num_epochs, idx, len(dataloader['train']),
                     loss_d.item(), loss_g.item(), loss_c.item(), loss_p.item(), loss.item(), D_x, D_G_z1, D_G_z2))
                
                G_losses.append(loss_g.item())
                D_losses.append(loss_d.item())
                T_losses.append(loss_c.item())
                P_losses.append(loss_p.item())
                Losses.append(loss.item())

                # Check how the generator is doing by saving G's output on fixed_noise
                if (self.iters % 500 == 0) or ((epoch == num_epochs-1) and (idx == len(dataloader['train'])-1)):
                    with torch.no_grad():
                        generated = (self.netG(self.fixed_noise)+image[:64]).detach().cpu()
                        img_list.append(vutils.make_grid(generated, padding=2, normalize=True))
                self.iters += 1

                # NOTE(review): checkpointing on EVERY iteration is expensive;
                # consider moving this to the end of each epoch.
                self.save_state_dicts('BestcounterGAN.pth')

            if epoch%5==0:

                # NOTE(review): assumes img_list is non-empty; true whenever
                # the dataloader yielded at least one batch (iters==0 appends).
                self.visualize_images(img_list,epoch)

        return D_losses,G_losses,img_list


    def visualize_images(self,img_list,epoch=None):
        """Save the most recent generated-image grid to ./images/Image_<epoch>.png."""

        if epoch==None:
            
            epoch = ''
        
        fig = plt.figure(figsize=(8,8))
        plt.axis("off")
        plt.title("Generated Images")
        plt.imshow(np.transpose(img_list[-1],(1,2,0)))
        plt.savefig('./images/Image_{}.png'.format(epoch))

        return