Beispiel #1
0
    D.weight_init(0.0, 0.02)
    optimizer_G = torch.optim.Adam(G.parameters(), lr=lr, betas=(0.5, 0.999))
    optimizer_D = torch.optim.Adam(D.parameters(), lr=lr, betas=(0.5, 0.999))
    criterion = nn.BCELoss()

    # the labels used in G (1 * 10 * 1 * 1)
    labels_G = torch.zeros(10, 10)
    labels_G = labels_G.scatter_(1, torch.LongTensor(list(range(10))).reshape(10, 1), 1).reshape(10, 10, 1, 1)
    # the labels used in D (1 * 10 * 32 * 32)
    labels_D = torch.zeros([10, 10, 32, 32])
    for i in range(10):
        labels_D[i, i, :, :] = 1

    transform = tfs.Compose([
        tfs.Resize(32),
        tfs.ToTensor(),
        tfs.Normalize(mean=[0.5], std=[0.5])
    ])

    # 100 fixed samples
    fixed_noise = torch.randn(100, 100).reshape(-1, 100, 1, 1)

    fixed_y = []
    for i in range(10):
        fixed_y += [i] * 10
    fixed_y = torch.LongTensor(fixed_y).reshape(-1, 1)
    fixed_label_G = labels_G[fixed_y].reshape(-1, 10, 1, 1)

    fixed_noise_var = Variable(fixed_noise.cuda())
    fixed_y_label_var = Variable(fixed_label_G.cuda())
Beispiel #2
0
# ß-VAE regularisation strength and checkpoint cadence (in iterations).
beta = 4
save_iter = 20

# MNIST image geometry; n_obs is the flattened input dimensionality.
shape = (28, 28)
n_obs = shape[0] * shape[1]

# create DAE and ß-VAE and their training history
# NOTE(review): DAE/BetaVAE/History are project classes; learning rates are
# 1e-3 for the DAE and 1e-4 for the ß-VAE — confirm against their signatures.
dae = DAE(n_obs, num_epochs, batch_size, 1e-3, save_iter, shape)
beta_vae = BetaVAE(n_obs, num_epochs, batch_size, 1e-4, beta, save_iter, shape)
history = History()

# fill autoencoder training history with examples
print('Filling history...', end='', flush=True)

# ColorJitter() with default arguments applies no jitter; images are then tensors.
transformation = transforms.Compose([
    transforms.ColorJitter(),
    transforms.ToTensor()
])

dataset = MNIST('data', transform=transformation)
dataloader = DataLoader(dataset, batch_size=1, shuffle=True)

# Flatten each (1, 784) image to a nested Python list and store it in history.
for data in dataloader:
    img, _ = data
    img = img.view(img.size(0), -1).numpy().tolist()
    history.store(img)
print('DONE')

# train DAE
dae.train(history)

# train ß-VAE
Beispiel #3
0
import torchvision.datasets
import torchvision.transforms as transforms
from src.data.utils import *

# Directory where torchvision downloads/caches MNIST.
root_dir = './data/'

# Standard MNIST normalisation (dataset mean and std).
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
])

train_dataset = torchvision.datasets.MNIST(
    root_dir, 
    train=True, 
    transform=transform, 
    download=True,
)
test_dataset = torchvision.datasets.MNIST(
    root_dir, 
    train=False, 
    transform=transform, 
    download=True,
)

def niid(params):
    num_user = params['Trainer']['n_clients']
    s = params['Dataset']['s']
    dataset_split = split_dataset_by_percent(train_dataset, test_dataset, s, num_user)
    testset_dict = {
        'train': None,
        'test': test_dataset,
Beispiel #4
0
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out


def ResNet18():
    """Build a ResNet-18: BasicBlock with two residual blocks per stage."""
    blocks_per_stage = [2, 2, 2, 2]
    return ResNet(BasicBlock, blocks_per_stage)


# setup dataset
# Data root is taken from the environment; None if DATA_DIR is unset.
data_dir = os.getenv('DATA_DIR')

# Standard CIFAR-10 augmentation (pad-and-crop + flip) with the usual
# per-channel CIFAR-10 normalisation statistics.
train_transform = transforms.Compose([
    # transforms.RandomRotation(degrees=[0, 360]),
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])

# Deterministic pipeline for evaluation / inference.
test_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])

cifar_train = ImageDataCLS(root=data_dir,
                           train=True,
                           transform=train_transform)
# Train split re-used for uncertainty inference: no augmentation, only the
# deterministic test-time transform.
cifar_infer = ImageDataCLS(root=data_dir, train=True, transform=test_transform)
Beispiel #5
0
        img = self.transform(img)

        mask = Image.open(self.df.iloc[index, 1]).convert('L')
        mask = self.transform(mask)

        return {
            'image': img,
            'mask': mask
        }

    def __len__(self):
        """Dataset size: one sample per row of the backing dataframe."""
        return len(self.df)


# Resize images (and masks — see the dataset's __getitem__) to 321x321 tensors.
train_transform = transforms.Compose([
    transforms.Resize([321, 321]),
    transforms.ToTensor(),
])

if __name__ == '__main__':
    # Paths to the Supervisely segmentation data and the pre-built split CSVs.
    dir_img = "/home/muyun99/Desktop/supervisely/train/"
    dir_mask = "/home/muyun99/Desktop/supervisely/train_mask/"
    scale = 1.0
    val_percent = 0.1
    batch_size = 8

    # The same deterministic transform is applied to all three splits.
    train_dataset = BasicDataset(file_csv="/home/muyun99/Desktop/supervisely/csv_25/train.csv",
                                 transform=train_transform)
    val_dataset = BasicDataset(file_csv="/home/muyun99/Desktop/supervisely/csv_25/valid.csv",
                               transform=train_transform)
    test_dataset = BasicDataset(file_csv="/home/muyun99/Desktop/supervisely/csv_25/test.csv",
                                transform=train_transform)
Beispiel #6
0
        return center_offset,point_form


if __name__ == '__main__':
    # Placing above line is required to solve the issue:
    # https://github.com/pytorch/pytorch/issues/5858
    # Build the reduced ResNet-18 feature extractor and load its weights.
    resnet_features = ResNet18Reduced()
    resnet_features.load_state_dict(torch.load('weights/resnet18reduced.pth'))


    # ImageNet channel statistics.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    # NOTE(review): RandomSizedCrop is the deprecated alias of RandomResizedCrop.
    train_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder('images', transforms.Compose([
            transforms.RandomSizedCrop(400),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=1, shuffle=True,
        num_workers=1, pin_memory=True)

    # Grab a single batch for a forward-pass smoke test.
    for a,b in train_loader:
        break

    #res_res = resnet_features(a)

    net = JanuaryNet(resnet_features, 3)
    net.init(weight_init)

    loc,conf = net(a)

import util.loader.transform as tr
from util.loader.loader import DAVIS2016
from util.metrics import runningScore
from util.loss import *
from util.utils import *
from util.helpers import *

from PIL import Image

# Input resolution, batching and learning-rate hyperparameters.
img_rows   = 256
img_cols   = 512
batch_size = 4
lr         = 1e-4

# Training split: geometric augmentation, then resize and tensor conversion.
ttransforms = transforms.Compose([tr.ScaleNRotate(), tr.RandomHorizontalFlip(), tr.Resize([img_rows, img_cols]), tr.ToTensor()])
tdataset = DAVIS2016(root='../dataset/davis.pkl', split='train', transform=ttransforms)
tdataloader = torch.utils.data.DataLoader(tdataset, batch_size=batch_size, shuffle=True, num_workers=8)

# Validation split: deterministic resize only, batch size 1.
vtransforms = transforms.Compose([tr.Resize([img_rows, img_cols]), tr.ToTensor()])
vdataset = DAVIS2016(root='../dataset/davis.pkl', split='val', transform=vtransforms)
vdataloader = torch.utils.data.DataLoader(vdataset, batch_size=1, shuffle=False, num_workers=8)

# Setup Metrics
# NOTE(review): pspnet_specs must be defined earlier in this module.
running_metrics = runningScore(pspnet_specs['n_classes'])

# setup Model
base_net = BaseNet()
class_net = ClassNet()

base_net.cuda()
Beispiel #8
0
import matplotlib.patches as patches

# Inference width and keypoint-detection score threshold.
# (BUGFIX: these two constants were previously defined twice.)
IMG_SIZE = 480
THRESHOLD = 0.95

# Pretrained Keypoint R-CNN detector in inference mode (weights auto-downloaded).
model = models.detection.keypointrcnn_resnet50_fpn(pretrained=True).eval()

# Load the input image and resize to IMG_SIZE wide, preserving aspect ratio.
img = Image.open('img_data.jpg')
img = img.resize((IMG_SIZE, int(img.height * IMG_SIZE / img.width)))

plt.figure(figsize=(16, 16))
trf = T.Compose([T.ToTensor()])

input_img = trf(img)
out = model([input_img])[0]
# `out` is a dict with boxes, labels, scores, keypoints and keypoint scores.

# Path codes for drawing a three-point limb segment (move, line, line).
codes = [Path.MOVETO, Path.LINETO, Path.LINETO]

fig, ax = plt.subplots(1, figsize=(16, 16))
ax.imshow(img)

for box, score, keypoints in zip(out['boxes'], out['scores'],
                                 out['keypoints']):
    score = score.detach().numpy()

    if score < THRESHOLD:
Beispiel #9
0
def main():
    """Train a WRN-28-2 with semi-supervised (MixMatch-style) learning on CIFAR-10.

    Relies on module-level names defined elsewhere in this file: ``args``,
    ``state``, ``use_cuda``, ``best_acc``, plus the project helpers ``dataset``,
    ``models``, ``train``, ``validate``, ``save_checkpoint``, ``Logger``,
    ``SemiLoss`` and ``WeightEMA``.
    """
    global best_acc

    # Create the output directory for checkpoints and logs.
    if not os.path.isdir(args.out):
        mkdir_p(args.out)

    # Data
    print(f'==> Preparing cifar10')
    # Augmented pipeline for training; deterministic one for validation/test.
    transform_train = transforms.Compose([
        dataset.RandomPadandCrop(32),
        dataset.RandomFlip(),
        dataset.ToTensor(),
    ])

    transform_val = transforms.Compose([
        dataset.ToTensor(),
    ])

    # Split CIFAR-10 into labeled/unlabeled training sets plus val and test.
    train_labeled_set, train_unlabeled_set, val_set, test_set = dataset.get_cifar10(
        './data',
        args.n_labeled,
        transform_train=transform_train,
        transform_val=transform_val)
    labeled_trainloader = data.DataLoader(train_labeled_set,
                                          batch_size=args.batch_size,
                                          shuffle=True,
                                          num_workers=0,
                                          drop_last=True)
    unlabeled_trainloader = data.DataLoader(train_unlabeled_set,
                                            batch_size=args.batch_size,
                                            shuffle=True,
                                            num_workers=0,
                                            drop_last=True)
    val_loader = data.DataLoader(val_set,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 num_workers=0)
    test_loader = data.DataLoader(test_set,
                                  batch_size=args.batch_size,
                                  shuffle=False,
                                  num_workers=0)

    # Model
    print("==> creating WRN-28-2")

    def create_model(ema=False):
        # The EMA copy never receives gradients, so detach its parameters.
        model = models.WideResNet(num_classes=10)
        model = model.cuda()

        if ema:
            for param in model.parameters():
                param.detach_()

        return model

    model = create_model()
    ema_model = create_model(ema=True)

    cudnn.benchmark = True
    print('    Total params: %.2fM' %
          (sum(p.numel() for p in model.parameters()) / 1000000.0))

    # Semi-supervised loss for training; plain cross-entropy for evaluation.
    train_criterion = SemiLoss()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    # Exponential moving average of the student weights; used for evaluation.
    ema_optimizer = WeightEMA(model, ema_model, alpha=args.ema_decay)
    start_epoch = 0

    # Resume
    title = 'noisy-cifar-10'
    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        assert os.path.isfile(
            args.resume), 'Error: no checkpoint directory found!'
        args.out = os.path.dirname(args.resume)
        checkpoint = torch.load(args.resume)
        best_acc = checkpoint['best_acc']
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        ema_model.load_state_dict(checkpoint['ema_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        logger = Logger(os.path.join(args.out, 'log.txt'),
                        title=title,
                        resume=True)
    else:
        logger = Logger(os.path.join(args.out, 'log.txt'), title=title)
        logger.set_names([
            'Train Loss', 'Train Loss X', 'Train Loss U', 'Valid Loss',
            'Valid Acc.', 'Test Loss', 'Test Acc.'
        ])

    writer = SummaryWriter(args.out)
    step = 0
    test_accs = []
    # Train and val
    for epoch in range(start_epoch, args.epochs):

        print('\nEpoch: [%d | %d] LR: %f' %
              (epoch + 1, args.epochs, state['lr']))

        train_loss, train_loss_x, train_loss_u = train(
            labeled_trainloader, unlabeled_trainloader, model, optimizer,
            ema_optimizer, train_criterion, epoch, use_cuda)
        # All evaluation uses the EMA model, not the raw student.
        _, train_acc = validate(labeled_trainloader,
                                ema_model,
                                criterion,
                                epoch,
                                use_cuda,
                                mode='Train Stats')
        val_loss, val_acc = validate(val_loader,
                                     ema_model,
                                     criterion,
                                     epoch,
                                     use_cuda,
                                     mode='Valid Stats')
        test_loss, test_acc = validate(test_loader,
                                       ema_model,
                                       criterion,
                                       epoch,
                                       use_cuda,
                                       mode='Test Stats ')

        # TensorBoard global step: iterations completed so far.
        step = args.val_iteration * (epoch + 1)

        writer.add_scalar('losses/train_loss', train_loss, step)
        writer.add_scalar('losses/valid_loss', val_loss, step)
        writer.add_scalar('losses/test_loss', test_loss, step)

        writer.add_scalar('accuracy/train_acc', train_acc, step)
        writer.add_scalar('accuracy/val_acc', val_acc, step)
        writer.add_scalar('accuracy/test_acc', test_acc, step)

        # append logger file
        logger.append([
            train_loss, train_loss_x, train_loss_u, val_loss, val_acc,
            test_loss, test_acc
        ])

        # save model
        # Model selection is based on validation accuracy.
        is_best = val_acc > best_acc
        best_acc = max(val_acc, best_acc)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'ema_state_dict': ema_model.state_dict(),
                'acc': val_acc,
                'best_acc': best_acc,
                'optimizer': optimizer.state_dict(),
            }, is_best)
        test_accs.append(test_acc)
    logger.close()
    writer.close()

    print('Best acc:')
    print(best_acc)

    print('Mean acc:')
    # Report mean test accuracy over the final 20 epochs.
    print(np.mean(test_accs[-20:]))
import matplotlib.pyplot as plt
import torch
import numpy as np
import argparse
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models

# Training augmentation for 224x224 inputs, normalised with the standard
# ImageNet channel statistics expected by torchvision pretrained models.
train_transforms = transforms.Compose([
    transforms.RandomRotation(30),
    transforms.RandomResizedCrop(224),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

# Deterministic resize-and-center-crop pipeline for validation.
valid_transforms = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

# Test pipeline is identical to validation.
test_transforms = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
import os
from torchvision import datasets, transforms
import torch
import matplotlib.pyplot as plt
import sys

# Target image size; crop equals resize so no border pixels are discarded.
size = 128
cropped_size = 128

train_transforms = transforms.Compose([
    transforms.Resize(size),
    transforms.CenterCrop(size),
    transforms.ToTensor()
])

# Same deterministic pipeline for evaluation splits.
test_transforms = transforms.Compose([
    transforms.Resize(size),
    transforms.CenterCrop(size),
    transforms.ToTensor()
])

# data_dir = "./dogImages"
data_dir = "./dogImagesSample"
# Pass transforms in here, then run the next cell to see how the transforms look
train_data = datasets.ImageFolder(data_dir + '/train',
                                  transform=train_transforms)
test_data = datasets.ImageFolder(data_dir + '/test', transform=test_transforms)
valid_data = datasets.ImageFolder(data_dir + '/valid',
                                  transform=test_transforms)

trainloader = torch.utils.data.DataLoader(train_data, batch_size=64)
def main():
    """Train (or evaluate) a classifier/metric-learning model driven by ``cfg``.

    Uses module-level globals defined elsewhere in this file: ``cfg``, ``args``,
    ``BEST_ACC``, ``LR_STATE``, ``USE_CUDA``, plus project helpers
    (``CustomData``, ``RandomIdentitySampler``, ``RandomPixelJitter``,
    ``TripletLoss``, ``Logger``, ``train``, ``test``, ``evaluator``,
    ``measure_model``, ``weight_filler``, ``save_checkpoint``, ``draw_curve``,
    ``mkdir_p``, ``savefig``).
    """
    global BEST_ACC, LR_STATE
    start_epoch = cfg.CLS.start_epoch  # start from epoch 0 or last checkpoint epoch

    # Create ckpt folder and archive the config file used for this run.
    if not os.path.isdir(cfg.CLS.ckpt):
        mkdir_p(cfg.CLS.ckpt)
    if args.cfg_file is not None and not cfg.CLS.evaluate:
        shutil.copyfile(args.cfg_file, os.path.join(cfg.CLS.ckpt, args.cfg_file.split('/')[-1]))

    # Dataset and Loader
    normalize = transforms.Normalize(mean=cfg.pixel_mean, std=cfg.pixel_std)
    # Base spatial augmentation, selected by configured crop type.
    if cfg.CLS.train_crop_type == 'center':
        train_aug = [transforms.Resize(cfg.CLS.base_size),
                     transforms.CenterCrop(cfg.CLS.crop_size),
                     transforms.RandomHorizontalFlip()]
    elif cfg.CLS.train_crop_type == 'random_resized':
        train_aug = [transforms.RandomResizedCrop(cfg.CLS.crop_size),
                     transforms.RandomHorizontalFlip()]
    else:
        train_aug = [transforms.RandomHorizontalFlip()]
    # Optional extra augmentations, appended when enabled in the config.
    if len(cfg.CLS.rotation) > 0:
        train_aug.append(transforms.RandomRotation(cfg.CLS.rotation))
    if len(cfg.CLS.pixel_jitter) > 0:
        train_aug.append(RandomPixelJitter(cfg.CLS.pixel_jitter))
    if cfg.CLS.grayscale > 0:
        train_aug.append(transforms.RandomGrayscale(cfg.CLS.grayscale))
    train_aug.append(transforms.ToTensor())
    train_aug.append(normalize)

    # Deterministic validation pipeline.
    val_aug = [transforms.Resize(cfg.CLS.base_size),
               transforms.CenterCrop(cfg.CLS.crop_size),
               transforms.ToTensor(),
               normalize]

    # A file at train_root means "list file + image root"; a directory means
    # a plain ImageFolder layout.
    if os.path.isfile(cfg.CLS.train_root):
        train_datasets = CustomData(img_path=cfg.CLS.data_root,
                                    txt_path=cfg.CLS.train_root,
                                    data_transforms=transforms.Compose(train_aug))
        val_datasets = CustomData(img_path=cfg.CLS.data_root,
                                  txt_path=cfg.CLS.val_root,
                                  data_transforms=transforms.Compose(val_aug))
    elif os.path.isdir(cfg.CLS.data_root + cfg.CLS.train_root):
        traindir = os.path.join(cfg.CLS.data_root, cfg.CLS.train_root)
        train_datasets = datasets.ImageFolder(traindir, transforms.Compose(train_aug))

        valdir = os.path.join(cfg.CLS.data_root, cfg.CLS.val_root)
        val_datasets = datasets.ImageFolder(valdir, transforms.Compose(val_aug))

    # shuffle=False because RandomIdentitySampler controls the ordering
    # (4 instances per identity per batch, as the triplet loss requires).
    train_loader = torch.utils.data.DataLoader(train_datasets,
                                               batch_size=cfg.CLS.train_batch, shuffle=False,
                                               sampler=RandomIdentitySampler(train_datasets, num_instances=4),
                                               num_workers=cfg.workers, pin_memory=True, drop_last=True)

    if cfg.CLS.validate or cfg.CLS.evaluate:
        val_loader = torch.utils.data.DataLoader(val_datasets,
                                                 batch_size=cfg.CLS.test_batch, shuffle=False,
                                                 num_workers=cfg.workers, pin_memory=True, drop_last=True)

    # Create model
    model = models.__dict__[cfg.CLS.arch]()
    print(model)
    # Measure FLOPs & params on a throwaway instance, then rebuild the model
    # (measure_model mutates the instance it profiles).
    n_flops, n_convops, n_params = measure_model(model, cfg.CLS.crop_size, cfg.CLS.crop_size)
    print('==> FLOPs: {:.4f}M, Conv_FLOPs: {:.4f}M, Params: {:.4f}M'.
          format(n_flops / 1e6, n_convops / 1e6, n_params / 1e6))
    del model
    model = models.__dict__[cfg.CLS.arch]()

    # Load pre-trained weights, keeping only layers whose shapes match.
    if cfg.CLS.pretrained:
        print("==> Using pre-trained model '{}'".format(cfg.CLS.pretrained))
        pretrained_dict = torch.load(cfg.CLS.pretrained)
        # Checkpoints are either a raw state dict or wrap it under 'state_dict'.
        if isinstance(pretrained_dict, dict) and 'state_dict' in pretrained_dict:
            pretrained_dict = pretrained_dict['state_dict']
        model_dict = model.state_dict()
        updated_dict, match_layers, mismatch_layers = weight_filler(pretrained_dict, model_dict)
        model_dict.update(updated_dict)
        model.load_state_dict(model_dict)
    else:
        print("==> Creating model '{}'".format(cfg.CLS.arch))

    # Define loss function (criterion) and optimizer
    criterion = TripletLoss(margin=args.margin).cuda()
    if cfg.CLS.pretrained:
        # Freshly initialised (shape-mismatched) layers train at 10x the base LR.
        def param_filter(param):
            return param[1]

        new_params = map(param_filter, filter(lambda p: p[0] in mismatch_layers, model.named_parameters()))
        base_params = map(param_filter, filter(lambda p: p[0] in match_layers, model.named_parameters()))
        model_params = [{'params': base_params}, {'params': new_params, 'lr': cfg.CLS.base_lr * 10}]
    else:
        model_params = model.parameters()
    model = torch.nn.DataParallel(model).cuda()
    cudnn.benchmark = True
    optimizer = optim.SGD(model_params, lr=cfg.CLS.base_lr, momentum=cfg.CLS.momentum,
                          weight_decay=cfg.CLS.weight_decay)

    # Evaluate model
    if cfg.CLS.evaluate:
        print('\n==> Evaluation only')
        test_loss, test_top1, test_top5 = test(val_loader, model, criterion, start_epoch, USE_CUDA)
        print('==> Test Loss: {:.8f} | Test_top1: {:.4f}% | Test_top5: {:.4f}%'.format(test_loss, test_top1, test_top5))
        return

    # Resume training
    title = 'Pytorch-CLS-' + cfg.CLS.arch
    if cfg.CLS.resume:
        # Load checkpoint.
        print("==> Resuming from checkpoint '{}'".format(cfg.CLS.resume))
        assert os.path.isfile(cfg.CLS.resume), 'Error: no checkpoint directory found!'
        checkpoint = torch.load(cfg.CLS.resume)
        BEST_ACC = checkpoint['best_acc']
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        logger = Logger(os.path.join(cfg.CLS.ckpt, 'log.txt'), title=title, resume=True)
    else:
        logger = Logger(os.path.join(cfg.CLS.ckpt, 'log.txt'), title=title)
        logger.set_names(['Learning Rate', 'Train Loss', 'Train Acc.'])

    # BUGFIX: track the best accuracy across epochs here. Previously best_acc
    # was only assigned inside the validate branch (from the never-updated
    # BEST_ACC constant) and the final print referenced an undefined best_top1.
    best_acc = BEST_ACC

    # Train and val
    for epoch in range(start_epoch, cfg.CLS.epochs):
        print('\nEpoch: [{}/{}] | LR: {:.8f}'.format(epoch + 1, cfg.CLS.epochs, LR_STATE))

        train_loss, train_acc = train(train_loader, model, criterion, optimizer, epoch, USE_CUDA)
        if cfg.CLS.validate:
            top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)
            best_acc = max(top1, best_acc)

            # BUGFIX: the format string had four placeholders but three args,
            # which raised IndexError; the stray trailing '{}' is removed.
            print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}\n'.
                  format(epoch, top1, best_acc))

        # Append logger file
        logger.append([LR_STATE, train_loss, train_acc])

        # Save model
        save_checkpoint(model, optimizer, train_acc, epoch)
        # Draw curve (best effort — plotting failures must not kill training).
        try:
            draw_curve(cfg.CLS.arch, cfg.CLS.ckpt)
            print('==> Success saving log curve...')
        except Exception:
            print('==> Saving log curve error...')

    logger.close()
    # Best-effort export of the final curves and a timestamped copy of the log.
    try:
        savefig(os.path.join(cfg.CLS.ckpt, 'log.eps'))
        shutil.copyfile(os.path.join(cfg.CLS.ckpt, 'log.txt'), os.path.join(cfg.CLS.ckpt, 'log{}.txt'.format(
            datetime.datetime.now().strftime('%Y%m%d%H%M%S'))))
    except Exception:
        print('Copy log error.')
    print('==> Training Done!')
    print('==> Best acc: {:.4f}%'.format(best_acc))
Beispiel #13
0
def train_correspondence_block(json_file, cls, gpu, synthetic, epochs=50, batch_size=64, val_ratio=0.2,
                               save_model=True, iter_print=10):
    """
    Train a UNet correspondence block for one class using real and/or synthetic data.

    Args:
        json_file: .txt file which stores the directory of the training images
        cls: the class to train on, from 1 to 6
        gpu: gpu id to use
        synthetic: whether use synthetic data or not
        epochs: number of epochs to train
        batch_size: batch size
        val_ratio: validation ratio during training
        save_model: save model or not
        iter_print: print training results per iter_print iterations
    """
    # Color-jitter plus Gaussian noise augmentation on 64x64 inputs.
    train_data = NOCSDataset(json_file, cls, synthetic=synthetic, resize=64,
                             transform=transforms.Compose([transforms.ColorJitter(brightness=(0.6, 1.4),
                                                                                  contrast=(0.8, 1.2),
                                                                                  saturation=(0.8, 1.2),
                                                                                  hue=(-0.01, 0.01)),
                                                           AddGaussianNoise(10 / 255)]))
    print('Size of trainset ', len(train_data))

    # Random train/validation split by shuffled index.
    indices = list(range(len(train_data)))
    np.random.shuffle(indices)
    num_train = len(indices)
    split = int(np.floor(num_train * val_ratio))
    train_idx, valid_idx = indices[split:], indices[:split]

    # define samplers for obtaining training and validation batches
    train_sampler = SubsetRandomSampler(train_idx)
    valid_sampler = SubsetRandomSampler(valid_idx)

    # prepare data loaders (combine dataset and sampler)
    num_workers = 4
    train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
                                               sampler=train_sampler, num_workers=num_workers)
    val_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
                                             sampler=valid_sampler, num_workers=num_workers)
    device = torch.device(f"cuda:{gpu}" if torch.cuda.is_available() else "cpu")
    print("device: ", f"cuda:{gpu}" if torch.cuda.is_available() else "cpu")

    # architecture for correspondence block - 13 objects + backgound = 14 channels for ID masks
    correspondence_block = UNet()
    correspondence_block = correspondence_block.to(device)

    # One cross-entropy head per NOCS coordinate axis.
    criterion_x = nn.CrossEntropyLoss()
    criterion_y = nn.CrossEntropyLoss()
    criterion_z = nn.CrossEntropyLoss()

    # specify optimizer
    optimizer = optim.Adam(correspondence_block.parameters(), lr=3e-4, weight_decay=3e-5)

    # training loop
    val_loss_min = np.inf  # BUGFIX: np.Inf alias was removed in NumPy 2.0
    save_path = model_save_path(cls)
    writer = SummaryWriter(save_path.parent / save_path.stem / datetime.now().strftime("%d%H%M"))

    for epoch in range(epochs):
        t0 = time.time()
        train_loss = 0
        val_loss = 0
        print("------ Epoch ", epoch, " ---------")
        correspondence_block.train()
        print("training")
        # (renamed loop variable: `iter` shadowed the builtin)
        for batch_idx, (rgb, xmask, ymask, zmask, adr_rgb) in enumerate(train_loader):

            rgb = rgb.to(device)
            xmask = xmask.to(device)
            ymask = ymask.to(device)
            zmask = zmask.to(device)

            optimizer.zero_grad()
            xmask_pred, ymask_pred, zmask_pred = correspondence_block(rgb)

            # Sum of the per-axis classification losses.
            loss_x = criterion_x(xmask_pred, xmask)
            loss_y = criterion_y(ymask_pred, ymask)
            loss_z = criterion_z(zmask_pred, zmask)

            loss = loss_x + loss_y + loss_z

            loss.backward()
            optimizer.step()
            train_loss += loss.item()

            if batch_idx % iter_print == 0:
                print(
                    'Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.
                        format(epoch, batch_idx * len(rgb), len(train_loader.dataset),
                               100. * batch_idx / len(train_loader), loss.item()))

        correspondence_block.eval()

        print("validating")
        # No gradients needed during validation — saves memory and compute.
        with torch.no_grad():
            for rgb, xmask, ymask, zmask, _ in val_loader:
                rgb = rgb.to(device)
                xmask = xmask.to(device)
                ymask = ymask.to(device)
                zmask = zmask.to(device)

                xmask_pred, ymask_pred, zmask_pred = correspondence_block(rgb)

                loss_x = criterion_x(xmask_pred, xmask)
                loss_y = criterion_y(ymask_pred, ymask)
                loss_z = criterion_z(zmask_pred, zmask)

                loss = loss_x + loss_y + loss_z
                val_loss += loss.item()

        # calculate average losses (per sample, using the sampler sizes)
        train_loss = train_loss / len(train_loader.sampler)
        val_loss = val_loss / len(val_loader.sampler)
        t_end = time.time()
        print(f'{t_end - t0} seconds')
        writer.add_scalar('train loss', train_loss, epoch)
        writer.add_scalar('val loss', val_loss, epoch)
        writer.add_scalar('epoch time', t_end - t0, epoch)

        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch, train_loss, val_loss))

        # save model if validation loss has decreased
        if val_loss <= val_loss_min:
            print('Validation loss decreased ({:.6f} --> {:.6f}).  Saving model ...'.format(
                val_loss_min,
                val_loss))
            if save_model:
                torch.save(correspondence_block.state_dict(), save_path)
            val_loss_min = val_loss
    writer.close()
Beispiel #14
0
import torch.nn.parallel
import os
import torchvision.transforms as transforms
from torch.autograd import Variable
import torchvision.datasets as datasets
import numpy as np
import csv
import matplotlib.image as mpimg

# CIFAR-10 channel statistics used for input normalisation.
normalize = transforms.Normalize(mean=[0.4914, 0.4822, 0.4465],
                                 std=[0.2023, 0.1994, 0.2010])
# Deterministic CIFAR-10 test-set loader used for ensemble evaluation.
val_loader = torch.utils.data.DataLoader(datasets.CIFAR10(
    root='./data',
    train=False,
    transform=transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])),
                                         batch_size=128,
                                         shuffle=False,
                                         num_workers=2,
                                         pin_memory=True)

# Number of test images to evaluate and the checkpoint ids of the member nets.
total_num = 5000
net_names = ['012052', '012072']
# NOTE(review): semantics of this tuple are defined by downstream code — verify.
parameters = [(1, 20, 16.0)]
from resnet import ResNet18 as n00


class Ensemble(nn.Module):
    def __init__(self, net1, net2, net3, net4, net5):
        super(Ensemble, self).__init__()
Beispiel #15
0
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")

plt.ion()  # interactive mode

# Force CPU execution; swap in the commented line to use CUDA when available.
device = torch.device("cpu")
#device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

#Parameters
#learning_rate = 1e-4
epochs = 10

# Training augmentation with ImageNet normalisation statistics.
# NOTE(review): RandomSizedCrop is the deprecated alias of RandomResizedCrop.
data_transform_train = transforms.Compose([
    transforms.RandomSizedCrop(224),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

# Deterministic validation pipeline.
data_transform_val = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

#Loading Dataset
miniImageNet_trainset = datasets.ImageFolder(root='/home/nvidia/miniset/train',
                                             transform=data_transform_train)

miniImageNet_validationset = datasets.ImageFolder(
Beispiel #16
0
# Number of anchor boxes predicted per grid cell.
n_box = 5

# Output grid dimensions — img_w/img_h/box_size are defined earlier in this file.
grid_w = int(img_w / box_size[1])
grid_h = int(img_h / box_size[0])
save_path = save_dir + save_name + str(nn) + ".pt"
# Per-box output length: presumably 4 box coords + objectness + one score per
# class (standard YOLO layout) — TODO confirm against MakeMat/YoloNetOrig.
out_len = 5 + nclazz
fin_size = n_box * out_len
input_vec = [grid_w, grid_h, n_box, out_len]
anchors = np.array(anchors)

# Validation dataset producing target matrices for YOLO-style evaluation.
animal_dataset_valid = AnimalBoundBoxDataset(root_dir=files_location_valid,
                                                inputvec=input_vec,
                                                anchors=anchors,
                                                maxann=max_annotations,
                                                transform=transforms.Compose([
                                                    MakeMat(input_vec, anchors),
                                                    ToTensor()
                                                    ]),
                                                gray=grey_tf
                                                )

animalloader_valid = DataLoader(animal_dataset_valid, batch_size=1, shuffle=False)

# Rebuild the network from pretrained layer weights and restore the checkpoint.
layerlist = get_weights(weightspath)

net = YoloNetOrig(layerlist, fin_size, channels_in)
net = net.to(device)
net.load_state_dict(torch.load(save_path))
net.eval()

# True-positive / false-positive counters for the evaluation loop below.
tptp = 0
fpfp = 0
# pytorch_learning_19_validation
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms


# Hyper-parameters for the MNIST run.
batch_size = 200
learning_rate = 0.01
epochs = 10

# Training split; (0.1307,) / (0.3081,) are the standard MNIST mean/std.
train_db = datasets.MNIST('../pytorch_learning_02_MNIST/mnist_data', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ]))
train_loader = torch.utils.data.DataLoader(
    train_db,
    batch_size=batch_size, shuffle=True)

# Held-out test split with the same normalisation (no download flag: the
# files were already fetched by the training split above).
test_db = datasets.MNIST('../pytorch_learning_02_MNIST/mnist_data', train=False, transform=transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
]))
test_loader = torch.utils.data.DataLoader(test_db,
    batch_size=batch_size, shuffle=True)


print('train:', len(train_db), 'test:', len(test_db))
# Carve a 10k validation set out of the 60k training examples.
# NOTE: this rebinds train_db to the 50k Subset; train_loader above was
# built earlier and still iterates the full 60k dataset.
train_db, val_db = torch.utils.data.random_split(train_db, [50000, 10000])
import torch.nn.functional as F
import torch.optim as opt
from torchvision import datasets,transforms
from visdom import Visdom
import numpy

# Preset hyper-parameters: batch size, learning rate and number of epochs.
batch_size = 200
learning_rate = 0.01
epochs = 10
# Visdom client for live training visualisation (requires a running server).
viz = Visdom()
# Build the training set with PyTorch's built-in MNIST dataset and loader.
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data',train=True,download=True,
                    transform=transforms.Compose([
                        transforms.ToTensor(),
                        transforms.Normalize((0.1307,),(0.3081,))
                    ])),   # dataset directory, train/download flags; convert to Tensor and normalise
    batch_size=batch_size,shuffle=True)    # batch size; shuffle the dataset

# Build the test set the same way (same normalisation, no shuffled epochs needed
# for correctness, but shuffle=True is harmless for evaluation).
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data',train=False,download=True,
                    transform=transforms.Compose([
                        transforms.ToTensor(),
                        transforms.Normalize((0.1307,),(0.3081,))
                    ])),
    batch_size=batch_size,shuffle=True)
#定义一个封装好的类
class MLP(nn.Module):
Beispiel #19
0
                    action='store_true',
                    help='resume from checkpoint')
args = parser.parse_args()

# Fixed seeds for reproducibility (CPU and, if available, GPU RNGs).
use_cuda = torch.cuda.is_available()
torch.manual_seed(1)
if use_cuda:
    torch.cuda.manual_seed(1)
best_acc = 0  # best test accuracy
start_epoch = 0  # start from epoch 0 or last checkpoint epoch

# Data
print('==> Preparing data..')
# Training transform: the usual CIFAR-10 crop/flip augmentations are left
# commented out — only normalisation is applied, same as for testing.
transform_train = transforms.Compose([
    #    transforms.RandomCrop(32, padding=4),
    #    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

trainset = torchvision.datasets.CIFAR10(
    root='/homes/crosarko/pytorch-cifar/data',
    train=True,
    download=True,
    transform=transform_train)
# In-place shuffle of the label list: decouples labels from images,
# apparently a random-label (memorisation) experiment.
# NOTE(review): `train_labels` is a legacy attribute; newer torchvision
# exposes it as `targets` — confirm against the installed version.
shuffle(trainset.train_labels)
print(trainset.train_labels)
Beispiel #20
0
def main():
    """Train an MNIST classifier (full-resolution or quantised variant).

    Parses command-line options, builds the network selected by
    --quant-type ('dumb' -> DumbNet, 'lit' -> LitNet, anything else ->
    full-resolution Net), trains for --epochs epochs while recording
    per-epoch train/test losses, optionally saves the weights and loss
    history to disk, and finally plots both loss curves.
    """
    # Wall-clock start; its last three digits seed the default --file-name.
    start = time.time()
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size',
                        type=int,
                        default=64,
                        metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=1000,
                        metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs',
                        type=int,
                        default=5,
                        metavar='N',
                        help='number of epochs to train (default: 5)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.01,
                        metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.5,
                        metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='how many batches to wait before logging training status')
    # NOTE(review): store_true combined with default=True means this flag can
    # never be turned off from the command line — the model is always saved.
    parser.add_argument('--save-model',
                        action='store_true',
                        default=True,
                        help='For Saving the current Model')
    parser.add_argument('--file-name',
                        type=str,
                        default='test_' + str(int(start))[-3:],
                        metavar='filename',
                        help='Name of file to store model and losses')
    parser.add_argument(
        '--quant-type',
        type=str,
        default='none',
        metavar='qtype',
        help='Type of quantisation used on activation functions')
    parser.add_argument('--bit-res',
                        type=int,
                        default=4,
                        metavar='bitres',
                        help='Bit resolution of activation funtion')
    args = parser.parse_args()

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    device = torch.device("cuda" if use_cuda else "cpu")

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    # MNIST loaders; (0.1307,) / (0.3081,) are the standard MNIST mean/std.
    train_loader = torch.utils.data.DataLoader(datasets.MNIST(
        '../data',
        train=True,
        download=True,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ])),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               **kwargs)
    test_loader = torch.utils.data.DataLoader(datasets.MNIST(
        '../data',
        train=False,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ])),
                                              batch_size=args.test_batch_size,
                                              shuffle=True,
                                              **kwargs)

    # Select the network variant; any unrecognised --quant-type silently
    # falls back to the full-resolution Net.
    qt = args.quant_type
    if qt == 'dumb':
        model = DumbNet().to(device)
        print("Building dumb {0} bit network".format(args.bit_res))
    elif qt == 'lit':
        model = LitNet().to(device)
        print("Building LIT {0} bit network".format(args.bit_res))
    else:
        model = Net().to(device)
        print("\nBuilding full resolution network")

    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum)

    # One slot per epoch for the recorded train/test losses.
    losses_train = np.zeros((args.epochs))
    losses_test = np.zeros((args.epochs))

    # Reset the clock: from here on `start` measures training time only
    # (the original value was already baked into the default file name).
    start = time.time()

    for epoch in range(1, args.epochs + 1):
        epoch_train_loss = train(args, model, device, train_loader, optimizer,
                                 epoch)
        epoch_test_loss = test(args, model, device, test_loader)
        losses_train[epoch - 1] = epoch_train_loss
        losses_test[epoch - 1] = epoch_test_loss
        current_time = time.time() - start
        print('\nEpoch: {:d}'.format(epoch))
        print('Training set loss: {:.6f}'.format(epoch_train_loss))
        print('Test set loss: {:.6f}'.format(epoch_test_loss))
        print('Time taken: {:.6f}s'.format(current_time))

    if (args.save_model):
        # Persist weights under models/ and the loss history under data/.
        if not os.path.exists('models'):
            os.mkdir('models')
        torch.save(model.state_dict(), 'models/' + args.file_name + '.pt')
        if not os.path.exists('data'):
            os.mkdir('data')
        losses = np.stack((losses_train, losses_test), axis=1)
        np.savetxt('data/' + args.file_name + '.txt', losses, delimiter=', ')

    # Plot both loss curves with a hand-built legend (colours match
    # matplotlib's default plotting order: blue = train, orange = test).
    fig = plt.figure()
    ax = fig.gca()
    ax.set_title('Loss per Epoch')
    plt.plot(losses_train)
    plt.plot(losses_test)
    plt.ylabel('loss')
    plt.xlabel('epoch')
    blue_line = mpatches.Patch(color='blue', label='Training Loss')
    orange_line = mpatches.Patch(color='orange', label='Testing Loss')
    plt.legend(handles=[blue_line, orange_line])
    plt.show()
Beispiel #21
0
from torch.optim import lr_scheduler
import numpy as np
from torch.utils.data import DataLoader
import os
import matplotlib.pyplot as plt
import time
import copy
import torch.optim as optim

# Work around the "duplicate OpenMP runtime" abort that MKL/libiomp can
# trigger when multiple copies of the runtime are loaded.
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

# Per-split preprocessing: random crop/flip augmentation for training,
# deterministic resize + centre crop for validation; both normalise with
# the standard ImageNet channel statistics.
data_transforms = {
    'train':
    transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'val':
    transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}

# Root folder containing train/ and val/ subdirectories (ants/bees data).
data_dir = '../data_files/data/hymenoptera_data'
image_datasets = {
    x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])
    for x in ['train', 'val']
Beispiel #22
0
def main():
    """Train/evaluate an MNIST classifier driven by command-line flags.

    Besides plain training, flags select inspection modes (visualising
    first-layer conv filters, showing mis-labelled test images, t-SNE),
    evaluation of a saved model on the official test set, and a
    --traintest sweep that retrains on decreasing fractions of the data
    and plots loss/accuracy against training-set size.
    """
    # Training settings
    # Use the command line to modify the default settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size', type=int, default=32, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=10, metavar='N',
                        help='number of epochs to train (default: 14)')
    parser.add_argument('--lr', type=float, default=1.0, metavar='LR',
                        help='learning rate (default: 1.0)')
    parser.add_argument('--step', type=int, default=1, metavar='N',
                        help='number of epochs between learning rate reductions (default: 1)')
    parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
                        help='Learning rate step gamma (default: 0.7)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')

    parser.add_argument('--traintest', action='store_true', default=False,
                        help='train on fractions of data and test')
    parser.add_argument('--show_wrong', action='store_true', default=False,
                        help='show incorrectly labelled test images')
    parser.add_argument('--show_features', action='store_true', default=False,
                        help='show first layer features')
    parser.add_argument('--show_tsne', action='store_true', default=False,
                        help='show first layer features')
    parser.add_argument('--evaluate', action='store_true', default=False,
                        help='evaluate your model on the official test set')
    parser.add_argument('--load-model', type=str,
                        help='model file path')

    # NOTE(review): store_true with default=True means this flag can never
    # be turned off from the command line — the model is always saved.
    parser.add_argument('--save-model', action='store_true', default=True,
                        help='For Saving the current Model')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()

    # Seed every RNG in play (torch, numpy, stdlib) for reproducibility.
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")
    print("Device:", device)

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    # show features
    # Inspection mode: plot up to nine first-layer conv filters of a
    # previously saved model, then exit.
    if args.show_features:
        assert os.path.exists(args.load_model)

        model = Net().to(device)
        model.load_state_dict(torch.load(args.load_model))

        conv1 = model.conv1.weight.detach().cpu()

        plt.figure()
        for i, filt in enumerate(conv1):
            # 3x3 subplot grid; stop after the ninth filter.
            plt.subplot(331 + i)
            plt.axis('off')
            plt.title(f'Filter {i+1}')
            plt.imshow(filt[0])
            if i == 8:
                break
        plt.show()
        return


    # Evaluate on the official test set
    if args.evaluate or args.show_wrong:
        assert os.path.exists(args.load_model)

        # Set the test model
        model = Net().to(device)
        model.load_state_dict(torch.load(args.load_model))

        test_dataset = datasets.MNIST('../data', train=False,
                    transform=transforms.Compose([
                        transforms.ToTensor(),
                        transforms.Normalize((0.1307,), (0.3081,))
                    ]))

        test_loader = torch.utils.data.DataLoader(
            test_dataset, batch_size=args.test_batch_size, shuffle=True, **kwargs)

        test(model, device, test_loader, show_wrong=args.show_wrong, tsne=args.show_tsne)

        return


    # For the fraction sweep, keep a separate loader over the official
    # test split for the final evaluation of each run.
    if args.traintest:
        test_dataset = datasets.MNIST('../data', train=False,
                                      transform=transforms.Compose([
                                          transforms.ToTensor(),
                                          transforms.Normalize((0.1307,),
                                                               (0.3081,))
                                      ]))

        test_loader = torch.utils.data.DataLoader(
            test_dataset, batch_size=args.test_batch_size, shuffle=True,
            **kwargs)

    # Pytorch has default MNIST dataloader which loads data at each iteration
    # Training uses a small random rotation as augmentation (the commented
    # transforms are earlier experiments); validation is normalised only.
    data_transforms = {"train": transforms.Compose([
                            transforms.RandomRotation(degrees=10),
                            #transforms.RandomAffine(degrees=10, scale=(.9, .9)),# scale=(.1, .1), shear=.1),
                            transforms.ToTensor(),
                            #transforms.RandomErasing(p=0.8, scale=(.05, .05)),
                            #transforms.Normalize((0.1307,), (0.3081,))
                            ]),
                        "val": transforms.Compose([
                            transforms.ToTensor(),
                            transforms.Normalize((0.1307,), (0.3081,))
                        ])
                    }
    train_dataset = datasets.MNIST('../data', train=True, download=True, transform=data_transforms["train"])
    val_dataset = datasets.MNIST('../data', train=False, download=True, transform=data_transforms["val"])

    # You can assign indices for training/validation or use a random subset for
    # training by using SubsetRandomSampler. Right now the train and validation
    # sets are built from the same indices - this is bad! Change it so that
    # the training and validation sets are disjoint and have the correct relative sizes.

    # Group example indices by class label for a stratified 85/15 split.
    class_data = [[] for _ in range(10)]

    for i, elem in enumerate(tqdm(val_dataset)):
        class_data[elem[1]].append(i)

    # NOTE(review): these indices are computed over val_dataset (the 10k
    # MNIST test split) but subset_indices_train is later used to sample
    # train_dataset (the 60k training split), so the stratification does not
    # match the labels actually trained on — verify this is intended.
    split = .85
    subset_indices_train = []
    subset_indices_valid = []
    for i in range(10):
        np.random.shuffle(class_data[i])
        subset_indices_train += class_data[i][:int(len(class_data[i])*split)]
        subset_indices_valid += class_data[i][int(len(class_data[i])*split):]

    # --traintest sweeps decreasing training fractions; otherwise one run.
    if args.traintest:
        train_fracs = [1., .5, .25, .125, .0625]
    else:
        train_fracs = [1.]

    train_frac_losses = []
    test_frac_losses = []
    train_frac_accs = []
    test_frac_accs = []
    for train_frac in train_fracs:
        train_loader = torch.utils.data.DataLoader(
            train_dataset, batch_size=args.batch_size,
            sampler=SubsetRandomSampler(subset_indices_train[:int(len(subset_indices_train)*train_frac)])
        )
        val_loader = torch.utils.data.DataLoader(
            val_dataset, batch_size=args.test_batch_size,
            sampler=SubsetRandomSampler(subset_indices_valid[:int(len(subset_indices_valid)*train_frac)])
        )

        #for imgs, label in train_loader:
        #    for img in imgs:
        #        plt.figure()
        #        plt.imshow(img[0])
        #        plt.show()

        # Load your model [fcNet, ConvNet, Net]
        model = Net().to(device)

        # Try different optimzers here [Adam, SGD, RMSprop]
        optimizer = optim.Adadelta(model.parameters(), lr=args.lr)

        # Set your learning rate scheduler
        scheduler = StepLR(optimizer, step_size=args.step, gamma=args.gamma)

        # Training loop
        train_losses = []
        test_losses = []
        train_accs = []
        test_accs = []
        for epoch in range(1, args.epochs + 1):
            train_loss, train_acc = train(args, model, device, train_loader, optimizer, epoch)
            test_loss, test_acc = test(model, device, val_loader)
            scheduler.step()    # learning rate scheduler
            train_losses.append(train_loss)
            train_accs.append(train_acc)
            test_losses.append(test_loss)
            test_accs.append(test_acc)
            # You may optionally save your model at each epoch here

        if not args.traintest:
            # Single run: plot loss and accuracy curves over epochs.
            figs, axs = plt.subplots(2)
            axs[0].set_title("Loss")
            axs[0].set_ylabel("Loss")
            axs[0].plot(test_losses)
            axs[0].plot(train_losses)
            axs[1].set_title("Accuracy")
            axs[1].set_ylabel("Accuracy")
            axs[1].plot(test_accs)
            axs[1].set_xlabel("Epoch")
            plt.show()
        else:
            # Sweep run: record the final metrics for this fraction.
            test_loss, test_acc = test(model, device, test_loader)
            test_frac_losses.append(test_loss)
            train_frac_losses.append(train_loss)
            test_frac_accs.append(test_acc)
            train_frac_accs.append(train_acc)

    if args.traintest:
        # Log-log plots of loss/accuracy vs. fraction of training data.
        plt.figure()
        plt.subplot(2, 1, 1)
        plt.title("Loss")
        plt.plot(train_fracs, test_frac_losses)
        plt.plot(train_fracs, train_frac_losses)
        plt.legend(["Test", "Train"])
        plt.ylabel("Loss (log)")
        plt.yscale("log")
        plt.xscale("log")

        plt.subplot(2, 1, 2)
        plt.title("Accuracy")
        plt.plot(train_fracs, test_frac_accs)
        plt.plot(train_fracs, train_frac_accs)
        plt.legend(["Test", "Train"])
        plt.xlabel("Fraction of Training Set (log)")
        plt.ylabel("Accuracy (log)")
        plt.yscale("log")
        plt.xscale("log")
        plt.show()

    if args.save_model:
        torch.save(model.state_dict(), "mnist_model.pt")
Beispiel #23
0
# Crop the face(s) from the input image with a Haar cascade detector.
faceCascade = cv2.CascadeClassifier(detectFace)
image = cv2.imread(imagePath)
faces = faceCascade.detectMultiScale(image,
                                     scaleFactor=1.1,
                                     minNeighbors=5,
                                     minSize=(10, 10))
for (x, y, w, h) in faces:
    # NOTE(review): every detection writes to the same path, so only the
    # last detected face survives — confirm that is intended.
    cv2.imwrite('./image/crop/' + 'test_face.jpg', image[y:y + h, x:x + w])
cv2.waitKey(0)

# Preprocess the cropped face: resize, convert to tensor, and normalise
# with the standard ImageNet channel statistics.
dataTransforms = transforms.Compose([
    transforms.Resize(inputSize),
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])

# Load the trained quality-assessment network and switch to eval mode.
net = ShuffleNetV2(inputSize)
checkpoint = torch.load(fiiqaWeight)
net.load_state_dict(checkpoint['net'])
net.eval()

# Load the face crop, batch it (unsqueeze adds the batch dim) and infer.
face = Image.open(facePath)
imgblob = dataTransforms(face).unsqueeze(0)
# FIX: the original called `torch.no_grad()` as a bare statement, which
# builds and immediately discards the context manager — gradients stayed
# enabled.  It must wrap the forward pass as a `with` block.
with torch.no_grad():
    predict = F.softmax(net(imgblob), dim=1)
    torch.cuda.manual_seed_all(opt.manualSeed)

# Let cuDNN auto-tune convolution algorithms (fastest for fixed input sizes).
cudnn.benchmark = True

# Warn when a GPU is present but the user did not request it via --cuda.
if torch.cuda.is_available() and not opt.cuda:
    print(
        "WARNING: You have a CUDA device, so you should probably run with --cuda"
    )
# Build the training dataset for the selected source.  Both branches resize
# and centre-crop to opt.imageSize and normalise each channel to [-1, 1]
# (mean 0.5 / std 0.5), the usual input range for DCGAN-style generators.
# FIX: transforms.Scale is a deprecated alias (removed in modern
# torchvision); transforms.Resize is the equivalent supported transform.
if opt.dataset in ['imagenet', 'folder', 'lfw']:
    # folder dataset
    dataset = dset.ImageFolder(root=opt.dataroot,
                               transform=transforms.Compose([
                                   transforms.Resize(opt.imageSize),
                                   transforms.CenterCrop(opt.imageSize),
                                   transforms.ToTensor(),
                                   transforms.Normalize((0.5, 0.5, 0.5),
                                                        (0.5, 0.5, 0.5)),
                               ]))
elif opt.dataset == 'lsun':
    # LSUN bedrooms from an lmdb database.
    dataset = dset.LSUN(db_path=opt.dataroot,
                        classes=['bedroom_train'],
                        transform=transforms.Compose([
                            transforms.Resize(opt.imageSize),
                            transforms.CenterCrop(opt.imageSize),
                            transforms.ToTensor(),
                            transforms.Normalize((0.5, 0.5, 0.5),
                                                 (0.5, 0.5, 0.5)),
                        ]))
elif opt.dataset == 'cifar10':
    dataset = dset.CIFAR10(root=opt.dataroot,
def main():
  """Train a CIFAR-style classifier with logging, checkpointing and resume.

  Relies on module-level `args` (argparse namespace) and helpers defined
  elsewhere in the file: print_log, train, validate, RecorderMeter,
  save_checkpoint, adjust_learning_rate, convert_secs2time, time_string.
  """
  # Init logger
  
  if not os.path.isdir(args.save_path):
    os.makedirs(args.save_path)
  log = open(os.path.join(args.save_path, 'log_seed_{}.txt'.format(args.manualSeed)), 'w')
  print_log('save path : {}'.format(args.save_path), log)
  state = {k: v for k, v in args._get_kwargs()}
  print_log(state, log)
  print_log("Random Seed: {}".format(args.manualSeed), log)
  print_log("python version : {}".format(sys.version.replace('\n', ' ')), log)
  print_log("torch  version : {}".format(torch.__version__), log)
  print_log("cudnn  version : {}".format(torch.backends.cudnn.version()), log)

  # Init dataset
  
  if not os.path.exists(args.data_path):
    os.makedirs(args.data_path)

  # Per-channel mean/std given as raw pixel values divided by 255.
  # NOTE(review): only cifar10/cifar100 get statistics here, so the
  # svhn/stl10 branches below are unreachable — any other dataset asserts.
  if args.dataset == 'cifar10':
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
  elif args.dataset == 'cifar100':
    mean = [x / 255 for x in [129.3, 124.1, 112.4]]
    std = [x / 255 for x in [68.2, 65.4, 70.4]]
  else:
    assert False, "Unknow dataset : {}".format(args.dataset)

  # Standard CIFAR augmentation (flip + padded random crop) for training;
  # normalisation only for testing.
  train_transform = transforms.Compose(
    [transforms.RandomHorizontalFlip(), transforms.RandomCrop(32, padding=4), transforms.ToTensor(),
     transforms.Normalize(mean, std)])
  test_transform = transforms.Compose(
    [transforms.ToTensor(), transforms.Normalize(mean, std)])

  if args.dataset == 'cifar10':
    train_data = dset.CIFAR10(args.data_path, train=True, transform=train_transform, download=True)
    test_data = dset.CIFAR10(args.data_path, train=False, transform=test_transform, download=True)
    num_classes = 10
  elif args.dataset == 'cifar100':
    train_data = dset.CIFAR100(args.data_path, train=True, transform=train_transform, download=True)
    test_data = dset.CIFAR100(args.data_path, train=False, transform=test_transform, download=True)
    num_classes = 100
  elif args.dataset == 'svhn':
    train_data = dset.SVHN(args.data_path, split='train', transform=train_transform, download=True)
    test_data = dset.SVHN(args.data_path, split='test', transform=test_transform, download=True)
    num_classes = 10
  elif args.dataset == 'stl10':
    train_data = dset.STL10(args.data_path, split='train', transform=train_transform, download=True)
    test_data = dset.STL10(args.data_path, split='test', transform=test_transform, download=True)
    num_classes = 10
  elif args.dataset == 'imagenet':
    assert False, 'Do not finish imagenet code'
  else:
    assert False, 'Do not support dataset : {}'.format(args.dataset)

  train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=True,
                         num_workers=args.workers, pin_memory=True)
  test_loader = torch.utils.data.DataLoader(test_data, batch_size=args.batch_size, shuffle=False,
                        num_workers=args.workers, pin_memory=True)

  print_log("=> creating model '{}'".format(args.arch), log)
  # Init model, criterion, and optimizer
  net = models.__dict__[args.arch](num_classes)
  print_log("=> network :\n {}".format(net), log)

  # Replicate across the first args.ngpu GPUs.
  net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))

  # define loss function (criterion) and optimizer
  criterion = torch.nn.CrossEntropyLoss()
  optimizer = torch.optim.SGD(net.parameters(), state['learning_rate'], momentum=state['momentum'],
                weight_decay=state['decay'], nesterov=False)
  #optimizer = AccSGD(net.parameters(), state['learning_rate'], kappa = 1000.0, xi = 10.0)
  #optimizer.zero_grad()
  #loss_fn(model(input), target).backward()
 # optimizer.step()              
  if args.use_cuda:
    net.cuda()
    criterion.cuda()

  recorder = RecorderMeter(args.epochs)
  # optionally resume from a checkpoint
  if args.resume:
    if os.path.isfile(args.resume):
      print_log("=> loading checkpoint '{}'".format(args.resume), log)
      checkpoint = torch.load(args.resume)
      recorder = checkpoint['recorder']
      args.start_epoch = checkpoint['epoch']
      net.load_state_dict(checkpoint['state_dict'])
      optimizer.load_state_dict(checkpoint['optimizer'])
      print_log("=> loaded checkpoint '{}' (epoch {})" .format(args.resume, checkpoint['epoch']), log)
    else:
      raise ValueError("=> no checkpoint found at '{}'".format(args.resume))
  else:
    print_log("=> do not use any checkpoint for {} model".format(args.arch), log)

  # Evaluation-only mode: run validation once and exit.
  if args.evaluate:
    validate(test_loader, net, criterion, log)
    return

  # Main loop
  start_time = time.time()
  epoch_time = AverageMeter()
  train_cc=0
  loss_e=0
  
  for epoch in range(args.start_epoch, args.epochs):
    current_learning_rate = adjust_learning_rate(optimizer, epoch, args.gammas, args.schedule,loss_e)
    # NOTE(review): the optimizer is rebuilt from scratch every epoch,
    # which discards accumulated momentum buffers — confirm intended.
    optimizer = torch.optim.SGD(net.parameters(), current_learning_rate, momentum=state['momentum'],
                weight_decay=state['decay'], nesterov=False)
    # Estimate remaining wall-clock time from the per-epoch average.
    need_hour, need_mins, need_secs = convert_secs2time(epoch_time.avg * (args.epochs-epoch))
    need_time = '[Need: {:02d}:{:02d}:{:02d}]'.format(need_hour, need_mins, need_secs)

    print_log('\n==>>{:s} [Epoch={:03d}/{:03d}] {:s} [learning_rate={:6.4f}]'.format(time_string(), epoch, args.epochs, need_time, current_learning_rate) \
                + ' [Best : Accuracy={:.2f}, Error={:.2f}]'.format(recorder.max_accuracy(False), 100-recorder.max_accuracy(False)), log)

    # train for one epoch
    train_acc, train_los,loss_e = train(train_loader, net, criterion, optimizer, epoch, log,train_cc)

    # evaluate on validation set
    #val_acc,   val_los   = extract_features(test_loader, net, criterion, log)
    val_acc,   val_los   = validate(test_loader, net, criterion, log)
    is_best = recorder.update(epoch, train_los, train_acc, val_los, val_acc)

    # Checkpoint every epoch; save_checkpoint also keeps the best model.
    save_checkpoint({
      'epoch': epoch + 1,
      'arch': args.arch,
      'state_dict': net.state_dict(),
      'recorder': recorder,
      'optimizer' : optimizer.state_dict(),
      'args'      : copy.deepcopy(args),
    }, is_best, args.save_path, 'sgd8_40lcheck.pth.tar')

    # measure elapsed time
    epoch_time.update(time.time() - start_time)
    start_time = time.time()
    recorder.plot_curve( os.path.join(args.save_path, 'sgd8_40l.png') )

  log.close()
Beispiel #26
0
    # init model: pick a torchvision ResNet by depth and replace the
    # ImageNet classification head with a single-logit output (binary task).
    device = torch.device("cuda")

    model_name = "resnet{depth}".format(depth=config.model_depth)
    model = torchvision.models.__dict__[model_name](pretrained=config.pretrained)
    model.fc = torch.nn.Linear(model.fc.in_features, 1)

    # set transforms: optional flip augmentation on top of ToTensor.
    # NOTE(review): the flips are applied AFTER ToTensor, i.e. on tensors —
    # this requires a torchvision version whose flips accept tensors; confirm.
    augs = [transforms.ToTensor()]

    if config.aug:
        augs.append(transforms.RandomHorizontalFlip(p=0.5))
        augs.append(transforms.RandomVerticalFlip(p=0.5))

    tfms = transforms.Compose(augs)

    # Two views of the same folder: augmented for training, plain for testing.
    dataset_aug = datasets.ImageFolder("data/ready_to_train", transform=tfms)
    dataset = datasets.ImageFolder("data/ready_to_train", transform=transforms.ToTensor())

    # 75/25 split; the identical fixed-seed generators make the two
    # random_split calls produce the same index partition, so trainset
    # (augmented) and testset (plain) are disjoint.
    # FIX: use len(dataset) instead of calling dataset.__len__() directly.
    train_len = int(len(dataset) * 0.75)
    test_len = len(dataset) - train_len
    trainset, _  = torch.utils.data.random_split(dataset_aug, [train_len, test_len], generator=torch.Generator().manual_seed(42))
    _ , testset  = torch.utils.data.random_split(dataset, [train_len, test_len], generator=torch.Generator().manual_seed(42))

    # loaders
    train_loader = torch.utils.data.DataLoader(
        trainset,
        batch_size=config.batch_size,
        shuffle=True
    )
def main():
    """Train or evaluate an ImageNet classifier described by the global ``args``.

    Builds train/val data loaders, constructs the model (optionally pretrained
    or resumed from a checkpoint), then either runs a single evaluation pass
    (``args.evaluate``) or the full train/validate loop, checkpointing the best
    model by validation accuracy.

    Relies on module-level helpers/globals defined elsewhere in this file:
    ``train``, ``test``, ``adjust_learning_rate``, ``save_checkpoint``,
    ``mkdir_p``, ``Logger``, ``get_model_complexity_info`` and the globals
    ``args``, ``state``, ``use_cuda``, ``best_acc``.
    """
    global best_acc
    start_epoch = args.start_epoch  # start from epoch 0 or last checkpoint epoch

    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'valf')
    # Standard ImageNet channel statistics.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    # Larger models get the more aggressive random-crop scale range.
    data_aug_scale = (0.08, 1.0) if args.modelsize == 'large' else (0.2, 1.0)

    train_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(traindir, transforms.Compose([
            transforms.RandomResizedCrop(224, scale=data_aug_scale),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=args.train_batch, shuffle=True,
        num_workers=args.workers, pin_memory=True)

    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, transforms.Compose([
            # FIX: transforms.Scale was deprecated and later removed from
            # torchvision; Resize is the drop-in equivalent.
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=args.test_batch, shuffle=True,
        num_workers=args.workers, pin_memory=True)

    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    elif 'resnext' in args.arch:
        model = models.__dict__[args.arch](
                    baseWidth=args.base_width,
                    cardinality=args.cardinality,
                )
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch]()

    # NOTE(review): recent ptflops versions expect the input resolution as
    # (3, 224, 224) — confirm against the installed ptflops API.
    flops, params = get_model_complexity_info(model, (224, 224), as_strings=False, print_per_layer_stat=False)
    print('Flops:  %.3f' % (flops / 1e9))
    print('Params: %.2fM' % (params / 1e6))


    # DataParallel placement: alexnet/vgg parallelize only the conv features
    # (their huge FC layers replicate poorly); everything else wraps whole.
    if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()

    cudnn.benchmark = True

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)

    # Resume
    title = 'ImageNet-' + args.arch
    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        # FIX: message previously said "directory" although a file is checked.
        assert os.path.isfile(args.resume), 'Error: no checkpoint file found!'
        args.checkpoint = os.path.dirname(args.resume)
        checkpoint = torch.load(args.resume)
        best_acc = checkpoint['best_acc']
        start_epoch = checkpoint['epoch']
        # The current model may have keys the checkpoint lacks; fill those from
        # the freshly initialized weights.  If any key was missing, the saved
        # optimizer state no longer matches, so start the optimizer fresh.
        t = model.state_dict()
        c = checkpoint['state_dict']
        flag = True
        for k in t:
            if k not in c:
                print('not in loading dict! fill it', k, t[k])
                c[k] = t[k]
                flag = False
        model.load_state_dict(c)
        if flag:
            print('optimizer load old state')
            optimizer.load_state_dict(checkpoint['optimizer'])
        else:
            print('new optimizer !')
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title, resume=True)
    else:
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names(['Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.', 'Valid Acc.'])


    if args.evaluate:
        print('\nEvaluation only')
        test_loss, test_acc = test(val_loader, model, criterion, start_epoch, use_cuda)
        print(' Test Loss:  %.8f, Test Acc:  %.2f' % (test_loss, test_acc))
        return

    # Train and val
    for epoch in range(start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        print('\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, state['lr']))

        train_loss, train_acc = train(train_loader, model, criterion, optimizer, epoch, use_cuda)
        test_loss, test_acc = test(val_loader, model, criterion, epoch, use_cuda)

        # append logger file
        logger.append([state['lr'], train_loss, test_loss, train_acc, test_acc])

        # save model; keep a separate copy of the best-by-val-accuracy weights
        is_best = test_acc > best_acc
        best_acc = max(test_acc, best_acc)
        save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'acc': test_acc,
                'best_acc': best_acc,
                'optimizer' : optimizer.state_dict(),
            }, is_best, checkpoint=args.checkpoint)

    logger.close()

    print('Best acc:')
    print(best_acc)
Beispiel #28
0
    def __init__(self, args, valid=False, one_hot=True):
        """Load CIFAR-10/100 train/test/valid splits in several variants.

        Builds plain, "encoded" (resized to 224, e.g. for an ImageNet-style
        encoder) and "with_encoded" versions of the train/test sets, optionally
        one-hot encodes every label array, and, when ``valid`` is set, carves
        validation sets out of the training sets.

        Args:
            args: namespace with at least ``args.dataset`` in
                {'cifar-10', 'cifar-100'}.
            valid: also build validation splits from the training data.
            one_hot: one-hot encode all label arrays (default True).
        """
        self.args = args
        self.valid = valid
        self.one_hot = one_hot

        print('\nData Preparation')
        # Data upload
        data_transform = transforms.Compose([
            transforms.ToTensor(),
        ])
        # "Encoded" variants are resized to 224x224 before tensor conversion.
        encoded_data_transform = transforms.Compose([
            transforms.Resize(224),
            transforms.ToTensor(),
        ])

        # root_path = '/Users/changgang/Documents/DATA/Data For Research/CIFAR'
        root_path = '/HDD/personal/zhengchanggang/CIFAR'
        # root_path = '../data'

        # Both datasets are built identically; only the class and the number
        # of categories differ, so select them once instead of duplicating
        # the six constructor calls per dataset.
        if args.dataset == 'cifar-10':
            dataset_cls, num_classes = CIFAR10, 10
        else:
            assert args.dataset == 'cifar-100'
            dataset_cls, num_classes = CIFAR100, 100
        class_ids = np.arange(num_classes)

        self.trainset = dataset_cls(root=root_path,
                                    train=True,
                                    valid=valid,
                                    classes=class_ids,
                                    transform=data_transform)
        self.testset = dataset_cls(root=root_path,
                                   train=False,
                                   classes=class_ids,
                                   transform=data_transform)
        self.encoded_trainset = dataset_cls(root=root_path,
                                            train=True,
                                            valid=valid,
                                            encoded=True,
                                            classes=class_ids,
                                            transform=encoded_data_transform)
        self.encoded_testset = dataset_cls(root=root_path,
                                           train=False,
                                           encoded=True,
                                           classes=class_ids,
                                           transform=encoded_data_transform)
        self.with_encoded_trainset = dataset_cls(
            root=root_path,
            train=True,
            valid=valid,
            encoded=True,
            with_encoded=True,
            classes=class_ids,
            transform=encoded_data_transform)
        self.with_encoded_testset = dataset_cls(
            root=root_path,
            train=False,
            encoded=True,
            with_encoded=True,
            classes=class_ids,
            transform=encoded_data_transform)

        # Fit the encoder once on the training labels and reuse it everywhere
        # so the category ordering stays consistent across all label arrays.
        self.label_transformer = OneHotEncoder(
            sparse=False, categories='auto').fit(
                np.array(self.trainset.train_labels).reshape(-1, 1))
        if self.one_hot:  # DEFAULT True
            # BUGFIX: was ``self.trainaset`` (typo), which raised
            # AttributeError whenever one_hot=True (the default).
            self.trainset.train_labels = self.label_transformer.transform(
                np.array(self.trainset.train_labels).reshape(-1, 1))
            self.testset.test_labels = self.label_transformer.transform(
                np.array(self.testset.test_labels).reshape(-1, 1))
            self.encoded_trainset.train_labels = self.label_transformer.transform(
                np.array(self.encoded_trainset.train_labels).reshape(-1, 1))
            self.encoded_testset.test_labels = self.label_transformer.transform(
                np.array(self.encoded_testset.test_labels).reshape(-1, 1))

            self.with_encoded_trainset.train_labels = self.label_transformer.transform(
                np.array(self.with_encoded_trainset.train_labels).reshape(
                    -1, 1))
            self.with_encoded_testset.test_labels = self.label_transformer.transform(
                np.array(self.with_encoded_testset.test_labels).reshape(-1, 1))
            self.with_encoded_trainset.encoded_train_labels = self.label_transformer.transform(
                np.array(
                    self.with_encoded_trainset.encoded_train_labels).reshape(
                        -1, 1))
            self.with_encoded_testset.encoded_test_labels = self.label_transformer.transform(
                np.array(
                    self.with_encoded_testset.encoded_test_labels).reshape(
                        -1, 1))

        if valid:
            # BUGFIX: was ``copy.deepcopy(trainset)`` (missing ``self.``),
            # which raised NameError whenever valid=True.
            self.validset = Validset(copy.deepcopy(self.trainset))
            self.encoded_validset = Validset(copy.deepcopy(
                self.encoded_trainset),
                                             encoded=True)
            self.with_encoded_validset = Validset(copy.deepcopy(
                self.with_encoded_trainset),
                                                  encoded=True,
                                                  with_encoded=True)

        else:
            # NOTE(review): these attribute names (``*validloader``) do not
            # match the ones assigned in the ``valid`` branch (``*validset``);
            # kept as-is for backward compatibility, but callers probing either
            # name should be checked.
            self.validloader = None
            self.encoded_validloader = None
            self.with_encoded_validset = None
Beispiel #29
0
import torch
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np


# Normalize each RGB channel from [0, 1] to [-1, 1] ((x - 0.5) / 0.5).
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

# NOTE(review): ``download`` is not passed (defaults to False), so './data'
# must already contain the CIFAR-10 archive — confirm or add download=True.
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
                                          shuffle=True, num_workers=2)

validationset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                       transform=transform)
validationloader = torch.utils.data.DataLoader(validationset, batch_size=4,
                                         shuffle=False, num_workers=2)

# CIFAR-10 class names, indexed by label id.
classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

# functions to show an image


def imshow(img):
    """Undo the (0.5, 0.5) normalization and convert *img* to a numpy array.

    NOTE(review): the body appears truncated here — the converted ``npimg``
    is never displayed; presumably a ``plt.imshow``/``plt.show`` call follows
    in the original source. Confirm before relying on this helper.
    """
    img = img / 2 + 0.5     # unnormalize
    npimg = img.numpy()
Beispiel #30
0
@Author: John
@Email: [email protected]
@Date: 2020-06-11 10:02:35
@LastEditor: John
@LastEditTime: 2020-06-11 16:57:34
@Description: 
@Environment: python 3.7.7
'''

import numpy as np
import torch
import torchvision.transforms as T
from PIL import Image

# Preprocessing pipeline for rendered gym frames: tensor/array -> PIL image,
# shrink so the shorter side is 40 px, back to a float tensor.
# FIX: the ``Image.CUBIC`` alias was removed in Pillow 10; ``Image.BICUBIC``
# is the identical resampling constant.
resize = T.Compose(
    [T.ToPILImage(),
     T.Resize(40, interpolation=Image.BICUBIC),
     T.ToTensor()])


def get_cart_location(env, screen_width):
    """Return the cart's horizontal pixel position on the rendered screen.

    Maps the cart's world x-coordinate (``env.state[0]``, which lives in
    ``[-env.x_threshold, env.x_threshold]``) onto screen pixels, with world
    x = 0 landing on the middle of the screen.
    """
    pixels_per_world_unit = screen_width / (env.x_threshold * 2)
    cart_x = env.state[0]
    # Screen center plus the scaled world offset, truncated to a pixel column.
    return int(screen_width / 2.0 + cart_x * pixels_per_world_unit)


def get_screen(env, device):
    # Returned screen requested by gym is 400x600x3, but is sometimes larger
    # such as 800x1200x3. Transpose it into torch order (CHW).
    screen = env.render(mode='rgb_array').transpose((2, 0, 1))
    # Cart is in the lower half, so strip off the top and bottom of the screen
    _, screen_height, screen_width = screen.shape