Example #1
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader


def GetTrainTestData(dataset, batch_size, dataset_dir, shuffle_train=True):

    # define transform for train data
    # mean and std calculated from utils/GetMeanNdStd
    if dataset == "cifar10":
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010)),
        ])

        # load datasets
        trainData = torchvision.datasets.CIFAR10(root=dataset_dir,
                                                 train=True,
                                                 download=True,
                                                 transform=transform_train)
        # sampler = list(range(64*10))
        trainDataGen = DataLoader(
            trainData,
            batch_size=batch_size,
            num_workers=1,
            shuffle=shuffle_train
        )  #, sampler=Sampler.SubsetRandomSampler(sampler))

        # change transform for test dataset
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010)),
        ])
        testData = torchvision.datasets.CIFAR10(root=dataset_dir,
                                                train=False,
                                                download=True,
                                                transform=transform_test)
        testDataGen = DataLoader(testData,
                                 batch_size=batch_size,
                                 num_workers=1,
                                 shuffle=False)

        return trainDataGen, testDataGen

    elif dataset == "cifar100":
        transform = transforms.Compose([
            # transforms.Resize(224),
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.5071, 0.4867, 0.4408),
                                 (0.2675, 0.2565, 0.2761))
        ])
        # load datasets
        trainData = torchvision.datasets.CIFAR100(root=dataset_dir,
                                                  train=True,
                                                  download=True,
                                                  transform=transform)
        # sampler = list(range(64*10))
        trainDataGen = DataLoader(
            trainData, batch_size=batch_size, num_workers=1,
            shuffle=shuffle_train)  #, sampler=Sampler.SubsetRandomSampler(sampler))

        # change transform for test dataset
        transform = transforms.Compose([
            # transforms.Resize(224),
            transforms.ToTensor(),
            transforms.Normalize((0.5071, 0.4867, 0.4408),
                                 (0.2675, 0.2565, 0.2761))
        ])

        testData = torchvision.datasets.CIFAR100(root=dataset_dir,
                                                 train=False,
                                                 download=True,
                                                 transform=transform)
        testDataGen = DataLoader(testData,
                                 batch_size=batch_size,
                                 num_workers=1,
                                 shuffle=False)

        return trainDataGen, testDataGen
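A minimal usage sketch for the helper above; the batch size and data directory below are placeholder values, not taken from the original snippet.

train_loader, test_loader = GetTrainTestData("cifar10", batch_size=128,
                                             dataset_dir="./data", shuffle_train=True)
images, labels = next(iter(train_loader))  # images: (128, 3, 32, 32), labels: (128,)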
Example #2
def main_worker(gpu, ngpus_per_node, args):
    global best_acc1
    args.gpu = gpu

    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))

    # create model
    print("=> creating model '{}'".format(args.arch))
    num_classes = 100 if args.dataset == 'cifar100' else 10
    use_norm = (args.loss_type == 'LDAM')
    model = models.__dict__[args.arch](num_classes=num_classes, use_norm=use_norm)

    if args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs
        model = torch.nn.DataParallel(model).cuda()

    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume, map_location='cuda:0')
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if args.gpu is not None:
                # best_acc1 may be from a checkpoint from a different GPU
                best_acc1 = best_acc1.to(args.gpu)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code

    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    transform_val = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    if args.dataset == 'cifar10':
        train_dataset = IMBALANCECIFAR10(root='./data', imb_type=args.imb_type, imb_factor=args.imb_factor, rand_number=args.rand_number, train=True, download=True, transform=transform_train)
        val_dataset = datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_val)
    elif args.dataset == 'cifar100':
        train_dataset = IMBALANCECIFAR100(root='./data', imb_type=args.imb_type, imb_factor=args.imb_factor, rand_number=args.rand_number, train=True, download=True, transform=transform_train)
        val_dataset = datasets.CIFAR100(root='./data', train=False, download=True, transform=transform_val)
    else:
        warnings.warn('Dataset is not listed')
        return
    cls_num_list = train_dataset.get_cls_num_list()
    print('cls num list:')
    print(cls_num_list)
    args.cls_num_list = cls_num_list
    
    train_sampler = None
        
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=True, sampler=train_sampler)

    val_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=100, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    # init log for training
    log_training = open(os.path.join(args.root_log, args.store_name, 'log_train.csv'), 'w')
    log_testing = open(os.path.join(args.root_log, args.store_name, 'log_test.csv'), 'w')
    with open(os.path.join(args.root_log, args.store_name, 'args.txt'), 'w') as f:
        f.write(str(args))
    tf_writer = SummaryWriter(log_dir=os.path.join(args.root_log, args.store_name))
    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch, args)

        if args.train_rule == 'None':
            train_sampler = None  
            per_cls_weights = None 
        elif args.train_rule == 'Resample':
            train_sampler = ImbalancedDatasetSampler(train_dataset)
            per_cls_weights = None
        elif args.train_rule == 'Reweight':
            train_sampler = None
            beta = 0.9999
            effective_num = 1.0 - np.power(beta, cls_num_list)
            per_cls_weights = (1.0 - beta) / np.array(effective_num)
            per_cls_weights = per_cls_weights / np.sum(per_cls_weights) * len(cls_num_list)
            per_cls_weights = torch.FloatTensor(per_cls_weights).cuda(args.gpu)
        elif args.train_rule == 'DRW':
            train_sampler = None
            idx = epoch // 160
            betas = [0, 0.9999]
            effective_num = 1.0 - np.power(betas[idx], cls_num_list)
            per_cls_weights = (1.0 - betas[idx]) / np.array(effective_num)
            per_cls_weights = per_cls_weights / np.sum(per_cls_weights) * len(cls_num_list)
            per_cls_weights = torch.FloatTensor(per_cls_weights).cuda(args.gpu)
        else:
            warnings.warn('Sample rule is not listed')
        
        if args.loss_type == 'CE':
            criterion = nn.CrossEntropyLoss(weight=per_cls_weights).cuda(args.gpu)
        elif args.loss_type == 'LDAM':
            criterion = LDAMLoss(cls_num_list=cls_num_list, max_m=0.5, s=30, weight=per_cls_weights).cuda(args.gpu)
        elif args.loss_type == 'Focal':
            criterion = FocalLoss(weight=per_cls_weights, gamma=1).cuda(args.gpu)
        else:
            warnings.warn('Loss type is not listed')
            return

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, args, log_training, tf_writer)
        
        # evaluate on validation set
        acc1 = validate(val_loader, model, criterion, epoch, args, log_testing, tf_writer)

        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)

        tf_writer.add_scalar('acc/test_top1_best', best_acc1, epoch)
        output_best = 'Best Prec@1: %.3f\n' % (best_acc1)
        print(output_best)
        log_testing.write(output_best + '\n')
        log_testing.flush()

        save_checkpoint(args, {
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': model.state_dict(),
            'best_acc1': best_acc1,
            'optimizer' : optimizer.state_dict(),
        }, is_best)
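The 'Reweight' and 'DRW' rules above weight each class by the inverse of its effective number of samples, E_n = (1 - beta^n) / (1 - beta), following the class-balanced loss formulation. A small self-contained sketch of that computation, using made-up class counts, is shown below.

import numpy as np

cls_num_list = [5000, 500, 50]   # hypothetical per-class sample counts
beta = 0.9999
effective_num = 1.0 - np.power(beta, cls_num_list)
per_cls_weights = (1.0 - beta) / np.array(effective_num)
# normalize so the weights average to 1 across classes
per_cls_weights = per_cls_weights / np.sum(per_cls_weights) * len(cls_num_list)
print(per_cls_weights)           # the rarest class receives the largest weight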
Example #3
parser.add_argument('--compression', default=0.25, type=float)
parser.add_argument('--log_after_steps', default=100, type=int)
parser.add_argument('--lambda_bce', default=1, type=float)
parser.add_argument('--lambda_ortho', default=1, type=float)
parser.add_argument('--lambda_quant', default=1, type=float)
parser.add_argument('--lambda_l1', default=1, type=float)
args = parser.parse_args()

start_epoch = 0

device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Data
print('==> Preparing data..')
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

trainset = torchvision.datasets.CIFAR10(root='/home/shashant/.torch/',
                                        train=True,
                                        download=True,
                                        transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=128,  # remaining arguments assumed; the original snippet is truncated here
                                          shuffle=True,
                                          num_workers=2)
Example #4
def main():
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')

    parser.add_argument('--result_dir',
                        type=str,
                        help='dir to save result txt files',
                        default='results/')
    parser.add_argument('--noise_rate',
                        type=float,
                        help='corruption rate, should be less than 1',
                        default=0.5)
    parser.add_argument('--forget_rate',
                        type=float,
                        help='forget rate',
                        default=None)
    parser.add_argument('--noise_type',
                        type=str,
                        help='[pairflip, symmetric]',
                        default='symmetric')
    parser.add_argument(
        '--num_gradual',
        type=int,
        default=10,
        help=
        'how many epochs for linear drop rate, can be 5, 10, 15. This parameter is equal to Tk for R(T) in Co-teaching paper.'
    )
    parser.add_argument(
        '--exponent',
        type=float,
        default=1,
        help=
        'exponent of the forget rate, can be 0.5, 1, 2. This parameter is equal to c in Tc for R(T) in Co-teaching paper.'
    )
    parser.add_argument('--top_bn', action='store_true')
    parser.add_argument('--dataset',
                        type=str,
                        help='mnist, cifar10, or cifar100',
                        default='mnist')
    parser.add_argument('--n_epoch', type=int, default=300)
    parser.add_argument('--seed', type=int, default=1)
    parser.add_argument('--print_freq', type=int, default=50)
    parser.add_argument('--num_workers',
                        type=int,
                        default=2,
                        help='how many subprocesses to use for data loading')
    parser.add_argument('--num_iter_per_epoch', type=int, default=400)
    parser.add_argument('--epoch_decay_start', type=int, default=80)
    parser.add_argument('--eps', type=float, default=9.9)

    parser.add_argument('--batch-size',
                        type=int,
                        default=128,
                        metavar='N',
                        help='input batch size for training (default: 128)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=4000,
                        metavar='N',
                        help='input batch size for testing (default: 4000)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.2,
                        metavar='LR',
                        help='learning rate (default: 0.2)')  #sm 0.5   lr=0.5
    parser.add_argument('--momentum',
                        type=float,
                        default=0.9,
                        metavar='M',
                        help='SGD momentum (default: 0.9)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=100,
        metavar='N',
        help='how many batches to wait before logging training status')

    parser.add_argument('--save-model',
                        action='store_true',
                        default=False,
                        help='For Saving the current Model')
    parser.add_argument(
        '--noise-level',
        type=float,
        default=80.0,
        help=
        'percentage of noise added to the data (values from 0. to 100.), default: 80.'
    )
    parser.add_argument(
        '--root-dir',
        type=str,
        default='.',
        help=
        'path to CIFAR dir where cifar-10-batches-py/ and cifar-100-python/ are located. If the datasets are not downloaded, they will automatically be and extracted to this path, default: .'
    )
    args = parser.parse_args()

    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    batch_size = args.batch_size

    # if args.dataset=='mnist':
    #     input_channel=1
    #     num_classes=10
    #     args.top_bn = False
    #     args.epoch_decay_start = 80
    #     args.n_epoch = 200
    #     train_dataset = MNIST(root='./data/',
    #                                 download=True,
    #                                 train=True,
    #                                 transform=transforms.ToTensor(),
    #                                 noise_type=args.noise_type,
    #                                 noise_rate=args.noise_rate
    #                          )
    #
    #     test_dataset = MNIST(root='./data/',
    #                                download=True,
    #                                train=False,
    #                                transform=transforms.ToTensor(),
    #                                noise_type=args.noise_type,
    #                                noise_rate=args.noise_rate
    #                         )
    #
    # if args.dataset=='cifar10':
    #     input_channel=3
    #     num_classes=10
    #     args.top_bn = False
    #     args.epoch_decay_start = 80
    #     args.n_epoch = 200
    #     train_dataset = CIFAR10(root='./data/',
    #                                 download=True,
    #                                 train=True,
    #                                 transform=transforms.ToTensor(),
    #                                 noise_type=args.noise_type,
    #                                 noise_rate=args.noise_rate
    #                            )
    #
    #     test_dataset = CIFAR10(root='./data/',
    #                                 download=True,
    #                                 train=False,
    #                                 transform=transforms.ToTensor(),
    #                                 noise_type=args.noise_type,
    #                                 noise_rate=args.noise_rate
    #                           )
    #
    # if args.dataset=='cifar100':
    #     input_channel=3
    #     num_classes=100
    #     args.top_bn = False
    #     args.epoch_decay_start = 100
    #     args.n_epoch = 200
    #     train_dataset = CIFAR100(root='./data/',
    #                                 download=True,
    #                                 train=True,
    #                                 transform=transforms.ToTensor(),
    #                                 noise_type=args.noise_type,
    #                                 noise_rate=args.noise_rate
    #                             )
    #
    #     test_dataset = CIFAR100(root='./data/',
    #                                 download=True,
    #                                 train=False,
    #                                 transform=transforms.ToTensor(),
    #                                 noise_type=args.noise_type,
    #                                 noise_rate=args.noise_rate
    #                             )
    # if args.forget_rate is None:
    #     forget_rate=args.noise_rate
    # else:
    #     forget_rate=args.forget_rate
    #
    # noise_or_not = train_dataset.noise_or_not
    # # Data Loader (Input Pipeline)
    # print('loading dataset...')
    # train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
    #                                            batch_size=batch_size,
    #                                            num_workers=args.num_workers,
    #                                            drop_last=True,
    #                                            shuffle=True)
    #
    # test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
    #                                           batch_size=batch_size,
    #                                           num_workers=args.num_workers,
    #                                           drop_last=True,
    #                                           shuffle=False)
    '''
    mean = [0.4914, 0.4822, 0.4465]
    std = [0.2023, 0.1994, 0.2010]

    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])'''
    if args.dataset == 'cifar10':
        mean = [0.4914, 0.4822, 0.4465]
        std = [0.2023, 0.1994, 0.2010]

        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean, std),
        ])

        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean, std),
        ])
        trainset = datasets.CIFAR10(root=args.root_dir,
                                    train=True,
                                    download=True,
                                    transform=transform_train)
        trainset_track = datasets.CIFAR10(root=args.root_dir,
                                          train=True,
                                          transform=transform_train)
        testset = datasets.CIFAR10(root=args.root_dir,
                                   train=False,
                                   transform=transform_test)
        num_classes = 10
    elif args.dataset == 'cifar100':
        mean = [0.5071, 0.4867, 0.4408]  # CIFAR-100 channel mean
        std = [0.2675, 0.2565, 0.2761]   # CIFAR-100 channel std

        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean, std),
        ])

        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean, std),
        ])
        trainset = datasets.CIFAR100(root=args.root_dir,
                                     train=True,
                                     download=True,
                                     transform=transform_train)
        trainset_track = datasets.CIFAR100(root=args.root_dir,
                                           train=True,
                                           transform=transform_train)
        testset = datasets.CIFAR100(root=args.root_dir,
                                    train=False,
                                    transform=transform_test)
        num_classes = 100
    elif args.dataset == 'imagenet_tiny':
        init_epoch = 100
        num_classes = 200
        #data_root = '/home/xingyu/Data/phd/data/imagenet-tiny/tiny-imagenet-200'
        data_root = '/home/iedl/w00536717/coteaching_plus-master/data/imagenet-tiny/tiny-imagenet-200'
        train_kv = "train_noisy_%s_%s_kv_list.txt" % (args.noise_type,
                                                      args.noise_rate)
        test_kv = "val_kv_list.txt"

        normalize = transforms.Normalize(mean=[0.4802, 0.4481, 0.3975],
                                         std=[0.2302, 0.2265, 0.2262])

        trainset = ImageFilelist(root=data_root,
                                 flist=os.path.join(data_root, train_kv),
                                 transform=transforms.Compose([
                                     transforms.RandomResizedCrop(56),
                                     transforms.RandomHorizontalFlip(),
                                     transforms.ToTensor(),
                                     normalize,
                                 ]))
        trainset_track = ImageFilelist(root=data_root,
                                       flist=os.path.join(data_root, train_kv),
                                       transform=transforms.Compose([
                                           transforms.RandomResizedCrop(56),
                                           transforms.RandomHorizontalFlip(),
                                           transforms.ToTensor(),
                                           normalize,
                                       ]))

        testset = ImageFilelist(root=data_root,
                                flist=os.path.join(data_root, test_kv),
                                transform=transforms.Compose([
                                    transforms.Resize(64),
                                    transforms.CenterCrop(56),
                                    transforms.ToTensor(),
                                    normalize,
                                ]))
    elif args.dataset == 'clothing1M':
        init_epoch = 100
        num_classes = 14
        data_root = '/home/iedl/w00536717/data/cloting1m/'
        #train_kv =
        #test_kv =

        train_transform = transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.RandomCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])
        test_transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])
        '''
        train_transform = transforms.Compose([
                transforms.Resize(256),
                transforms.RandomCrop(224),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),                
                transforms.Normalize((0.6959, 0.6537, 0.6371),(0.3113, 0.3192, 0.3214)),                     
            ]) 
        test_transform = transforms.Compose([
                transforms.Resize(256),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                transforms.Normalize((0.6959, 0.6537, 0.6371),(0.3113, 0.3192, 0.3214)),
            ]) 
        '''
    ''' 
    train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=4,
                                               pin_memory=True)
    train_loader_track = torch.utils.data.DataLoader(trainset_track, batch_size=args.batch_size, shuffle=False,
                                                     num_workers=4, pin_memory=True)
    test_loader = torch.utils.data.DataLoader(testset, batch_size=args.test_batch_size, shuffle=False, num_workers=4,
                                              pin_memory=True)
    '''
    train_dataset = Clothing(root=data_root,
                             img_transform=train_transform,
                             train=True,
                             valid=False,
                             test=False)
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=32)
    train_dataset_track = Clothing(root=data_root,
                                   img_transform=train_transform,
                                   train=True,
                                   valid=False,
                                   test=False)
    train_loader_track = torch.utils.data.DataLoader(
        dataset=train_dataset_track,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=32)
    valid_dataset = Clothing(root=data_root,
                             img_transform=train_transform,
                             train=False,
                             valid=True,
                             test=False)
    valid_loader = torch.utils.data.DataLoader(dataset=valid_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=False,
                                               num_workers=32)
    test_dataset = Clothing(root=data_root,
                            img_transform=test_transform,
                            train=False,
                            valid=False,
                            test=True)
    test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              num_workers=32)
    #print(dir(train_loader))
    #labels = get_data_cifar_2(train_loader_track)  # it should be "cloning"
    #noisy_labels = add_noise_cifar_wo(train_loader, args.noise_level,
    #                                 args.noise_type)  # it changes the labels in the train loader directly
    #noisy_labels_track = add_noise_cifar_wo(train_loader_track, args.noise_level, args.noise_type)

    # Define models
    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3,4,5,6,7"
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    #cnn = PreResNet_two.ResNet18(num_classes=num_classes).to(device)
    cnn = MyResNet_zero.MyCustomResnet(num_classes)  #.to(device)
    cnn = nn.DataParallel(cnn, device_ids=[0, 1, 2, 3, 4, 5, 6, 7])
    cnn.to(device)
    cnn.cuda()
    #print(model.parameters)
    #optimizer1 = torch.optim.SGD(cnn1.parameters(), lr=learning_rate)
    optimizer = torch.optim.SGD(cnn.parameters(),
                                lr=args.lr,
                                weight_decay=1e-3,
                                momentum=args.momentum)
    #optimizer = torch.optim.Adam(cnn.parameters(), lr=args.lr,weight_decay=1e-4)
    #optimizer1 = torch.optim.SGD(cnn.parameters(), lr=1e-2,weight_decay=1e-4,momentum=args.momentum)
    bmm_model = bmm_model_maxLoss = bmm_model_minLoss = 0

    acc = []
    loss = []
    loss_pure = []
    loss_corrupt = []
    out = []
    temp = 1
    for epoch in range(1, args.n_epoch + 1):
        if epoch < 3:
            #epoch_losses_train, epoch_probs_train, argmaxXentropy_train, bmm_model, bmm_model_maxLoss, bmm_model_minLoss = \
            #    track_training_loss(args, cnn, device, train_loader_track, epoch, bmm_model, bmm_model_maxLoss,
            #                        bmm_model_minLoss)
            #l1,temp=train(args, cnn, device, train_loader, optimizer, epoch,bmm_model, bmm_model_maxLoss, bmm_model_minLoss,temp)
            l1 = train_CE(args, cnn, device, train_loader, optimizer, epoch)
            #adjust_learning_rate(optimizer, 0.1)
            #l2=train_uncertainty(args, cnn, device, train_loader, optimizer1, epoch,bmm_model, bmm_model_maxLoss, bmm_model_minLoss)
            #l1=train_true_label(args, cnn, device, train_loader, optimizer, epoch,bmm_model, bmm_model_maxLoss, bmm_model_minLoss)
            #l2=train_uncertainty(args, cnn, device, train_loader, optimizer1, epoch,bmm_model, bmm_model_maxLoss, bmm_model_minLoss)
            #l1=train_CE(args, cnn, device, train_loader, optimizer, epoch)
            #l2=train_uncertainty(args, cnn, device, train_loader, optimizer1, epoch,bmm_model, bmm_model_maxLoss, bmm_model_minLoss)
            #loss.append(l1)
            acc.append(test(args, cnn, device, test_loader))
        elif epoch < 80:
            if epoch == 3:
                adjust_learning_rate(optimizer, args.lr / 10)
            epoch_losses_train, epoch_probs_train, argmaxXentropy_train, bmm_model, bmm_model_maxLoss, bmm_model_minLoss = \
                track_training_loss(args, cnn, device, train_loader_track, epoch, bmm_model, bmm_model_maxLoss,
                                    bmm_model_minLoss)
            l1, temp = train(args, cnn, device, train_loader, optimizer, epoch,
                             bmm_model, bmm_model_maxLoss, bmm_model_minLoss,
                             temp, num_classes)
            #l1=train_CE(args, cnn, device, train_loader, optimizer, epoch)
            #l2=train_uncertainty(args, cnn, device, train_loader, optimizer1, epoch,bmm_model, bmm_model_maxLoss, bmm_model_minLoss)
            acc.append(test(args, cnn, device, test_loader))
        elif epoch < 200:
            if epoch == 10:
                adjust_learning_rate(optimizer, args.lr / 1000)
            elif epoch == 100:
                adjust_learning_rate(optimizer, args.lr / 5000)
            elif epoch == 160:
                adjust_learning_rate(optimizer, args.lr / 25000)
            epoch_losses_train, epoch_probs_train, argmaxXentropy_train, bmm_model, bmm_model_maxLoss, bmm_model_minLoss = \
                track_training_loss(args, cnn, device, train_loader_track, epoch, bmm_model, bmm_model_maxLoss,
                                    bmm_model_minLoss)

            l1, temp = train(args, cnn, device, train_loader, optimizer, epoch,
                             bmm_model, bmm_model_maxLoss, bmm_model_minLoss,
                             temp, num_classes)
            #adjust_learning_rate(optimizer, args.lr/1000)
            #epoch_losses_train, epoch_probs_train, argmaxXentropy_train, bmm_model, bmm_model_maxLoss, bmm_model_minLoss = \
            #    track_training_loss(args, cnn, device, train_loader_track, epoch, bmm_model, bmm_model_maxLoss,
            #                        bmm_model_minLoss)
            #l2=train_uncertainty(args, cnn, device, train_loader, optimizer1, epoch,bmm_model, bmm_model_maxLoss, bmm_model_minLoss)
            #acc.append(test(args, cnn, device, test_loader))
            #l1=train_true_label(args, cnn, device, train_loader, optimizer, epoch,bmm_model, bmm_model_maxLoss, bmm_model_minLoss)
            #l2=train_uncertainty(args, cnn, device, train_loader, optimizer1, epoch,bmm_model, bmm_model_maxLoss, bmm_model_minLoss)
            #l3=train_DMI(args, cnn, device, train_loader, optimizer1, epoch, bmm_model, bmm_model_maxLoss, bmm_model_minLoss, criterion = DMI_loss)
            #l1 = train_together(args, cnn, device, train_loader, optimizer, epoch, bmm_model, bmm_model_maxLoss,
            #                      bmm_model_minLoss)
            #loss.append(l1)
            #out.append(out10)
            acc.append(test(args, cnn, device, test_loader))
        else:
            #adjust_learning_rate(optimizer, args.lr/10000)
            epoch_losses_train, epoch_probs_train, argmaxXentropy_train, bmm_model, bmm_model_maxLoss, bmm_model_minLoss = \
                track_training_loss(args, cnn, device, train_loader_track, epoch, bmm_model, bmm_model_maxLoss,
                                    bmm_model_minLoss)
            l1, temp = train(args, cnn, device, train_loader, optimizer, epoch,
                             bmm_model, bmm_model_maxLoss, bmm_model_minLoss,
                             temp, num_classes)
            #l2=train_uncertainty(args, cnn, device, train_loader, optimizer1, epoch,bmm_model, bmm_model_maxLoss, bmm_model_minLoss)
            #acc.append(test(args, cnn, device, test_loader))
            #l1 = train_true_label(args, cnn, device, train_loader, optimizer, epoch, bmm_model, bmm_model_maxLoss,bmm_model_minLoss)
            #l2 = train_uncertainty(args, cnn, device, train_loader, optimizer1, epoch, bmm_model, bmm_model_maxLoss,bmm_model_minLoss)
            #l3=train_DMI(args, cnn, device, train_loader, optimizer1, epoch,bmm_model, bmm_model_maxLoss, bmm_model_minLoss, criterion = DMI_loss)
            #l1 = train_together(args, cnn, device, train_loader, optimizer, epoch, bmm_model, bmm_model_maxLoss,
            #                    bmm_model_minLoss)
            #loss.append(l1)
            #out.append(out10)
            acc.append(test(args, cnn, device, test_loader))
        print("Evaluate on validset")
        test(args, cnn, device, valid_loader)
        temp -= 0.0024

    name = str(args.dataset) + " " + str(args.noise_type) + " " + str(
        args.noise_rate)
Example #5
#
text_encoder = RNN_ENCODER(27297, nhidden=256)
state_dict = \
    torch.load('../models/text_encoder100.pth', map_location=lambda storage, loc: storage)
text_encoder.load_state_dict(state_dict)
text_encoder = text_encoder.cpu()
text_encoder.eval()

state_dict = \
    torch.load('../models/coco_AttnGAN2.pth', map_location=lambda storage, loc: storage)
netG.load_state_dict(state_dict)

imsize = 64 * (2**(3 - 1))
image_transform = transforms.Compose([
    transforms.Resize(int(imsize * 76 / 64)),
    transforms.RandomCrop(imsize),
    transforms.RandomHorizontalFlip()
])

dataset = TextDataset('../data/coco/',
                      'test',
                      base_size=64,
                      transform=image_transform)
assert dataset
dataloader = torch.utils.data.DataLoader(dataset,
                                         batch_size=100,
                                         drop_last=True,
                                         shuffle=True,
                                         num_workers=12)

wordtoix = dataset.wordtoix
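A hedged sketch of how wordtoix is typically used with the RNN_ENCODER loaded above, assuming AttnGAN's usual interface (init_hidden plus a forward pass returning word and sentence embeddings); the caption text is illustrative only.

caption = "a man riding a wave on a surfboard"   # illustrative caption
tokens = [wordtoix[w] for w in caption.lower().split() if w in wordtoix]
captions = torch.LongTensor([tokens])            # shape: (1, seq_len)
cap_lens = torch.LongTensor([len(tokens)])
hidden = text_encoder.init_hidden(1)             # batch size 1
words_embs, sent_emb = text_encoder(captions, cap_lens, hidden)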
Example #6
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size',
                        type=int,
                        default=64,
                        metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=1000,
                        metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--world_size',
                        type=int,
                        default=1,
                        help='number of GPUs to use')

    parser.add_argument('--epochs',
                        type=int,
                        default=10,
                        metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.01,
                        metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--wd',
                        type=float,
                        default=1e-4,
                        help='weight decay (default: 1e-4)')
    parser.add_argument('--lr-decay-every',
                        type=int,
                        default=100,
                        help='learning rate decay by 10 every X epochs')
    parser.add_argument('--lr-decay-scalar',
                        type=float,
                        default=0.1,
                        help='multiplicative factor for learning rate decay')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.5,
                        metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='how many batches to wait before logging training status')

    parser.add_argument('--run_test',
                        default=False,
                        type=str2bool,
                        nargs='?',
                        help='run test only')

    parser.add_argument(
        '--limit_training_batches',
        type=int,
        default=-1,
        help='how many batches to do per training, -1 means as many as possible'
    )

    parser.add_argument('--no_grad_clip',
                        default=False,
                        type=str2bool,
                        nargs='?',
                        help='turn off gradient clipping')

    parser.add_argument('--get_flops',
                        default=False,
                        type=str2bool,
                        nargs='?',
                        help='add hooks to compute flops')

    parser.add_argument(
        '--get_inference_time',
        default=False,
        type=str2bool,
        nargs='?',
        help='runs valid multiple times and reports the result')

    parser.add_argument('--mgpu',
                        default=False,
                        type=str2bool,
                        nargs='?',
                        help='use data parallelization via multiple GPUs')

    parser.add_argument('--dataset',
                        default="MNIST",
                        type=str,
                        help='dataset for experiment, choices: MNIST, CIFAR10')

    parser.add_argument('--data',
                        metavar='DIR',
                        default='/imagenet',
                        help='path to imagenet dataset')

    parser.add_argument(
        '--model',
        default="lenet3",
        type=str,
        help='model selection, choices: lenet3, vgg, mobilenetv2, resnet18',
        choices=[
            "lenet3", "vgg", "mobilenetv2", "resnet18", "resnet152",
            "resnet50", "resnet50_noskip", "resnet20", "resnet34", "resnet101",
            "resnet101_noskip", "densenet201_imagenet", 'densenet121_imagenet',
            "multprun_gate5_gpu_0316_1", "mult_prun8_gpu", "multnas5_gpu"
        ])

    parser.add_argument('--tensorboard',
                        type=str2bool,
                        nargs='?',
                        help='Log progress to TensorBoard')

    parser.add_argument(
        '--save_models',
        default=True,
        type=str2bool,
        nargs='?',
        help='if True, models will be saved to the local folder')

    parser.add_argument('--fineturn_model',
                        type=str2bool,
                        nargs='?',
                        help='fine-tune from a pretrained model')

    # ============================PRUNING added
    parser.add_argument(
        '--pruning_config',
        default=None,
        type=str,
        help=
        'path to pruning configuration file, will overwrite all pruning parameters in arguments'
    )

    parser.add_argument('--group_wd_coeff',
                        type=float,
                        default=0.0,
                        help='group weight decay')
    parser.add_argument('--name',
                        default='test',
                        type=str,
                        help='experiment name(folder) to store logs')

    parser.add_argument(
        '--augment',
        default=False,
        type=str2bool,
        nargs='?',
        help=
        'enable data augmentation for the training set (CIFAR only, default: False)'
    )

    parser.add_argument('--load_model',
                        default='',
                        type=str,
                        help='path to model weights')

    parser.add_argument('--pruning',
                        default=False,
                        type=str2bool,
                        nargs='?',
                        help='enable pruning (default: False)')

    parser.add_argument(
        '--pruning-threshold',
        '--pt',
        default=100.0,
        type=float,
        help=
        'Max error perc on validation set while pruning (default: 100.0 means always prune)'
    )

    parser.add_argument(
        '--pruning-momentum',
        default=0.0,
        type=float,
        help=
        'Use momentum on criteria between pruning iterations, def 0.0 means no momentum'
    )

    parser.add_argument('--pruning-step',
                        default=15,
                        type=int,
                        help='How often to check loss and do pruning step')

    parser.add_argument('--prune_per_iteration',
                        default=10,
                        type=int,
                        help='How many neurons to remove at each iteration')

    parser.add_argument(
        '--fixed_layer',
        default=-1,
        type=int,
        help='Prune only a given layer with index, use -1 to prune all')

    parser.add_argument('--start_pruning_after_n_iterations',
                        default=0,
                        type=int,
                        help='from which iteration to start pruning')

    parser.add_argument('--maximum_pruning_iterations',
                        default=1e8,
                        type=int,
                        help='maximum pruning iterations')

    parser.add_argument('--starting_neuron',
                        default=0,
                        type=int,
                        help='starting position for oracle pruning')

    parser.add_argument('--prune_neurons_max',
                        default=-1,
                        type=int,
                        help='prune_neurons_max')

    parser.add_argument('--pruning-method',
                        default=0,
                        type=int,
                        help='pruning method to be used, see readme.md')

    parser.add_argument('--pruning_fixed_criteria',
                        default=False,
                        type=str2bool,
                        nargs='?',
                        help='use fixed pruning criteria without reevaluation (default: False)')

    parser.add_argument('--fixed_network',
                        default=False,
                        type=str2bool,
                        nargs='?',
                        help='fix network for oracle or criteria computation')

    parser.add_argument(
        '--zero_lr_for_epochs',
        default=-1,
        type=int,
        help='Learning rate will be set to 0 for given number of updates')

    parser.add_argument(
        '--dynamic_network',
        default=False,
        type=str2bool,
        nargs='?',
        help=
        'Creates a new network graph from pruned model, works with ResNet-101 only'
    )

    parser.add_argument('--use_test_as_train',
                        default=False,
                        type=str2bool,
                        nargs='?',
                        help='use testing dataset instead of training')

    parser.add_argument('--pruning_mask_from',
                        default='',
                        type=str,
                        help='path to mask file precomputed')

    parser.add_argument(
        '--compute_flops',
        default=True,
        type=str2bool,
        nargs='?',
        help=
        'if True, will run dummy inference of batch 1 before training to get conv sizes'
    )

    # ============================END pruning added

    best_prec1 = 0
    global global_iteration
    global group_wd_optimizer
    global_iteration = 0

    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)

    args.distributed = args.world_size > 1
    if args.distributed:
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size,
                                rank=0)

    device = torch.device("cuda" if use_cuda else "cpu")

    # dataset loading section
    if args.dataset == "MNIST":
        kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
        train_loader = torch.utils.data.DataLoader(datasets.MNIST(
            '../data',
            train=True,
            download=True,
            transform=transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.1307, ), (0.3081, ))
            ])),
                                                   batch_size=args.batch_size,
                                                   shuffle=True,
                                                   **kwargs)
        test_loader = torch.utils.data.DataLoader(
            datasets.MNIST('../data',
                           train=False,
                           transform=transforms.Compose([
                               transforms.ToTensor(),
                               transforms.Normalize((0.1307, ), (0.3081, ))
                           ])),
            batch_size=args.test_batch_size,
            shuffle=True,
            **kwargs)

    elif args.dataset == "CIFAR10":
        # Data loading code
        normalize = transforms.Normalize(
            mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
            std=[x / 255.0 for x in [63.0, 62.1, 66.7]])

        if args.augment:
            transform_train = transforms.Compose([
                transforms.RandomCrop(32, padding=4),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ])
        else:
            transform_train = transforms.Compose([
                transforms.ToTensor(),
                normalize,
            ])

        transform_test = transforms.Compose([transforms.ToTensor(), normalize])

        kwargs = {'num_workers': 8, 'pin_memory': True}
        train_loader = torch.utils.data.DataLoader(datasets.CIFAR10(
            '../data', train=True, download=True, transform=transform_train),
                                                   batch_size=args.batch_size,
                                                   shuffle=True,
                                                   drop_last=True,
                                                   **kwargs)

        test_loader = torch.utils.data.DataLoader(
            datasets.CIFAR10('../data', train=False, transform=transform_test),
            batch_size=args.test_batch_size,
            shuffle=True,
            **kwargs)

    elif args.dataset == "Imagenet":
        traindir = os.path.join(args.data, 'train')
        valdir = os.path.join(args.data, 'val')

        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])

        train_dataset = datasets.ImageFolder(
            traindir,
            transforms.Compose([
                transforms.RandomResizedCrop(224),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ]))

        if args.distributed:
            train_sampler = torch.utils.data.distributed.DistributedSampler(
                train_dataset)
        else:
            train_sampler = None

        kwargs = {'num_workers': 16}

        train_loader = torch.utils.data.DataLoader(
            train_dataset,
            batch_size=args.batch_size,
            shuffle=(train_sampler is None),
            sampler=train_sampler,
            pin_memory=True,
            **kwargs)

        if args.use_test_as_train:
            train_loader = torch.utils.data.DataLoader(
                datasets.ImageFolder(
                    valdir,
                    transforms.Compose([
                        transforms.Resize(256),
                        transforms.CenterCrop(224),
                        transforms.ToTensor(),
                        normalize,
                    ])),
                batch_size=args.batch_size,
                shuffle=(train_sampler is None),
                **kwargs)

        test_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
            valdir,
            transforms.Compose([
                transforms.Resize(256),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                normalize,
            ])),
                                                  batch_size=args.batch_size,
                                                  shuffle=False,
                                                  pin_memory=True,
                                                  **kwargs)
    #wm
    elif args.dataset == "mult_5T":
        args.data_root = ['/workspace/mnt/storage/yangdecheng/yangdecheng/data/TR-NMA-07/CX_20200709',\
        '/workspace/mnt/storage/yangdecheng/yangdecheng/data/TR-NMA-07/TK_20200709',\
        '/workspace/mnt/storage/yangdecheng/yangdecheng/data/TR-NMA-07/ZR_20200709',\
        '/workspace/mnt/storage/yangdecheng/yangdecheng/data/TR-NMA-07/TX_20200616',\
        '/workspace/mnt/storage/yangdecheng/yangdecheng/data/TR-NMA-07/WM_20200709']

        args.data_root_val = ['/workspace/mnt/storage/yangdecheng/yangdecheng/data/TR-NMA-07/CX_20200709',\
        '/workspace/mnt/storage/yangdecheng/yangdecheng/data/TR-NMA-07/TK_20200709',\
        '/workspace/mnt/storage/yangdecheng/yangdecheng/data/TR-NMA-07/ZR_20200709',\
        '/workspace/mnt/storage/yangdecheng/yangdecheng/data/TR-NMA-07/TX_20200616',\
        '/workspace/mnt/storage/yangdecheng/yangdecheng/data/TR-NMA-07/WM_20200709']

        args.train_data_list = ['/workspace/mnt/storage/yangdecheng/yangdecheng/data/TR-NMA-07/CX_20200709/txt/cx_train.txt',\
        '/workspace/mnt/storage/yangdecheng/yangdecheng/data/TR-NMA-07/TK_20200709/txt/tk_train.txt',\
        '/workspace/mnt/storage/yangdecheng/yangdecheng/data/TR-NMA-07/ZR_20200709/txt/zr_train.txt',\
        '/workspace/mnt/storage/yangdecheng/yangdecheng/data/TR-NMA-07/TX_20200616/txt/tx_train.txt',\
        '/workspace/mnt/storage/yangdecheng/yangdecheng/data/TR-NMA-07/WM_20200709/txt/wm_train.txt']

        args.val_data_list = ['/workspace/mnt/storage/yangdecheng/yangdecheng/data/TR-NMA-07/CX_20200709/txt/cx_val.txt',\
        '/workspace/mnt/storage/yangdecheng/yangdecheng/data/TR-NMA-07/TK_20200709/txt/tk_val.txt',\
        '/workspace/mnt/storage/yangdecheng/yangdecheng/data/TR-NMA-07/ZR_20200709/txt/zr_val.txt',\
        '/workspace/mnt/storage/yangdecheng/yangdecheng/data/TR-NMA-07/TX_20200616/txt/tx_val.txt',\
        '/workspace/mnt/storage/yangdecheng/yangdecheng/data/TR-NMA-07/WM_20200709/txt/wm_val.txt']

        num_tasks = len(args.data_root)
        args.ngpu = 8
        args.workers = 8
        args.train_batch_size = [40, 40, 40, 40, 40]  #36
        args.val_batch_size = [100, 100, 100, 100, 100]
        args.loss_weight = [1.0, 1.0, 1.0, 1.0, 1.0]
        args.val_num_classes = [[0, 1, 2, 3, 4], [0, 1, 2], [0, 1], [0, 1],
                                [0, 1, 2, 3, 4, 5, 6]]
        args.mixup_alpha = None  #None

        for i in range(num_tasks):
            args.train_batch_size[i] *= args.ngpu
            args.val_batch_size[i] *= args.ngpu

        pixel_mean = [0.406, 0.456, 0.485]
        pixel_std = [0.225, 0.224, 0.229]

        # Custom per-task transforms:
        train_dataset = []
        for i in range(num_tasks):
            if i == 1:
                train_dataset.append(
                    FileListLabeledDataset(
                        args.train_data_list[i],
                        args.data_root[i],
                        Compose([
                            RandomResizedCrop(
                                112,
                                scale=(0.94, 1.),
                                ratio=(1. / 4., 4. / 1.)
                            ),  #scale=(0.7, 1.2), ratio=(1. / 1., 4. / 1.)
                            RandomHorizontalFlip(),
                            ColorJitter(brightness=[0.5, 1.5],
                                        contrast=[0.5, 1.5],
                                        saturation=[0.5, 1.5],
                                        hue=0),
                            ToTensor(),
                            Lighting(1, [0.2175, 0.0188, 0.0045],
                                     [[-0.5675, 0.7192, 0.4009],
                                      [-0.5808, -0.0045, -0.8140],
                                      [-0.5836, -0.6948, 0.4203]]),
                            Normalize(pixel_mean, pixel_std),
                        ])))
            else:
                train_dataset.append(
                    FileListLabeledDataset(
                        args.train_data_list[i], args.data_root[i],
                        Compose([
                            RandomResizedCrop(112,
                                              scale=(0.7, 1.2),
                                              ratio=(1. / 1., 4. / 1.)),
                            RandomHorizontalFlip(),
                            ColorJitter(brightness=[0.5, 1.5],
                                        contrast=[0.5, 1.5],
                                        saturation=[0.5, 1.5],
                                        hue=0),
                            ToTensor(),
                            Lighting(1, [0.2175, 0.0188, 0.0045],
                                     [[-0.5675, 0.7192, 0.4009],
                                      [-0.5808, -0.0045, -0.8140],
                                      [-0.5836, -0.6948, 0.4203]]),
                            Normalize(pixel_mean, pixel_std),
                        ])))
        # original version:
        # train_dataset  = [FileListLabeledDataset(
        # args.train_data_list[i], args.data_root[i],
        # Compose([
        #     RandomResizedCrop(112,scale=(0.7, 1.2), ratio=(1. / 1., 4. / 1.)),
        #     RandomHorizontalFlip(),
        #     ColorJitter(brightness=[0.5,1.5], contrast=[0.5,1.5], saturation=[0.5,1.5], hue= 0),
        #     ToTensor(),
        #     Lighting(1, [0.2175, 0.0188, 0.0045], [[-0.5675,  0.7192,  0.4009], [-0.5808, -0.0045, -0.8140], [-0.5836, -0.6948,  0.4203]]),
        #     Normalize(pixel_mean, pixel_std),]),
        # memcached=False,
        # memcached_client="") for i in range(num_tasks)]

        args.num_classes = [td.num_class for td in train_dataset]
        train_longest_size = max([
            int(np.ceil(len(td) / float(bs)))
            for td, bs in zip(train_dataset, args.train_batch_size)
        ])
        train_sampler = [
            GivenSizeSampler(td,
                             total_size=train_longest_size * bs,
                             rand_seed=0)
            for td, bs in zip(train_dataset, args.train_batch_size)
        ]
        train_loader = [
            DataLoader(train_dataset[k],
                       batch_size=args.train_batch_size[k],
                       shuffle=False,
                       num_workers=args.workers,
                       pin_memory=False,
                       sampler=train_sampler[k]) for k in range(num_tasks)
        ]

        val_dataset = [
            FileListLabeledDataset(
                args.val_data_list[i],
                args.data_root_val[i],
                Compose([
                    Resize((112, 112)),
                    # CenterCrop(112),
                    ToTensor(),
                    Normalize(pixel_mean, pixel_std),
                ]),
                memcached=False,
                memcached_client="") for i in range(num_tasks)
        ]
        val_longest_size = max([
            int(np.ceil(len(vd) / float(bs)))
            for vd, bs in zip(val_dataset, args.val_batch_size)
        ])
        test_loader = [
            DataLoader(val_dataset[k],
                       batch_size=args.val_batch_size[k],
                       shuffle=False,
                       num_workers=args.workers,
                       pin_memory=False) for k in range(num_tasks)
        ]

    if args.model == "lenet3":
        model = LeNet(dataset=args.dataset)
    elif args.model == "vgg":
        model = vgg11_bn(pretrained=True)
    elif args.model == "resnet18":
        model = PreActResNet18()
    elif (args.model == "resnet50") or (args.model == "resnet50_noskip"):
        if args.dataset == "CIFAR10":
            model = PreActResNet50(dataset=args.dataset)
        else:
            from models.resnet import resnet50
            skip_gate = True
            if "noskip" in args.model:
                skip_gate = False

            if args.pruning_method not in [22, 40]:
                skip_gate = False
            model = resnet50(skip_gate=skip_gate)
    elif args.model == "resnet34":
        if not (args.dataset == "CIFAR10"):
            from models.resnet import resnet34
            model = resnet34()
    elif args.model == "multprun_gate5_gpu_0316_1":
        from models.multitask import MultiTaskWithLoss
        model = MultiTaskWithLoss(backbone=args.model,
                                  num_classes=args.num_classes,
                                  feature_dim=2560,
                                  spatial_size=112,
                                  arc_fc=False,
                                  feat_bn=False)
        print(model)
    elif args.model == "mult_prun8_gpu":
        from models.multitask import MultiTaskWithLoss
        model = MultiTaskWithLoss(backbone=args.model,
                                  num_classes=args.num_classes,
                                  feature_dim=18,
                                  spatial_size=112,
                                  arc_fc=False,
                                  feat_bn=False)
        print(model)
    elif args.model == "multnas5_gpu":  # option under modification
        from models.multitask import MultiTaskWithLoss
        model = MultiTaskWithLoss(backbone=args.model,
                                  num_classes=args.num_classes,
                                  feature_dim=512,
                                  spatial_size=112,
                                  arc_fc=False,
                                  feat_bn=False)
        print(model)
    elif "resnet101" in args.model:
        if not (args.dataset == "CIFAR10"):
            from models.resnet import resnet101
            if args.dataset == "Imagenet":
                classes = 1000

            if "noskip" in args.model:
                model = resnet101(num_classes=classes, skip_gate=False)
            else:
                model = resnet101(num_classes=classes)

    elif args.model == "resnet20":
        if args.dataset == "CIFAR10":
            raise NotImplementedError(
                "resnet20 is not implemented in the current project")
            # from models.resnet_cifar import resnet20
            # model = resnet20()
    elif args.model == "resnet152":
        model = PreActResNet152()
    elif args.model == "densenet201_imagenet":
        from models.densenet_imagenet import DenseNet201
        model = DenseNet201(gate_types=['output_bn'], pretrained=True)
    elif args.model == "densenet121_imagenet":
        from models.densenet_imagenet import DenseNet121
        model = DenseNet121(gate_types=['output_bn'], pretrained=True)
    else:
        print(args.model, "model is not supported")

    ####end dataset preparation

    if args.dynamic_network:
        # attempts to load a pruned model and modify it by removing pruned channels
        # works for resnet101 only
        if (len(args.load_model) > 0) and (args.dynamic_network):
            if os.path.isfile(args.load_model):
                load_model_pytorch(model, args.load_model, args.model)

            else:
                print("=> no checkpoint found at '{}'".format(args.load_model))
                exit()

        dynamic_network_change_local(model)

        # save the model
        log_save_folder = "%s" % args.name
        if not os.path.exists(log_save_folder):
            os.makedirs(log_save_folder)

        if not os.path.exists("%s/models" % (log_save_folder)):
            os.makedirs("%s/models" % (log_save_folder))

        model_save_path = "%s/models/pruned.weights" % (log_save_folder)
        model_state_dict = model.state_dict()
        if args.save_models:
            save_checkpoint({'state_dict': model_state_dict},
                            False,
                            filename=model_save_path)

    print("model is defined")

    # aux function to get size of feature maps
    # First it adds hooks for each conv layer
    # Then runs inference with 1 image
    output_sizes = get_conv_sizes(args, model)

    if use_cuda and not args.mgpu:
        model = model.to(device)
    elif args.distributed:
        model.cuda()
        print(
            "\n\n WARNING: distributed pruning was not verified and might not work correctly"
        )
        model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.mgpu:
        model = torch.nn.DataParallel(model).cuda()
    else:
        model = model.to(device)

    print(
        "model is set to device: use_cuda {}, args.mgpu {}, agrs.distributed {}"
        .format(use_cuda, args.mgpu, args.distributed))

    weight_decay = args.wd
    if args.fixed_network:
        weight_decay = 0.0

    # remove updates from gate layers, because we want them to be 0 or 1 constantly
    if 1:
        parameters_for_update = []
        parameters_for_update_named = []
        for name, m in model.named_parameters():
            if "gate" not in name:
                parameters_for_update.append(m)
                parameters_for_update_named.append((name, m))
            else:
                print("skipping parameter", name, "shape:", m.shape)

    total_size_params = sum(
        [np.prod(par.shape) for par in parameters_for_update])
    print("Total number of parameters, w/o usage of bn consts: ",
          total_size_params)

    optimizer = optim.SGD(parameters_for_update,
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=weight_decay)

    if 1:
        # helping optimizer to implement group lasso (with very small weight that doesn't affect training)
        # will be used to calculate number of remaining flops and parameters in the network
        group_wd_optimizer = group_lasso_decay(
            parameters_for_update,
            group_lasso_weight=args.group_wd_coeff,
            named_parameters=parameters_for_update_named,
            output_sizes=output_sizes)

    cudnn.benchmark = True

    # define objective
    criterion = nn.CrossEntropyLoss()

    ###=======================added for pruning
    # logging part
    log_save_folder = "%s" % args.name
    if not os.path.exists(log_save_folder):
        os.makedirs(log_save_folder)

    if not os.path.exists("%s/models" % (log_save_folder)):
        os.makedirs("%s/models" % (log_save_folder))

    train_writer = None
    if args.tensorboard:
        try:
            # tensorboardX v1.6
            train_writer = SummaryWriter(log_dir="%s" % (log_save_folder))
        except:
            # tensorboardX v1.7
            train_writer = SummaryWriter(logdir="%s" % (log_save_folder))

    time_point = time.strftime("%Y_%m_%d_%H_%M_%S", time.localtime())
    textfile = "%s/log_%s.txt" % (log_save_folder, time_point)
    stdout = Logger(textfile)
    sys.stdout = stdout
    print(" ".join(sys.argv))

    # initializing parameters for pruning
    # we can prune the weights of individual layers directly, or add gates (channel-wise
    # multipliers fixed at 1, useful only for gradient computation); a hedged sketch of
    # such a gate appears after this example
    pruning_engine = None
    if args.pruning:
        pruning_settings = dict()
        if not (args.pruning_config is None):
            pruning_settings_reader = PruningConfigReader()
            pruning_settings_reader.read_config(args.pruning_config)
            pruning_settings = pruning_settings_reader.get_parameters()

        # overwrite parameters from config file with those from command line
        # needs manual entry here
        # user_specified = [key for key in vars(default_args).keys() if not (vars(default_args)[key]==vars(args)[key])]
        # argv_of_interest = ['pruning_threshold', 'pruning-momentum', 'pruning_step', 'prune_per_iteration',
        #                     'fixed_layer', 'start_pruning_after_n_iterations', 'maximum_pruning_iterations',
        #                     'starting_neuron', 'prune_neurons_max', 'pruning_method']

        has_attribute = lambda x: any([x in a for a in sys.argv])

        if has_attribute('pruning-momentum'):
            pruning_settings['pruning_momentum'] = vars(
                args)['pruning_momentum']
        if has_attribute('pruning-method'):
            pruning_settings['method'] = vars(args)['pruning_method']

        pruning_parameters_list = prepare_pruning_list(
            pruning_settings,
            model,
            model_name=args.model,
            pruning_mask_from=args.pruning_mask_from,
            name=args.name)
        print("Total pruning layers:", len(pruning_parameters_list))

        folder_to_write = "%s" % log_save_folder + "/"
        log_folder = folder_to_write

        pruning_engine = pytorch_pruning(pruning_parameters_list,
                                         pruning_settings=pruning_settings,
                                         log_folder=log_folder)

        pruning_engine.connect_tensorboard(train_writer)
        pruning_engine.dataset = args.dataset
        pruning_engine.model = args.model
        pruning_engine.pruning_mask_from = args.pruning_mask_from
        pruning_engine.load_mask()
        gates_to_params = connect_gates_with_parameters_for_flops(
            args.model, parameters_for_update_named)
        pruning_engine.gates_to_params = gates_to_params

    ###=======================end for pruning
    # loading model file
    if (len(args.load_model) > 0) and (not args.dynamic_network):
        if os.path.isfile(args.load_model):
            if args.fineturn_model:
                checkpoint = torch.load(args.load_model)
                state_dict = checkpoint['state_dict']
                model = load_module_state_dict_checkpoint(model, state_dict)
            else:
                load_model_pytorch(model, args.load_model, args.model)
        else:
            print("=> no checkpoint found at '{}'".format(args.load_model))
            exit()

    if args.tensorboard and 0:
        if args.dataset == "CIFAR10":
            dummy_input = torch.rand(1, 3, 32, 32).to(device)
        elif args.dataset == "Imagenet":
            dummy_input = torch.rand(1, 3, 224, 224).to(device)

        train_writer.add_graph(model, dummy_input)

    for epoch in range(1, args.epochs + 1):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(args, optimizer, epoch, args.zero_lr_for_epochs,
                             train_writer)

        if not args.run_test and not args.get_inference_time:
            train(args,
                  model,
                  device,
                  train_loader,
                  optimizer,
                  epoch,
                  criterion,
                  train_writer=train_writer,
                  pruning_engine=pruning_engine)

        if args.pruning:
            # skip validation error calculation and model saving
            if pruning_engine.method == 50: continue

        # evaluate on validation set
        prec1 = validate(args,
                         test_loader,
                         model,
                         device,
                         criterion,
                         epoch,
                         train_writer=train_writer)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        model_save_path = "%s/models/checkpoint.weights" % (log_save_folder)
        paths = "%s/models" % (log_save_folder)
        model_state_dict = model.state_dict()
        if args.save_models:
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model_state_dict,
                    'best_prec1': best_prec1,
                },
                is_best,
                filename=model_save_path)
            states = {
                'epoch': epoch + 1,
                'state_dict': model_state_dict,
            }
            torch.save(states, '{}/{}.pth.tar'.format(paths, epoch + 1))
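The pruning comments in this example refer to gate layers: per-channel multipliers held constant at 1, whose gradients serve as channel-importance scores. Below is a minimal, hedged sketch of that idea; ChannelGate is an illustrative name, not a class from this project.

import torch
import torch.nn as nn

class ChannelGate(nn.Module):
    """Per-channel multiplier kept at 1; its gradient estimates channel importance."""

    def __init__(self, num_channels):
        super().__init__()
        # the name contains "gate", so the parameter filter above excludes it from
        # the optimizer and it stays at 1 (or is set to 0 once the channel is pruned)
        self.gate = nn.Parameter(torch.ones(num_channels))

    def forward(self, x):
        # x: (N, C, H, W); while gate == 1 the forward pass is unchanged
        return x * self.gate.view(1, -1, 1, 1)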
Exemplo n.º 7
0
            transforms.CenterCrop(p['augmentation_kwargs']['crop_size']),  # was RandomResizedCrop in the SimCLR setup
            batsnet_transformation(),  # newly added transform
            transforms.ToTensor(),
            transforms.Normalize(**p['augmentation_kwargs']['normalize'])
        ])
    
    elif p['augmentation_strategy'] == 'ours':
        # Augmentation strategy from our paper 
        return transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.RandomCrop(p['augmentation_kwargs']['crop_size']),
            Augment(p['augmentation_kwargs']['num_strong_augs']),
            transforms.ToTensor(),
            transforms.Normalize(**p['augmentation_kwargs']['normalize']),
            Cutout(
                n_holes = p['augmentation_kwargs']['cutout_kwargs']['n_holes'],
                length = p['augmentation_kwargs']['cutout_kwargs']['length'],
                random = p['augmentation_kwargs']['cutout_kwargs']['random'])])
    
    else:
        raise ValueError('Invalid augmentation strategy {}'.format(p['augmentation_strategy']))


def get_val_transformations(p):
    return transforms.Compose([
            transforms.CenterCrop(p['transformation_kwargs']['crop_size']),
Exemplo n.º 8
0
# Hyper Parameters
BATCH_SIZE = 8
LR = 0.01  # learning rate
EPSILON = 0.9  # greedy policy
GAMMA = 0.9  # reward discount
TARGET_REPLACE_ITER = 5  # target update frequency
MEMORY_CAPACITY = 20
env = AF_env(range_size=3, step_size=2, source_folder='./data/tmp')
N_ACTIONS = env.n_actions
HIDDEN_SIZE = 256
IMG_SIZE = 224
TEST_INTER = 10

TRANSFORM = transforms.Compose([
    transforms.RandomCrop(IMG_SIZE),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])


class DQN(object):
    def __init__(self):
        self.eval_net, self.target_net = RL_net(HIDDEN_SIZE,
                                                N_ACTIONS), RL_net(
                                                    HIDDEN_SIZE, N_ACTIONS)
        self.learn_step_counter = 0  # for target updating
        self.memory = memory(MEMORY_CAPACITY, IMG_SIZE)  # initialize memory
        self.optimizer = torch.optim.Adam(self.eval_net.parameters(), lr=LR)
        self.loss_func = nn.MSELoss()
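As a hedged illustration of how the hyperparameters above are typically used (not necessarily this example's own implementation), EPSILON drives epsilon-greedy action selection and TARGET_REPLACE_ITER controls how often the target network is synchronized with the online network:

import numpy as np
import torch

def choose_action(dqn, state):
    # state: preprocessed image tensor of shape (1, 3, IMG_SIZE, IMG_SIZE)
    if np.random.uniform() < EPSILON:              # exploit with probability EPSILON
        with torch.no_grad():
            q_values = dqn.eval_net(state)
        return int(torch.argmax(q_values, dim=1).item())
    return np.random.randint(0, N_ACTIONS)         # otherwise explore at random

def maybe_sync_target(dqn):
    # copy the online network into the target network every TARGET_REPLACE_ITER steps
    if dqn.learn_step_counter % TARGET_REPLACE_ITER == 0:
        dqn.target_net.load_state_dict(dqn.eval_net.state_dict())
    dqn.learn_step_counter += 1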
Exemplo n.º 9
0
    straight_through=
    False  # straight-through for gumbel softmax. unclear if it is better one way or the other
).cuda()
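
# The straight_through flag above selects the straight-through Gumbel-softmax
# estimator. A minimal, hedged sketch of that estimator (illustrative only, not
# the library's own implementation): the forward pass uses a hard one-hot code,
# while gradients flow through the soft relaxed sample.
import torch
import torch.nn.functional as F

def gumbel_softmax_straight_through(logits, tau=1.0):
    soft = F.gumbel_softmax(logits, tau=tau, hard=False)     # differentiable relaxed sample
    index = soft.argmax(dim=-1, keepdim=True)
    hard = torch.zeros_like(soft).scatter_(-1, index, 1.0)   # discrete one-hot code
    # forward value is `hard`; the backward pass sees the gradient of `soft`
    return hard - soft.detach() + soft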

optimizerVAE = torch.optim.Adam(vae.parameters(), lr=learning_rate)
"""
text = torch.randint(0, NUM_TOKENS, (BATCH_SIZE, TEXTSEQLEN))
images = torch.randn(BATCH_SIZE, 3, IMAGE_SIZE, IMAGE_SIZE)
mask = torch.ones_like(text).bool()
"""

cap = dset.CocoCaptions(
    root='./coco/images',
    annFile='./coco/annotations/captions_val2014.json',
    transform=transforms.Compose([
        transforms.RandomCrop((IMAGE_SIZE, IMAGE_SIZE), pad_if_needed=True),
        #transforms.Grayscale(),
        #transforms.Resize((IMAGE_SIZE,IMAGE_SIZE),Image.BILINEAR),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ]))

tokenDset = token_dataset('./coco/merged-1000.txt')

VAEloss = []

for epoch in range(EPOCHS):
    for i in range(DATASET_SIZE):
        #print(i,":",tokenDset.getRand(i),img.size())
        optimizerVAE.zero_grad()
        img, _ = cap[i]
Exemplo n.º 10
0
import sys
sys.path.append('/opt/cocoapi/PythonAPI')
from pycocotools.coco import COCO

#!pip install nltk
import nltk
nltk.download('punkt')

from data_loader import get_loader
from model import EncoderCNN, DecoderRNN


# Define a transform to pre-process the training images.
transform_train = transforms.Compose([ 
    transforms.Resize(256),                          # smaller edge of image resized to 256
    transforms.RandomCrop(224),                      # get 224x224 crop from random location
    transforms.RandomHorizontalFlip(),               # horizontally flip image with probability=0.5
    transforms.ToTensor(),                           # convert the PIL Image to a tensor
    transforms.Normalize((0.485, 0.456, 0.406),      # normalize image for pre-trained model
                         (0.229, 0.224, 0.225))])

# Set the minimum word count threshold.
vocab_threshold = 5
# Specify the batch size.
batch_size = 10
# Specify the dimensionality of the image embedding.
embed_size = 256

data_loader = get_loader(transform=transform_train,
                         mode='train',
                         batch_size=batch_size,
Exemplo n.º 11
0
def main():
    if args.tensorboard: configure("runs/%s"%(args.name))

    if args.augment:
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            ])
    else:
        transform_train = transforms.Compose([
            transforms.ToTensor(),
            ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        ])

    kwargs = {'num_workers': 1, 'pin_memory': True}

    if args.in_dataset == "CIFAR-10":
        # Data loading code
        normalizer = transforms.Normalize(mean=[x/255.0 for x in [125.3, 123.0, 113.9]],
                                         std=[x/255.0 for x in [63.0, 62.1, 66.7]])
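        # note: normalization is passed to the model constructors below (and applied
        # inside the network) rather than being added to transform_train / transform_test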
        train_loader = torch.utils.data.DataLoader(
            datasets.CIFAR10('./datasets/cifar10', train=True, download=True,
                             transform=transform_train),
            batch_size=args.batch_size, shuffle=True, **kwargs)
        val_loader = torch.utils.data.DataLoader(
            datasets.CIFAR10('./datasets/cifar10', train=False, transform=transform_test),
            batch_size=args.batch_size, shuffle=True, **kwargs)

        lr_schedule=[50, 75, 90]
        num_classes = 10
    elif args.in_dataset == "CIFAR-100":
        # Data loading code
        normalizer = transforms.Normalize(mean=[x/255.0 for x in [125.3, 123.0, 113.9]],
                                         std=[x/255.0 for x in [63.0, 62.1, 66.7]])
        train_loader = torch.utils.data.DataLoader(
            datasets.CIFAR100('./datasets/cifar100', train=True, download=True,
                             transform=transform_train),
            batch_size=args.batch_size, shuffle=True, **kwargs)
        val_loader = torch.utils.data.DataLoader(
            datasets.CIFAR100('./datasets/cifar100', train=False, transform=transform_test),
            batch_size=args.batch_size, shuffle=True, **kwargs)

        lr_schedule=[50, 75, 90]
        num_classes = 100
    elif args.in_dataset == "SVHN":
        # Data loading code
        normalizer = None
        train_loader = torch.utils.data.DataLoader(
            svhn.SVHN('datasets/svhn/', split='train',
                                      transform=transforms.ToTensor(), download=False),
            batch_size=args.batch_size, shuffle=True, **kwargs)
        val_loader = torch.utils.data.DataLoader(
            svhn.SVHN('datasets/svhn/', split='test',
                                  transform=transforms.ToTensor(), download=False),
            batch_size=args.batch_size, shuffle=False, **kwargs)

        args.epochs = 20
        args.save_epoch = 2
        lr_schedule=[10, 15, 18]
        num_classes = 10

    # create model
    if args.model_arch == 'densenet':
        model = dn.DenseNet3(args.layers, num_classes, args.growth, reduction=args.reduce,
                             bottleneck=args.bottleneck, dropRate=args.droprate, normalizer=normalizer)
    elif args.model_arch == 'wideresnet':
        model = wn.WideResNet(args.depth, num_classes, widen_factor=args.width, dropRate=args.droprate, normalizer=normalizer)
    else:
        assert False, 'Not supported model arch: {}'.format(args.model_arch)

    # get the number of model parameters
    print('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))

    model = model.cuda()

    cudnn.benchmark = True

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                nesterov=True,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))


    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch, lr_schedule)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion, epoch)

        # remember best prec@1 and save checkpoint
        if (epoch + 1) % args.save_epoch == 0:
            save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
            }, epoch + 1)
Exemplo n.º 12
0
WORK_DIR = '/tmp/imagenet'
NUM_EPOCHS = 10
BATCH_SIZE = 128
LEARNING_RATE = 1e-4
NUM_CLASSES = 10

MODEL_PATH = './models'
MODEL_NAME = 'Inception.pth'

# Create model save directory
if not os.path.exists(MODEL_PATH):
    os.makedirs(MODEL_PATH)

transform = transforms.Compose([
    transforms.RandomCrop(256, padding=32),
    transforms.RandomResizedCrop(224),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])

# Load data
train_dataset = torchvision.datasets.ImageFolder(root=WORK_DIR + '/' + 'train',
                                                 transform=transform)

train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=BATCH_SIZE,
                                           shuffle=True)

Exemplo n.º 13
0
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Argument parsing, so that hyperparameters can be supplied on the command line, Linux-style
parser = argparse.ArgumentParser(description='PyTorch CIFAR100 Training')
parser.add_argument('--outf', default='./model/', help='folder to output images and model checkpoints')  # output directory for results
parser.add_argument('--net', default='./model/Resnet18.pth', help="path to net (to continue training)")  # model path used when resuming training
args = parser.parse_args()

EPOCH = 40   # number of passes over the dataset
pre_epoch = 0  # number of epochs already completed
BATCH_SIZE = 4      # batch size
LR = 0.001        # learning rate
inputs = 32
# prepare and preprocess the dataset
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),  # zero-pad each side by 4, then randomly crop back to 32x32
    transforms.RandomHorizontalFlip(),  # flip the image horizontally with probability 0.5
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), # per-channel (R,G,B) mean and std used for normalization
])

transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

trainset = torchvision.datasets.CIFAR100(root='/home/wang/data100', train=True, download=True, transform=transform_train)  # training set
trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE, shuffle=True, num_workers=2)   # mini-batches are drawn in shuffled order for training

testset = torchvision.datasets.CIFAR100(root='/home/wang/data100', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=2, shuffle=False, num_workers=2)
Exemplo n.º 14
0
def get_transforms(dataset):
    transform_train = None
    transform_test = None
    if dataset == 'cifar10':
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010)),
        ])

        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010)),
        ])

    if dataset == 'cifar100':
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.5071, 0.4867, 0.4408),
                                 (0.2675, 0.2565, 0.2761)),
        ])

        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5071, 0.4867, 0.4408),
                                 (0.2675, 0.2565, 0.2761)),
        ])

    if dataset == 'cinic-10':
        # cinic_directory = '/path/to/cinic/directory'
        cinic_mean = [0.47889522, 0.47227842, 0.43047404]
        cinic_std = [0.24205776, 0.23828046, 0.25874835]
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(cinic_mean, cinic_std)
        ])

        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(cinic_mean, cinic_std)
        ])

    if dataset == 'tiny_imagenet':
        tiny_mean = [
            0.48024578664982126, 0.44807218089384643, 0.3975477478649648
        ]
        tiny_std = [0.2769864069088257, 0.26906448510256, 0.282081906210584]
        transform_train = transforms.Compose([
            transforms.RandomCrop(64, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(tiny_mean, tiny_std)
        ])

        transform_test = transforms.Compose(
            [transforms.ToTensor(),
             transforms.Normalize(tiny_mean, tiny_std)])

    assert transform_test is not None and transform_train is not None, 'Error, no dataset %s' % dataset
    return transform_train, transform_test
Exemplo n.º 15
0
    print('successfully loaded model')
except:
    raise

seed = 42
torch.manual_seed(seed)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model = model.to(device)

model.eval()

test_transform = transforms.Compose(
    [transforms.Resize(256),  # Scale was removed from recent torchvision; Resize is the equivalent
     transforms.RandomCrop(224),
     transforms.ToTensor()])

test_df = pd.read_csv(args.test_csv, header=None)
test_imgs = test_df[0]
pbar = tqdm(total=len(test_imgs))

mean, std = 0.0, 0.0
for i, img in enumerate(test_imgs):
    im = Image.open(os.path.join(args.test_images, str(img) + '.jpg'))
    im = im.convert('RGB')
    imt = test_transform(im)
    imt = imt.unsqueeze(dim=0)
    imt = imt.to(device)
    with torch.no_grad():
        out = model(imt)
Exemplo n.º 16
0
def train_prednet(lr=0.01,
                  cnn=False,
                  model='PredNetTied',
                  circles=2,
                  gpunum=2):
    use_cuda = torch.cuda.is_available()
    best_acc = 0  # best test accuracy
    start_epoch = 0  # start from epoch 0 or last checkpoint epoch
    batchsize = 256

    root = './'
    lr = lr

    models = {
        'PredictiveTied_LN_Multi': PredictiveTied_LN_Multi,
        'PredictiveTied_GN': PredictiveTied_GN,
        'PredictiveTiedKL': PredictiveTiedKL,
        'PredictiveTiedInstanceNorm': PredictiveTiedInstanceNorm,
        'PredictiveTiedLayerNorm': PredictiveTiedLayerNorm,
        'PredictiveTied3': PredictiveTied3,
        'PredictiveTied': PredictiveTied,
        'PredictiveComb': PredictiveComb,
        'PredictiveSELU': PredictiveSELU,
        'PredictiveNew': PredictiveNew,
        'Predictive': Predictive,
        'PredictiveFull': PredictiveFull,
        'PredNetNew': PredNetNew,
        'PredNet': PredNet,
        'PredNetLocal': PredNetLocal,
        'PredictiveNew2': PredictiveNew2,
        'PredictiveNew3': PredictiveNew3,
        'PredictiveTied_LN': PredictiveTied_LN
    }
    if cnn:
        modelname = model + '_'
    else:
        modelname = model + '_' + str(circles) + 'CLS_'

    # create checkpoint and log folders if needed
    checkpointpath = root + 'checkpoint/'
    logpath = root + 'log/'
    if not os.path.isdir(checkpointpath):
        os.mkdir(checkpointpath)
    if not os.path.isdir(logpath):
        os.mkdir(logpath)

    # Data
    print('==> Preparing data..')
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])
    trainset = torchvision.datasets.CIFAR100(root='./data',
                                             train=True,
                                             download=True,
                                             transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=batchsize,
                                              shuffle=True,
                                              num_workers=2)
    testset = torchvision.datasets.CIFAR100(root='./data',
                                            train=False,
                                            download=True,
                                            transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=100,
                                             shuffle=False,
                                             num_workers=2)

    # Model
    print('==> Building model..')
    if cnn:
        net = models[model](num_classes=100)
    else:
        net = models[model](num_classes=100, cls=circles)

#    print(1)
# Define objective function
    criterion = nn.CrossEntropyLoss()
    #   print(2)
    if model == 'PredictiveComb':
        convparas = [p for p in net.FFconv.parameters()]+\
                        [p for p in net.FBconv.parameters()]+\
                        [p for p in net.linear.parameters()]+\
                        [p for p in net.comb.parameters()]
        optimizer = optim.SGD([
            {
                'params': convparas
            },
        ],
                              lr=lr,
                              momentum=0.9,
                              weight_decay=5e-4)
    elif model == 'PredictiveSELU':
        convparas = [p for p in net.FFconv.parameters()]+\
                        [p for p in net.FBconv.parameters()]+\
                        [p for p in net.linear.parameters()]+\
                        [p for p in net.selu.parameters()]
        rateparas = [p for p in net.b0.parameters()]
        optimizer = optim.SGD([
            {
                'params': convparas
            },
            {
                'params': rateparas,
                'weight_decay': 0
            },
        ],
                              lr=lr,
                              momentum=0.9,
                              weight_decay=5e-4)

    elif model == 'PredictiveTied3':
        convparas = [p for p in net.conv.parameters()]+\
                        [p for p in net.linear.parameters()]
        rateparas = [p for p in net.a0.parameters()]+\
                    [p for p in net.b0.parameters()]+\
                    [p for p in net.c0.parameters()]
        optimizer = optim.SGD([
            {
                'params': convparas
            },
            {
                'params': rateparas,
                'weight_decay': 0
            },
        ],
                              lr=lr,
                              momentum=0.9,
                              weight_decay=5e-4)

    elif model == 'PredictiveTiedKL':
        convparas = [p for p in net.conv.parameters()]+\
                        [p for p in net.linear.parameters()]
        rateparas = [p for p in net.b0.parameters()]

        optimizer = optim.SGD([
            {
                'params': convparas
            },
            {
                'params': rateparas,
                'weight_decay': 0
            },
        ],
                              lr=lr,
                              momentum=0.9,
                              weight_decay=5e-4)

    elif 'PredictiveTied' in model:
        convparas = [p for p in net.conv.parameters()]+\
                        [p for p in net.linear.parameters()]
        rateparas = [p for p in net.a0.parameters()]+\
                    [p for p in net.b0.parameters()]
        optimizer = optim.SGD([
            {
                'params': convparas
            },
            {
                'params': rateparas,
                'weight_decay': 0
            },
        ],
                              lr=lr,
                              momentum=0.9,
                              weight_decay=5e-4)

    else:
        convparas = [p for p in net.FFconv.parameters()]+\
                        [p for p in net.FBconv.parameters()]+\
                        [p for p in net.linear.parameters()]
        rateparas = [p for p in net.a0.parameters()]+\
                    [p for p in net.b0.parameters()]
        optimizer = optim.SGD([
            {
                'params': convparas
            },
            {
                'params': rateparas,
                'weight_decay': 0
            },
        ],
                              lr=lr,
                              momentum=0.9,
                              weight_decay=5e-4)

    print(3)

    # Parallel computing
    if use_cuda:
        net.cuda()
        net = torch.nn.DataParallel(net, device_ids=range(gpunum))
        #        net = torch.nn.DataParallel(net, device_ids=[3])
        cudnn.benchmark = True

    print(4)

    # Training
    def train(epoch):
        print('\nEpoch: %d' % epoch)
        net.train()
        train_loss = 0
        correct = 0
        total = 0
        training_setting = 'batchsize=%d | epoch=%d | lr=%.1e ' % (
            batchsize, epoch, optimizer.param_groups[0]['lr'])
        statfile.write('\nTraining Setting: ' + training_setting + '\n')

        for batch_idx, (inputs, targets) in enumerate(trainloader):
            if use_cuda:
                inputs, targets = inputs.cuda(), targets.cuda()
            optimizer.zero_grad()
            inputs, targets = Variable(inputs), Variable(targets)
            outputs = net(inputs)
            #print(outputs.size())
            #print(targets.size())
            if model == 'PredictiveTied_LN_Multi':
                loss = 0
                for i in range(len(outputs)):
                    loss = loss + criterion(outputs[i], targets)
                loss.backward()
                optimizer.step()
            else:
                loss = criterion(outputs, targets)
                loss.backward()
                optimizer.step()

#            train_loss += loss.data[0]
            train_loss += loss.item()
            if model == 'PredictiveTied_LN_Multi':
                _, predicted = torch.max(outputs[-1].data, 1)
            else:
                _, predicted = torch.max(outputs.data, 1)
            total += targets.size(0)
            correct += predicted.eq(targets.data).cpu().sum()

            progress_bar(
                batch_idx, len(trainloader),
                'Loss: %.3f | Acc: %.3f%% (%d/%d)' %
                (train_loss / (batch_idx + 1), 100. * (float)(correct) /
                 (float)(total), correct, total))
        statstr = 'Training: Epoch=%d | Loss: %.3f |  Acc: %.3f%% (%d/%d) | best acc: %.3f' \
                  % (epoch, train_loss/(batch_idx+1), 100.*(float)(correct)/(float)(total), correct, total, best_acc)
        statfile.write(statstr + '\n')
        return train_loss / (batch_idx + 1)

    # Testing
    def test(epoch):
        nonlocal best_acc
        net.eval()
        test_loss = 0
        correct = 0
        total = 0
        for batch_idx, (inputs, targets) in enumerate(testloader):
            if use_cuda:
                inputs, targets = inputs.cuda(), targets.cuda()
            optimizer.zero_grad()
            inputs, targets = Variable(inputs), Variable(targets)
            outputs = net(inputs)
            #            print(outputs.size(),targets.size())
            if model == 'PredictiveTied_LN_Multi':
                loss = 0
                for i in range(len(outputs)):
                    loss = criterion(outputs[i], targets)
                    test_loss += loss.item()
            else:
                loss = criterion(outputs, targets)
                test_loss += loss.item()
            if model == 'PredictiveTied_LN_Multi':
                _, predicted = torch.max(outputs[-1].data, 1)
            else:
                _, predicted = torch.max(outputs.data, 1)
            total += targets.size(0)
            correct += predicted.eq(targets.data).cpu().sum()

            progress_bar(
                batch_idx, len(testloader),
                'Loss: %.3f | Acc: %.3f%% (%d/%d)' %
                (test_loss / (batch_idx + 1), 100. * (float)(correct) /
                 (float)(total), correct, total))
        statstr = 'Testing: Epoch=%d | Loss: %.3f |  Acc: %.3f%% (%d/%d) | best_acc: %.3f' \
                  % (epoch, test_loss/(batch_idx+1), 100.*(float)(correct)/(float)(total), correct, total, best_acc)
        statfile.write(statstr + '\n')

        # Save checkpoint.
        acc = 100. * correct / total
        state = {
            'state_dict': net.state_dict(),
            'acc': acc,
            'epoch': epoch,
            #           'net': net.module if use_cuda else net,
        }
        torch.save(state, checkpointpath + modelname + '_last_ckpt.t7')
        if acc >= best_acc:
            print('Saving..')
            torch.save(state, checkpointpath + modelname + '_best_ckpt.t7')
            best_acc = acc

    # Set adaptive learning rates
    def decrease_learning_rate():
        """Decay the previous learning rate by 2"""
        for param_group in optimizer.param_groups:
            param_group['lr'] /= 2
        print('decreasing lr')

    def decrease_learning_rate_large():
        """Decay the previous learning rate by 10"""
        for param_group in optimizer.param_groups:
            param_group['lr'] /= 10
        print('decreasing lr')

    LossList = []
    for epoch in range(start_epoch, start_epoch + 200):
        statfile = open(
            logpath + 'training_stats_' + modelname + '_with' + '.txt', 'a+')
        if len(LossList) < 5:
            LossList.append(train(epoch))
        else:
            LossList[0] = LossList[1]
            LossList[1] = LossList[2]
            LossList[2] = LossList[3]
            LossList[3] = LossList[4]
            LossList[4] = train(epoch)
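            # finite-difference estimate of the loss trend over the last five epochs
            # (proportional to a 5-point least-squares slope), normalized by the most
            # recent loss; a small magnitude of -slope means the loss curve is flattening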
            slope = (-2 * (float)(LossList[0]) - (float)(LossList[1]) +
                     (float)(LossList[3]) + 2 *
                     (float)(LossList[4])) / (float)(LossList[4])
            print(-slope)
            if model == 'PridictiveRELU':
                if -slope < 0.5:
                    decrease_learning_rate()
                elif LossList[4] < 0.005:
                    break

            elif epoch % 10 == 0:
                if -slope > 0.3 and -slope < 0.5:
                    decrease_learning_rate()
                elif -slope < 0.3:
                    decrease_learning_rate_large()
                if LossList[4] < 0.01:
                    break

        test(epoch)
Exemplo n.º 17
0
    NUM_WORKERS = cfg['NUM_WORKERS']
    print("=" * 60)
    print("Overall Configurations:")
    print(cfg)
    print("=" * 60)

    writer = SummaryWriter(
        LOG_ROOT)  # writer for buffering intermedium results

    train_transform = transforms.Compose(
        [  # refer to https://pytorch.org/docs/stable/torchvision/transforms.html for more build-in online data augmentation
            transforms.Resize([
                int(128 * INPUT_SIZE[0] / 112),
                int(128 * INPUT_SIZE[0] / 112)
            ]),  # smaller side resized
            transforms.RandomCrop([INPUT_SIZE[0], INPUT_SIZE[1]]),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=RGB_MEAN, std=RGB_STD),
        ])

    dataset_train = datasets.ImageFolder(os.path.join(DATA_ROOT, 'imgs'),
                                         train_transform)

    # create a weighted random sampler to process imbalanced data
    weights = make_weights_for_balanced_classes(dataset_train.imgs,
                                                len(dataset_train.classes))
    weights = torch.DoubleTensor(weights)
    sampler = torch.utils.data.sampler.WeightedRandomSampler(
        weights, len(weights))
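The make_weights_for_balanced_classes helper used above is not shown in this example; a hedged sketch of what such a helper typically does (this repository's implementation may differ) is:

def make_weights_for_balanced_classes(images, num_classes):
    # images: list of (path, class_index) pairs, e.g. dataset_train.imgs from ImageFolder
    counts = [0] * num_classes
    for _, label in images:
        counts[label] += 1
    total = float(sum(counts))
    # each sample is weighted inversely to its class frequency, so the
    # WeightedRandomSampler draws all classes roughly equally often
    per_class = [total / counts[c] if counts[c] > 0 else 0.0 for c in range(num_classes)]
    return [per_class[label] for _, label in images]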
Exemplo n.º 18
0
                     'val_hard': 'DVQA_val_hard_qa_oracle.json'}

val_files['FigureQA'] = {'val1': 'FigureQA_val1_qa.json',
                         'val2': 'FigureQA_val2_qa.json'}

test_files = dict()
test_files['FigureQA'] = {'test1': 'FigureQA_test1_qa.json',
                          'test2': 'FigureQA_test2_qa.json'}
test_files['DVQA'] = {}

transform_combo_train = dict()
transform_combo_test = dict()

transform_combo_train['FigureQA'] = transforms.Compose([
    transforms.Resize((224, 320)),
    transforms.RandomCrop(size=(224, 320), padding=8),
    transforms.RandomRotation(2.8),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.9365, 0.9303, 0.9295],
                         std=[1, 1, 1])
])

transform_combo_train['DVQA'] = transforms.Compose([
    transforms.Resize(256),
    transforms.RandomCrop(size=(256, 256), padding=8),
    transforms.RandomRotation(2.8),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.8744, 0.8792, 0.8836],
                         std=[1, 1, 1])

])
Exemplo n.º 19
0
def make_dataset():
    # Small noise is added, following SN-GAN
    def noise(x):
        return x + torch.FloatTensor(x.size()).uniform_(0, 1.0 / 128)
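    # (a width of 1/128 is roughly one 8-bit quantization step once pixels are
    #  normalized to [-1, 1] below, so this effectively acts as dequantization noise)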

    if opt.dataset == "cifar10":
        trans = tfs.Compose([
            tfs.RandomCrop(opt.img_width, padding=4),
            tfs.RandomHorizontalFlip(),
            tfs.ToTensor(),
            tfs.Normalize(mean=[.5, .5, .5], std=[.5, .5, .5]),
            tfs.Lambda(noise)
        ])
        data = CIFAR10(root=opt.root,
                       train=True,
                       download=True,
                       transform=trans)
        loader = DataLoader(data,
                            batch_size=opt.batch_size,
                            shuffle=True,
                            num_workers=opt.workers)
    elif opt.dataset == "dog_and_cat_64":
        trans = tfs.Compose([
            tfs.RandomResizedCrop(opt.img_width,
                                  scale=(0.8, 0.9),
                                  ratio=(1.0, 1.0)),
            tfs.RandomHorizontalFlip(),
            tfs.ToTensor(),
            tfs.Normalize(mean=[.5, .5, .5], std=[.5, .5, .5]),
            tfs.Lambda(noise)
        ])
        data = ImageFolder(opt.root, transform=trans)
        loader = DataLoader(data,
                            batch_size=opt.batch_size,
                            shuffle=True,
                            num_workers=opt.workers)
    elif opt.dataset == "dog_and_cat_128":
        trans = tfs.Compose([
            tfs.RandomResizedCrop(opt.img_width,
                                  scale=(0.8, 0.9),
                                  ratio=(1.0, 1.0)),
            tfs.RandomHorizontalFlip(),
            tfs.ToTensor(),
            tfs.Normalize(mean=[.5, .5, .5], std=[.5, .5, .5]),
            tfs.Lambda(noise)
        ])
        data = ImageFolder(opt.root, transform=trans)
        loader = DataLoader(data,
                            batch_size=opt.batch_size,
                            shuffle=True,
                            num_workers=opt.workers)
    elif opt.dataset == "imagenet":
        trans = tfs.Compose([
            tfs.RandomResizedCrop(opt.img_width,
                                  scale=(0.8, 0.9),
                                  ratio=(1.0, 1.0)),
            tfs.RandomHorizontalFlip(),
            tfs.ToTensor(),
            tfs.Normalize(mean=[.5, .5, .5], std=[.5, .5, .5]),
            tfs.Lambda(noise)
        ])
        data = ImageFolder(opt.root, transform=trans)
        loader = DataLoader(data,
                            batch_size=opt.batch_size,
                            shuffle=True,
                            num_workers=opt.workers)
    else:
        raise ValueError(f"Unknown dataset: {opt.dataset}")
    return loader
Exemplo n.º 20
0
def main():
    global best_acc
    start_epoch = args.start_epoch  # start from epoch 0 or last checkpoint epoch

    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    # Data
    print('==> Preparing dataset %s' % args.dataset)
    if args.arch.startswith('efficientnet'):
        transform_train = transforms.Compose([
            transforms.Resize(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010)),
        ])
        transform_test = transforms.Compose([
            transforms.Resize(224),
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010)),
        ])
    else:
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010)),
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010)),
        ])
    if args.dataset == 'cifar10':
        dataloader = datasets.CIFAR10
        num_classes = 10
    else:
        dataloader = datasets.CIFAR100
        num_classes = 100

    trainset = dataloader(root=args.datapath,
                          train=True,
                          download=False,
                          transform=transform_train)
    trainloader = data.DataLoader(trainset,
                                  batch_size=args.train_batch,
                                  shuffle=True,
                                  num_workers=args.workers)

    testset = dataloader(root=args.datapath,
                         train=False,
                         download=False,
                         transform=transform_test)
    testloader = data.DataLoader(testset,
                                 batch_size=args.test_batch,
                                 shuffle=False,
                                 num_workers=args.workers)

    # Model
    print("==> creating model '{}'".format(args.arch))
    if args.arch.startswith('resnext'):
        model = models.__dict__[args.arch](
            cardinality=args.cardinality,
            num_classes=num_classes,
            depth=args.depth,
            widen_factor=args.widen_factor,
            dropRate=args.drop,
        )
    elif args.arch.startswith('densenet'):
        model = models.__dict__[args.arch](
            num_classes=num_classes,
            depth=args.depth,
            growthRate=args.growthRate,
            compressionRate=args.compressionRate,
            dropRate=args.drop,
        )
    elif args.arch.startswith('wrn'):
        model = models.__dict__[args.arch](
            num_classes=num_classes,
            depth=args.depth,
            widen_factor=args.widen_factor,
            dropRate=args.drop,
        )
    elif args.arch.endswith('resnet'):
        model = models.__dict__[args.arch](
            num_classes=num_classes,
            depth=args.depth,
        )
    elif args.arch.startswith('efficientnet'):
        model = EfficientNet.from_pretrained(args.arch,
                                             num_classes=num_classes)
    else:
        model = models.__dict__[args.arch](num_classes=num_classes)
    modeltype = type(model).__name__
    model, classifier = decomposeModel(model)
    classifier = classifier.cuda()

    model = torch.nn.DataParallel(model).cuda()
    cudnn.benchmark = True
    print('    Total params: %.2fM' %
          (sum(p.numel() for p in model.parameters()) / 1000000.0))

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    reg_net = None
    optimizer_reg = optim.SGD(classifier.parameters(),
                              lr=args.lr,
                              momentum=args.momentum,
                              weight_decay=args.weight_decay)
    if args.mtype == 'baseline':
        args.dcl_refsize = 0
        reg_net = regressor.Net(classifier,
                                optimizer_reg,
                                ref_size=args.dcl_refsize,
                                backendtype=modeltype)
    elif args.mtype == 'dcl':
        reg_net = regressor.Net(classifier,
                                optimizer_reg,
                                ref_size=args.dcl_refsize,
                                backendtype=modeltype,
                                dcl_offset=args.dcl_offset,
                                dcl_window=args.dcl_window,
                                QP_margin=args.dcl_QP_margin)
    elif args.mtype == 'gem':
        reg_net = gem.Net(classifier,
                          optimizer_reg,
                          n_memories=args.gem_memsize,
                          backendtype=modeltype)

    print(args)

    # Resume
    title = 'cifar-10-' + args.arch
    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        assert os.path.isfile(
            args.resume), 'Error: no checkpoint directory found!'
        args.checkpoint = os.path.dirname(args.resume)
        checkpoint = torch.load(args.resume)
        best_acc = checkpoint['best_acc']
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'),
                        title=title,
                        resume=True)
    else:
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names([
            'Learning Rate', 'Train Loss', 'Valid Loss', 'Train Err.',
            'Valid Err.', 'Cos Bef.', 'Cos Aft.', 'Mag'
        ])

    if args.evaluate:
        print('\nEvaluation only')
        test_loss, test_acc = test(testloader, model, criterion, start_epoch,
                                   use_cuda)
        print(' Test Loss:  %.8f, Test Acc:  %.2f' % (test_loss, test_acc))
        return

    # Train and val
    for epoch in range(start_epoch, args.epochs):
        adjust_learning_rate_two(optimizer, optimizer_reg, epoch)

        print('Epoch: [%d | %d] LR: %f' %
              (epoch + 1, args.epochs, state['lr']))

        train_loss, train_top1_acc, train_top5_acc, cong_bf, cong_af, mag = train(
            trainloader,
            model,
            criterion,
            optimizer,
            epoch,
            use_cuda,
            reg_net=reg_net)
        test_loss, test_top1_acc, test_top5_acc = test(testloader,
                                                       model,
                                                       criterion,
                                                       epoch,
                                                       use_cuda,
                                                       reg_net=reg_net)
        print(
            'train_loss: {:.4f}, train_top1_err: {:.2f}, train_top5_err: {:.2f}, test_loss: {:.4f}, test_top1_err: {:.2f}, test_top5_err: {:.2f}, cong_bf: {:.4f}, cong_af: {:.4f}, mag: {:.4f}'
            .format(train_loss, 100 - train_top1_acc, 100 - train_top5_acc,
                    test_loss, 100 - test_top1_acc, 100 - test_top5_acc,
                    cong_bf, cong_af, mag))

        # append logger file
        logger.append([
            state['lr'], train_loss, test_loss, 100 - train_top1_acc,
            100 - test_top1_acc, cong_bf, cong_af, mag
        ])

        # save model
        is_best = test_top1_acc > best_acc
        best_acc = max(test_top1_acc, best_acc)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'classifier_state_dict': reg_net.state_dict(),
                'acc': test_top1_acc,
                'best_acc': best_acc,
                'optimizer': optimizer.state_dict(),
                'cong_bf': cong_bf,
                'cong_af': cong_af,
                'mag': mag,
            },
            is_best,
            checkpoint=args.checkpoint)

    logger.close()

    print('Best err:')
    print(100 - best_acc)
def main():
    global args, best_prec1, min_loss
    args = parser.parse_args()

    rank, world_size = dist_init(args.port)
    assert (args.batch_size % world_size == 0)
    assert (args.workers % world_size == 0)
    args.batch_size = args.batch_size // world_size
    args.workers = args.workers // world_size

    # create model
    print("=> creating model '{}'".format(args.arch))
    print("train source is:{}".format(args.train_source))
    print("save_path is: {}", format(args.save_path))
    if args.arch.startswith('inception_v3'):
        print('inception_v3 without aux_logits!')
        image_size = 341
        input_size = 299
        model = models.__dict__[args.arch](pretrained=True)
    else:
        image_size = 256
        input_size = 224
        model = models.__dict__[args.arch](pretrained=True)
    model = FineTuneModel(model, args.arch, 128)
    # print("model is: {}".format(model))
    model.cuda()
    model = DistModule(model)

    # optionally resume from a checkpoint
    if args.load_path:
        if args.resume_opt:
            best_prec1, start_epoch = load_state(args.load_path, model, optimizer=optimizer)
        else:
            # print('load weights from', args.load_path)
            load_state(args.load_path, model)

    cudnn.benchmark = True

    # Data loading code
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_dataset = McDataset(
        args.train_root,
        args.train_source,
        transforms.Compose([
            transforms.Resize(image_size),
            transforms.RandomCrop(input_size),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            ColorAugmentation(),
            normalize,
        ]))
    val_dataset = McDataset(
        args.val_root,
        args.val_source,
        transforms.Compose([
            transforms.Resize(image_size),
            transforms.CenterCrop(input_size),
            # transforms.Resize(input_size),
            # transforms.CenterCrop(input_size),
            transforms.ToTensor(),
            normalize,
        ]))

    train_sampler = DistributedSampler(train_dataset)
    val_sampler = DistributedSampler(val_dataset)

    train_loader = DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=False, sampler=train_sampler)

    val_loader = DataLoader(
        val_dataset, batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=False, sampler=val_sampler)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss()

    lr = 0
    patience = 0
    for epoch in range(args.start_epoch, args.epochs):
        # adjust_learning_rate(optimizer, epoch)
        train_sampler.set_epoch(epoch)

        if epoch == 3:
            lr = 0.00003
        # if epoch == 0:
        if patience == 2:
            patience = 0
            checkpoint = load_checkpoint(args.save_path+'_best.pth.tar')
            model.load_state_dict(checkpoint['state_dict'])
            print("Loading checkpoint_best.............")
            # lr = adjust_learning_rate(optimizer, lr)
            lr = lr / 10.0

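        # Two-phase fine-tuning: during the first three epochs only the new
        # classifier head is trained (all other parameters frozen); afterwards
        # the whole network is unfrozen and fine-tuned with weight decay.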
        if epoch < 3:
            lr = 0.001
            for name, param in model.named_parameters():
                # print("name is: {}".format(name))
                if (name != 'module.classifier.0.weight' and name != 'module.classifier.0.bias'):
                    param.requires_grad = False
            optimizer = torch.optim.Adam(
                filter(lambda p: p.requires_grad, model.parameters()), lr=lr)
        else:
            for param in model.parameters():
                param.requires_grad = True
            optimizer = torch.optim.Adam(
                model.parameters(), lr=lr, weight_decay=0.0001)
        print("lr is: {}".format(lr))
        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        val_prec1, val_losses = validate(val_loader, model, criterion)
        print("val_losses is: {}".format(val_losses))
        # remember best prec@1 and save checkpoint
        if rank == 0:
            if val_losses < min_loss:
                is_best = True
                save_checkpoint({
                    'epoch': epoch + 1,
                    'arch': args.arch,
                    'state_dict': model.state_dict(),
                    'best_prec1': best_prec1,
                    'optimizer': optimizer.state_dict(),
                }, is_best, args.save_path)
                # torch.save(model.state_dict(), 'best_val_weight.pth')
                print('val score improved from {:.5f} to {:.5f}. Saved!'.format(min_loss, val_losses))

                min_loss = val_losses
                patience = 0
            else:
                patience += 1
        if 1 <= rank <= 7:
            if val_losses < min_loss:
                min_loss = val_losses
                patience = 0
            else:
                patience += 1
        print("patience is: {}".format(patience))
    print("min_loss is: {}".format(min_loss))
Exemplo n.º 22
0
                        help='Loss of the model')
    args = parser.parse_args()

    if torch.cuda.is_available():
        device = 'cuda'
    else:
        device = 'cpu'

    preprocess = transforms.Compose([
        transforms.ColorJitter(brightness=0.5,
                               contrast=0.5,
                               saturation=0.5,
                               hue=0.5),
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.RandomGrayscale(p=0.1),
        transforms.RandomCrop(size=28, pad_if_needed=True),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])

    val_process = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])

    if args.mode in ('pretrain', 'cont_pre'):
        train_folder = 'train_data/medium'
        val_folder = 'validation_classification/medium'
    elif args.mode == 'finetune':
def main_worker(gpu, ngpus_per_node, args):
    global best_acc1
    args.gpu = gpu

    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))

    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size,
                                rank=args.rank)
    else:
        print("We should be distributed")
        return
    # create model

    if args.arch == "alexnet":
        model = AlexNet()
    elif args.arch == "vgg16":
        model = VGG16()
    elif args.arch == "vgg19":
        model = VGG19()
    elif args.arch == "vgg13":
        model = VGG13()
    elif args.arch == "lenet":
        model = LeNet()
    elif args.arch == "resnet50":
        model = resnet50()
    elif args.arch == "googlenet":
        model = GoogLeNet()
    else:
        raise ValueError("unsupported arch: {}".format(args.arch))

    torch.cuda.set_device(args.gpu)
    model.cuda(args.gpu)
    # When using a single GPU per process and per
    # DistributedDataParallel, we need to divide the batch size
    # ourselves based on the total number of GPUs we have
    args.batch_size = int(args.batch_size / ngpus_per_node)
    args.workers = int(args.workers / ngpus_per_node)
    # model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
    model = distributed_layer_sparse_whole_randperm_threshold.DistributedDataParallel(
        model,
        device_id=args.gpu,
        sparse_ratio=args.sparse_ratio,
        sparse_threshold=args.sparse_threshold)
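    # Note: this custom DistributedDataParallel replaces the stock PyTorch
    # wrapper commented out above; judging from the module name it appears to
    # sparsify gradient communication (a random subset of entries selected via
    # sparse_ratio and sparse_threshold), though its implementation is not
    # shown in this snippet.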

    args.file_name = args.method + '_' + args.arch + "_batch-size_" + str(args.batch_size) + "_sparse-ratio_" + str(args.sparse_ratio) + \
        "_sparse-threshold_" + str(args.sparse_threshold) + "_memory-decay_" + str(args.memory_decay)

    criterion = nn.CrossEntropyLoss().cuda(args.gpu)

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if args.gpu is not None:
                # best_acc1 may be from a checkpoint from a different GPU
                best_acc1 = best_acc1.to(args.gpu)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    #traindir = os.path.join(args.data, 'train')
    #valdir = os.path.join(args.data, 'val')
    #normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
    #                                 std=[0.229, 0.224, 0.225])

    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])
    train_dataset = torchvision.datasets.CIFAR10(root=args.data,
                                                 train=True,
                                                 download=True,
                                                 transform=transform_train)

    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_dataset)
    else:
        train_sampler = None

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=(train_sampler is None),
                                               num_workers=args.workers,
                                               sampler=train_sampler)

    testset = torchvision.datasets.CIFAR10(root=args.data,
                                           train=False,
                                           download=True,
                                           transform=transform_test)
    val_loader = torch.utils.data.DataLoader(testset,
                                             batch_size=100,
                                             shuffle=False,
                                             num_workers=2)

    if args.evaluate:
        validate(val_loader, model, criterion, args)
        return
    epoch_start = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args)

        # train for one epoch
        losses = train(train_loader, model, criterion, optimizer, epoch, args)

        # evaluate on validation set
        acc1 = validate(val_loader, model, criterion, args)

        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)

        if not args.multiprocessing_distributed or (
                args.multiprocessing_distributed
                and args.rank % ngpus_per_node == 0):
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'arch': args.arch,
                    'state_dict': model.state_dict(),
                    'best_acc1': best_acc1,
                    'optimizer': optimizer.state_dict(),
                }, is_best)
            with open(args.data + "/result_" + args.file_name + ".txt", "a+") as f:
                f.write(
                    str(epoch + 1) + '\t' + str(time.time() - epoch_start) + '\t' +
                    str(losses.avg) + '\t' + str(acc1.item()) + '\n')
Exemplo n.º 24
0
def load_data(args):
    args.batch_size = 1
    train_loader = []
    test_loader = []
    if args.dataset_mode == "CIFAR10":
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])

        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])
        train_loader = torch.utils.data.DataLoader(
            datasets.CIFAR10('data', train=True, download=True, transform=transform_train),
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=0
        )

        test_loader = torch.utils.data.DataLoader(
            datasets.CIFAR10('data', train=False, transform=transform_test),
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=1
        )
    elif args.dataset_mode == "CIFAR100":
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.5071, 0.4865, 0.4409), (0.2673, 0.2564, 0.2762)),
        ])

        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5071, 0.4865, 0.4409), (0.2673, 0.2564, 0.2762)),
        ])
        train_loader = torch.utils.data.DataLoader(
            datasets.CIFAR100('data', train=True, download=True, transform=transform_train),
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=2
        )

        test_loader = torch.utils.data.DataLoader(
            datasets.CIFAR100('data', train=False, transform=transform_test),
            batch_size=args.batch_size,
            shuffle=False,
            num_workers=2
        )
    elif args.dataset_mode == "MNIST":
        
        transform_train = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,)),
        ])

        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,)),
        ])
        train_loader = torch.utils.data.DataLoader(
            datasets.MNIST('data/newMNIST', train=True, download=True, transform=transform_train),
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=2
        )

        test_loader = torch.utils.data.DataLoader(
            datasets.MNIST('data/newMNIST', train=False, transform=transform_test),
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=2
        )
        
    return train_loader, test_loader
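
# Example usage (a sketch, not part of the original snippet). Note that
# load_data forces args.batch_size to 1, so every batch holds a single sample:
#
#     train_loader, test_loader = load_data(args)
#     images, labels = next(iter(train_loader))  # images.shape == (1, C, H, W)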
Exemplo n.º 25
0
def weight_pruning_by_name(model, layer_info):
    parent, child = layer_info
    exec('model.{}.{}.gate.z = 1'.format(str(parent), str(child)))

def cifar10_loader():
    batch_size = 128
    print("cifar10 Data Loading ...")
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.4914, 0.4824, 0.4467),
                             std=(0.2471, 0.2436, 0.2616))
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.4914, 0.4824, 0.4467),
                             std=(0.2471, 0.2436, 0.2616))
    ])

    train_dataset = datasets.CIFAR10(root='../hhjung/cifar10/',
                                     train=True,
                                     transform=transform_train,
                                     download=True)

    test_dataset = datasets.CIFAR10(root='../hhjung/cifar10/',
                                    train=False,
                                    transform=transform_test)

    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=4)

    test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                              batch_size=batch_size,
                                              shuffle=False,
                                              num_workers=4)
    return train_loader, test_loader


def cifar100_loader():
    batch_size = 128
    print("cifar100 Data Loading ...")
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5071, 0.4867, 0.4408),
                             std=(0.2675, 0.2565, 0.2761))
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5071, 0.4867, 0.4408),
                             std=(0.2675, 0.2565, 0.2761))
    ])

    train_dataset = datasets.CIFAR100(root='../hhjung/cifar100/',
                                     train=True,
                                     transform=transform_train,
                                     download=True)

    test_dataset = datasets.CIFAR100(root='../hhjung/cifar100/',
                                    train=False,
                                    transform=transform_test)

    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=4)

    test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                              batch_size=batch_size,
                                              shuffle=False,
                                              num_workers=4)

    return train_loader, test_loader
Exemplo n.º 26
0
            for k, ema_v in self.ema.state_dict().items():
                if needs_module:
                    k = 'module.' + k
                model_v = msd[k].detach()
                model_v = model_v.to(device)
                ema_v.copy_(ema_v * self.decay + (1. - self.decay) * model_v)
                # weight decay
                if 'bn' not in k:
                    msd[k] = msd[k] * (1. - self.wd)


# Data
print('==> Preparing data..')
if args.dataset == 'CIFAR10' or args.dataset == 'CIFAR100':
    transform_ori = transforms.Compose([
        transforms.RandomCrop(32, padding=4, padding_mode='reflect'),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2471, 0.2435, 0.2616)),
    ])
    transform_aug = transforms.Compose([
        transforms.ToTensor(),
        Cutout(n_holes=args.n_holes, length=args.cutout_size),
        transforms.ToPILImage(),
        transforms.RandomHorizontalFlip(),
        transforms.RandomCrop(32, padding=4, padding_mode='reflect'),
        CIFAR10Policy(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2471, 0.2435, 0.2616)),
Exemplo n.º 27
0
    def preprocess(self, data):
        ## Scales, crops, and normalizes a DICOM image for a PyTorch model; returns a NumPy array
        img_trsfm = transforms.Compose([
            transforms.ToPILImage(),
            # convert to PIL image. PIL does not support multi-channel floating point data, workaround is to transform first then expand to 3 channel
            transforms.Resize([516, 516],
                              interpolation=2),  # resize and crop to [512x512]
            transforms.RandomCrop(512),
            transforms.ToTensor(),
            transforms.Lambda(lambda x: x.repeat(3, 1, 1)),
            # classifier is fine-tuned on pretrained imagenet using mimic-jpeg images [HxWx3]
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
        for k, v in data[0].items():
            logger.info('received data in post request: {0}: {1}'.format(
                k, v.decode()))
        ## download dicom from s3
        bucket = data[0].get("dicombucket").decode()
        key = data[0].get("dicomkey").decode()
        if self.access_key is not None and self.secret_key is not None and self.aws_region is not None:
            s3_client = boto3.client('s3',
                                     region_name=self.aws_region,
                                     aws_access_key_id=self.access_key,
                                     aws_secret_access_key=self.secret_key)
            ddb_client = boto3.client('dynamodb',
                                      region_name=self.aws_region,
                                      aws_access_key_id=self.access_key,
                                      aws_secret_access_key=self.secret_key)
        else:
            my_session = boto3.session.Session()
            s3_client = my_session.client('s3')
            ddb_client = my_session.client('dynamodb')

        s3_client.download_file(bucket, key, key)
        dicom = pydicom.dcmread(key)
        os.remove(key)
        if dicom.get_item('ViewPosition') and len(
                dicom.data_element('ViewPosition').value) > 0:
            dicom_array = dicom.pixel_array.astype(float)
            ## convert dicom to png thumbnail
            dicom_array_scaled = (np.maximum(dicom_array, 0) /
                                  dicom_array.max()) * 255.0
            dicom_array_scaled = np.uint8(dicom_array_scaled)
            out_png = key.replace('.dcm', '.full.png')
            with open(out_png, 'wb') as out_png_file:
                w = png.Writer(dicom_array.shape[1],
                               dicom_array.shape[0],
                               greyscale=True)
                w.write(out_png_file, dicom_array_scaled)
            pilimage = Image.open(out_png)
            os.remove(out_png)
            newsize = (int(pilimage.size[0] / 10), int(pilimage.size[1] / 10)
                       )  ## thumbnail is 1/10 in size
            pilimage = pilimage.resize(newsize)
            thumbnail_png = key.replace('.dcm', '.png')
            pilimage.save(thumbnail_png)
            ## upload png thumbnail to s3
            pngbucket = data[0].get("pngbucket").decode()
            prefix = data[0].get("prefix").decode()
            s3_thumbnail_png = prefix + '/' + thumbnail_png
            try:
                s3_client.head_object(Bucket=pngbucket, Key=s3_thumbnail_png)
            except ClientError:  # thumbnail not in S3 yet, so upload it
                try:
                    s3_client.upload_file(thumbnail_png, pngbucket,
                                          s3_thumbnail_png)
                    os.remove(thumbnail_png)
                except ClientError as e:
                    print('upload thumbnail png error: {}'.format(e))

            ## dicom metadata to be saved in dynamodb
            metadata_dict = {"ImageId": {"S": key.replace('.dcm', '')}}
            metadata_dict["ViewPosition"] = {
                "S": dicom.data_element('ViewPosition').value
            }
            metadata_dict["Bucket"] = {"S": pngbucket}
            metadata_dict["Key"] = {"S": thumbnail_png}
            metadata_dict["ReportId"] = {
                "S": 's' + dicom.data_element('StudyID').value
            }
            metadata_dict["Modality"] = {
                "S": dicom.data_element('Modality').value
            }
            metadata_dict["BodyPartExamined"] = {
                "S": dicom.data_element('BodyPartExamined').value
            }

            ddb_table = data[0].get("ddb_table").decode()
            response = ddb_client.put_item(TableName=ddb_table,
                                           Item=metadata_dict)
            logger.info('Dynamodb create item status: {}'.format(
                response['ResponseMetadata']['HTTPStatusCode']))

            ## transform dicom arrary for pytorch model featurization
            X = np.asarray(dicom_array, np.float32) / (2**dicom.BitsStored - 1)
            image = img_trsfm(X).unsqueeze(0)
            es_endpoint = data[0].get("es_endpoint").decode()
            return {
                'id': key.split('.')[0],
                'image': image,
                'ViewPosition': dicom.data_element('ViewPosition').value,
                'ES': es_endpoint
            }
        else:
            return None
transform_ = transforms.Compose([
    #transforms.CenterCrop(size = (200, 200)),
    transforms.Resize(size=(192, 192), interpolation=1),
])

transform_flip = transforms.Compose([
    #transforms.CenterCrop(size = (200, 200)),
    transforms.Resize(size=(192, 192), interpolation=1),
    transforms.RandomHorizontalFlip(p=1),
])

transform_crop = transforms.Compose([
    #transforms.CenterCrop(size = (200, 200)),
    transforms.Resize(size=(192, 192), interpolation=1),
    transforms.RandomCrop(192, padding=7),
])

transform_flip_crop = transforms.Compose([
    #transforms.CenterCrop(size = (200, 200)),
    transforms.Resize(size=(192, 192), interpolation=1),
    transforms.RandomCrop(192, padding=7),
    transforms.RandomHorizontalFlip(p=1),
])


def pil2np(img):
    arr = np.array(img)
    arr = arr.transpose(2, 0, 1)
    return arr.astype(np.float32) / 255
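

# The four transforms above look like test-time-augmentation variants
# (identity, horizontal flip, random crop, flip + crop). A minimal sketch of
# how they might be combined is shown below; this is an assumption for
# illustration, not code from the original snippet.
def tta_predict_sketch(model, pil_img):
    import torch  # assumed available, as in the other snippets
    variants = [transform_(pil_img), transform_flip(pil_img),
                transform_crop(pil_img), transform_flip_crop(pil_img)]
    # convert every augmented view to a CHW float tensor and batch them
    batch = torch.stack([torch.from_numpy(pil2np(v)) for v in variants])
    with torch.no_grad():
        # average the model outputs over all augmented views
        return model(batch).mean(dim=0)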
Exemplo n.º 29
0
def make_dataloader_Pseudo(cfg):
    if cfg.DATASETS.HARD_AUG:
        train_transforms = T.Compose([
            T.Resize(cfg.INPUT.SIZE_TRAIN),
            T.RandomHorizontalFlip(p=cfg.INPUT.PROB),
            T.Pad(cfg.INPUT.PADDING),
            T.RandomCrop(cfg.INPUT.SIZE_TRAIN),
            T.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.2),
            T.transforms.RandomAffine(0, translate=None, scale=[0.9, 1.1], shear=None, resample=False, fillcolor=128),
            T.ToTensor(),
            T.Normalize(mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD),
            RandomErasing(probability=cfg.INPUT.RE_PROB, mean=cfg.INPUT.PIXEL_MEAN)
        ])
    else:
        train_transforms = T.Compose([
            T.Resize(cfg.INPUT.SIZE_TRAIN),
            T.RandomHorizontalFlip(p=cfg.INPUT.PROB),
            T.Pad(cfg.INPUT.PADDING),
            T.RandomCrop(cfg.INPUT.SIZE_TRAIN),
            T.ToTensor(),
            T.Normalize(mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD),
            RandomErasing(probability=cfg.INPUT.RE_PROB, mean=cfg.INPUT.PIXEL_MEAN)
        ])
    val_transforms = T.Compose([
        T.Resize(cfg.INPUT.SIZE_TEST),
        T.ToTensor(),
        T.Normalize(mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD)
    ])

    num_workers = cfg.DATALOADER.NUM_WORKERS

    dataset = __factory[cfg.DATASETS.NAMES](root=cfg.DATASETS.ROOT_DIR)
    num_classes = dataset.num_train_pids

    train_set = ImageDataset(dataset.train, train_transforms)

    if 'triplet' in cfg.DATALOADER.SAMPLER:
        train_loader = DataLoader(
            train_set, batch_size=cfg.SOLVER.IMS_PER_BATCH,
            sampler=RandomIdentitySampler(dataset.train, cfg.SOLVER.IMS_PER_BATCH, cfg.DATALOADER.NUM_INSTANCE),
            num_workers=num_workers, collate_fn=train_collate_fn, pin_memory=True
        )
    elif cfg.DATALOADER.SAMPLER == 'softmax':
        print('using softmax sampler')
        train_loader = DataLoader(
            train_set, batch_size=cfg.SOLVER.IMS_PER_BATCH, shuffle=True, num_workers=num_workers,
            collate_fn=train_collate_fn
        )
    else:
        print('unsupported sampler! expected softmax or triplet but got {}'.format(cfg.DATALOADER.SAMPLER))

    '''
    val_set_green = ImageDataset(dataset.query_green + dataset.gallery_green, val_transforms)
    val_loader_green = DataLoader(
        val_set_green, batch_size=cfg.TEST.IMS_PER_BATCH, shuffle=False, num_workers=num_workers,
        collate_fn=val_collate_fn
    )
    '''

    #return train_loader, val_loader_green, len(dataset.query_green), num_classes, dataset, train_set, train_transforms
    return train_loader, num_classes, dataset, train_set, train_transforms
Exemplo n.º 30
0
import numpy as np
from PIL import Image
import torch
from torch.utils.data import Dataset
import torchvision.transforms as transforms
from skimage.color import rgb2lab, rgb2gray

data_augmentation = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    transforms.RandomCrop(256),
    transforms.Resize(224)
])


class ColorDataset(Dataset):
    def __init__(self, phase):
        assert (phase in ['train', 'val', 'test'])
        self.phase = phase
        self.root_dir = '/home/wsf/Pictures/Wallpapers'
        self.samples = None
        with open('{}/labels/{}.txt'.format(self.root_dir, phase), 'r') as f:
            self.samples = f.readlines()[:3]

        print('[+] dataset `{}` loaded {} images'.format(self.phase, len(self.samples)))

    def __getitem__(self, idx):
        if self.phase == 'train' or self.phase == 'val':
            image_path, label = self.samples[idx].strip().split()
            label = np.array(int(label))
            image = Image.open('{}/images/{}'.format(self.root_dir, image_path)).convert('RGB')
            image = data_augmentation(image)