Example #1
def main(argv):
    # constant: size of each spatio-temporal cube (height, width, depth)
    cube_size = [10, 10, 3]

    #
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--dataset', help='dataset', default='')
    parser.add_argument('-g', '--height', help='frame height', default=120)
    parser.add_argument('-w', '--width', help='frame width', default=160)
    parser.add_argument('-t', '--task', help='task to perform', default=-1)
    parser.add_argument('-c',
                        '--clip',
                        help='clip index (zero-based)',
                        default=-1)
    parser.add_argument('-s', '--set', help='test set', default=1)
    parser.add_argument('-e', '--epoch', help='epoch destination', default=0)
    parser.add_argument('-m', '--model', help='start model idx', default=0)
    args = vars(parser.parse_args())
    #
    dataset = dataset_dict[args['dataset']]
    dataset['path_train'] = '%s/Train' % dataset['path']
    dataset['path_test'] = '%s/Test' % dataset['path']
    #
    task = int(args['task'])
    h = int(args['height'])
    w = int(args['width'])
    clip_idx = int(args['clip'])
    test_set = bool(int(args['set']))
    n_epoch_destination = int(args['epoch'])
    model_idx_to_start = int(args['model'])
    model_test = model_idx_to_start
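    # grid of cubes tiling each resized frame: n_row x n_col patches of cube_size[:2]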
    n_row, n_col = np.array([h, w]) // cube_size[:2]
    print('Selected task = %d' % task)
    print('started time: %s' % datetime.datetime.now())
    #
    dataset['cube_dir'] = './training_saver/%s/cube_%d_%d_%d_%d_%d' % (
        dataset['name'], h, w, cube_size[0], cube_size[1], cube_size[2])
    if not os.path.exists(dataset['cube_dir']):
        pathlib.Path(dataset['cube_dir']).mkdir(parents=True, exist_ok=True)
    '''========================================='''
    ''' Task 1: Resize frame resolution dataset '''
    '''========================================='''
    if task == 1:
        load_images_and_resize(dataset,
                               new_size=[h, w],
                               train=True,
                               force_recalc=False,
                               return_entire_data=False)
        load_images_and_resize(dataset,
                               new_size=[h, w],
                               train=False,
                               force_recalc=False,
                               return_entire_data=False)
    '''========================================='''
    ''' Task 2: Split cubes in dataset and save '''
    '''========================================='''
    if task == 2:
        split_cubes(dataset,
                    clip_idx,
                    cube_size,
                    training_set=not test_set,
                    force_recalc=False,
                    dist_thresh=None)
    '''=========================================='''
    ''' Task 3: Train model and check validation '''
    '''=========================================='''
    if task == 3:
        training_cubes, training_mapping = load_all_cubes_in_set(
            dataset, h, w, cube_size, training_set=True)
        train_model_naive_with_batch_norm(dataset,
                                          training_cubes,
                                          training_mapping[:, 2],
                                          training_mapping[:, 3],
                                          n_row,
                                          n_col,
                                          n_epoch_destination,
                                          start_model_idx=model_idx_to_start,
                                          batch_size=256 * 12)
    '''====================================='''
    ''' Task 4: Test model and save outputs '''
    '''====================================='''
    if task == 4:
        sequence_n_frame = count_sequence_n_frame(dataset, test=test_set)
        test_cubes, test_mapping = split_cubes(dataset,
                                               clip_idx,
                                               cube_size,
                                               training_set=not test_set)
        test_model_naive_with_batch_norm(dataset,
                                         test_cubes,
                                         test_mapping[:, 2],
                                         test_mapping[:, 3],
                                         n_row,
                                         n_col,
                                         sequence_n_frame,
                                         clip_idx,
                                         model_idx=model_test,
                                         batch_size=256 * 12,
                                         using_test_data=test_set)
    '''====================================='''
    ''' Task 5: Calculate scores of dataset '''
    '''====================================='''
    if task == 5:
        calc_score_full_clips(dataset,
                              np.array([h, w]),
                              cube_size,
                              model_test,
                              train=False)
        calc_score_full_clips(dataset,
                              np.array([h, w]),
                              cube_size,
                              model_test,
                              train=True)
    '''========================='''
    ''' Task -5: Plot error map '''
    '''========================='''
    if task == -5:
        frame_idx = np.arange(16)
        print('selected set:', 'Test' if test_set else 'Train')
        print('selected frames:', frame_idx)
        plot_error_map(dataset,
                       np.array([h, w]),
                       cube_size,
                       clip_idx,
                       frame_idx,
                       model_test,
                       score_type_idx=3,
                       using_test_data=test_set)
    '''===================='''
    ''' Task 6: Evaluation '''
    '''===================='''
    if task == 6:
        if dataset in [Belleview, Train]:
            dataset['ground_truth'] = load_ground_truth_Boat(
                dataset, n_clip=dataset['n_clip_test'])
        elif dataset == Avenue:
            dataset['ground_truth'] = load_ground_truth_Avenue(
                dataset['test_mask_path'], dataset['n_clip_test'])
        sequence_n_frame = count_sequence_n_frame(dataset, test=True)
        labels_select_last, labels_select_first, labels_select_mid = get_test_frame_labels(
            dataset, sequence_n_frame, cube_size, is_subway=False)
        #
        for way in range(6):
            # sequence_n_frame = None
            if way != 1:
                continue
            op = np.std
            full_assess_AUC(dataset,
                            np.array([h, w]),
                            cube_size,
                            model_test,
                            labels_select_first,
                            sequence_n_frame=sequence_n_frame,
                            plot_pr_idx=None,
                            selected_score_estimation_way=way,
                            operation=op,
                            save_roc_pr=True)
    '''============================'''
    ''' Task -6: Manual evaluation '''
    '''============================'''
    if task == -6:
        if dataset in [Belleview, Train]:
            dataset['ground_truth'] = load_ground_truth_Boat(
                dataset, n_clip=dataset['n_clip_test'])
        elif dataset == Avenue:
            dataset['ground_truth'] = load_ground_truth_Avenue(
                dataset['test_mask_path'], dataset['n_clip_test'])
        sequence_n_frame = count_sequence_n_frame(dataset, test=True)
        labels_select_last, labels_select_first, labels_select_mid = get_test_frame_labels(
            dataset, sequence_n_frame, cube_size, is_subway=False)
        #
        for way in range(6):
            # sequence_n_frame = None
            if way != 1:
                continue
            op = np.std
            #
            manual_assess_AUC(dataset,
                              np.array([h, w]),
                              cube_size,
                              model_test,
                              labels_select_mid,
                              plot_pr_idx=None,
                              selected_score_estimation_way=way,
                              operation=op)
            #
            manual_assess_AUC(dataset,
                              np.array([h, w]),
                              cube_size,
                              model_test,
                              labels_select_first,
                              plot_pr_idx=None,
                              selected_score_estimation_way=way,
                              operation=op)
            #
            manual_assess_AUC(dataset,
                              np.array([h, w]),
                              cube_size,
                              model_test,
                              labels_select_last,
                              plot_pr_idx=None,
                              selected_score_estimation_way=way,
                              operation=op)
    '''==================================='''
    ''' Task 7: Multiple scale evaluation '''
    '''==================================='''
    if task == 7:
        if dataset in [Belleview, Train]:
            dataset['ground_truth'] = load_ground_truth_Boat(
                dataset, n_clip=dataset['n_clip_test'])
        elif dataset == Avenue:
            dataset['ground_truth'] = load_ground_truth_Avenue(
                dataset['test_mask_path'], dataset['n_clip_test'])
        sequence_n_frame = count_sequence_n_frame(dataset, test=True)
        labels_select_last, labels_select_first, labels_select_mid = get_test_frame_labels(
            dataset, sequence_n_frame, cube_size)
        #
        for way in range(6):
            # sequence_n_frame = None
            if way != 1:
                continue
            op = np.std
            #
            full_assess_AUC_multiple_scale(
                dataset,
                [np.array([120, 160]),
                 np.array([30, 40]),
                 np.array([20, 20])],
                cube_size,
                model_test,
                labels_select_mid,
                sequence_n_frame=sequence_n_frame,
                selected_score_estimation_way=way,
                operation=op)
            #
            full_assess_AUC_multiple_scale(
                dataset,
                [np.array([120, 160]),
                 np.array([30, 40]),
                 np.array([20, 20])],
                cube_size,
                model_test,
                labels_select_first,
                sequence_n_frame=sequence_n_frame,
                selected_score_estimation_way=way,
                operation=op)
            #
            full_assess_AUC_multiple_scale(
                dataset,
                [np.array([120, 160]),
                 np.array([30, 40]),
                 np.array([20, 20])],
                cube_size,
                model_test,
                labels_select_last,
                sequence_n_frame=sequence_n_frame,
                selected_score_estimation_way=way,
                operation=op)
    '''========================='''
    ''' Task 08: Write video    '''
    ''' Task 11: Save score plot'''
    '''========================='''
    if task == 8 or task == 11:
        frame_ranges = {'Belleview': (50, 443 + 157), 'Train': (2100, 3200)}
        if dataset in [Belleview, Train]:
            dataset['ground_truth'] = load_ground_truth_Boat(
                dataset, n_clip=dataset['n_clip_test'])
        elif dataset == Avenue:
            dataset['ground_truth'] = load_ground_truth_Avenue(
                dataset['test_mask_path'], dataset['n_clip_test'])
        write_video_result(dataset,
                           np.array([h, w]),
                           cube_size,
                           clip_idx,
                           model_test,
                           train=not test_set,
                           operation=np.std,
                           frame_gt=dataset['ground_truth'][clip_idx],
                           show_all_score=False,
                           frame_range=frame_ranges[dataset['name']]
                           if dataset in [Belleview, Train] else None,
                           show_clf=dataset in [Belleview, Train],
                           save_plot_exam_only=(task == 11))
    '''======================='''
    ''' Task -8: Write images '''
    '''======================='''
    if task == -8:
        write_example(dataset,
                      np.array([h, w]),
                      cube_size,
                      clip_idx,
                      model_test,
                      operation=np.std,
                      scale_video=False,
                      wrapall=True)
    '''============================='''
    ''' Task 9: Visualize G filters '''
    '''============================='''
    if task == 9:
        visualize_filters(dataset,
                          cube_size,
                          n_row,
                          n_col,
                          model_idx=model_test)
    '''============================'''
    ''' Task -9: Visualize weights '''
    '''============================'''
    if task == -9:
        get_weights(dataset,
                    np.array([h, w]),
                    cube_size,
                    model_test,
                    np.std,
                    save_as_image=True)
    '''====================================='''
    ''' Task 10: Convert model to visualize '''
    '''====================================='''
    if task == 10:
        convert_model(dataset, cube_size, n_row, n_col, model_idx=model_test)

    print('finished time: %s' % datetime.datetime.now())
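
A minimal invocation sketch for the task-driven CLI above, assuming the file is saved as run.py (a hypothetical name) and that dataset_dict contains an 'Avenue' entry; parse_args() reads sys.argv, so the wrapper only needs to set it before calling main():

import sys

# hypothetical driver: run task 1 (resize the 'Avenue' frames to 120x160)
sys.argv = ['run.py', '--dataset', 'Avenue', '--task', '1',
            '--height', '120', '--width', '160']
main(sys.argv)
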
Example #2
def main(args: argparse.Namespace):
    logger = CompleteLogger(args.log, args.phase)
    print(args)

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    cudnn.benchmark = True

    # Data loading code
    normalize = T.Normalize(mean=[0.485, 0.456, 0.406],
                            std=[0.229, 0.224, 0.225])
    train_transform = T.Compose(
        [T.Resize(args.resize_size),
         T.ToTensor(), normalize])
    val_transform = T.Compose(
        [T.Resize(args.resize_size),
         T.ToTensor(), normalize])

    dataset = datasets.__dict__[args.data]
    train_source_dataset = dataset(root=args.root,
                                   task=args.source,
                                   split='train',
                                   download=True,
                                   transform=train_transform)
    train_source_loader = DataLoader(train_source_dataset,
                                     batch_size=args.batch_size,
                                     shuffle=True,
                                     num_workers=args.workers,
                                     drop_last=True)
    train_target_dataset = dataset(root=args.root,
                                   task=args.target,
                                   split='train',
                                   download=True,
                                   transform=train_transform)
    train_target_loader = DataLoader(train_target_dataset,
                                     batch_size=args.batch_size,
                                     shuffle=True,
                                     num_workers=args.workers,
                                     drop_last=True)
    val_dataset = dataset(root=args.root,
                          task=args.target,
                          split='test',
                          download=True,
                          transform=val_transform)
    val_loader = DataLoader(val_dataset,
                            batch_size=args.batch_size,
                            shuffle=False,
                            num_workers=args.workers)

    train_source_iter = ForeverDataIterator(train_source_loader)
    train_target_iter = ForeverDataIterator(train_target_loader)

    # create model
    print("=> using pre-trained model '{}'".format(args.arch))
    num_factors = train_source_dataset.num_factors
    backbone = models.__dict__[args.arch](pretrained=True)
    bottleneck_dim = args.bottleneck_dim
    if args.normalization == 'IN':
        backbone = convert_model(backbone)
        bottleneck = nn.Sequential(
            nn.Conv2d(backbone.out_features,
                      bottleneck_dim,
                      kernel_size=3,
                      stride=1,
                      padding=1),
            nn.InstanceNorm2d(bottleneck_dim),
            nn.ReLU(),
        )
        head = nn.Sequential(
            nn.Conv2d(bottleneck_dim,
                      bottleneck_dim,
                      kernel_size=3,
                      stride=1,
                      padding=1), nn.InstanceNorm2d(bottleneck_dim), nn.ReLU(),
            nn.Conv2d(bottleneck_dim,
                      bottleneck_dim,
                      kernel_size=3,
                      stride=1,
                      padding=1), nn.InstanceNorm2d(bottleneck_dim), nn.ReLU(),
            nn.AdaptiveAvgPool2d(output_size=(1, 1)), nn.Flatten(),
            nn.Linear(bottleneck_dim, num_factors), nn.Sigmoid())
        for layer in head:
            if isinstance(layer, nn.Conv2d) or isinstance(layer, nn.Linear):
                nn.init.normal_(layer.weight, 0, 0.01)
                nn.init.constant_(layer.bias, 0)
        adv_head = nn.Sequential(
            nn.Conv2d(bottleneck_dim,
                      bottleneck_dim,
                      kernel_size=3,
                      stride=1,
                      padding=1), nn.InstanceNorm2d(bottleneck_dim), nn.ReLU(),
            nn.Conv2d(bottleneck_dim,
                      bottleneck_dim,
                      kernel_size=3,
                      stride=1,
                      padding=1), nn.InstanceNorm2d(bottleneck_dim), nn.ReLU(),
            nn.AdaptiveAvgPool2d(output_size=(1, 1)), nn.Flatten(),
            nn.Linear(bottleneck_dim, num_factors), nn.Sigmoid())
        for layer in adv_head:
            if isinstance(layer, nn.Conv2d) or isinstance(layer, nn.Linear):
                nn.init.normal_(layer.weight, 0, 0.01)
                nn.init.constant_(layer.bias, 0)
        regressor = ImageRegressor(backbone,
                                   num_factors,
                                   bottleneck=bottleneck,
                                   head=head,
                                   adv_head=adv_head,
                                   bottleneck_dim=bottleneck_dim,
                                   width=bottleneck_dim)
    else:
        regressor = ImageRegressor(backbone,
                                   num_factors,
                                   bottleneck_dim=bottleneck_dim,
                                   width=bottleneck_dim)

    regressor = regressor.to(device)
    print(regressor)
    mdd = MarginDisparityDiscrepancy(args.margin).to(device)

    # define optimizer and lr scheduler
    optimizer = SGD(regressor.get_parameters(),
                    args.lr,
                    momentum=args.momentum,
                    weight_decay=args.wd,
                    nesterov=True)
    lr_scheduler = LambdaLR(
        optimizer, lambda x: args.lr *
        (1. + args.lr_gamma * float(x))**(-args.lr_decay))

    # resume from the best checkpoint
    if args.phase != 'train':
        checkpoint = torch.load(logger.get_checkpoint_path('best'),
                                map_location='cpu')
        regressor.load_state_dict(checkpoint)

    # analyze the model
    if args.phase == 'analysis':
        train_source_loader = DataLoader(train_source_dataset,
                                         batch_size=args.batch_size,
                                         shuffle=True,
                                         num_workers=args.workers,
                                         drop_last=True)
        train_target_loader = DataLoader(train_target_dataset,
                                         batch_size=args.batch_size,
                                         shuffle=True,
                                         num_workers=args.workers,
                                         drop_last=True)
        # extract features from both domains
        feature_extractor = nn.Sequential(regressor.backbone,
                                          regressor.bottleneck,
                                          regressor.head[:-2]).to(device)
        source_feature = collect_feature(train_source_loader,
                                         feature_extractor, device)
        target_feature = collect_feature(train_target_loader,
                                         feature_extractor, device)
        # plot t-SNE
        tSNE_filename = osp.join(logger.visualize_directory, 'TSNE.pdf')
        tsne.visualize(source_feature, target_feature, tSNE_filename)
        print("Saving t-SNE to", tSNE_filename)
        # calculate A-distance, which is a measure for distribution discrepancy
        A_distance = a_distance.calculate(source_feature, target_feature,
                                          device)
        print("A-distance =", A_distance)
        return

    if args.phase == 'test':
        mae = validate(val_loader, regressor, args,
                       train_source_dataset.factors, device)
        print(mae)
        return

    # start training
    best_mae = 100000.
    for epoch in range(args.epochs):
        # train for one epoch
        print("lr", lr_scheduler.get_lr())
        train(train_source_iter, train_target_iter, regressor, mdd, optimizer,
              lr_scheduler, epoch, args)

        # evaluate on validation set
        mae = validate(val_loader, regressor, args,
                       train_source_dataset.factors, device)

        # remember best mae and save checkpoint
        torch.save(regressor.state_dict(),
                   logger.get_checkpoint_path('latest'))
        if mae < best_mae:
            shutil.copy(logger.get_checkpoint_path('latest'),
                        logger.get_checkpoint_path('best'))
        best_mae = min(mae, best_mae)
        print("mean MAE {:6.3f} best MAE {:6.3f}".format(mae, best_mae))

    print("best_mae = {:6.3f}".format(best_mae))

    logger.close()
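
A hedged sketch of the argparse.Namespace this second main() expects; the field names are taken from the attributes the function reads, while every value below is purely illustrative (dataset, task names, and hyperparameters are assumptions, not values from the original script):

from argparse import Namespace

# illustrative settings only; normalization='BN' takes the non-instance-norm branch above
args = Namespace(
    log='logs/mdd', phase='train', seed=None,
    data='DSprites', root='data/dsprites', source='C', target='N',
    resize_size=224, batch_size=36, workers=4,
    arch='resnet18', bottleneck_dim=1024, normalization='BN',
    margin=4., lr=0.001, momentum=0.9, wd=0.0005,
    lr_gamma=0.0002, lr_decay=0.75, epochs=20,
)
main(args)
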
Example #3
                        default=f'./output/{get_current_time()}',
                        help='path for saving results')

opts = arg_parser.parse_args()
device = f'cuda:{opts.gpu_id}' if opts.gpu_id > -1 else 'cpu'

# prepare the output folder for results
os.makedirs(opts.output_path, exist_ok=True)

gvs_net = GVSNet(opts)
gvs_net.load_state_dict(torch.load(opts.pre_trained_gvsnet), strict=True)
gvs_net.to(device)
gvs_net.eval()

if device == 'cpu':
    gvs_net = convert_model(gvs_net)
dataset = DataLoader(get_dataset(opts.dataset)(opts),
                     batch_size=1,
                     shuffle=False)

saver_results = SaveResults(opts.output_path, opts.dataset)

for itr, data in tqdm.tqdm(enumerate(dataset), total=len(dataset)):
    data = {k: v.to(device) for k, v in data.items()}
    # Let's get a list of camera poses
    # modify get_cam_poses function if you need specific camera movement
    data['t_vec'], data['r_mat'] = get_cam_poses(opts.movement_type,
                                                 b_size=opts.batch_size)
    data['t_vec'] = [t.to(device) for t in data['t_vec']]
    data['r_mat'] = [r.to(device) for r in data['r_mat']]
    # Render the scene from the chosen camera poses
Example #4
def main():
    global args, best_prec1

    ### Calculate FLOPs & Params
    model = getattr(models, args.model)(args)
    print(model)
    if args.data in ['cifar10', 'cifar100']:
        IMAGE_SIZE = 32
    else:
        IMAGE_SIZE = 224
    n_flops, n_params = measure_model(model, IMAGE_SIZE, IMAGE_SIZE)
    print('FLOPs: %.2fM, Params: %.2fM' % (n_flops / 1e6, n_params / 1e6))
    args.filename = "%s_%s_%s.txt" % \
        (args.model, int(n_params), int(n_flops))
    del model
    print(args)

    torch.manual_seed(args.manual_seed)
    # torch.cuda.manual_seed_all(args.manual_seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    np.random.seed(args.manual_seed)

    ### Create model
    model = getattr(models, args.model)(args)

    if args.model.startswith('alexnet') or args.model.startswith('vgg'):
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()

    ### Define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    ### Optionally resume from a checkpoint
    if args.resume:
        checkpoint = load_checkpoint(args)
        if checkpoint is not None:
            args.start_epoch = checkpoint['epoch'] + 1
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])

    ### Optionally convert from a model
    if args.convert_from is not None:
        args.evaluate = True
        state_dict = torch.load(args.convert_from)['state_dict']
        model.load_state_dict(state_dict)
        model = model.cpu().module
        convert_model(model, args)
        model = nn.DataParallel(model).cuda()
        head, tail = os.path.split(args.convert_from)
        tail = "converted_" + tail
        torch.save({'state_dict': model.state_dict()},
                   os.path.join(head, tail))

    ### Optionally evaluate from a model
    if args.evaluate_from is not None:
        args.evaluate = True
        state_dict = torch.load(args.evaluate_from)['state_dict']
        model.load_state_dict(state_dict)

    ### Data loading
    dataset_dir = args.dataset_dir if args.dataset_dir is not None else '../data'
    if args.data == "cifar10":
        normalize = transforms.Normalize(mean=[0.4914, 0.4824, 0.4467],
                                         std=[0.2471, 0.2435, 0.2616])
        train_set = datasets.CIFAR10(dataset_dir,
                                     train=True,
                                     download=True,
                                     transform=transforms.Compose([
                                         transforms.RandomCrop(32, padding=4),
                                         transforms.RandomHorizontalFlip(),
                                         transforms.ToTensor(),
                                         normalize,
                                     ]))
        val_set = datasets.CIFAR10(dataset_dir,
                                   train=False,
                                   transform=transforms.Compose([
                                       transforms.ToTensor(),
                                       normalize,
                                   ]))
    elif args.data == "cifar100":
        normalize = transforms.Normalize(mean=[0.5071, 0.4867, 0.4408],
                                         std=[0.2675, 0.2565, 0.2761])
        train_set = datasets.CIFAR100(dataset_dir,
                                      train=True,
                                      download=True,
                                      transform=transforms.Compose([
                                          transforms.RandomCrop(32, padding=4),
                                          transforms.RandomHorizontalFlip(),
                                          transforms.ToTensor(),
                                          normalize,
                                      ]))
        val_set = datasets.CIFAR100(dataset_dir,
                                    train=False,
                                    transform=transforms.Compose([
                                        transforms.ToTensor(),
                                        normalize,
                                    ]))
    else:
        traindir = os.path.join(args.data, 'train')
        valdir = os.path.join(args.data, 'val')
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
        train_set = datasets.ImageFolder(
            traindir,
            transforms.Compose([
                transforms.RandomSizedCrop(224),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ]))

        val_set = datasets.ImageFolder(
            valdir,
            transforms.Compose([
                transforms.Scale(256),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                normalize,
            ]))

    train_loader = torch.utils.data.DataLoader(train_set,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(val_set,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    for epoch in range(args.start_epoch, args.epochs):
        ### Train for one epoch
        tr_prec1, tr_prec5, loss, lr = \
            train(train_loader, model, criterion, optimizer, epoch)

        ### Evaluate on validation set
        val_prec1, val_prec5 = validate(val_loader, model, criterion)

        ### Remember best prec@1 (lower is better here) and save checkpoint
        is_best = val_prec1 < best_prec1
        best_prec1 = min(val_prec1, best_prec1)
        model_filename = 'checkpoint_%03d.pth.tar' % epoch
        save_checkpoint(
            {
                'epoch': epoch,
                'model': args.model,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, args, is_best, model_filename,
            "%.4f %.4f %.4f %.4f %.4f %.4f\n" %
            (val_prec1, val_prec5, tr_prec1, tr_prec5, loss, lr))

    ### Convert model and test
    model = model.cpu().module
    convert_model(model, args)
    model = nn.DataParallel(model).cuda()
    print(model)
    validate(val_loader, model, criterion)
    n_flops, n_params = measure_model(model, IMAGE_SIZE, IMAGE_SIZE)
    print('FLOPs: %.2fM, Params: %.2fM' % (n_flops / 1e6, n_params / 1e6))
    return
Example #5
def main():
    global args, best_prec1

    ### Calculate FLOPs & Params
    model = getattr(models, args.model)(args)
    print(model)
    if args.data in ['cifar10', 'cifar100']:
        IMAGE_SIZE = 32
    else:
        IMAGE_SIZE = 224
    n_flops, n_params = measure_model(model, IMAGE_SIZE, IMAGE_SIZE)
    print('FLOPs: %.2fM, Params: %.2fM' % (n_flops / 1e6, n_params / 1e6))
    del model
    args.no_save_model = True
    print(args)

    ### Create model
    model = getattr(models, args.model)(args)

    if args.model.startswith('alexnet') or args.model.startswith('vgg'):
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()

    ### Define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    ### Optionally resume from a checkpoint
    if args.resume:
        checkpoint = load_checkpoint(args)
        if checkpoint is not None:
            args.start_epoch = checkpoint['epoch'] + 1
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])

    ### Optionally convert from a model and saves it to onnx file
    if args.convert_from is not None:
        args.evaluate = True
        state_dict = torch.load(args.convert_from)['state_dict']
        model.load_state_dict(state_dict)
        model = model.cpu().module
        convert_model(model, args)
        model = model.cuda()
        # head, tail = os.path.split(args.convert_from)
        # tail = "converted_" + tail
        # torch.save({'state_dict': model.state_dict()}, os.path.join(head, tail))

        dummy_input = torch.randn(args.batch_size,
                                  3,
                                  IMAGE_SIZE,
                                  IMAGE_SIZE,
                                  device='cuda')
        torch.onnx.export(model,
                          dummy_input,
                          args.convert_from + ".onnx",
                          verbose=True)

        return

    ### Optionally evaluate from a model
    if args.evaluate_from is not None:
        args.evaluate = True
        state_dict = torch.load(args.evaluate_from)['state_dict']
        model.load_state_dict(state_dict)

    cudnn.benchmark = True

    ### Data loading
    if args.data == "cifar10":
        norm_mean = [0.49139968, 0.48215827, 0.44653124]
        norm_std = [0.24703233, 0.24348505, 0.26158768]
        norm_transform = transforms.Normalize(norm_mean, norm_std)

        train_set = datasets.CIFAR10('~/Documents/CIFAR-10',
                                     train=True,
                                     download=True,
                                     transform=transforms.Compose([
                                         transforms.RandomCrop(
                                             32,
                                             padding=4,
                                             padding_mode='reflect'),
                                         transforms.RandomHorizontalFlip(),
                                         transforms.ToTensor(),
                                         norm_transform,
                                     ]))
        val_set = datasets.CIFAR10('~/Documents/CIFAR-10',
                                   train=False,
                                   transform=transforms.Compose([
                                       transforms.ToTensor(),
                                       norm_transform,
                                   ]))
    elif args.data == "cifar100":
        normalize = transforms.Normalize(mean=[0.5071, 0.4867, 0.4408],
                                         std=[0.2675, 0.2565, 0.2761])
        train_set = datasets.CIFAR100('../data',
                                      train=True,
                                      download=True,
                                      transform=transforms.Compose([
                                          transforms.RandomCrop(32, padding=4),
                                          transforms.RandomHorizontalFlip(),
                                          transforms.ToTensor(),
                                          normalize,
                                      ]))
        val_set = datasets.CIFAR100('../data',
                                    train=False,
                                    transform=transforms.Compose([
                                        transforms.ToTensor(),
                                        normalize,
                                    ]))
    else:
        traindir = os.path.join(args.data, 'train')
        valdir = os.path.join(args.data, 'val')
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
        train_set = datasets.ImageFolder(
            traindir,
            transforms.Compose([
                transforms.RandomSizedCrop(224),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ]))

        val_set = datasets.ImageFolder(
            valdir,
            transforms.Compose([
                transforms.Scale(256),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                normalize,
            ]))

    train_loader = torch.utils.data.DataLoader(train_set,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(val_set,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    for epoch in range(args.start_epoch, args.epochs):
        ### Train for one epoch
        tr_prec1, tr_prec5, loss, lr = \
            train(train_loader, model, criterion, optimizer, epoch)

        ### Evaluate on validation set
        val_prec1, val_prec5 = validate(val_loader, model, criterion)

        ### Remember best prec@1 (lower is better here)
        is_best = val_prec1 < best_prec1
        best_prec1 = min(val_prec1, best_prec1)

    save_checkpoint(
        {
            'epoch': epoch,
            'model': args.model,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
            'optimizer': optimizer.state_dict(),
        }, args, is_best, args.file_name)

    ### Convert model and test
    model = model.cpu().module
    convert_model(model, args)
    model = nn.DataParallel(model).cuda()
    print(model)
    validate(val_loader, model, criterion)
    n_flops, n_params = measure_model(model, IMAGE_SIZE, IMAGE_SIZE)
    print('FLOPs: %.2fM, Params: %.2fM' % (n_flops / 1e6, n_params / 1e6))
    return