def _resnext(arch, block, layers, pretrained, progress, **kwargs):
    model = ResNet(block, layers, **kwargs)
    if pretrained:
        state_dict = load_state_dict_from_url(model_urls[arch],
                                              progress=progress)
        model.load_state_dict(state_dict)
    return model
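For reference, this is the private builder pattern from torchvision.models.resnet: the public constructors simply pin the architecture arguments and delegate to it. A minimal sketch, assuming torchvision's Bottleneck block and model_urls mapping are in scope:

def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
    # ResNeXt-50 32x4d: 32 groups of width 4, with [3, 4, 6, 3] blocks per stage
    kwargs['groups'] = 32
    kwargs['width_per_group'] = 4
    return _resnext('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
                    pretrained, progress, **kwargs)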
Example 2
def main(args):
    transform = getTransforms()

    data_path = args.input_data
    if not os.path.exists(data_path):
        print('ERROR: No dataset named {}'.format(data_path))
        exit(1)

    dataset = EvalDataset(data_path, transform=transform)
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=1)

    with open(args.class_list, 'r') as class_file:
        class_names = []
        for class_name in class_file.readlines():
            if len(class_name.strip()) > 0:
                class_names.append(class_name.strip())

    model = ResNet(num_layers=18, num_classes=len(class_names)).to(DEVICE)
    model = model.eval()

    output_dir = os.path.join(data_path, 'out')
    os.makedirs(output_dir, exist_ok=True)

    model_file = args.model_file

    if os.path.exists(model_file):
        checkpoint = torch.load(model_file)
        if 'state_dict' in checkpoint:
            model.load_state_dict(checkpoint['state_dict'], strict=False)
        else:
            model.load_state_dict(checkpoint, strict=False)
        print('=> loaded {}'.format(model_file))

    else:
        print('model_file "{}" does not exist.'.format(model_file))
        exit(1)

    font = cv2.FONT_HERSHEY_SIMPLEX

    with torch.no_grad():
        for data, path in dataloader:
            outputs = model(data.to(DEVICE))
            # torch.max returns (values, indices); keep the argmax class index
            _, predicted = torch.max(outputs.data, 1)
            predicted = predicted.to('cpu')[0].item()
            class_text = class_names[predicted]
            print(class_text, path)

            image = cv2.imread(path[0], cv2.IMREAD_COLOR)
            image = cv2.rectangle(image, (0, 0), (150, 25), (255, 255, 255),
                                  -1)
            image = cv2.rectangle(image, (0, 0), (150, 25), (255, 0, 0), 2)
            cv2.putText(image, class_text, (5, 15), font, 0.5,
                        (255, 0, 0), 1, cv2.LINE_AA)
            cv2.imwrite(os.path.join(output_dir, os.path.basename(path[0])),
                        image)
Example 3
def se_resnet50(num_classes=199, pretrained=False):
    model = ResNet(SEBottleneck, [3, 4, 6, 3], num_classes=num_classes)
    model.avgpool = nn.AdaptiveAvgPool2d(1)
    if pretrained:
        model.load_state_dict(
            load_state_dict_from_url(
                "https://github.com/moskomule/senet.pytorch/releases/download/archive/seresnet50-60a8950a85b2b.pkl"
            ))
    return model
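Usage follows the standard torchvision pattern, e.g. model = se_resnet50(pretrained=True) followed by model.eval() before inference; note that num_classes must match the checkpoint's classifier head when pretrained=True.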
Example 4
def resnest269(pretrained=False, root='~/.encoding/models', **kwargs):
    model = ResNet(Bottleneck, [3, 30, 48, 8],
                   radix=2, groups=1, bottleneck_width=64,
                   deep_stem=True, stem_width=64, avg_down=True,
                   avd=True, avd_first=False, **kwargs)
    if pretrained:
        model.load_state_dict(torch.hub.load_state_dict_from_url(
            resnest_model_urls['resnest269'], progress=True, check_hash=True))
    return model
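Here check_hash=True makes torch.hub verify the downloaded file against the hash fragment embedded in its filename, so a truncated or tampered download fails at load time instead of yielding a silently broken model.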
Example 5
def main():

    exp_name = f'baseline_{now()}'
    device, log, result_dir = setup(exp_name, conf)

    train_df = load_csv(conf.train_csv)
    if conf.npy:
        train_images = np.load(conf.train_images)
    else:
        train_images = pd.read_parquet(conf.train_images)

    test_df = load_csv(conf.test_csv)
    if conf.npy:
        test_images = np.load(conf.test_images)
    else:
        test_images = pd.read_parquet(conf.test_images)

    log.info('done')
    for i in range(5):
        if i != conf.fold:
            continue

        if "resnet" in conf.arch or "resnext" in conf.arch:
            model_ft = ResNet(conf,
                              arch_name=conf.arch,
                              input_size=conf.image_size)
            model_ft.load_state_dict(
                torch.load("result/baseline_2020_03_21_13_01_08/model_0.pkl"))
        elif "densenet" in conf.arch:
            model_ft = DenseNet(conf,
                                arch_name=conf.arch,
                                input_size=conf.image_size)
        elif "efficientnet" in conf.arch:
            model_ft = EfficientNet(conf, arch_name=conf.arch)

        criterion = [
            nn.CrossEntropyLoss(reduction="none"),
            nn.CrossEntropyLoss(reduction="none"),
            nn.CrossEntropyLoss(reduction="none")
        ]
        criterion = [c.to(device) for c in criterion]

        model_ft, val_preds = train_model(train_df,
                                          train_images,
                                          test_df,
                                          test_images,
                                          model_ft,
                                          criterion,
                                          log,
                                          device,
                                          result_dir,
                                          fold=i,
                                          num_epoch=conf.num_epoch)

        torch.save(model_ft.state_dict(), result_dir / f'model_{i}.pkl')
        np.save(result_dir / f'val_preds_{i}.npy', val_preds)
Example 6
class ResNetLift(nn.Module):
    def __init__(self, network, num_joints, num_layers, num_features, mode,
                 model_2d_path, model_lift_path):
        super(ResNetLift, self).__init__()

        # 2d pose estimation module
        self.model_2d = ResNetInt(network, num_joints)
        if model_2d_path is not None:
            if os.path.isfile(model_2d_path):
                print('Load pretrained 2D pose estimation model..')
                state = torch.load(model_2d_path)
                pretrained_dict = state['model']
                model_dict = self.model_2d.state_dict()
                new_pretrained_dict = {
                    k[7:]: v
                    for k, v in pretrained_dict.items() if k[7:] in model_dict
                }
                model_dict.update(new_pretrained_dict)
                self.model_2d.load_state_dict(model_dict)
            else:
                raise ValueError('model does not exist: %s' % model_2d_path)

        # 2d-to-3d pose lifting module
        self.model_lift = ResNet(mode, num_joints, num_layers, num_features)
        if model_lift_path is not None:
            if os.path.isfile(model_lift_path):
                print('Load pretrained 2d-to-3d pose lifting model..')
                state = torch.load(model_lift_path)
                pretrained_dict = state['model']
                model_dict = self.model_lift.state_dict()
                new_pretrained_dict = {
                    k[7:]: v
                    for k, v in pretrained_dict.items() if k[7:] in model_dict
                }
                model_dict.update(new_pretrained_dict)
                self.model_lift.load_state_dict(model_dict)
            else:
                raise ValueError('model does not exist: %s' % model_lift_path)

    def forward(self, inp):
        [img, bbox, cam_c] = inp

        # 2d prediction
        [H, pred2d] = self.model_2d(img)

        # 3d prediction
        pose2d = pred2d.clone()
        [pose_local, depth_root] = self.model_lift([pose2d, bbox, cam_c])

        return [H, pred2d, pose_local, depth_root]

    def set_fliptest(self, val):
        self.model_2d.set_fliptest(val)
        self.model_lift.set_fliptest(val)
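A note on the k[7:] slicing in both branches above: it strips the 'module.' prefix (7 characters) that nn.DataParallel prepends to parameter names when a wrapped model is saved. The same filter written explicitly, as a sketch rather than code from the source:

def strip_module_prefix(pretrained_dict, model_dict):
    # Keep only 'module.'-prefixed entries whose stripped name exists in the target model.
    prefix = 'module.'
    return {k[len(prefix):]: v for k, v in pretrained_dict.items()
            if k.startswith(prefix) and k[len(prefix):] in model_dict}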
Example 7
def main():

    exp_name = f'baseline_{now()}'
    device, log, result_dir = setup(exp_name, conf)

    train_df = load_csv(conf.train_csv)
    if conf.npy:
        train_images = np.load(conf.train_images)
    else:
        train_images = pd.read_parquet(conf.train_images)

    train_df["gr"] = 0
    train_df["cd"] = 0
    train_df["vd"] = 0
    train_df["image_mean"] = 0

    models = [f"se_resnext50_f{i}.pkl" for i in range(5)]

    preds = np.zeros((len(train_df), conf.gr_size + conf.vd_size + conf.cd_size))
    image_stats = np.zeros((len(train_df), 2))

    log.info('done')
    for i in range(5):

        model = ResNet(conf, arch_name=conf.arch,
                       input_size=conf.image_size)
        model.load_state_dict(torch.load(models[i]))
        model.to(device)

        ds = val_split(train_df, train_images, fold=i)
        _, val_ds, _, val_images = ds['train'], ds['val'], ds['train_images'], ds['val_images']

        test_preds = predict(model, val_ds, val_images, valid_transform,
                             device)

        print(test_preds.shape)
        te_ind = ds['te_ind']
        preds[te_ind] += test_preds
        image_stats[te_ind, 0] = val_images.mean((1, 2))
        image_stats[te_ind, 1] = val_images.std((1, 2))

    preds = np.concatenate([preds, image_stats], axis=1)

    for t in ["grapheme_root", "vowel_diacritic", "consonant_diacritic"]:
        rf = RandomForestClassifier(n_jobs=16)
        # train = xgb.DMatrix(preds, label=train_df[t])
        # params = {"max_depth": 4, "nthread": 16, "objective": "multi:softmax",
        #           "eval_metric": ["merror", "mlogloss"], "num_class": conf.gr_size}
        # xgb.cv(params, train, num_boost_round=1000, nfold=5, seed=conf.seed,
        #        early_stopping_rounds=40, verbose_eval=10)
        rf.fit(preds, train_df[t])
        with open(f"{t}_rf2.pkl", "wb") as f:
            joblib.dump(rf, f)
Example 8
def main():
    loader = prepare_cifar10()
    last_epoch = 0

    # model = GoogleNet(mode='improved', aux=False).to(device)
    model = ResNet(layer_num='50').to(device)
    model_name = model.__class__.__name__ + '_' + model.mode

    criterion = nn.CrossEntropyLoss().to(device)
    optimizer = optim.Adam(model.parameters(),
                           lr=learning_rate,
                           weight_decay=5e-4)

    if pretrained is not None:
        print('load %s...' % pretrained)

        checkpoint = torch.load(os.path.join('./saved_models', pretrained))
        pattern = r'_[0-9]+\.'
        last_epoch = int(re.findall(pattern, pretrained)[-1][1:-1])
        if device.type == 'cuda':
            load_parallel_state_dict(model, checkpoint['state_dict'])
        else:
            model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])

        print('loading pretrained model finished')
    hyperparameters = {
        'batch_size': batch_size,
        'learning_rate': learning_rate,
        'num_epochs': num_epochs,
        'optimizer': optimizer,
        'loss_function': criterion
    }

    settings = {
        'print_every': print_every,
        'verbose': verbose,
        'save_log': is_log,
        'start_epoch': last_epoch + 1,
        'save_model': save_frequency,
        'name': model_name,
        'device': device
    }

    trainer = ResNetTrainer(model, loader, hyperparameters, settings)
    # trainer = GoogleNetTrainer(model, loader, hyperparameters, settings)
    if is_train:
        trainer.train()
    else:
        trainer.test()
Example 9
def resnest18(pretrained=False, root='~/.encoding/models', **kwargs):
    model = ResNet(Bottleneck, [2, 2, 2, 2],
                   radix=2, groups=1, bottleneck_width=64,
                   deep_stem=True, stem_width=32, avg_down=True,
                   avd=True, avd_first=False, **kwargs)
   
    if pretrained:
        # No official pretrained weights exist for resnest18, so load the
        # resnest50 weights and keep only the keys the two models share.
        weight = torch.hub.load_state_dict_from_url(
            resnest_model_urls['resnest50'], progress=True, check_hash=True)
        model_dict = model.state_dict()
        for k, v in weight.items():
            if k in model_dict:
                model_dict[k] = v
        model.load_state_dict(model_dict)
    return model
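Because ResNeSt-50 weights are being loaded into a ResNeSt-18 topology, only keys present in both state dicts are copied, and a layer whose shape still differs would raise inside load_state_dict. A common hardening, sketched here rather than taken from the source, is to filter on shape as well:

compatible = {k: v for k, v in weight.items()
              if k in model_dict and v.shape == model_dict[k].shape}
model_dict.update(compatible)
model.load_state_dict(model_dict)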
Example 10
def search_policy():

    model = ResNet(conf, arch_name=conf.arch, input_size=conf.image_size)
    model.load_state_dict(
        torch.load("result/baseline_2020_02_20_14_35_56/model_0.pkl"))

    val_df = pd.read_csv("val_index.csv")
    val_images = np.load("val_images.npy")

    faa = FastAutoAugment(val_df, val_images, model)

    # faa.test()
    study = optuna.create_study()
    study.optimize(faa.search, n_trials=200)

    print(study.best_trial)
    print(study.best_params)
Example 11
def resnet18(pretrained=False):
    """Constructs a ResNet-34 model.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(BasicBlock, [2, 2, 2, 2])
    if pretrained:
        model_dict = model.state_dict()
        pretrained_dict = model_zoo.load_url(model_urls['resnet18'])
        pretrained_dict = {
            k: v
            for k, v in pretrained_dict.items() if not k.startswith("fc")
        }
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)
        # model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
    return model
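Dropping the fc.* keys lets the freshly initialized classifier keep its own output size, so the backbone weights transfer regardless of the target class count. An equivalent shorthand on any reasonably recent PyTorch is model.load_state_dict(pretrained_dict, strict=False), which tolerates the missing fc entries.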
Example 12
def main(args):
    transform = getTransforms()

    data_path = os.path.join('data', args.data)
    if not os.path.exists(data_path):
        print('ERROR: No dataset named {}'.format(args.data))
        exit(1)

    testset = BaseDataset(list_path=os.path.join(data_path, 'val.lst'),
                          transform=transform)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=1,
                                             shuffle=True,
                                             num_workers=1)

    class_list = getClassList(data_path)

    model = ResNet(num_layers=18, num_classes=len(class_list)).to(DEVICE)
    model.eval()

    output_dir = os.path.join('outputs', args.data)
    model_state_file = os.path.join(output_dir, 'checkpoint.pth.tar')

    model_file = args.model_file
    if len(model_file) == 0:
        model_file = model_state_file

    if os.path.exists(model_file):
        checkpoint = torch.load(model_file)
        if 'state_dict' in checkpoint:
            model.load_state_dict(checkpoint['state_dict'], strict=False)
        else:
            model.load_state_dict(checkpoint, strict=False)
        print('=> loaded {}'.format(model_file))

    else:
        print('model_file "{}" does not exist.'.format(model_file))
        exit(1)

    accuracy = test(model=model,
                    dataloader=testloader,
                    device=DEVICE,
                    classes=class_list)

    print('Accuracy: {:.2f}%'.format(100 * accuracy))
Example 13
def main():
    # for repeatable experiments
    torch.backends.cudnn.enabled = False
    cudnn.benchmark = False
    cudnn.deterministic = True
    np.random.seed(0)
    torch.manual_seed(0)
    torch.cuda.manual_seed(0)

    # gpus
    gpus = [0]

    noise_h36m = 'result_H36M.pth'
    # --------------------------------------------------------------------
    # test loader for final prediction
    loader_test = torch.utils.data.DataLoader(dataset=H36M17(
        2, 'test', False, False, 2, 0.0, 0.0, noise_h36m),
                                              batch_size=512 * len(gpus),
                                              shuffle=False,
                                              num_workers=conf.num_threads)

    # build models
    #device = torch.device("cuda:1")
    generator = ResNet(3000).cuda()
    generator = nn.DataParallel(generator, device_ids=gpus)
    generator.eval()

    save_dir = '/media/sunwon/Samsung_T5/MeshLifter/demo_meshlifter'  # directory of final model.pth

    file_name = os.path.join(save_dir, 'final_model.pth')
    if os.path.exists(file_name):
        state = torch.load(file_name)
        generator.load_state_dict(state['generator'])
        print('success model loading')
    else:
        print("Doesn't exist!")

    # generate final prediction
    with torch.no_grad():
        test('test', 1, loader_test, generator)
Example 14
def loadModel(data_root, file_list, backbone_net, gpus='0,1,2,3', resume=None):

    if backbone_net == 'MobileFace':
        raise NotImplementedError
    elif backbone_net == 'Res50':
        net = ResNet(10575)
    elif backbone_net == 'Res101':
        raise NotImplementedError
    else:
        raise ValueError('{} is not available!'.format(backbone_net))

    # gpu init
    multi_gpus = False
    if len(gpus.split(',')) > 1:
        multi_gpus = True
    os.environ['CUDA_VISIBLE_DEVICES'] = gpus
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    net.load_state_dict(loadStateDict(resume))

    net = net.backbone
    if multi_gpus:
        net = DataParallel(net).to(device)
    else:
        net = net.to(device)

    transform = transforms.Compose([
        transforms.ToTensor(),  # range [0, 255] -> [0.0,1.0]
        transforms.Normalize(mean=(0.5, 0.5, 0.5),
                             std=(0.5, 0.5,
                                  0.5))  # range [0.0, 1.0] -> [-1.0,1.0]
    ])
    lfw_dataset = LFW(data_root, file_list, transform=transform)
    lfw_loader = torch.utils.data.DataLoader(lfw_dataset,
                                             batch_size=128,
                                             shuffle=False,
                                             num_workers=2,
                                             drop_last=False)

    return net.eval(), device, lfw_dataset, lfw_loader
Example 15
def get_models(args, train=True, as_ensemble=False, model_file=None, leaky_relu=False):
    models = []
    
    mean = torch.tensor([0.4914, 0.4822, 0.4465], dtype=torch.float32).cuda()
    std = torch.tensor([0.2023, 0.1994, 0.2010], dtype=torch.float32).cuda()
    normalizer = NormalizeByChannelMeanStd(mean=mean, std=std)

    if model_file:
        state_dict = torch.load(model_file)
        if train:
            print('Loading pre-trained models...')
    
    iter_m = state_dict.keys() if model_file else range(args.model_num)

    for i in iter_m:
        if args.arch.lower() == 'resnet':
            model = ResNet(depth=args.depth, leaky_relu=leaky_relu)
        else:
            raise ValueError('[{:s}] architecture is not supported yet...'.format(args.arch))
        # we include input normalization as a part of the model
        model = ModelWrapper(model, normalizer)
        if model_file:
            model.load_state_dict(state_dict[i])
        if train:
            model.train()
        else:
            model.eval()
        model = model.cuda()
        models.append(model)

    if as_ensemble:
        assert not train, 'Must be in eval mode when getting models to form an ensemble'
        ensemble = Ensemble(models)
        ensemble.eval()
        return ensemble
    else:
        return models
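When model_file is given, the checkpoint is expected to be a dict keyed by ensemble member, so iter_m walks those keys and state_dict[i] is a full per-member state dict; without a checkpoint it simply counts off args.model_num fresh models.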
Example 16
def main():
    # for repeatable experiments
    cudnn.benchmark = False
    cudnn.deterministic = True
    np.random.seed(0)
    torch.manual_seed(0)
    torch.cuda.manual_seed(0)

    # options
    opt = Opts().parse()

    # dataset loader (train)
    if opt.dataset_train == 'h36m':
        train_loader = torch.utils.data.DataLoader(
            H36M17(opt.protocol, 'train', True, opt.scale, opt.noise,
                   opt.std_train, opt.std_test, opt.noise_path),
            batch_size=opt.batch_size,
            shuffle=True,
            num_workers=int(conf.num_threads))
    elif opt.dataset_train == 'inf':
        train_loader = torch.utils.data.DataLoader(
            MPIINF('train', opt.noise, opt.std_train, opt.std_test,
                   opt.noise_path),
            batch_size=opt.batch_size,
            shuffle=True,
            num_workers=int(conf.num_threads))
    elif opt.dataset_train == 'h36m_inf':
        train_loader = torch.utils.data.DataLoader(H36M17_MPIINF('train', opt),
                                                   batch_size=opt.batch_size,
                                                   shuffle=True,
                                                   num_workers=int(
                                                       conf.num_threads))
    else:
        raise ValueError('unsupported dataset %s' % opt.dataset_train)

    # dataset loader (valid)
    if opt.dataset_test == 'h36m':
        val_loader = torch.utils.data.DataLoader(
            H36M17(opt.protocol, 'val', False, False, opt.noise, opt.std_train,
                   opt.std_test),
            batch_size=opt.batch_size,
            shuffle=False,
            num_workers=int(conf.num_threads))
    elif opt.dataset_test == 'inf':
        val_loader = torch.utils.data.DataLoader(
            MPIINF('val', opt.noise, opt.std_train, opt.std_test),
            batch_size=opt.batch_size,
            shuffle=False,
            num_workers=int(conf.num_threads))
    else:
        raise ValueError('unsupported dataset %s' % opt.dataset_test)

    # model
    if opt.network == 'resnet':
        model = ResNet(opt.mode, conf.num_joints, opt.num_layers,
                       opt.num_features).cuda()
    else:
        raise ValueError('unsupported model %s' % opt.network)

    # multi-gpu
    if opt.multi_gpu:
        model = torch.nn.DataParallel(model, device_ids=[0, 1, 2])
    else:
        model = torch.nn.DataParallel(model, device_ids=[0])

    # optimizer
    if opt.opt_method == 'rmsprop':
        optimizer = torch.optim.RMSprop(model.parameters(), lr=opt.lr)
    elif opt.opt_method == 'adam':
        optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)
    else:
        raise ValueError('unsupported optimizer %s' % opt.opt_method)

    # scheduler
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                     milestones=[200, 300],
                                                     gamma=0.1)

    # log
    log = []
    log.append([])  # epoch
    log.append([])  # cost (train)
    log.append([])  # error3d1 (train)
    log.append([])  # error3d2 (train)
    log.append([])  # cost (val)
    log.append([])  # error3d1 (val)
    log.append([])  # error3d2 (val)

    # load model
    idx_start = opt.num_epochs
    while idx_start > 0:
        file_name = os.path.join(opt.save_dir,
                                 'model_{}.pth'.format(idx_start))
        if os.path.exists(file_name):
            state = torch.load(file_name)
            model.load_state_dict(state['model'])
            optimizer.load_state_dict(state['optimizer'])
            scheduler.load_state_dict(state['scheduler'])
            log_name = os.path.join(opt.save_dir,
                                    'log_{}.pkl'.format(idx_start))
            if os.path.exists(log_name):
                with open(log_name, 'rb') as fin:
                    log = pickle.load(fin)
            break
        idx_start -= 1

    # logger
    if idx_start == 0:
        logger = Logger(opt.save_dir + '/logs')
    else:
        logger = Logger(opt.save_dir + '/logs', reset=False)

    # train
    epoch = idx_start + 1
    for epoch in range(idx_start + 1, opt.num_epochs + 1):
        # for repeatable experiments
        np.random.seed(epoch)
        torch.manual_seed(epoch)
        torch.cuda.manual_seed(epoch)

        # step the LR scheduler once per epoch (note: PyTorch >= 1.1 expects
        # scheduler.step() to be called after the optimizer updates, i.e. after train())
        scheduler.step()

        # perform one epoch of training
        cost_train, error3d1_train, error3d2_train = train(
            epoch, opt, train_loader, model, optimizer)
        logger.scalar_summary('cost_train', cost_train, epoch)
        logger.scalar_summary('error3d1_train', error3d1_train, epoch)
        logger.scalar_summary('error3d2_train', error3d2_train, epoch)

        # perform one epoch of validation
        with torch.no_grad():
            cost_val, error3d1_val, error3d2_val = val(epoch, opt, val_loader,
                                                       model)
        logger.scalar_summary('cost_val', cost_val, epoch)
        logger.scalar_summary('error3d1_val', error3d1_val, epoch)
        logger.scalar_summary('error3d2_val', error3d2_val, epoch)

        # print message to log file
        logger.write('%d %1.1e | %.4f %.4f %.4f | %.4f %.4f %.4f\n' %
                     (epoch, optimizer.param_groups[0]['lr'], cost_train,
                      error3d1_train, error3d2_train, cost_val, error3d1_val,
                      error3d2_val))

        # record epoch statistics in the log lists
        log[0].append(epoch)
        log[1].append(cost_train)
        log[2].append(error3d1_train)
        log[3].append(error3d2_train)
        log[4].append(cost_val)
        log[5].append(error3d1_val)
        log[6].append(error3d2_val)

        # save model
        state = {
            'epoch': epoch,
            'model': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'scheduler': scheduler.state_dict()
        }
        if epoch % opt.save_intervals == 0:
            torch.save(
                state, os.path.join(opt.save_dir,
                                    'model_{}.pth'.format(epoch)))
            log_name = os.path.join(opt.save_dir, 'log_{}.pkl'.format(epoch))
            with open(log_name, 'wb') as fout:
                pickle.dump(log, fout)

    logger.close()

    # save final model
    file_name = os.path.join(opt.save_dir, 'final_model.pth')
    torch.save(state, file_name)

    # save final log
    log_name = os.path.join(opt.save_dir, 'final_log.pkl')
    with open(log_name, 'wb') as fout:
        pickle.dump(log, fout)

    # plotting
    x = range(1, opt.num_epochs + 1)
    cost_train = np.array(log[1])
    error3d1_train = np.array(log[2])
    error3d2_train = np.array(log[3])
    cost_val = np.array(log[4])
    error3d1_val = np.array(log[5])
    error3d2_val = np.array(log[6])

    fig, ax = plt.subplots()
    ax.plot(x, cost_train, 'r')
    ax.plot(x, cost_val, 'b')
    ax.set(xlabel='epoch', ylabel='cost', title='cost')
    plt.legend(('cost_train', 'cost_val'))
    ax.grid()
    fig.savefig(os.path.join(opt.save_dir, 'cost.png'))

    fig, ax = plt.subplots()
    ax.plot(x, error3d1_train, 'r')
    ax.plot(x, error3d2_train, 'm')
    ax.plot(x, error3d1_val, 'b')
    ax.plot(x, error3d2_val, 'c')
    ax.set(xlabel='epoch', ylabel='error3d', title='3D error (mm)')
    plt.legend(
        ('error3d1_train', 'error3d2_train', 'error3d1_val', 'error3d2_val'))
    ax.grid()
    fig.savefig(os.path.join(opt.save_dir, 'error3d.png'))

    #---------------------------------------------------------------------------
    # dataset loader (test)
    if opt.dataset_test == 'h36m':
        test_loader = torch.utils.data.DataLoader(
            H36M17(opt.protocol, 'test', True, False, opt.noise, opt.std_train,
                   opt.std_test),
            batch_size=opt.batch_size,
            shuffle=False,
            num_workers=int(conf.num_threads))
    elif opt.dataset_test == 'inf':
        test_loader = torch.utils.data.DataLoader(
            MPIINF('val', opt.noise, opt.std_train, opt.std_test),
            batch_size=opt.batch_size,
            shuffle=False,
            num_workers=int(conf.num_threads))
    else:
        raise ValueError('unsupported dataset %s' % opt.dataset_test)

    # final evaluation
    with torch.no_grad():
        cost_final, error3d1_final, error3d2_final = test(
            epoch, opt, test_loader, model)
Example 17
def main():
    global best_acc
    start_epoch = args.start_epoch  # start from epoch 0 or last checkpoint epoch

    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    # Data
    print('==> Preparing dataset %s' % args.dataset)
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    if args.dataset == 'cifar10':
        dataloader = datasets.CIFAR10
        num_classes = 10
    else:
        dataloader = datasets.CIFAR100
        num_classes = 100

    trainset = dataloader(root='./data', train=True, download=True, transform=transform_train)
    trainloader = data.DataLoader(trainset, batch_size=args.train_batch, shuffle=True, num_workers=args.workers)

    testset = dataloader(root='./data', train=False, download=False, transform=transform_test)
    testloader = data.DataLoader(testset, batch_size=args.test_batch, shuffle=False, num_workers=args.workers)

    # Model
    print("==> creating model")
    model = ResNet(
                num_classes=num_classes,
                depth=args.depth,
                norm_type=args.norm,
                basicblock=args.basicblock,
            )
    model = torch.nn.DataParallel(model).cuda()
    cudnn.benchmark = True
    print(model)
    print('    Total params: %.4fM' % (sum(p.numel() for p in model.parameters())/1000000.0))

    criterion = nn.CrossEntropyLoss()
    optimizer = set_optimizer(model, args)

    # Resume
    title = '{}-ResNet-{}-{}'.format(args.dataset, args.depth, args.norm)
    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        assert os.path.isfile(args.resume), 'Error: no checkpoint directory found!'
        args.checkpoint = os.path.dirname(args.resume)
        checkpoint = torch.load(args.resume)
        best_acc = checkpoint['best_acc']
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title, resume=True)
    else:
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names(['Learning Rate', 'Train Loss', 'Valid Loss', 
                          'Train Acc.', 'Valid Acc.', 'Train Acc.5', 'Valid Acc.5'])

    if args.evaluate:
        print('\nEvaluation only')
        test_loss, test_acc = test(testloader, model, criterion, start_epoch, use_cuda)
        print(' Test Loss:  %.8f, Test Acc:  %.2f' % (test_loss, test_acc))
        return

    # Train and val
    for epoch in range(start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        print('\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, state['lr']))

        train_loss, train_acc, train_acc5 = train(trainloader, model, criterion, optimizer, epoch, use_cuda)
        test_loss, test_acc, test_acc5 = test(testloader, model, criterion, epoch, use_cuda)

        # append logger file
        logger.append([state['lr'], train_loss, test_loss, train_acc, test_acc, train_acc5, test_acc5])

        # save model
        is_best = test_acc > best_acc
        best_acc = max(test_acc, best_acc)
        save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'acc': test_acc,
                'best_acc': best_acc,
                'optimizer' : optimizer.state_dict(),
            }, is_best, checkpoint=args.checkpoint)

    logger.close()

    print('Best acc:')
    print(best_acc)
Example 18
    args.cuda = torch.cuda.is_available()
    device = torch.device("cuda" if args.cuda else "cpu")
    seed_torch(args.seed)
    runner_name = os.path.basename(__file__).split(".")[0]
    model_dir= os.path.join(args.exp_root, runner_name)
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    args.model_dir = os.path.join(model_dir, '{}.pth'.format(args.model_name))

    model = ResNet(BasicBlock, [2,2,2,2], args.num_labeled_classes, args.num_unlabeled_classes).to(device)

    num_classes = args.num_labeled_classes + args.num_unlabeled_classes

    if args.mode=='train':
        state_dict = torch.load(args.warmup_model_dir)
        model.load_state_dict(state_dict, strict=False)
        for name, param in model.named_parameters(): 
            if 'head' not in name and 'layer4' not in name:
                param.requires_grad = False
 
    if args.dataset_name == 'cifar10':
        mix_train_loader = CIFAR10LoaderMix(root=args.dataset_root, batch_size=args.batch_size, split='train', aug='twice', shuffle=True, labeled_list=range(args.num_labeled_classes), unlabeled_list=range(args.num_labeled_classes, num_classes))
        labeled_train_loader = CIFAR10Loader(root=args.dataset_root, batch_size=args.batch_size, split='train', aug='once', shuffle=True, target_list = range(args.num_labeled_classes))
        unlabeled_eval_loader = CIFAR10Loader(root=args.dataset_root, batch_size=args.batch_size, split='train', aug=None, shuffle=False, target_list = range(args.num_labeled_classes, num_classes))
        unlabeled_eval_loader_test = CIFAR10Loader(root=args.dataset_root, batch_size=args.batch_size, split='test', aug=None, shuffle=False, target_list = range(args.num_labeled_classes, num_classes))
        labeled_eval_loader = CIFAR10Loader(root=args.dataset_root, batch_size=args.batch_size, split='test', aug=None, shuffle=False, target_list = range(args.num_labeled_classes))
        all_eval_loader = CIFAR10Loader(root=args.dataset_root, batch_size=args.batch_size, split='test', aug=None, shuffle=False, target_list = range(num_classes))
    elif args.dataset_name == 'cifar100':
        mix_train_loader = CIFAR100LoaderMix(root=args.dataset_root, batch_size=args.batch_size, split='train', aug='twice', shuffle=True, labeled_list=range(args.num_labeled_classes), unlabeled_list=range(args.num_labeled_classes, num_classes))
        labeled_train_loader = CIFAR100Loader(root=args.dataset_root, batch_size=args.batch_size, split='train', aug='once', shuffle=True, target_list = range(args.num_labeled_classes))
        unlabeled_eval_loader = CIFAR100Loader(root=args.dataset_root, batch_size=args.batch_size, split='train', aug=None, shuffle=False, target_list = range(args.num_labeled_classes, num_classes))
Example 19
# image transformation function
loader = transforms.Compose([transforms.ToTensor()])

# checking if the GPU is available for inference
is_use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if is_use_cuda else "cpu")

# initializing the model
net = ResNet(depth=14, in_channels=1, output=3)
# moving the net to GPU for testing
if is_use_cuda:
    net.to(device)
    net = nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
# loading the network parameters
net.load_state_dict(torch.load("./checkpoints/model.pth"))
net.eval()
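Note the ordering here: when CUDA is available, load_state_dict is called on the DataParallel wrapper, so ./checkpoints/model.pth must contain 'module.'-prefixed parameter names (and unprefixed names on a CPU-only machine). Loading a GPU-saved checkpoint on CPU would additionally require a map_location argument, as in the last example below.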


def draw_circle(img, row, col, rad):
    rr, cc, val = circle_perimeter_aa(row, col, rad)
    valid = ((rr >= 0) & (rr < img.shape[0]) & (cc >= 0) & (cc < img.shape[1]))
    img[rr[valid], cc[valid]] = val[valid]


def noisy_circle(size, radius, noise):
    img = np.zeros((size, size), dtype=float)

    # Circle
    row = np.random.randint(size)
    col = np.random.randint(size)
Example 20
def main(db):
    if db == 'h36m':
        loader_val = torch.utils.data.DataLoader(dataset=H36M17(
            2, 'test', False, False, 2, 0, 0, noise_h36_val),
                                                 batch_size=1,
                                                 shuffle=False,
                                                 num_workers=0)
    model = ResNet(3000).cuda()
    model = nn.DataParallel(model, device_ids=[0])

    #last full model

    save_dir = '/media/sunwon/Samsung_T5/MeshLifter/demo_meshlifter/'

    filename = os.path.join(save_dir, 'final_model.pth')
    state = torch.load(filename)
    model.load_state_dict(state['generator'], strict=False)
    model.eval()

    for i, data in enumerate(loader_val):
        if np.mod(i, 1) == 0:  # mod 1 keeps every sample; raise the modulus to subsample
            pose2d = data['pose2d'].float().to("cuda")
            print(pose2d)
            bbox = data['bbox'].float().to("cuda")
            pose3d = data['pose3d'].float().to("cuda")
            rot = data['rot'].to("cuda").detach()
            rot_inv = data['rot_inv'].to("cuda").detach()
            img = data['img'].detach().cpu().numpy().squeeze()
            #img = cv2.resize(img, (256,256))
            meta2d = pose2d[0].clone()

            faces = model.module.smpl.faces
            pose2d_in = _normalize_pose(pose2d)
            rot = rot[0].detach().cpu().numpy()

            generator_output = model(pose2d_in)
            (thetas_out, verts_out, pose3d_out) = generator_output

            pose3d = pose3d.detach().cpu().numpy().squeeze()
            pose3d_out = pose3d_out.detach().cpu().numpy().squeeze()

            verts = verts_out[0].detach().cpu().numpy()
            pose2d_rot = pose3d_out[:, :2]

            pose2d = pose2d[0].detach().cpu().numpy()

            pose2d, mean, std = normalize_np_pose(pose2d)
            pose2d_rot, a, b = normalize_np_pose(pose2d_rot)
            pose2d_rot = pose2d_rot * std + mean
            vertex_color = np.ones([verts.shape[0], 4]) * [0.8, 0.8, 0.8, 1.0]
            tri_mesh = trimesh.Trimesh(verts,
                                       faces,
                                       vertex_colors=vertex_color)

            pts_color = np.ones([pose3d_out.shape[0], 4])
            pts3d = pyrender.Mesh.from_points(pose3d_out, colors=pts_color)

            mesh = pyrender.Mesh.from_trimesh(tri_mesh)
            scene = pyrender.Scene()
            scene.add(mesh)

            pose2d = pose2d[0]  # batch x 14 x 2 -> 14 x 2
            num_joints = pose2d.shape[0]

            ori_img = img.copy()

            #img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            img_2d = draw_skeleton(img, meta2d)
            print(meta2d)
            ori_img = cv2.cvtColor(ori_img, cv2.COLOR_RGB2BGR)
            cv2.imshow('original', img_2d)

            # cv2.imshow('mesh', rn.r)
            k = cv2.waitKey(0)
            if k == 27:
                cv2.destroyAllWindows()
                pdb.set_trace()
                break
            elif k == ord('s'):
                cv2.destroyAllWindows()

            pyrender.Viewer(scene, use_raymond_lighting=True)

            verts = np.expand_dims(verts, axis=0)
            joints = np.expand_dims(pose3d_out, axis=0)
Example 21
def main(args):
    transform = getTransforms()

    data_path = os.path.join('data', args.data)
    if not os.path.exists(data_path):
        print('ERROR: No dataset named {}'.format(args.data))
        exit(1)

    trainset = BaseDataset(list_path=os.path.join(data_path, 'train.lst'),
                           transform=transform)
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=args.train_batch,
                                              shuffle=True,
                                              num_workers=1)

    testset = BaseDataset(list_path=os.path.join(data_path, 'val.lst'),
                          transform=transform)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=args.val_batch,
                                             shuffle=True,
                                             num_workers=1)

    model = ResNet(num_layers=18,
                   num_classes=args.num_classes,
                   pretrained=True).to(DEVICE)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)

    summary(model, input_size=(3, 32, 32))

    max_epoch = args.max_epoch
    last_epoch = 0
    best_val_loss = None
    best_accuracy = None
    train_losses = []
    val_losses = []
    accuracies = []

    output_dir = os.path.join('outputs', args.data)
    model_state_file = os.path.join(output_dir, 'checkpoint.pth.tar')
    os.makedirs(output_dir, exist_ok=True)

    if os.path.exists(model_state_file):
        checkpoint = torch.load(model_state_file)
        last_epoch = checkpoint['epoch']
        best_val_loss = checkpoint['best_val_loss']
        best_accuracy = checkpoint['best_accuracy']
        train_losses = checkpoint['train_losses']
        val_losses = checkpoint['val_losses']
        accuracies = checkpoint['accuracies']
        model.load_state_dict(checkpoint['state_dict'], strict=False)
        optimizer.load_state_dict(checkpoint['optimizer'])
        print('=> loaded checkpoint (epoch {})'.format(last_epoch))

    for epoch in range(last_epoch, max_epoch):
        print('Epoch {}'.format(epoch))

        train_loss = train(model=model,
                           dataloader=trainloader,
                           criterion=criterion,
                           optimizer=optimizer,
                           device=DEVICE)
        val_loss = val(model=model,
                       dataloader=testloader,
                       criterion=criterion,
                       device=DEVICE)
        accuracy = test(model=model, dataloader=testloader, device=DEVICE)

        train_losses.append(train_loss)
        val_losses.append(val_loss)
        accuracies.append(accuracy)

        print('Loss: train = {}, val = {}, acc. = {}'.format(
            train_loss, val_loss, accuracy))

        # if best_val_loss is None or val_loss < best_val_loss:
        #     best_val_loss = val_loss
        #     torch.save(
        #         model.state_dict(),
        #         os.path.join(output_dir, 'best.pth')
        #     )
        if best_accuracy is None or accuracy > best_accuracy:
            best_accuracy = accuracy
            torch.save(model.state_dict(),
                       os.path.join(output_dir, 'best.pth'))

        print('=> saving checkpoint to {}'.format(model_state_file))
        torch.save(
            {
                'epoch': epoch + 1,
                'best_val_loss': best_val_loss,
                'best_accuracy': best_accuracy,
                'train_losses': train_losses,
                'val_losses': val_losses,
                'accuracies': accuracies,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict()
            }, model_state_file)

        if (epoch + 1) % 100 == 0:
            # plt.plot(range(epoch+1), train_losses, label="train")
            # plt.plot(range(epoch+1), val_losses, label="val")
            # plt.yscale('log')
            # plt.legend()
            # plt.savefig(os.path.join(output_dir, 'losses.png'))
            # plt.clf()

            fig, ax1 = plt.subplots()
            ax2 = ax1.twinx()
            ax1.plot(range(epoch + 1), train_losses, label='train')
            ax1.plot(range(epoch + 1), val_losses, label='val')
            ax1.set_xscale('log')
            ax1.set_yscale('log')
            ax2.plot(range(epoch + 1),
                     accuracies,
                     color='red',
                     label='accuracy')
            ax2.set_xscale('log')
            handler1, label1 = ax1.get_legend_handles_labels()
            handler2, label2 = ax2.get_legend_handles_labels()
            ax1.legend(handler1 + handler2,
                       label1 + label2,
                       loc=3,
                       borderaxespad=0.)
            plt.savefig(os.path.join(output_dir, 'losses.png'))
            plt.clf()
Example 22
# Gpu or cpu device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Model
# model = Shallow()
# model = LargeResNet()
model = ResNet()
model = model.to(device)

# Load saved model parameters (if pre-trained)
if not train_mode:
    map_loc = "cuda:0" if torch.cuda.is_available() else "cpu"
    state_dict = torch.load(os.path.join(weight_path, "resnet_lr4_ep80"),
                            map_location=map_loc)
    model.load_state_dict(state_dict)

# Loss function
criterion = nn.CrossEntropyLoss()
# Optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=learn_rate)
# Learning rate scheduler
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs)


# Train one epoch
def train(epoch):
    model.train()
    train_loss = 0
    for data, label in tqdm(train_loader):
        data = data.to(device)