def controller_train(self, tepoch=config.epoch):
        # val_data = np.genfromtxt(VAL_DATA_PATH)
        # train_data = np.genfromtxt(TRAIN_DATA_PATH)
        val_data = np.loadtxt(VAL_DATA_PATH, delimiter=',')
        train_data = np.loadtxt(TRAIN_DATA_PATH, delimiter=',')
        train_data = np.reshape(train_data, [-1, train_data.shape[1], 1])
        val_data = np.reshape(val_data, [-1, val_data.shape[1], 1])

        # val_data = np.load(VAL_DATA_PATH)
        # train_data = np.load(TRAIN_SHUFFLE_DATA_PATH)

        last_save_epoch = self.base_epoch
        global_epoch = self.base_epoch + 1

        if last_save_epoch >= 0:
            self.restore_model(path=self.model_save_dir,
                               global_step=last_save_epoch)
            # tl.files.save_npz_dict(self.model.train_net.all_params, name=self.model_save_dir + "%d.npz" % global_epoch, sess=self.sess)
            # return

        # logger_train = log.Logger(columns=["mae_copy", "loss", "nmse_train", "nmse", "mse", "mae", "mape", "lossv", "nmse_test", "nmsev", "msev", "maev", "mapev"])
        logger_train = log.Logger(columns=[
            "mae_copy", "loss", "nmse_train", "nmse", "rmse", "mae", "mape"
        ])
        logger_valid = log.Logger(columns=[
            "mae_copy", "lossv", "nmse_test", "nmsev", "rmsev", "maev", "mapev"
        ])
        # logger_test = log.Logger(columns=["mae_copy", "lossv", "nmse_test", "nmsev", "msev", "maev", "mapev"] + list(range(15, 121, 15)))
        # logger_test = log.Logger(columns=["mapev"] + list(range(15, 121, 15)))

        for epoch in range(tepoch + 1):
            # for epoch in range(1):

            self.__train__(global_epoch, train_data, logger_train)

            if epoch % config.test_p_epoch == 0:
                self.__valid__(global_epoch, val_data, logger_valid)
                # self.__test__(global_epoch, root_data[:, -config.valid_length:, :], logger_test, pathlist)
            # periodic checkpointing, left disabled here:
            # if global_epoch > self.base_epoch and global_epoch % config.save_p_epoch == 0:
            #     self.save_model(
            #         path=self.model_save_dir,
            #         global_step=global_epoch
            #     )
            #     last_save_epoch = global_epoch

            logger_train.save(self.log_save_dir + config.global_start_time +
                              "_train.csv")
            logger_valid.save(self.log_save_dir + config.global_start_time +
                              "_valid.csv")
            # logger_test.save(self.log_save_dir + config.global_start_time + "_test.csv")

            global_epoch += 1
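`log.Logger` above is a project-local helper rather than a library class. A minimal sketch consistent with how it is used here (constructed with `columns=`, filled inside `__train__`/`__valid__`, flushed to CSV with `save`) might look like this; the exact class is an assumption:

import csv

class Logger:
    # hedged sketch of the assumed column logger, not the project's actual code
    def __init__(self, columns):
        self.columns = columns
        self.rows = []

    def append(self, row):
        # one value per column, in column order
        assert len(row) == len(self.columns)
        self.rows.append(list(row))

    def save(self, path):
        with open(path, 'w', newline='') as f:
            writer = csv.writer(f)
            writer.writerow(self.columns)
            writer.writerows(self.rows)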
Example #2
    def __init__(self, config_file=None, default_config=None):
        """
        Load a config stored in the given file.
        @param config_file: the config filename to read
        @type config_file:  string or None. None implies to use
                            CONFIG_DIR/CONFIG_FILE
        """
        self._logger = log.Logger()

        self.first_load = False

        config_dir = None

        if config_file is not None:
            if not os.path.exists(config_file) or not os.path.getsize(config_file):
                config_file = self._create_config(config_file, default_config)

        self._filename = config_file

        try:
            self._config = ConfigObj(config_file,
                                     unrepr=True,
                                     list_values=True)
        except Exception as ex:
            errors = [error.msg for error in ex.errors]
            errors = ';'.join(errors)
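`ConfigObj` comes from the third-party configobj package; `unrepr=True` makes values round-trip as Python literals, and parse errors surface on the exception's `errors` list. A small self-contained usage sketch (the file name is hypothetical):

from configobj import ConfigObj

config = ConfigObj(unrepr=True)            # start from an empty config
config.filename = 'settings.ini'           # hypothetical target file
config['retries'] = 3                      # stored as a Python literal
config['servers'] = ['a.example', 'b.example']
config.write()

reloaded = ConfigObj('settings.ini', unrepr=True, list_values=True)
assert reloaded['retries'] == 3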
    def controller_test(self):
        # root_data,pathlist  = dataloader.load_data_all()
        root_data, neighbour_data, pathlist = dataloader.load_data(5, 5)
        del neighbour_data
        # event_data = dataloader.load_event_data()

        last_save_epoch = self.base_epoch
        global_epoch = self.base_epoch + 1

        assert last_save_epoch >= 0
        self.restore_model(path=self.model_save_dir,
                           global_step=last_save_epoch)

        # logger_test = log.Logger(columns=["mae_copy", "lossv", "nmse_test", "nmsev", "msev", "maev", "mapev"] + list(range(15, 121, 15)))
        logger_test = log.Logger(columns=["mapev"] + list(range(15, 121, 15)))

        self.__test__(global_epoch,
                      root_data[:, -config.valid_length:, :],
                      logger_test,
                      pathlist,
                      test_interval=1)
        # self.__test_event__(global_epoch, root_data[:, -config.valid_length:, :], event_data, logger_test, pathlist, test_interval=1)

        logger_test.save(self.log_save_dir + config.global_start_time +
                         "_test.csv")
    def controller_train(self, tepoch=config.epoch):
        root_data, neighbour_data, pathlist = dataloader.load_data(5, 5)

        last_save_epoch = self.base_epoch
        global_epoch = self.base_epoch + 1

        if last_save_epoch >= 0:
            self.restore_model(path=self.model_save_dir,
                               global_step=last_save_epoch)

        logger_train = log.Logger(columns=[
            "mae_copy", "loss", "nmse_train", "nmse", "rmse", "mae", "mape"
        ])
        # logger_valid = log.Logger(columns=["mae_copy", "lossv", "nmse_test", "nmsev", "msev", "maev", "mapev"])
        # logger_test = log.Logger(columns=["mae_copy", "lossv", "nmse_test", "nmsev", "msev", "maev", "mapev"] + list(range(15, 121, 15)))
        logger_test = log.Logger(columns=["mapev"] + list(range(15, 121, 15)))

        for epoch in range(tepoch + 1):

            self.__train__(global_epoch,
                           root_data[:, :-config.valid_length, :],
                           neighbour_data[:, :-config.valid_length, :],
                           logger_train)

            if epoch % config.test_p_epoch == 0:
                # self.__valid__(global_epoch, root_data[:, -config.valid_length:, :], neighbour_data[:, -config.valid_length:, :], logger_valid)
                self.__test__(global_epoch,
                              root_data[:, -config.valid_length:, :],
                              neighbour_data[:, -config.valid_length:, :],
                              logger_test, pathlist)

            if global_epoch > self.base_epoch and global_epoch % config.save_p_epoch == 0:
                self.save_model(path=self.model_save_dir,
                                global_step=global_epoch)
                last_save_epoch = global_epoch

            logger_train.save(self.log_save_dir + config.global_start_time +
                              "_train.csv")
            # logger_valid.save(self.log_save_dir + config.global_start_time + "_valid.csv")
            logger_test.save(self.log_save_dir + config.global_start_time +
                             "_test.csv")

            global_epoch += 1
Example #5
def main(args):
    # logger file
    str_time = time.strftime('%Y-%m-%d')
    log_file = os.path.join(args.logDir, 'train_{}.log'.format(str_time))
    logging.basicConfig(filename=log_file,
                        filemode='w',
                        format='%(levelname)s:%(message)s',
                        level=logging.DEBUG)
    warnings.filterwarnings('ignore')
    logging.info('>>> start')

    # config
    config = cfg.load_config(args.config)
    start_epoch = 0
    glob_step = 0
    pck_best = -1
    lr_now = config.TRAIN.LR
    lr_init = config.TRAIN.LR
    # hyper parameter
    data_dir = args.dataDir

    # create and init model
    logging.info(">>> creating  the backbone of model")
    backbone = pose_resnet.get_pose_net(config, is_train=True)

    # the backbone has already been pretrained;
    # the criterion and optimizer below are for distillator_net
    criterion_test = nn.MSELoss(reduction='mean').cuda()
    criterion = nn.CrossEntropyLoss(reduction='mean').cuda()
    optim_params = list(backbone.parameters())
    optimizer = optim.Adam(optim_params, lr=config.TRAIN.LR)

    # load ckpt
    if args.load:
        logging.info(">>> loading ckpt from '{}'".format(args.load))
        ckpt = torch.load(args.load)
        start_epoch = ckpt['epoch']
        pck_all = ckpt['pck']
        glob_step = ckpt['step']
        lr_now = 0.0001
        new_pretrained_state_dict = {}
        prefix = "module."
        for k, v in ckpt['state_dict_backbone'].items():
            new_pretrained_state_dict[k.replace(prefix, "")] = v
        backbone.load_state_dict(new_pretrained_state_dict)
        #  for finetune
        backbone.fc2 = nn.Linear(512, 50)
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        backbone.to(device)
        optimizer.load_state_dict(ckpt['optimizer'])
        for param_group in optimizer.param_groups:
            param_group['lr'] = 0.0001
        logging.info(">>> ckpt loaded (epoch: {} |pck:{})".format(
            start_epoch, pck_all))
    if args.resume:
        logger = log.Logger(os.path.join(args.ckpt, 'log.txt'), resume=True)
    else:
        logger = log.Logger(os.path.join(args.ckpt, 'log.txt'))
        logger.set_names(['epoch', 'lr', 'loss_train', 'loss_test', 'pck'])

    # choose which GPUs to use
    device_ids = [0, 1, 2, 3]
    if args.cuda:
        backbone = backbone.cuda(device_ids[0])
        backbone = nn.DataParallel(backbone,
                                   device_ids=device_ids)  # multi-GPU

    # data loading
    logging.info(">>> loading data")

    if args.test:
        test_loader = DataLoader(dataset=PennDataTest(data_dir=data_dir,
                                                      config=config),
                                 batch_size=config.TEST.BATCH_SIZE,
                                 shuffle=False,
                                 num_workers=config.TEST.JOBS,
                                 pin_memory=True,
                                 drop_last=True)
        loss_test, pck_num, label_num = test(test_loader, backbone,
                                             criterion_test, config)
        logging.info(">>>>>> TEST results:")
        logging.info(">>>\nERRORS: {}, pck_num:{}, label_num:{}".format(
            loss_test, pck_num, label_num))
        sys.exit()

    # build the train/test datasets
    train_data = PennDataFinetune(config=config,
                                  data_dir=data_dir,
                                  is_train=True)
    test_data = PennDataFinetune(config=config,
                                 data_dir=data_dir,
                                 is_train=False)
    logging.info('Train dataset total number: ' + str(len(train_data)))
    logging.info('Test dataset total number: ' + str(len(test_data)))

    # Data Loader
    train_loader = DataLoader(dataset=train_data,
                              batch_size=2,
                              shuffle=False,
                              num_workers=config.TRAIN.JOBS,
                              pin_memory=True,
                              drop_last=True)
    test_loader = DataLoader(dataset=test_data,
                             batch_size=1,
                             shuffle=False,
                             num_workers=config.TEST.JOBS,
                             pin_memory=True,
                             drop_last=True)
    logging.info("dataset loaded!")

    # let cudnn pick the fastest convolution algorithms
    cudnn.benchmark = True

    for epoch in range(start_epoch, config.TRAIN.END_EPOCH):
        logging.info('epoch:' + str(epoch))

        # per epoch
        glob_step, lr_now, loss_train = train(train_loader,
                                              backbone,
                                              criterion,
                                              optimizer,
                                              config.TRAIN.BATCH_SIZE,
                                              lr_init=lr_init,
                                              lr_now=lr_now,
                                              glob_step=glob_step,
                                              lr_decay=config.TRAIN.LR_DECAY,
                                              max_norm=args.max_norm)
        lr_init = lr_now
        loss_train = loss_train.item()
        loss_test, pck_num, label_num = test(test_loader, backbone,
                                             criterion_test, config)
        loss_test = loss_test.item()
        # update log file
        pck_epoch = pck_num / label_num
        logging.info('label_num:{}'.format(label_num))
        logging.info("pck:{}".format(pck_epoch))

        logger.append([epoch + 1, lr_now, loss_train, loss_test, pck_epoch],
                      ['int', 'float', 'float', 'float', 'float'])

        is_best = pck_epoch > pck_best
        pck_best = max(pck_epoch, pck_best)
        if is_best:
            log.save_ckpt(
                {
                    'epoch': epoch + 1,
                    'lr': lr_now,
                    'step': glob_step,
                    'pck': pck_epoch,
                    'state_dict_backbone': backbone.state_dict(),
                    'optimizer': optimizer.state_dict()
                },
                ckpt_path=args.ckpt,
                is_best=True)
        else:
            log.save_ckpt(
                {
                    'epoch': epoch + 1,
                    'lr': lr_now,
                    'step': glob_step,
                    'pck': pck_epoch,
                    'state_dict_backbone': backbone.state_dict(),
                    'optimizer': optimizer.state_dict()
                },
                ckpt_path=args.ckpt,
                is_best=False)
    logger.close()
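`log.save_ckpt` is another helper from the project's `log` module whose implementation isn't shown. Judging only from the call sites (a state dict plus `ckpt_path` and `is_best`), a plausible sketch follows; the file names are assumptions:

import os
import shutil

import torch

def save_ckpt(state, ckpt_path, is_best=False):
    # assumed behaviour: always save the latest state, copy it when it is the best
    last_path = os.path.join(ckpt_path, 'ckpt_last.pth.tar')
    torch.save(state, last_path)
    if is_best:
        shutil.copyfile(last_path, os.path.join(ckpt_path, 'ckpt_best.pth.tar'))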
def main(opt):
    start_epoch = 0
    acc_best = 0.
    glob_step = 0
    lr_now = opt.lr

    # save options
    log.save_options(opt, opt.ckpt)
    tb_logdir = f'./exp/{opt.name}'
    if os.path.exists(tb_logdir):
        shutil.rmtree(tb_logdir)
    writer = SummaryWriter(log_dir=tb_logdir)
    exp_dir_ = dirname(opt.load)

    # create model
    print(">>> creating model")
    # TODO: avoid awkward data reshaping for non-3-channel inputs by having
    # the ResNet take grayscale rather than RGB:
    #    model.conv1 = torch.nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
    if opt.arch == 'cnn':
        model = ResNet(BasicBlock, [2, 2, 2, 2], num_classes=opt.num_classes)
    else:
        model = LinearModel()
    model = model.cuda()
    model.apply(weight_init)
    print(">>> total params: {:.2f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    # load ckpt
    if opt.load:
        print(">>> loading ckpt from '{}'".format(opt.load))
        ckpt = torch.load(opt.load)
        start_epoch = ckpt['epoch']
        acc_best = ckpt['acc']
        glob_step = ckpt['step']
        lr_now = ckpt['lr']
        model.load_state_dict(ckpt['state_dict'])
        optimizer.load_state_dict(ckpt['optimizer'])
        print(">>> ckpt loaded (epoch: {} | acc: {})".format(
            start_epoch, acc_best))
    if opt.resume:
        logger = log.Logger(os.path.join(opt.ckpt, 'log.txt'), resume=True)
    else:
        logger = log.Logger(os.path.join(opt.ckpt, 'log.txt'))
        logger.set_names([
            'epoch', 'lr', 'loss_train', 'err_train', 'acc_train', 'loss_test',
            'err_test', 'acc_test'
        ])

    transforms = [
        ToTensor(),
    ]

    train_datasets = []
    for dataset_name in opt.train_datasets:
        train_datasets.append(
            ClassificationDataset(name=dataset_name,
                                  num_kpts=opt.num_kpts,
                                  transforms=transforms,
                                  split='train',
                                  arch=opt.arch,
                                  gt=opt.gt))
    train_dataset = ConcatDataset(train_datasets)
    train_loader = DataLoader(train_dataset,
                              batch_size=opt.train_batch,
                              shuffle=True,
                              num_workers=opt.job)

    split = 'test' if opt.test else 'valid'

    test_dataset = ClassificationDataset(name=opt.test_dataset,
                                         num_kpts=opt.num_kpts,
                                         transforms=transforms,
                                         split=split,
                                         arch=opt.arch,
                                         gt=opt.gt)

    test_loader = DataLoader(test_dataset,
                             batch_size=opt.test_batch,
                             shuffle=False,
                             num_workers=opt.job)

    subset_loaders = {}
    for subset in test_dataset.create_subsets():
        subset_loaders[subset.split] = DataLoader(subset,
                                                  batch_size=opt.test_batch,
                                                  shuffle=False,
                                                  num_workers=opt.job)

    cudnn.benchmark = True

    for epoch in range(start_epoch, opt.epochs):
        torch.cuda.empty_cache()
        print('==========================')
        print('>>> epoch: {} | lr: {:.5f}'.format(epoch + 1, lr_now))

        if not opt.test:
            glob_step, lr_now, loss_train, err_train, acc_train = \
                    train(train_loader, model, criterion, optimizer,
                            num_kpts=opt.num_kpts, num_classes=opt.num_classes,
                            lr_init=opt.lr, lr_now=lr_now, glob_step=glob_step,
                            lr_decay=opt.lr_decay, gamma=opt.lr_gamma,
                            max_norm=opt.max_norm)

        loss_test, err_test, acc_test, auc_test, prec_test = \
                test(test_loader, model, criterion, num_kpts=opt.num_kpts,
                        num_classes=opt.num_classes, batch_size=opt.test_batch)

        ## Test subsets ##
        subset_losses = {}
        subset_errs = {}
        subset_accs = {}
        subset_aucs = {}
        subset_precs = {}
        subset_openpose = {}
        subset_missing = {}
        subset_grids = {}

        if len(subset_loaders) > 0:
            bar = Bar('>>>', fill='>', max=len(subset_loaders))

        for key_idx, key in enumerate(subset_loaders):
            loss_sub, err_sub, acc_sub, auc_sub, prec_sub = test(
                subset_loaders[key],
                model,
                criterion,
                num_kpts=opt.num_kpts,
                num_classes=opt.num_classes,
                batch_size=4,
                log=False)

            subset_losses[key] = loss_sub
            subset_errs[key] = err_sub
            subset_accs[key] = acc_sub
            subset_aucs[key] = auc_sub
            subset_precs[key] = prec_sub

            sub_dataset = subset_loaders[key].dataset
            if sub_dataset.gt_paths is not None:
                gt_X = load_gt(sub_dataset.gt_paths)
                subset_openpose[key] = mpjpe_2d_openpose(sub_dataset.X, gt_X)
                subset_missing[key] = mean_missing_parts(sub_dataset.X)
            else:
                subset_openpose[key] = 0.
                subset_missing[key] = 0.

            sample_idxs = extract_tb_sample(subset_loaders[key],
                                            model,
                                            batch_size=opt.test_batch)
            sample_X = sub_dataset.X[sample_idxs]
            sample_img_paths = [sub_dataset.img_paths[x] for x in sample_idxs]
            if opt.arch == 'cnn':
                subset_grids[key] = create_grid(sample_X, sample_img_paths)

            bar.suffix = f'({key_idx+1}/{len(subset_loaders)}) | {key}'
            bar.next()

        if len(subset_loaders) > 0:
            bar.finish()
        ###################

        if opt.test:
            subset_accs['all'] = acc_test
            subset_aucs['all'] = auc_test
            subset_precs['all'] = prec_test
            report_dict = {
                'acc': subset_accs,
                'auc': subset_aucs,
                'prec': subset_precs
            }

            # pick the first unused report index
            report_idx = 0
            while os.path.exists(f'report/{opt.name}-{report_idx}.json'):
                report_idx += 1
            report_path = f'report/{opt.name}-{report_idx}.json'

            print(f'>>> Saving report to {report_path}...')
            with open(report_path, 'w') as acc_f:
                json.dump(report_dict, acc_f, indent=4)

            print('>>> Exiting (test mode)...')
            break

        # update log file
        logger.append([
            epoch + 1, lr_now, loss_train, err_train, acc_train, loss_test,
            err_test, acc_test
        ], [
            'int', 'float', 'float', 'float', 'float', 'float', 'float',
            'float'
        ])

        # save ckpt
        is_best = acc_test > acc_best
        acc_best = max(acc_test, acc_best)
        if is_best:
            log.save_ckpt(
                {
                    'epoch': epoch + 1,
                    'lr': lr_now,
                    'step': glob_step,
                    'acc': acc_best,
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict()
                },
                ckpt_path=opt.ckpt,
                is_best=True)
        else:
            log.save_ckpt(
                {
                    'epoch': epoch + 1,
                    'lr': lr_now,
                    'step': glob_step,
                    'acc': acc_best,
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict()
                },
                ckpt_path=opt.ckpt,
                is_best=False)

        writer.add_scalar('Loss/train', loss_train, epoch)
        writer.add_scalar('Loss/test', loss_test, epoch)
        writer.add_scalar('Error/train', err_train, epoch)
        writer.add_scalar('Error/test', err_test, epoch)
        writer.add_scalar('Accuracy/train', acc_train, epoch)
        writer.add_scalar('Accuracy/test', acc_test, epoch)
        for key in subset_losses:
            writer.add_scalar(f'Loss/Subsets/{key}', subset_losses[key], epoch)
            writer.add_scalar(f'Error/Subsets/{key}', subset_errs[key], epoch)
            writer.add_scalar(f'Accuracy/Subsets/{key}', subset_accs[key],
                              epoch)
            writer.add_scalar(f'OpenPose/Subsets/{key}', subset_openpose[key],
                              epoch)
            writer.add_scalar(f'Missing/Subsets/{key}', subset_missing[key],
                              epoch)
            if opt.arch == 'cnn':
                writer.add_images(f'Subsets/{key}',
                                  subset_grids[key],
                                  epoch,
                                  dataformats='NHWC')

    logger.close()
    writer.close()
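The `train` calls above thread `lr_init`, `lr_now`, `glob_step`, `lr_decay` and `gamma` through every epoch, which points at an exponential step decay applied inside the training loop. A sketch of that schedule, assuming the usual formulation (the helper name is hypothetical):

def exp_lr_decay(optimizer, step, lr_init, decay_step, gamma):
    # lr = lr_init * gamma ** (step / decay_step), applied to all param groups
    lr = lr_init * gamma ** (step / decay_step)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr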
Example #7
def main(opt):
    start_epoch = 0
    err_best = 1000
    glob_step = 0
    lr_now = opt.lr

    # save options
    log.save_options(opt, opt.ckpt)

    # create model
    print(">>> creating model")
    model = LinearModel()
    model = model.cuda()
    model.apply(weight_init)
    print(">>> total params: {:.2f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))
    criterion = nn.MSELoss(reduction='mean').cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    # load ckpt
    if opt.load:

        print(">>> loading ckpt from '{}'".format(opt.load))
        ckpt = torch.load(opt.load, encoding='utf-8')
        start_epoch = ckpt['epoch']
        err_best = ckpt['err']
        glob_step = ckpt['step']
        lr_now = ckpt['lr']
        model.load_state_dict(ckpt['state_dict'])
        optimizer.load_state_dict(ckpt['optimizer'])
        print(">>> ckpt loaded (epoch: {} | err: {})".format(
            start_epoch, err_best))
    if opt.resume:
        logger = log.Logger(os.path.join(opt.ckpt, 'log.txt'), resume=True)
    else:
        logger = log.Logger(os.path.join(opt.ckpt, 'log.txt'))
        logger.set_names(
            ['epoch', 'lr', 'loss_train', 'loss_test', 'err_test'])

    # list of action(s)
    actions = misc.define_actions(opt.action)
    num_actions = len(actions)
    print(">>> actions to use (total: {}):".format(num_actions))
    # pprint(actions, indent=4)
    # print(">>>")

    # data loading
    print(">>> loading data")
    # load statistics data
    stat_3d = torch.load(os.path.join(opt.data_dir, 'stat_3d.pth.tar'))
    stat_2d = torch.load(os.path.join(opt.data_dir, 'stat_2d.pth.tar'))

    # test
    if opt.test:
        err_set = []
        for action in actions:
            print(">>> TEST on _{}_".format(action))

            test_loader = DataLoader(dataset=Human36M(
                actions=action,
                data_path=opt.data_dir,
                set_num_samples=opt.set_num_samples,
                use_hg=opt.use_hg,
                is_train=False),
                                     batch_size=opt.test_batch,
                                     shuffle=False,
                                     num_workers=opt.job,
                                     pin_memory=True)

            _, err_test = test(test_loader,
                               model,
                               criterion,
                               stat_2d,
                               stat_3d,
                               procrustes=opt.procrustes)
            err_set.append(err_test)

        print(">>>>>> TEST results:")

        for action in actions:
            print("{}".format(action), end='\t')
        print("\n")

        for err in err_set:
            print("{:.4f}".format(err), end='\t')
        print(">>>\nERRORS: {}".format(np.array(err_set).mean()))
        sys.exit()

    # load datasets for training
    test_loader = DataLoader(dataset=Human36M(
        actions=actions,
        data_path=opt.data_dir,
        set_num_samples=opt.set_num_samples,
        use_hg=opt.use_hg,
        is_train=False),
                             batch_size=opt.test_batch,
                             shuffle=False,
                             num_workers=opt.job,
                             pin_memory=True)

    train_loader = DataLoader(dataset=Human36M(
        actions=actions,
        data_path=opt.data_dir,
        set_num_samples=opt.set_num_samples,
        use_hg=opt.use_hg),
                              batch_size=opt.train_batch,
                              shuffle=True,
                              num_workers=opt.job,
                              pin_memory=True)

    print(">>> data loaded !")

    cudnn.benchmark = True

    for epoch in range(start_epoch, opt.epochs):
        print('==========================')
        print('>>> epoch: {} | lr: {:.5f}'.format(epoch + 1, lr_now))

        ## per epoch
        # train
        glob_step, lr_now, loss_train = train(train_loader,
                                              model,
                                              criterion,
                                              optimizer,
                                              stat_2d,
                                              stat_3d,
                                              lr_init=opt.lr,
                                              lr_now=lr_now,
                                              glob_step=glob_step,
                                              lr_decay=opt.lr_decay,
                                              gamma=opt.lr_gamma,
                                              max_norm=opt.max_norm)
        # test
        loss_test, err_test = test(test_loader,
                                   model,
                                   criterion,
                                   stat_2d,
                                   stat_3d,
                                   procrustes=opt.procrustes)
        # loss_test, err_test = test(test_loader, model, criterion, stat_3d, procrustes=True)

        # update log file
        logger.append([epoch + 1, lr_now, loss_train, loss_test, err_test],
                      ['int', 'float', 'float', 'float', 'float'])

        # save ckpt
        is_best = err_test < err_best
        err_best = min(err_test, err_best)
        if is_best:
            log.save_ckpt(
                {
                    'epoch': epoch + 1,
                    'lr': lr_now,
                    'step': glob_step,
                    'err': err_best,
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict()
                },
                ckpt_path=opt.ckpt,
                is_best=True)

        else:
            log.save_ckpt(
                {
                    'epoch': epoch + 1,
                    'lr': lr_now,
                    'step': glob_step,
                    'err': err_best,
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict()
                },
                ckpt_path=opt.ckpt,
                is_best=False)

    logger.close()
def main(opt):
    start_epoch = 0
    err_best = 1000
    glob_step = 0
    lr_now = opt.lr

    # save options
    log.save_options(opt, opt.ckpt)

    # create model
    print(">>> creating model")
    model = LinearModel()
    model = model.cuda()
    model.apply(weight_init)
    print(">>> total params: {:.2f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))
    criterion = nn.MSELoss(reduction='mean').cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    # load ckpt
    if opt.load:
        print(">>> loading ckpt from '{}'".format(opt.load))
        ckpt = torch.load(opt.load)
        start_epoch = ckpt["epoch"]
        err_best = ckpt["err"]
        glob_step = ckpt["step"]
        lr_now = ckpt["lr"]
        model.load_state_dict(ckpt["state_dict"])
        optimizer.load_state_dict(ckpt["optimizer"])
        print(">>> ckpt loaded (epoch: {} | err: {})".format(
            start_epoch, err_best))
    if opt.resume:
        logger = log.Logger(os.path.join(opt.ckpt, "log.txt"), resume=True)
    else:
        logger = log.Logger(os.path.join(opt.ckpt, "log.txt"))
        logger.set_names(
            ["epoch", "lr", "loss_train", "loss_test", "err_test"])

    # list of action(s)
    actions = misc.define_actions(opt.action)
    num_actions = len(actions)
    print(">>> actions to use (total: {}):".format(num_actions))
    pprint(actions, indent=4)
    print(">>>")

    # data loading
    print(">>> loading data")
    # load statistics data
    stat_3d = torch.load(os.path.join(opt.data_dir, "stat_3d.pth.tar"))
    # test
    if opt.test:
        err_set = []
        for action in actions:
            print(">>> TEST on _{}_".format(action))
            test_loader = DataLoader(
                dataset=Human36M(
                    actions=action,
                    data_path=opt.data_dir,
                    use_hg=opt.use_hg,
                    is_train=False,
                ),
                batch_size=opt.test_batch,
                shuffle=False,
                num_workers=opt.job,
                pin_memory=True,
            )
            _, err_test = test(test_loader,
                               model,
                               criterion,
                               stat_3d,
                               procrustes=opt.procrustes)
            err_set.append(err_test)
        print(">>>>>> TEST results:")
        for action in actions:
            print("{}".format(action), end="\t")
        print("\n")
        for err in err_set:
            print("{:.4f}".format(err), end="\t")
        print(">>>\nERRORS: {}".format(np.array(err_set).mean()))
        sys.exit()

    # load datasets for training
    test_loader = DataLoader(
        dataset=Human36M(actions=actions,
                         data_path=opt.data_dir,
                         use_hg=opt.use_hg,
                         is_train=False),
        batch_size=opt.test_batch,
        shuffle=False,
        num_workers=opt.job,
        pin_memory=True,
    )
    train_loader = DataLoader(
        dataset=Human36M(actions=actions,
                         data_path=opt.data_dir,
                         use_hg=opt.use_hg),
        batch_size=opt.train_batch,
        shuffle=True,
        num_workers=opt.job,
        pin_memory=True,
    )
    print(">>> data loaded !")

    cudnn.benchmark = True
    for epoch in range(start_epoch, opt.epochs):
        print("==========================")
        print(">>> epoch: {} | lr: {:.5f}".format(epoch + 1, lr_now))

        # per epoch
        glob_step, lr_now, loss_train = train(
            train_loader,
            model,
            criterion,
            optimizer,
            lr_init=opt.lr,
            lr_now=lr_now,
            glob_step=glob_step,
            lr_decay=opt.lr_decay,
            gamma=opt.lr_gamma,
            max_norm=opt.max_norm,
        )
        loss_test, err_test = test(test_loader,
                                   model,
                                   criterion,
                                   stat_3d,
                                   procrustes=opt.procrustes)

        # update log file
        logger.append(
            [epoch + 1, lr_now, loss_train, loss_test, err_test],
            ["int", "float", "float", "flaot", "float"],
        )

        # save ckpt
        is_best = err_test < err_best
        err_best = min(err_test, err_best)
        if is_best:
            log.save_ckpt(
                {
                    "epoch": epoch + 1,
                    "lr": lr_now,
                    "step": glob_step,
                    "err": err_best,
                    "state_dict": model.state_dict(),
                    "optimizer": optimizer.state_dict(),
                },
                ckpt_path=opt.ckpt,
                is_best=True,
            )
        else:
            log.save_ckpt(
                {
                    "epoch": epoch + 1,
                    "lr": lr_now,
                    "step": glob_step,
                    "err": err_best,
                    "state_dict": model.state_dict(),
                    "optimizer": optimizer.state_dict(),
                },
                ckpt_path=opt.ckpt,
                is_best=False,
            )

    logger.close()
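`stat_3d` carries the normalisation statistics of the 3D targets, and `err_test` is a mean per-joint error computed after unnormalising the network output. Assuming the conventional `mean`/`std`/`dim_use` keys (an assumption, not confirmed by the snippet), the unnormalisation step looks roughly like:

import numpy as np

def unnormalize(data, mean, std, dim_use):
    # data: (N, len(dim_use)) predictions; mean/std: (D,) full-skeleton stats
    out = np.tile(mean, (data.shape[0], 1))            # start from the mean pose
    out[:, dim_use] = data * std[dim_use] + mean[dim_use]
    return out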
Example #9
def main(opt):
    start_epoch = 0
    err_best = 1000
    glob_step = 0
    lr_now = opt.lr

    manual_seed = 1234
    np.random.seed(manual_seed)
    torch.manual_seed(manual_seed)

    # save options
    log.save_options(opt, opt.ckpt)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # create model
    print(">>> creating model")
    model = LinearModel()
    model = model.to(device)
    model.apply(weight_init)
    print(">>> total params: {:.2f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))
    criterion = nn.MSELoss(reduction='mean').to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    # load ckpt
    if opt.load:
        print(">>> loading ckpt from '{}'".format(opt.load))
        ckpt = torch.load(opt.load)
        start_epoch = ckpt['epoch']
        err_best = ckpt['err']
        glob_step = ckpt['step']
        lr_now = ckpt['lr']
        model.load_state_dict(ckpt['state_dict'])
        optimizer.load_state_dict(ckpt['optimizer'])
        print(">>> ckpt loaded (epoch: {} | err: {})".format(
            start_epoch, err_best))
    if opt.resume:
        logger = log.Logger(os.path.join(opt.ckpt, 'log.txt'), resume=True)
    else:
        logger = log.Logger(os.path.join(opt.ckpt, 'log.txt'))
        logger.set_names(
            ['epoch', 'lr', 'loss_train', 'loss_test', 'err_test'])

    # list of action(s)
    actions = misc.define_actions(opt.action)
    num_actions = len(actions)
    print(">>> actions to use (total: {}):".format(num_actions))
    pprint(actions, indent=4)
    print(">>>")

    # data loading
    print(">>> loading data")
    # load statistics data
    stat_3d = torch.load(os.path.join(opt.data_dir, 'stat_3d.pth.tar'))
    # test
    if opt.test:
        refine_dic, refine_per_action, coeff_funs, refine_extra_kwargs = ru.get_refine_config(
            opt)
        pck_thresholds = [50, 100, 150, 200, 250]
        noise_fun = lambda x: add_gaussion_noise(x, percent=opt.noise_level)

        err_set = []
        pck_set = []
        for action in actions:
            print(">>> TEST on _{}_".format(action))
            test_loader = DataLoader(dataset=Human36M(actions=action,
                                                      data_path=opt.data_dir,
                                                      use_hg=opt.use_hg,
                                                      is_train=False),
                                     batch_size=opt.test_batch,
                                     shuffle=False,
                                     pin_memory=True)

            refine_idx_action = ru.get_idx_action(action)
            if refine_per_action:
                refine_dic_i = refine_dic[refine_idx_action]
            else:
                refine_dic_i = refine_dic
            coeff_fun_i = coeff_funs[refine_idx_action]
            _, err_test, pck_test = test(
                test_loader,
                model,
                criterion,
                stat_3d,
                device,
                procrustes=opt.procrustes,
                noise_fun=noise_fun,
                pck_thresholds=pck_thresholds,
                refine_dic=refine_dic_i,
                refine_coeff_fun=coeff_fun_i,
                refine_extra_kwargs=refine_extra_kwargs,
                cache_prefix=action if opt.dump_err else None)
            err_set.append(err_test)
            pck_set.append(pck_test)
        print(">>>>>> TEST results:")
        for action in actions:
            print("{}".format(action[:7]), end='\t')
        print("\n")
        for err in err_set:
            print("{:7.4f}".format(err), end='\t')
        print(">>> ERRORS: {}".format(np.array(err_set).mean()))

        for i, thres in enumerate(pck_thresholds):
            for pck in pck_set:
                print("{:7.4f}".format(pck[i]), end='\t')
            print(">>> PCKS {}: {}".format(
                thres, np.mean([pck[i] for pck in pck_set])))
        sys.exit()

    # load datasets for training
    test_loader = DataLoader(dataset=Human36M(actions=actions,
                                              data_path=opt.data_dir,
                                              use_hg=opt.use_hg,
                                              is_train=False),
                             batch_size=opt.test_batch,
                             shuffle=False,
                             num_workers=opt.job,
                             pin_memory=True)
    train_loader = DataLoader(dataset=Human36M(actions=actions,
                                               data_path=opt.data_dir,
                                               use_hg=opt.use_hg),
                              batch_size=opt.train_batch,
                              shuffle=True,
                              num_workers=opt.job,
                              pin_memory=True)
    print(">>> data loaded !")

    cudnn.benchmark = True
    for epoch in range(start_epoch, opt.epochs):
        print('==========================')
        print('>>> epoch: {} | lr: {:.5f}'.format(epoch + 1, lr_now))

        # per epoch
        glob_step, lr_now, loss_train = train(train_loader,
                                              model,
                                              criterion,
                                              optimizer,
                                              device,
                                              lr_init=opt.lr,
                                              lr_now=lr_now,
                                              glob_step=glob_step,
                                              lr_decay=opt.lr_decay,
                                              gamma=opt.lr_gamma,
                                              max_norm=opt.max_norm)
        loss_test, err_test, pck_test = test(test_loader,
                                             model,
                                             criterion,
                                             stat_3d,
                                             device,
                                             procrustes=opt.procrustes)

        # update log file
        logger.append(
            [epoch + 1, lr_now, loss_train, loss_test, err_test, pck_test],
            ['int', 'float', 'float', 'float', 'float', 'float'])

        # save ckpt
        is_best = err_test < err_best
        err_best = min(err_test, err_best)
        if is_best:
            log.save_ckpt(
                {
                    'epoch': epoch + 1,
                    'lr': lr_now,
                    'step': glob_step,
                    'err': err_best,
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict()
                },
                ckpt_path=opt.ckpt,
                is_best=True)
        else:
            log.save_ckpt(
                {
                    'epoch': epoch + 1,
                    'lr': lr_now,
                    'step': glob_step,
                    'err': err_best,
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict()
                },
                ckpt_path=opt.ckpt,
                is_best=False)

    logger.close()
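`pck_test` above is a Percentage of Correct Keypoints score evaluated at the millimetre thresholds in `pck_thresholds`. A minimal sketch of that metric (assumed form, not this repo's exact code):

import numpy as np

def pck(pred, gt, thresholds):
    # pred, gt: (N, J, 3) poses in mm; returns one ratio per threshold
    dists = np.linalg.norm(pred - gt, axis=-1)         # (N, J) per-joint errors
    return [float((dists <= t).mean()) for t in thresholds]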
Example #10
def main(opt):
    start_epoch = 0
    err_best = 1000
    glob_step = 0
    lr_now = opt.lr

    # save options
    log.save_options(opt, opt.ckpt)

    # create model
    print(">>> creating model")
    model = CVAE_Linear(opt.cvaeSize, opt.latent_size, opt.numSamples_train,
                        opt.alpha, opt.cvae_num_stack)
    model.cuda()
    model.apply(weight_init)
    print(">>> total params: {:.2f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))
    criterion = nn.MSELoss(reduction='mean').cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    # load ckpt
    if opt.load:
        print(">>> loading ckpt from '{}'".format(opt.load))
        ckpt = torch.load(opt.load)
        start_epoch = ckpt['epoch']
        err_best = ckpt['err']
        glob_step = ckpt['step']
        lr_now = ckpt['lr']
        model.load_state_dict(ckpt['state_dict'])
        optimizer.load_state_dict(ckpt['optimizer'])
        print(">>> ckpt loaded (epoch: {} | err: {})".format(
            start_epoch, err_best))
    if opt.resume:
        logger = log.Logger(os.path.join(opt.ckpt, 'log.txt'), resume=True)
    else:
        logger = log.Logger(os.path.join(opt.ckpt, 'log.txt'))
        logger.set_names([
            'epoch', 'lr', 'loss_train', 'loss_test', 'err_mean',
            'err_bestsamp'
        ])

    # list of action(s)
    actions = utils.define_actions('All')
    num_actions = len(actions)
    print(">>> actions to use (total: {}):".format(num_actions))
    pprint(actions, indent=4)
    print(">>>")

    # data loading
    print(">>> loading data")
    # load statistics data
    stat_2d = torch.load(os.path.join(opt.data_dir, 'stat_2d.pth.pt'))
    stat_3d = torch.load(os.path.join(opt.data_dir, 'stat_3d.pth.pt'))

    # test
    if opt.test:
        err_mean_set, err_bestsamp_set = [], []
        err_ordsamp_weighted_set, err_ordsamp_weighted_set_pred = [], []

        for action in actions:
            print("\n>>> TEST on _{}_".format(action))
            test_loader = DataLoader(dataset=Human36M(
                actions=action,
                data_path=opt.data_dir,
                is_train=False,
                procrustes=opt.procrustes),
                                     batch_size=opt.test_batch,
                                     shuffle=False,
                                     num_workers=opt.job,
                                     pin_memory=True)

            _, err_mean_test, err_bestsamp_test, err_ordsamp_weighted_test, err_ordsamp_weighted_test_pred = test_multiposenet(
                test_loader,
                model,
                criterion,
                stat_3d,
                stat_2d,
                procrustes=opt.procrustes)

            err_mean_set.append(err_mean_test)
            err_bestsamp_set.append(err_bestsamp_test)
            err_ordsamp_weighted_set.append(err_ordsamp_weighted_test)
            err_ordsamp_weighted_set_pred.append(
                err_ordsamp_weighted_test_pred)

        err_ordsamp_weighted_set_all = np.stack(err_ordsamp_weighted_set,
                                                axis=1)
        err_ordsamp_weighted_set_pred_all = np.stack(
            err_ordsamp_weighted_set_pred, axis=1)
        err_ordsamp_weighted_set_all = np.mean(err_ordsamp_weighted_set_all,
                                               axis=1)
        err_ordsamp_weighted_set_pred_all = np.mean(
            err_ordsamp_weighted_set_pred_all, axis=1)

        best_temp_gt, best_val = np.argmin(
            err_ordsamp_weighted_set_all), np.min(err_ordsamp_weighted_set_all)
        best_temp_pred, best_val_pred = np.argmin(
            err_ordsamp_weighted_set_pred_all), np.min(
                err_ordsamp_weighted_set_pred_all)

        # print('Gt best temp : {:1f}, best val : {:.4f}'.format((best_temp_gt + 1) * 0.1, best_val))
        # print('Pred best temp : {:1f}, best val : {:.4f}'.format((best_temp_pred + 1) * 0.1, best_val_pred))

        err_ordsamp_weighted_set = np.stack(err_ordsamp_weighted_set,
                                            axis=1)[best_temp_gt]
        err_ordsamp_weighted_set_pred = np.stack(err_ordsamp_weighted_set_pred,
                                                 axis=1)[best_temp_pred]

        print("\n\n>>>>>> TEST results:")
        for action in actions:
            print("{}".format(action), end='\t')
        print("\n")

        for err in err_mean_set:
            print("{:.4f}".format(err), end='\t')
        print(">>>\nERRORS - Mean : {:.4f}".format(
            np.array(err_mean_set).mean()))

        for err in err_ordsamp_weighted_set_pred:
            print("{:.4f}".format(err), end='\t')
        print(">>>\nERRORS - OrdinalScore ( PRED Ordinals ) : {:.4f}".format(
            np.array(err_ordsamp_weighted_set_pred).mean()))

        for err in err_ordsamp_weighted_set:
            print("{:.4f}".format(err), end='\t')
        print(">>>\nERRORS - OrdinalScore ( GT Ordinals ) : {:.4f}".format(
            np.array(err_ordsamp_weighted_set).mean()))

        for err in err_bestsamp_set:
            print("{:.4f}".format(err), end='\t')
        print(">>>\nERRORS - Oracle : {:.4f}".format(
            np.array(err_bestsamp_set).mean()))

        sys.exit()

    # load datasets for training
    train_loader = DataLoader(
        dataset=Human36M(actions=actions,
                         data_path=opt.data_dir,
                         procrustes=opt.procrustes),
        batch_size=opt.train_batch,
        shuffle=True,
        num_workers=opt.job,
    )

    test_loader = DataLoader(
        dataset=Human36M(actions=actions,
                         data_path=opt.data_dir,
                         is_train=False,
                         procrustes=opt.procrustes),
        batch_size=opt.test_batch,
        shuffle=False,
        num_workers=opt.job,
    )

    print(">>> data loaded !")

    cudnn.benchmark = True
    for epoch in range(start_epoch, opt.epochs):
        print('==========================')
        print('>>> epoch: {} | lr: {:.5f}'.format(epoch + 1, lr_now))

        glob_step, lr_now, loss_train = train_multiposenet(
            train_loader,
            model,
            criterion,
            optimizer,
            lr_init=opt.lr,
            lr_now=lr_now,
            glob_step=glob_step,
            lr_decay=opt.lr_decay,
            gamma=opt.lr_gamma,
            max_norm=opt.max_norm)
        loss_test, err_mean, err_bestsamp, _, _ = test_multiposenet(
            test_loader,
            model,
            criterion,
            stat_3d,
            stat_2d,
            procrustes=opt.procrustes)

        logger.append(
            [epoch + 1, lr_now, loss_train, loss_test, err_mean, err_bestsamp],
            ['int', 'float', 'float', 'float', 'float', 'float'])

        is_best = err_bestsamp < err_best
        err_best = min(err_bestsamp, err_best)
        if is_best:
            log.save_ckpt(
                {
                    'epoch': epoch + 1,
                    'lr': lr_now,
                    'step': glob_step,
                    'err': err_best,
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict()
                },
                ckpt_path=opt.ckpt,
                is_best=True)
        else:
            log.save_ckpt(
                {
                    'epoch': epoch + 1,
                    'lr': lr_now,
                    'step': glob_step,
                    'err': err_best,
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict()
                },
                ckpt_path=opt.ckpt,
                is_best=False)

    logger.close()
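When `opt.procrustes` is set, predictions are rigidly aligned to the ground truth before the error is measured. The standard similarity-transform alignment, sketched here under the assumption that the repo uses the usual SVD formulation:

import numpy as np

def procrustes_align(pred, gt):
    # align pred (J, 3) to gt (J, 3) with optimal scale, rotation, translation
    mu_p, mu_g = pred.mean(0), gt.mean(0)
    p, g = pred - mu_p, gt - mu_g
    U, s, Vt = np.linalg.svd(g.T @ p)
    d = np.sign(np.linalg.det(U) * np.linalg.det(Vt))  # avoid reflections
    flip = np.array([1.0, 1.0, d])
    R = (U * flip) @ Vt
    scale = (s * flip).sum() / (p ** 2).sum()
    return scale * p @ R.T + mu_g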
Example #11
def main(opt):
    start_epoch = 0
    err_best = 1000
    glob_step = 0
    lr_now = opt.lr

    # save options
    log.save_options(opt, opt.out_dir)

    # create and initialise model
    # parents = [1, 2, 7, 7, 5, 7, 5, -1, 8, 7, 7, 10, 7]
    # assert len(parents) == 13
    # adj = adj_mx_from_skeleton(13, parents)

    model = LinearModel(
        input_size=26,
        output_size=39,
        linear_size=opt.linear_size,
        num_stage=opt.num_stage,
        p_dropout=opt.dropout,
    )
    # groups = [[2, 3], [5, 6], [1, 4], [0, 7], [8, 9], [14, 15], [11, 12], [10, 13]]
    # model = SemGCN(adj, 128, num_layers=4, p_dropout=0.0, nodes_group=None)

    # model = SemGCN()
    model = model.cuda()
    model.apply(weight_init)
    criterion = nn.MSELoss(reduction='mean').cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    print(">>> total params: {:.2f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

    # load pretrained ckpt
    if opt.load:
        print(">>> loading ckpt from '{}'".format(opt.load))
        ckpt = torch.load(opt.load)
        start_epoch = ckpt["epoch"]
        err_best = ckpt["err"]
        glob_step = ckpt["step"]
        lr_now = ckpt["lr"]
        model.load_state_dict(ckpt["state_dict"])
        optimizer.load_state_dict(ckpt["optimizer"])
        print(">>> ckpt loaded (epoch: {} | err: {})".format(
            start_epoch, err_best))

    if opt.test:
        log_file = "log_test.txt"
    else:
        log_file = "log_train.txt"
    if opt.resume:
        logger = log.Logger(os.path.join(opt.out_dir, log_file), resume=True)
    else:
        logger = log.Logger(os.path.join(opt.out_dir, log_file))
        logger.set_names(
            ["epoch", "lr", "loss_train", "loss_test", "err_test"])

    # data loading
    print("\n>>> loading data")
    stat_3d = torch.load(os.path.join(opt.data_dir, "stat_3d.pth.tar"))

    # test
    if opt.test:
        test_loader = DataLoader(
            dataset=data_loader(data_path=opt.data_dir, is_train=False),
            batch_size=opt.batch_size,
            shuffle=False,
            num_workers=opt.job,
            pin_memory=True,
        )

        loss_test, err_test, joint_err, all_err, outputs, targets, inputs = test(
            test_loader, model, criterion, stat_3d)

        print(os.path.join(opt.out_dir, "test_results.pth.tar"))
        torch.save(
            {
                "loss": loss_test,
                "all_err": all_err,
                "test_err": err_test,
                "joint_err": joint_err,
                "output": outputs,
                "target": targets,
                "input": inputs,
            },
            open(os.path.join(opt.out_dir, "test_results.pth.tar"), "wb"),
        )

        # print("train {:.4f}".format(err_train), end="\t")
        print("test {:.4f}".format(err_test), end="\t")
        sys.exit()

    # load datasets for training
    test_loader = DataLoader(
        dataset=data_loader(data_path=opt.data_dir, is_train=False),
        batch_size=opt.batch_size,
        shuffle=False,
        num_workers=opt.job,
        pin_memory=True,
    )

    train_loader = DataLoader(
        dataset=data_loader(data_path=opt.data_dir,
                            is_train=True,
                            noise=opt.noise),
        batch_size=opt.batch_size,
        shuffle=True,
        num_workers=opt.job,
        pin_memory=True,
        drop_last=False,
    )

    # loop through epochs
    cudnn.benchmark = True
    for epoch in range(start_epoch, opt.epochs):
        print("==========================")
        print(">>> epoch: {} | lr: {:.10f}".format(epoch + 1, lr_now))

        # train
        glob_step, lr_now, loss_train = train(
            train_loader,
            model,
            criterion,
            optimizer,
            lr_init=opt.lr,
            lr_now=lr_now,
            glob_step=glob_step,
            lr_decay=opt.lr_decay,
            gamma=opt.lr_gamma,
            max_norm=opt.max_norm,
        )

        # test
        loss_test, err_test, _, _, _, _, _ = test(test_loader, model,
                                                  criterion, stat_3d)

        # update log file
        logger.append(
            [epoch + 1, lr_now, loss_train, loss_test, err_test],
            ["int", "float", "float", "float", "float"],
        )

        # save ckpt
        is_best = err_test < err_best
        err_best = min(err_test, err_best)
        log.save_ckpt(
            {
                "epoch": epoch + 1,
                "lr": lr_now,
                "step": glob_step,
                "err": err_best,
                "state_dict": model.state_dict(),
                "optimizer": optimizer.state_dict(),
            },
            ckpt_path=opt.out_dir,
            is_best=is_best,
        )

    logger.close()
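`model.apply(weight_init)` walks every submodule, but the initialiser itself is defined elsewhere. A typical sketch, assuming Kaiming-normal initialisation of linear layers (a common choice, not confirmed by the snippet):

import torch.nn as nn

def weight_init(m):
    # called by model.apply() on each submodule
    if isinstance(m, nn.Linear):
        nn.init.kaiming_normal_(m.weight)
        if m.bias is not None:
            nn.init.zeros_(m.bias)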
Example #12
def main(opt):
    start_epoch = 0
    err_best = 1000
    glob_step = 0
    lr_now = opt.lr

    # data loading
    print("\n>>> loading data")
    stat_3d = torch.load(os.path.join(opt.data_dir, 'stat_3d.pth.tar'))
    input_size = stat_3d['input_size']
    output_size = stat_3d['output_size']

    print('\n>>> input dimension: {} '.format(input_size))
    print('>>> output dimension: {} \n'.format(output_size))

    # save options
    log.save_options(opt, opt.out_dir)

    # create and initialise model
    model = LinearModel(input_size=input_size, output_size=output_size)
    model = model.cuda()
    model.apply(weight_init)
    criterion = nn.MSELoss(reduction='mean').cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    print(">>> total params: {:.2f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

    # load pretrained ckpt
    if opt.load:
        print(">>> loading ckpt from '{}'".format(opt.load))
        ckpt = torch.load(opt.load)
        start_epoch = ckpt['epoch']
        err_best = ckpt['err']
        glob_step = ckpt['step']
        lr_now = ckpt['lr']
        model.load_state_dict(ckpt['state_dict'])
        optimizer.load_state_dict(ckpt['optimizer'])
        print(">>> ckpt loaded (epoch: {} | err: {})".format(
            start_epoch, err_best))

    if opt.test:
        log_file = 'log_test.txt'
    else:
        log_file = 'log_train.txt'
    if opt.resume:
        logger = log.Logger(os.path.join(opt.out_dir, log_file), resume=True)
    else:
        logger = log.Logger(os.path.join(opt.out_dir, log_file))
        logger.set_names(
            ['epoch', 'lr', 'loss_train', 'loss_test', 'err_test'])

    # loader for testing and prediction
    test_loader = DataLoader(dataset=data_loader(data_path=opt.data_dir,
                                                 is_train=False,
                                                 predict=opt.predict),
                             batch_size=opt.batch_size,
                             shuffle=False,
                             num_workers=opt.job,
                             pin_memory=True)

    # test
    if opt.test or opt.predict:

        loss_test, err_test, joint_err, all_err, outputs, targets, inputs = \
            test(test_loader, model, criterion, stat_3d)

        print(">>> saving test results to {}".format(
            os.path.join(opt.out_dir, "test_results.pth.tar")))
        torch.save(
            {
                'loss': loss_test,
                'all_err': all_err,
                'test_err': err_test,
                'joint_err': joint_err,
                'output': outputs,
                'target': targets,
                'input': inputs
            }, os.path.join(opt.out_dir, "test_results.pth.tar"))

        if not opt.predict:
            print("{:.4f}".format(err_test), end='\t')

        sys.exit()

    # loader for training
    train_loader = DataLoader(dataset=data_loader(data_path=opt.data_dir,
                                                  is_train=True,
                                                  noise=opt.noise),
                              batch_size=opt.batch_size,
                              shuffle=True,
                              num_workers=opt.job,
                              pin_memory=True)

    # loop through epochs
    cudnn.benchmark = True
    for epoch in range(start_epoch, opt.epochs):
        print('==========================')
        print('>>> epoch: {} | lr: {:.5f}'.format(epoch + 1, lr_now))

        # train
        glob_step, lr_now, loss_train = train(train_loader,
                                              model,
                                              criterion,
                                              optimizer,
                                              lr_init=opt.lr,
                                              lr_now=lr_now,
                                              glob_step=glob_step,
                                              lr_decay=opt.lr_decay,
                                              gamma=opt.lr_gamma,
                                              max_norm=opt.max_norm)

        # test
        loss_test, err_test, _, _, _, _, _ = test(test_loader, model,
                                                  criterion, stat_3d)

        # update log file
        logger.append([epoch + 1, lr_now, loss_train, loss_test, err_test],
                      ['int', 'float', 'float', 'float', 'float'])

        # save ckpt
        is_best = err_test < err_best
        err_best = min(err_test, err_best)
        log.save_ckpt(
            {
                'epoch': epoch + 1,
                'lr': lr_now,
                'step': glob_step,
                'err': err_best,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict()
            },
            ckpt_path=opt.out_dir,
            is_best=is_best)

    logger.close()
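The `train()` helper called in these loops is also not shown, but its `lr_init`, `lr_now`, `glob_step`, `lr_decay`, and `gamma` arguments point at a step-based exponential schedule: every `lr_decay` optimizer steps, the learning rate is recomputed from the initial value. A hedged sketch of that schedule (the exact formula used by the original `train()` is an assumption):

def lr_decay_step(optimizer, glob_step, lr_init, lr_decay, gamma):
    # lr shrinks exponentially with the number of completed decay intervals
    lr = lr_init * gamma ** (glob_step / lr_decay)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr

Inside the batch loop this would be invoked roughly as `if glob_step % lr_decay == 0: lr_now = lr_decay_step(optimizer, glob_step, opt.lr, opt.lr_decay, opt.lr_gamma)`, which also explains why `lr_now` is checkpointed and restored alongside `glob_step`.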
Example #13
def main(opt):
    start_epoch = 0
    err_best = 1000
    glob_step = 0
    lr_now = opt.lr

    # save options
    log.save_options(opt, opt.ckpt)

    # create model
    print(">>> creating model")
    model = LinearModel()
    model = model.cuda()
    model.apply(weight_init)
    print(">>> total params: {:.2f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))
    criterion = nn.MSELoss(reduction='mean').cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    # load ckpt
    if opt.load:

        print(">>> loading ckpt from '{}'".format(opt.load))
        ckpt = torch.load(opt.load, encoding='utf-8')
        start_epoch = ckpt['epoch']
        err_best = ckpt['err']
        glob_step = ckpt['step']
        lr_now = ckpt['lr']
        model.load_state_dict(ckpt['state_dict'])
        optimizer.load_state_dict(ckpt['optimizer'])
        print(">>> ckpt loaded (epoch: {} | err: {})".format(
            start_epoch, err_best))
    if opt.resume:
        logger = log.Logger(os.path.join(opt.ckpt, 'log.txt'), resume=True)
    else:
        logger = log.Logger(os.path.join(opt.ckpt, 'log.txt'))
        logger.set_names(
            ['epoch', 'lr', 'loss_train', 'loss_test', 'err_test'])

    # list of action(s)
    actions = misc.define_actions(opt.action)
    num_actions = len(actions)
    print(">>> actions to use (total: {}):".format(num_actions))
    # pprint(actions, indent=4)
    # print(">>>")

    # data loading
    print(">>> loading data")
    # load statistics data
    stat_3d = torch.load(os.path.join(opt.data_dir, 'stat_3d.pth.tar'))
    stat_2d = torch.load(os.path.join(opt.data_dir, 'stat_2d.pth.tar'))
    """
    stat_3d.keys() =>  dict_keys(['std', 'dim_use', 'train', 'test', 'mean'])
    std => (96., )
    mean => (96.,)
    dim_use => (48, ) ?????
    train => dict{[user, action, camera_id]} ex) dict{[6, 'Walking', 'Walking 1.60457274.h5']} // data = int // len 600 = 15 actions * 8 cameras+extra_actions * 5 users
    test => same as train, user = 9, 11 // len 240
    (7,
     'Photo',
     'Photo 1.58860488.h5'): array([[514.54570615, -606.40670751, 5283.29114444],
                                    [513.19690503, -606.27874917, 5282.94296128],
                                    [511.72623278, -606.3556718, 5282.09161439],
                                    ...,
                                    [660.21544235, -494.87670603, 5111.48298849],
                                    [654.79473179, -497.67942449, 5111.05843265],
                                    [649.61962945, -498.74291164, 5111.91590807]])}

    """
    # actions = ["Directions",
    #            "Discussion",
    #            "Eating",
    #            "Greeting",
    #            "Phoning",
    #            "Photo",
    #            "Posing",
    #            "Purchases",
    #            "Sitting",
    #            "SittingDown",
    #            "Smoking",
    #            "Waiting",
    #            "WalkDog",
    #            "Walking",
    #            "WalkTogether"]
    # actions = ["Photo"]
    # test
    if opt.test:
        err_set = []
        for action in actions:
            print(">>> TEST on _{}_".format(action))

            test_loader = DataLoader(dataset=Human36M(
                actions=action,
                data_path=opt.data_dir,
                set_num_samples=opt.set_num_samples,
                use_hg=opt.use_hg,
                is_train=False),
                                     batch_size=opt.test_batch,
                                     shuffle=False,
                                     num_workers=opt.job,
                                     pin_memory=True)

            _, err_test = test(test_loader,
                               model,
                               criterion,
                               stat_2d,
                               stat_3d,
                               procrustes=opt.procrustes)
            err_set.append(err_test)

        print(">>>>>> TEST results:")

        for action in actions:
            print("{}".format(action), end='\t')
        print("\n")

        for err in err_set:
            print("{:.4f}".format(err), end='\t')
        print(">>>\nERRORS: {}".format(np.array(err_set).mean()))
        sys.exit()

    # load datasets for training
    test_loader = DataLoader(dataset=Human36M(
        actions=actions,
        data_path=opt.data_dir,
        set_num_samples=opt.set_num_samples,
        use_hg=opt.use_hg,
        is_train=False),
                             batch_size=opt.test_batch,
                             shuffle=False,
                             num_workers=opt.job,
                             pin_memory=True)

    train_loader = DataLoader(dataset=Human36M(
        actions=actions,
        data_path=opt.data_dir,
        set_num_samples=opt.set_num_samples,
        use_hg=opt.use_hg),
                              batch_size=opt.train_batch,
                              shuffle=True,
                              num_workers=opt.job,
                              pin_memory=True)

    print(">>> data loaded !")

    cudnn.benchmark = True

    for epoch in range(start_epoch, opt.epochs):
        print('==========================')
        print('>>> epoch: {} | lr: {:.5f}'.format(epoch + 1, lr_now))

        ## per epoch
        # train
        glob_step, lr_now, loss_train = train(train_loader,
                                              model,
                                              criterion,
                                              optimizer,
                                              stat_2d,
                                              stat_3d,
                                              lr_init=opt.lr,
                                              lr_now=lr_now,
                                              glob_step=glob_step,
                                              lr_decay=opt.lr_decay,
                                              gamma=opt.lr_gamma,
                                              max_norm=opt.max_norm)
        # test
        loss_test, err_test = test(test_loader,
                                   model,
                                   criterion,
                                   stat_2d,
                                   stat_3d,
                                   procrustes=opt.procrustes)
        # loss_test, err_test = test(test_loader, model, criterion, stat_3d, procrustes=True)

        # update log file
        logger.append([epoch + 1, lr_now, loss_train, loss_test, err_test],
                      ['int', 'float', 'float', 'float', 'float'])

        # save ckpt
        is_best = err_test < err_best
        err_best = min(err_test, err_best)
        log.save_ckpt(
            {
                'epoch': epoch + 1,
                'lr': lr_now,
                'step': glob_step,
                'err': err_best,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict()
            },
            ckpt_path=opt.ckpt,
            is_best=is_best)

    logger.close()
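`log.save_ckpt` is likewise project-specific. Given how it is called across these examples (a state dict, a target directory, and an `is_best` flag), a plausible minimal version is below; the two filenames are assumptions, not the original code:

import os
import shutil

import torch


def save_ckpt(state, ckpt_path, is_best=True):
    # always write the latest checkpoint; keep a separate copy of the best one
    last = os.path.join(ckpt_path, 'ckpt_last.pth.tar')
    torch.save(state, last)
    if is_best:
        shutil.copyfile(last, os.path.join(ckpt_path, 'ckpt_best.pth.tar'))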