Example #1
def ntu_anim_xyz(seed=None):
    """Create an animation of skeleton in ntu dataset
    """
    root = '../../Dataset/NTU-RGB-D'
    benchmark = 'xview_pre'
    xyz_data_path = os.path.join(root, 'xyz', benchmark, 'train_data.npy')
    xyz_label_path = os.path.join(root, 'xyz', benchmark, 'train_label.pkl')
    xyz_data = Feeder(xyz_data_path,
                      xyz_label_path,
                      num_samples=-1,
                      num_frames=20,
                      mmap=True)
    # Prepare data
    np.random.seed(seed)
    index = np.random.randint(len(xyz_data))
    xyz, label = xyz_data[index]
    xyz = xyz[..., 0]  # keep the first body (last axis indexes tracked persons)
    # Prepare figure and axes
    fig = plt.figure(ntu_info.LABEL_NAMES[label])
    ax = fig.add_subplot(1,
                         1,
                         1,
                         projection='3d',
                         xlabel='X',
                         ylabel='Y',
                         zlabel='Z')
    ax.view_init(azim=90, elev=-70)
    # Add animation
    anim = skeleton_anim(fig, ax, xyz, ntu_info.PARENTS, fps=10, colors=COLORS)
    anim.save('data/plots/ntu/xyz.gif', writer='imagemagick', fps=10)
    # Display the animation
    plt.show()
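Examples #1 and #4 are excerpts and omit their module-level imports. A plausible header, inferred from the in-function imports of Examples #2 and #3 (the module path for skeleton_anim, COLORS, and Skeleton is not shown in these excerpts, so the last line is a guess):

import os

import numpy as np
import matplotlib.pyplot as plt

from data.ntu.feeder import Feeder  # same import as Example #2
from data.ntu import ntu_info       # same import as Example #2
# skeleton_anim, COLORS, and Skeleton come from the project's plotting and
# skeleton utilities; their exact module path is an assumption:
# from utils.visualization import skeleton_anim, COLORS, Skeleton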
Example #2
def ntu_avg_bone_length():
    from data.ntu.feeder import Feeder
    from data.ntu import ntu_info

    train_data_path = '/media/xuan/ssd/data/NTU-RGB-D/xyz/xsub/train_data.npy'
    train_label_path = '/media/xuan/ssd/data/NTU-RGB-D/xyz/xsub/train_label.pkl'
    train_data = Feeder(train_data_path,
                        train_label_path,
                        num_samples=-1,
                        num_frames=300,
                        mmap=True)
    xyz = np.zeros((3, 25))  # running sum of (x, y, z) over the 25 NTU joints
    total_frames = 0
    for i in range(len(train_data)):
        seq, _ = train_data[i]
        for j in range(train_data.num_frames_data[i]):
            xyz += seq[:, j, :]
        total_frames += train_data.num_frames_data[i]
    xyz /= total_frames  # mean joint positions across all frames
    sk = Skeleton(parents=ntu_info.PARENTS)
    print(sk.compute_bone_lens(xyz))
Example #3
def dhg_avg_bone_length():
    from data.dhg.feeder import Feeder
    from data.dhg import dhg_info

    train_data_path = '/media/xuan/ssd/data/dhg_processed/xyz/train_data.npy'
    train_label_path = '/media/xuan/ssd/data/dhg_processed/xyz/train_14_label.pkl'
    train_data = Feeder(train_data_path,
                        train_label_path,
                        num_samples=-1,
                        num_frames=200,
                        mmap=True)
    xyz = np.zeros((3, 22))  # running sum over the 22 DHG hand joints
    total_frames = 0
    for i in range(len(train_data)):
        seq, _ = train_data[i]
        for j in range(train_data.num_frames_data[i]):
            xyz += seq[:, j, :]
        total_frames += train_data.num_frames_data[i]
    xyz /= total_frames  # mean joint positions across all frames
    sk = Skeleton(parents=dhg_info.PARENTS)
    print(sk.compute_bone_lens(xyz))
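Examples #2 and #3 average the joint coordinates over every frame of every sequence and then measure bone lengths on that mean pose. The Skeleton class is not shown; a minimal sketch of what compute_bone_lens is assumed to do (Euclidean distance from each joint to its parent) looks like this:

import numpy as np

def compute_bone_lens(xyz, parents):
    # xyz: (3, num_joints) joint coordinates.
    # parents: parents[j] is the parent index of joint j; the root joint
    # is assumed to carry a negative parent index.
    lens = np.zeros(len(parents))
    for j, p in enumerate(parents):
        if p >= 0:
            lens[j] = np.linalg.norm(xyz[:, j] - xyz[:, p])
    return lens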
Example #4
def ntu_anim_qabs(seed=None):
    """Create an animation of skeleton in ntu dataset
    """
    root = '/media/xuan/ssd/data/NTU-RGB-D-pre'
    benchmark = 'xsub'
    modality = 'quaternion'
    data_path = os.path.join(root, modality, benchmark, 'train_data.npy')
    label_path = os.path.join(root, modality, benchmark, 'train_label.pkl')
    data = Feeder(data_path,
                  label_path,
                  num_samples=-1,
                  num_frames=20,
                  mmap=True)
    sk = Skeleton(parents=ntu_info.PARENTS)
    # Prepare data
    np.random.seed(seed)
    index = np.random.randint(len(data))
    quaternion, label = data[index]
    quaternion = quaternion[..., 0]  # keep the first body
    # Swap the channel and time axes for qabs2xyz, then swap them back
    xyz = sk.qabs2xyz(quaternion.transpose(1, 0, 2),
                      ntu_info.AVG_BONE_LENS).transpose(1, 0, 2)
    # Prepare figure and axes
    fig = plt.figure(ntu_info.LABEL_NAMES[label])
    ax = fig.add_subplot(1,
                         1,
                         1,
                         projection='3d',
                         xlabel='X',
                         ylabel='Y',
                         zlabel='Z')
    ax.view_init(azim=90, elev=-70)
    # Add animation
    anim = skeleton_anim(fig, ax, xyz, ntu_info.PARENTS, fps=10, colors=COLORS)
    anim.save('data/plots/ntu/qabs.gif', writer='imagemagick', fps=10)
    # Display the animation
    plt.show()
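sk.qabs2xyz reconstructs joint positions from absolute joint rotations and the dataset's average bone lengths. Its implementation is not shown here; the sketch below only illustrates the underlying forward-kinematics idea under assumed conventions: a (w, x, y, z) quaternion layout, a fixed rest-pose bone direction, and parents ordered before their children.

import numpy as np

def quat_rotate(q, v):
    # Rotate vector v by the unit quaternion q = (w, x, y, z).
    w, u = q[0], q[1:]
    return v + 2.0 * np.cross(u, np.cross(u, v) + w * v)

def qabs2xyz_sketch(quats, bone_lens, parents):
    # quats: (T, num_joints, 4) absolute rotations per frame.
    # bone_lens: (num_joints,) bone lengths, e.g. ntu_info.AVG_BONE_LENS.
    # The root stays at the origin; every other joint is its parent's
    # position plus a rotated, scaled bone offset.
    offset = np.array([0.0, 1.0, 0.0])  # assumed rest-pose bone direction
    T, J, _ = quats.shape
    xyz = np.zeros((T, J, 3))
    for t in range(T):
        for j in range(1, J):
            bone = quat_rotate(quats[t, j], offset) * bone_lens[j]
            xyz[t, j] = xyz[t, parents[j]] + bone
    return xyz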
Example #5
        raise ValueError
    module, model_name = config['net'].rsplit('.', 1)
    module = importlib.import_module(module)
    model = getattr(module, model_name)
    print('model name', model_name)
    net = model(config['in_channels'], num_joints,
                config['data_param']['num_frames'], num_cls, config)
    load = os.path.join(load_path, 'model.pkl')
    print('Test at', load)
    print('Test data:', data_dir)
    with open(os.path.join(load_path, 'log.txt'), 'r') as f:
        for line in f:
            if 'Best' in line:
                print('!!!!!!', line, end='')
    weight = torch.load(load, map_location=lambda storage, loc: storage)
    net.load_state_dict(weight)
    net = net.to(device)

    val_label_path = os.path.join(data_dir, 'val_label.pkl')
    test_edge_path = os.path.join(data_dir, 'val_data_rel.npy') if config['use_edge'] else None

    if 'edge_only' in config and config['edge_only']:
        print('!!!!!!!EDGE_ONLY!!!!!!!!')
        testdata = Feeder(os.path.join(data_dir, 'val_data_rel.npy'),
                          val_label_path,
                          None,
                          num_samples=-1,
                          mmap=True,
                          num_frames=config['data_param']['num_frames'])
    else:
        testdata = Feeder(os.path.join(data_dir, 'val_data.npy'),
                          val_label_path,
                          test_edge_path,
                          num_samples=-1,
                          mmap=True,
                          num_frames=config['data_param']['num_frames'])
    testloader = torch.utils.data.DataLoader(testdata,
                                             batch_size=batch_size,
                                             shuffle=False,
                                             num_workers=1,
                                             pin_memory=True,
                                             worker_init_fn=worker_init_fn)

    acc_eval, loss_eval = evaluate(config, net, testloader)
    print('eval loss: %.5f, eval acc: %.5f' % (loss_eval, acc_eval))
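Examples #5 and #6 both call an evaluate(config, net, testloader) helper that is not shown. A minimal sketch consistent with how it is called and with the metrics in the training loop of Example #6 (net.get_loss and the argmax accuracy are taken from there; the device handling is an assumption, and the use_edge tuple inputs are ignored for brevity):

import torch

def evaluate(config, net, loader, device='cuda'):
    net.eval()
    correct, total, running_loss, num_iters = 0, 0, 0.0, 0
    with torch.no_grad():
        for inputs, labels in loader:
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = net(inputs)
            running_loss += net.get_loss(outputs, labels).item()
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
            num_iters += 1
    return correct / total, running_loss / num_iters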
Example #6
def train(config, logger):
    num_epochs = int(config['num_epochs'])
    batch_size = int(config['batch_size'])
    learning_rate = float(config['learning_rate'])
    weight_decay = float(config['weight_decay'])
    # Data
    logger.log_string("Loading dataset...")

    data_dir = config[config['dataset']]
    val_data_dir = data_dir
    if config['dataset'] == 'ntu':
        from data.ntu.feeder import Feeder
        num_joints = 25
        num_cls = 60
    elif config['dataset'] == 'fpha':
        from data.fpha.feeder import Feeder
        num_joints = 21
        num_cls = 45
    else:
        raise ValueError
    logger.log_string('Data dir: {}, num_joints: {}, num_cls: {}'.format(
        data_dir, num_joints, num_cls))

    # Get model
    module, model_name = config['net'].rsplit('.', 1)
    logger.backup_files([os.path.join(*module.split('.')) + '.py'])
    module = importlib.import_module(module)
    model = getattr(module, model_name)
    print('model name', model_name)
    net = model(config['in_channels'], num_joints,
                config['data_param']['num_frames'], num_cls, config)
    device_ids = config['device_ids']
    print('device_ids', device_ids)
    if config['resume'] != '':
        logger.log_string('Resume from ' + config['resume'])
        net.load_state_dict(torch.load(config['resume']))
    device = device_ids[0]
    net = net.to(device)

    def count_params(m):
        return sum(p.numel() for p in m.parameters() if p.requires_grad)

    logger.log_string('Model total number of params:' + str(count_params(net)))

    # Optimizer
    optimizer = optim.Adam(net.parameters(),
                           lr=learning_rate,
                           weight_decay=weight_decay)

    scheduler = optim.lr_scheduler.StepLR(optimizer,
                                          step_size=40,
                                          gamma=0.5,
                                          last_epoch=config['start_epoch'] - 2)

    train_label_path = os.path.join(data_dir, 'train_label.pkl')
    val_label_path = os.path.join(val_data_dir, 'val_label.pkl')
    train_edge_path = os.path.join(
        data_dir, 'train_data_rel.npy') if config['use_edge'] else None
    test_edge_path = os.path.join(
        val_data_dir, 'val_data_rel.npy') if config['use_edge'] else None

    if 'edge_only' in config and config['edge_only']:
        print(os.path.join(data_dir, 'train_data_rel.npy'))
        traindata = Feeder(os.path.join(data_dir, 'train_data_rel.npy'),
                           train_label_path,
                           None,
                           num_samples=-1,
                           mmap=True,
                           num_frames=config['data_param']['num_frames'])
        testdata = Feeder(os.path.join(val_data_dir, 'val_data_rel.npy'),
                          val_label_path,
                          None,
                          num_samples=-1,
                          mmap=True,
                          num_frames=config['data_param']['num_frames'])
    else:
        traindata = Feeder(os.path.join(data_dir, 'train_data.npy'),
                           train_label_path,
                           train_edge_path,
                           num_samples=-1,
                           mmap=True,
                           num_frames=config['data_param']['num_frames'])
        testdata = Feeder(os.path.join(val_data_dir, 'val_data.npy'),
                          val_label_path,
                          test_edge_path,
                          num_samples=-1,
                          mmap=True,
                          num_frames=config['data_param']['num_frames'])
    logger.log_string('Train samples %d' % len(traindata))
    logger.log_string('Test samples %d' % len(testdata))

    trainloader = torch.utils.data.DataLoader(traindata,
                                              batch_size=batch_size,
                                              shuffle=True,
                                              num_workers=4,
                                              pin_memory=True,
                                              worker_init_fn=worker_init_fn)
    testloader = torch.utils.data.DataLoader(testdata,
                                             batch_size=batch_size,
                                             shuffle=False,
                                             num_workers=4,
                                             pin_memory=True,
                                             worker_init_fn=worker_init_fn)
    best_acc = 0.

    # Whether to use the LR scheduler
    change_lr = True

    for epoch in range(config['start_epoch'], num_epochs + 1):
        np.random.seed()  # reset seed
        tic = time()
        net.train()
        correct = 0
        total = 0
        running_loss = 0.0
        num_iters = 0
        # Train
        # PyTorch 1.1 moved scheduler.step() after the optimizer updates,
        # so the call site depends on the installed version
        if torch.__version__ == '1.0.0':
            if change_lr:
                scheduler.step()  # Adjust learning rate
                logger.log_scalar_train('Learning rate',
                                        scheduler.get_lr()[0], epoch)
                print(scheduler.get_lr()[0])

        for data in tqdm(trainloader,
                         total=len(trainloader),
                         disable=not config['tqdm'],
                         ascii=True):
            inputs, labels = data
            if config['padding_input']:
                pad = torch.zeros([
                    inputs.shape[0], 1, inputs.shape[2], inputs.shape[3],
                    inputs.shape[4]
                ])
                inputs = torch.cat([pad, inputs.type_as(pad)], dim=1)
            # Data Augmentation
            if config['data_augmentation']:
                inputs = random_rotate(inputs, y_only=True)
            if config['use_edge']:
                inputs[0], inputs[1], labels = inputs[0].to(
                    device), inputs[1].to(device), labels.to(device)
            else:
                inputs, labels = inputs.to(device), labels.to(device)

            # Freeze the learned adjacency matrices for the first 10 epochs
            if config['net'] in ['models.dgnn.Model', 'models.qdgnn.Model']:
                for name, params in net.named_parameters():
                    if 'source_M' in name or 'target_M' in name:
                        params.requires_grad = epoch > 10

            optimizer.zero_grad()
            outputs = net(inputs)
            loss = net.get_loss(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()

            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
            num_iters = num_iters + 1

        if torch.__version__ in ['1.1.0', '1.2.0']:
            if change_lr:
                scheduler.step()  # Adjust learning rate

        # Eval and metrics
        acc_train = correct / total
        loss_train = running_loss / num_iters
        acc_eval, loss_eval = evaluate(config, net, testloader)
        if acc_eval > best_acc:
            best_acc = acc_eval
            # Save trained model
            torch.save(net.state_dict(),
                       os.path.join(config['logdir'], 'model.pkl'))
        logger.log_string(
            'Epoch %d: train loss: %.5f, eval loss: %.5f, train acc: %.5f, eval acc: %.5f, time: %.5f'
            %
            (epoch, loss_train, loss_eval, acc_train, acc_eval, time() - tic))
        logger.log_scalar_train('Loss', loss_train, epoch)
        logger.log_scalar_train('Accuracy', acc_train, epoch)
        logger.log_scalar_eval('Loss', loss_eval, epoch)
        logger.log_scalar_eval('Accuracy', acc_eval, epoch)

    logger.log_string('Best eval acc: %.5f' % (best_acc))
    logger.log_string('Finished Training')
    logger.close()
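For reference, every config key that train() reads, collected into one illustrative dict (values are placeholders, not the project's defaults):

config = {
    'dataset': 'ntu',                  # selects Feeder, num_joints, num_cls
    'ntu': '/path/to/NTU-RGB-D/xyz/xsub',
    'net': 'models.dgnn.Model',        # module path resolved via importlib
    'in_channels': 3,
    'data_param': {'num_frames': 20},
    'num_epochs': 120,
    'batch_size': 64,
    'learning_rate': 1e-3,
    'weight_decay': 1e-4,
    'device_ids': [0],
    'resume': '',                      # '' means train from scratch
    'start_epoch': 1,
    'use_edge': False,
    'edge_only': False,
    'padding_input': False,
    'data_augmentation': True,
    'tqdm': True,
    'logdir': 'runs/exp1',
}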