Example 1
def test():
    args = parse_args()
    cfg = Config.from_file(args.config)

    data_path = cfg.test.source_data
    out = cfg.test.out
    model_path = cfg.test.gen_path

    print('GPU: {}'.format(args.gpu))

    # number of gesture classes and users
    n_gesture = cfg.train.n_gesture
    n_user = len(cfg.train.dataset_dirs)
    n_style = n_gesture if cfg.style == 'gesture' else n_user

    ## Import generator model
    gen = getattr(models, cfg.train.generator.model)(cfg.train.generator,
                                                     n_style=n_style)
    print('')
    print(f'loading generator weight from {model_path}')
    serializers.load_npz(model_path, gen)

    ## Set GPU
    if args.gpu >= 0:
        # Make a specified GPU current
        chainer.backends.cuda.get_device_from_id(args.gpu).use()
        gen.to_gpu()

    test_data = data_load(data_path, ges_class=cfg.test.ges_class)
    test_data = np.expand_dims(test_data, axis=1)

    style_label = np.zeros((test_data.shape[0], n_style * 2, 1, 1))
    source = cfg.test.ges_class if cfg.style == 'gesture' else cfg.source_user
    target = cfg.test.target_style
    style_label[:, source] += 1
    style_label[:, target + n_style] += 1

    test_data = test_data.astype(np.float32)
    style_label = style_label.astype(np.float32)
    if args.gpu >= 0:
        # Transfer inputs to the GPU only when one was requested
        test_data = cuda.to_gpu(test_data)
        style_label = cuda.to_gpu(style_label)
    test_data = Variable(test_data)
    style_label = Variable(style_label)

    with chainer.using_config('train', False), chainer.using_config(
            'enable_backprop', False):
        gen_data = gen(test_data, style_label)
    gen_data.to_cpu()

    if cfg.style == 'gesture':
        save_path = f'./{out}/user{cfg.source_user}/ges{target}_from_ges{source}'
    else:
        save_path = f'./{out}/user{target}/ges{cfg.test.ges_class}_from_user{source}'
    os.makedirs(os.path.dirname(save_path), exist_ok=True)
    print('saving generated data to ' + save_path + '.npy')
    np.save(save_path, gen_data.data)
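
All of these examples share a small parse_args helper that is not shown. A minimal sketch of the common core, inferred from how args.config, args.gpu, and args.resume are read (individual scripts add further flags; the exact names and defaults here are assumptions):

import argparse

def parse_args():
    # Hypothetical reconstruction: only the attributes the examples read.
    parser = argparse.ArgumentParser()
    parser.add_argument('config', help='path to a config file for Config.from_file')
    parser.add_argument('--gpu', type=int, default=-1, help='GPU id; negative runs on CPU')
    parser.add_argument('--resume', default='', help='checkpoint to resume from')
    return parser.parse_args()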
Example 2
def main():
    checkpoint_path = '/DATA/wangshen_data/UCF101/voxelflow_finetune_model_best.pth.tar'
    global cfg, best_PSNR
    args = parse_args()
    cfg = Config.from_file(args.config)
    gpu_ids = ','.join(str(gpu) for gpu in cfg.device)
    print(gpu_ids)
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu_ids
    cudnn.benchmark = True
    cudnn.fastest = True

    if hasattr(datasets, cfg.dataset):
        ds = getattr(datasets, cfg.dataset)
    else:
        raise ValueError('Unknown dataset ' + cfg.dataset)

    model = getattr(models, cfg.model.name)(cfg.model).cuda()
    cfg.train.input_mean = model.input_mean
    cfg.train.input_std = model.input_std
    cfg.test.input_mean = model.input_mean
    cfg.test.input_std = model.input_std

    input_mean = cfg.test.input_mean
    input_std = cfg.test.input_std

    # Data loading code

    val_loader = torch.utils.data.DataLoader(
        datasets.VimeoTest(cfg.test),
        batch_size=1,
        shuffle=False,
        num_workers=0,
        pin_memory=True)

    if os.path.isfile(checkpoint_path):
        print("=> loading checkpoint '{}'".format(checkpoint_path))
        checkpoint = torch.load(checkpoint_path)
        model.load_state_dict(checkpoint['state_dict'], strict=False)
    else:
        print("=> no checkpoint found at '{}'".format(checkpoint_path))

    model = DataParallelwithSyncBN(
        model, device_ids=range(len(cfg.device))).cuda()

    # define loss function (criterion), optimizer and evaluator
    criterion = torch.nn.MSELoss().cuda()
    evaluator = EvalPSNR(255.0 / np.mean(cfg.test.input_std))


    PSNR = validate(val_loader, model, criterion, evaluator, input_mean, input_std)

    print(PSNR)
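
EvalPSNR is constructed with a peak value of 255.0 / np.mean(cfg.test.input_std) but is not defined in these examples (Example 4 passes input_mean/input_std as extra arguments, presumably to denormalize predictions first). A minimal PSNR accumulator consistent with that usage; this is a sketch, not the repository's actual class:

import numpy as np

class EvalPSNR:
    # Hypothetical sketch: accumulate PSNR over evaluated samples.
    def __init__(self, peak):
        self.peak = peak
        self.total = 0.0
        self.count = 0

    def __call__(self, pred, gt):
        # pred, gt: arrays in the network's normalized value range.
        mse = np.mean((np.asarray(pred, np.float64) - np.asarray(gt, np.float64)) ** 2)
        self.total += 10.0 * np.log10(self.peak ** 2 / max(mse, 1e-12))
        self.count += 1

    def PSNR(self):
        return self.total / max(self.count, 1)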
Example 3
def calcurate_average_speed():
    parser = argparse.ArgumentParser()
    parser.add_argument('config')
    parser.add_argument('--out', required=True)
    args = parser.parse_args()

    cfg = Config.from_file(args.config)
    npy_paths = collect_path(cfg.train.dataset)

    head, ext = os.path.splitext(cfg.train.dataset)
    head, data_name = os.path.split(head)

    class_list = cfg.train.class_list

    v_traj_avg_dict = {label: 0 for label in class_list}
    class_npy_paths = {label: [] for label in class_list}
    for npy_path in npy_paths:
        npy_name = os.path.splitext(os.path.split(npy_path)[1])[0]
        label = npy_name.split('_')[0]
        class_npy_paths[label].append(npy_path)

    for label, paths in class_npy_paths.items():
        if not paths:
            continue
        class_v_traj = None
        for npy_path in paths:
            motion = np.load(npy_path)
            # Per-frame displacement on the XZ (ground) plane
            v_trajX = np.diff(motion[:, 0])
            v_trajZ = np.diff(motion[:, 2])
            speed = np.sqrt(v_trajX**2 + v_trajZ**2)
            class_v_traj = speed if class_v_traj is None else np.concatenate(
                (class_v_traj, speed), axis=0)
        v_traj_avg_dict[label] = np.average(class_v_traj, axis=0)
    print(v_traj_avg_dict)

    with open(args.out, 'wb') as f:
        pickle.dump(v_traj_avg_dict, f)

    return
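
The per-frame speed computed above is the Euclidean norm of the frame-to-frame displacement projected onto the XZ ground plane (the sign of the difference does not matter once squared). The same math in a more compact form, using np.diff and np.hypot:

import numpy as np

def planar_speed(motion):
    # motion: (T, D) array whose columns 0 and 2 hold the X and Z trajectory.
    # Returns a (T-1,) array of per-frame speeds sqrt(dx**2 + dz**2).
    d = np.diff(motion[:, [0, 2]], axis=0)
    return np.hypot(d[:, 0], d[:, 1])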
Example 4
def main():
    global cfg, best_PSNR
    args = parse_args()
    cfg = Config.from_file(args.config)

    cudnn.benchmark = True
    cudnn.fastest = True

    model = getattr(models, cfg.model.name)(cfg.model).cuda()
    cfg.train.input_mean = model.input_mean
    cfg.train.input_std = model.input_std
    cfg.test.input_mean = model.input_mean
    cfg.test.input_std = model.input_std

    # Data loading code
    train_loader = torch.utils.data.DataLoader(
        datasets.VimeoSeptuplet('data/vimeo_septuplet', cfg.train, True),
        batch_size=cfg.train.batch_size,
        shuffle=True,
        num_workers=8,
        pin_memory=True,
        drop_last=True)

    val_loader = torch.utils.data.DataLoader(
        datasets.VimeoSeptuplet('data/vimeo_septuplet', cfg.test, False),
        batch_size=cfg.test.batch_size,
        shuffle=False,
        num_workers=8,
        pin_memory=True)

    cfg.train.optimizer.args.max_iter = (
        cfg.train.optimizer.args.max_epoch * len(train_loader))

    policies = model.get_optim_policies()
    for group in policies:
        print('group: {} has {} params, lr_mult: {}, decay_mult: {}'.format(
            group['name'],
            len(group['params']), group['lr_mult'], group['decay_mult']))
    optimizer = Optim(policies, cfg.train.optimizer)

    if cfg.resume or cfg.weight:
        checkpoint_path = cfg.resume if cfg.resume else cfg.weight
        if os.path.isfile(checkpoint_path):
            print("=> loading checkpoint '{}'".format(checkpoint_path))
            checkpoint = torch.load(checkpoint_path)
            model.load_state_dict(checkpoint['state_dict'], strict=False)
            if cfg.resume:
                optimizer.load_state_dict(checkpoint['grad_dict'])
            del checkpoint
        else:
            print("=> no checkpoint found at '{}'".format(checkpoint_path))

    model = model.cuda()

    # define loss function (criterion), optimizer and evaluator
    criterion = torch.nn.MSELoss().cuda()
    evaluator = EvalPSNR(255.0 / np.mean(cfg.test.input_std), cfg.test.input_mean, cfg.test.input_std)

    if cfg.mode == 'test':
        PSNR = validate(val_loader, model, optimizer, criterion, evaluator)
        return

    for epoch in range(cfg.train.optimizer.args.max_epoch):

        # train for one epoch
        train(train_loader, model, optimizer, criterion, epoch)
        # evaluate on validation set
        if ((epoch + 1) % cfg.logging.eval_freq == 0
                or epoch == cfg.train.optimizer.args.max_epoch - 1):
            PSNR = validate(val_loader, model, optimizer, criterion, evaluator)
            # remember best PSNR and save checkpoint
            is_best = PSNR > best_PSNR
            best_PSNR = max(PSNR, best_PSNR)
            save_checkpoint({
                'epoch': epoch + 1,
                'arch': dict(cfg),
                'state_dict': model.state_dict(),
                'grad_dict': optimizer.state_dict(),
                'best_PSNR': best_PSNR,
            }, is_best)
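
save_checkpoint is called with a state dict and an is_best flag but is not shown. The conventional PyTorch pattern it presumably follows (the file names here are assumptions, though 'model_best.pth.tar' matches the checkpoint loaded in Example 2):

import shutil
import torch

def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    # Hypothetical sketch: persist the latest state; keep a copy of the best one.
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, 'model_best.pth.tar')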
Example 5
def train():
    args = parse_args()
    cfg = Config.from_file(args.config)

    iteration = cfg.train.iterations
    batchsize = cfg.train.batchsize
    out = cfg.train.out

    print('GPU: {}'.format(args.gpu))
    print('# Minibatch-size: {}'.format(batchsize))
    print('# iteration: {}'.format(iteration))
    print('')

    ## Set up networks to train
    # number of gesture classes and users
    n_gesture = cfg.train.n_gesture
    n_user = len(cfg.train.dataset_dirs)
    if cfg.style == 'gesture':
        gen = getattr(models, cfg.train.generator.model)(cfg.train.generator,
                                                         n_style=n_gesture)
    elif cfg.style == 'user':
        gen = getattr(models, cfg.train.generator.model)(cfg.train.generator,
                                                         n_style=n_user)
    else:
        raise ValueError(f'Invalid style: {cfg.style}')
    dis = getattr(models,
                  cfg.train.discriminator.model)(cfg.train.discriminator,
                                                 n_gesture=n_gesture,
                                                 n_user=n_user)

    ## Load resume checkpoint to restart. Load optimizer state later.
    if args.resume or cfg.resume:
        gen_resume = args.resume if args.resume else cfg.resume
        print(f'loading generator resume from {os.path.join(out, gen_resume)}')
        serializers.load_npz(os.path.join(out, gen_resume), gen)
        dis_resume = gen_resume.replace('gen', 'dis')
        print(
            f'loading discriminator resume from {os.path.join(out, dis_resume)}'
        )
        serializers.load_npz(os.path.join(out, dis_resume), dis)

    ## Load resume checkpoint (only weight, not load optimizer state)
    if cfg.weight:
        gen_weight = cfg.weight
        print(f'loading generator weight from {gen_weight}')
        serializers.load_npz(gen_weight, gen)
        dis_weight = gen_weight.replace('gen', 'dis')
        print(f'loading discriminator weight from {dis_weight}')
        serializers.load_npz(dis_weight, dis)

    ## Setup an optimizer
    def make_optimizer(model, alpha=0.0002, beta1=0.5):
        optimizer = chainer.optimizers.Adam(alpha=alpha, beta1=beta1)
        optimizer.setup(model)
        return optimizer

    opt_gen = make_optimizer(gen, alpha=cfg.train.parameters.g_lr, beta1=0.5)
    opt_dis = make_optimizer(dis, alpha=cfg.train.parameters.d_lr, beta1=0.5)
    if args.resume or cfg.resume:
        opt_gen_resume = gen_resume.replace('gen', 'opt_gen')
        print(
            f'loading generator optimizer from {os.path.join(out, opt_gen_resume)}'
        )
        serializers.load_npz(os.path.join(out, opt_gen_resume), opt_gen)
        opt_dis_resume = gen_resume.replace('gen', 'opt_dis')
        print(
            f'loading discriminator optimizer from {os.path.join(out, opt_dis_resume)}'
        )
        serializers.load_npz(os.path.join(out, opt_dis_resume), opt_dis)

    ## Set GPU
    if args.gpu >= 0:
        # Make a specified GPU current
        chainer.backends.cuda.get_device_from_id(args.gpu).use()
        gen.to_gpu()
        dis.to_gpu()

    ## Set up dataset
    train = GestureDataset(cfg.train.dataset_dirs,
                           style=cfg.style,
                           equal=cfg.train.class_equal)
    for i in range(len(cfg.train.dataset_dirs)):
        print(
            f'{cfg.train.dataset_dirs[i]} contains {train.len_each()[i]} samples'
        )

    train_iter = chainer.iterators.SerialIterator(train, batchsize)

    ## Set up a Trainer
    updater = StarGANUpdater(models=(gen, dis),
                             iterator=train_iter,
                             optimizer={
                                 'gen': opt_gen,
                                 'dis': opt_dis
                             },
                             cfg=cfg,
                             out=out)
    trainer = training.Trainer(updater, (iteration, 'iteration'), out=out)

    ## Set intervals
    ##     - display_interval : interval (in iterations) between printed log lines.
    ##     - save_interval : interval (in iterations) between model snapshots.
    display_interval = (cfg.train.display_interval, 'iteration')
    save_interval = (cfg.train.save_interval, 'iteration')
    trainer.extend(extensions.snapshot_object(
        gen, 'gen_iter_{.updater.iteration}.npz'),
                   trigger=save_interval)
    trainer.extend(extensions.snapshot_object(
        dis, 'dis_iter_{.updater.iteration}.npz'),
                   trigger=save_interval)
    trainer.extend(extensions.snapshot_object(
        opt_gen, 'opt_gen_iter_{.updater.iteration}.npz'),
                   trigger=save_interval)
    trainer.extend(extensions.snapshot_object(
        opt_dis, 'opt_dis_iter_{.updater.iteration}.npz'),
                   trigger=save_interval)
    trainer.extend(extensions.LogReport(trigger=display_interval))
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration', 'lr', 'gen/loss_adv', 'gen/loss_eq',
        'gen/loss_style', 'gen/loss_cont', 'gen/loss_rec', 'gen/loss_sm',
        'dis/loss_adv', 'dis/loss_style', 'dis/loss_cont'
    ]),
                   trigger=display_interval)
    trainer.extend(extensions.ProgressBar(update_interval=display_interval[0]))

    ## Save scripts and command to result directory. (To look back later.)
    if not os.path.exists(out):
        os.makedirs(out)
    shutil.copy(args.config, f'./{out}')
    shutil.copy('./core/models/StarGAN.py', f'./{out}')
    shutil.copy('./core/updater/StarGANupdater.py', f'./{out}')

    commands = sys.argv
    with open(f'./{out}/command.txt', 'w') as f:
        f.write(f'python {commands[0]} ')
        for command in commands[1:]:
            f.write(command + ' ')

    ## Run the training
    trainer.run()

    ## Once training has finished, save all models.
    modelname = f'./{out}/gen.npz'
    print('saving generator model to ' + modelname)
    serializers.save_npz(modelname, gen)

    modelname = f'./{out}/dis.npz'
    print('saving discriminator model to ' + modelname)
    serializers.save_npz(modelname, dis)

    optname = f'./{out}/opt_gen.npz'
    print('saving generator optimizer to ' + optname)
    serializers.save_npz(optname, opt_gen)

    optname = f'./{out}/opt_dis.npz'
    print('saving discriminator optimizer to ' + optname)
    serializers.save_npz(optname, opt_dis)
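
The resume logic above derives the discriminator and optimizer checkpoint names from the generator snapshot purely by string substitution, matching the templates passed to snapshot_object. For example (the file name is illustrative):

gen_resume = 'gen_iter_20000.npz'
assert gen_resume.replace('gen', 'dis') == 'dis_iter_20000.npz'
assert gen_resume.replace('gen', 'opt_gen') == 'opt_gen_iter_20000.npz'
# The substitution is applied to the bare file name before joining with the
# output directory, so a 'gen' substring in the path cannot corrupt it.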
Example 6
def main():
    def save_frame(th, do_pause, dir_name='', vis=True):
        result = prob_to_label(combine_prob(pred_prob[th]))
        result_show = np.dstack(
            (colors[result, 0], colors[result, 1], colors[result,
                                                          2])).astype(np.uint8)
        if args.output != '' and dir_name != '':
            out_file = os.path.join(dataset_dir, 'Results', 'Segmentations',
                                    resolution, args.output, dir_name,
                                    video_dir, '%05d.png' % th)
            if not os.path.exists(os.path.split(out_file)[0]):
                os.makedirs(os.path.split(out_file)[0])
            if vis:
                cv2.imwrite(out_file, result_show)
            else:
                cv2.imwrite(out_file, result)
        # On-screen preview is disabled: return before blending and showing the frame.
        return

    colors = labelcolormap(256)

    global pred_prob, frames, flow1, flow2, orig_mask, \
        model, instance_num, fr_h_r, fr_w_r, appear, bbox_cnt, \
        location, patch_shapes

    args = parse_args()
    cfg = Config.from_file(args.config)
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)

    model = getattr(models, cfg.model.name)(cfg.model)

    if os.path.isfile(cfg.weight):
        print("=> loading checkpoint '{}'".format(cfg.weight))
        checkpoint = torch.load(cfg.weight)
        model.load_state_dict(checkpoint['state_dict'], strict=True)
        print("=> loaded checkpoint")
    else:
        raise FileNotFoundError("=> no checkpoint found at '{}'".format(cfg.weight))
    model = model.cuda()
    model.eval()

    cudnn.benchmark = True

    # Setup dataset
    dataset_dir = os.path.join('data/DAVIS')
    resolution = '480p'
    imageset_dir = os.path.join(dataset_dir, 'ImageSets', '2017',
                                args.testset + '.txt')

    video_list = []
    for line in open(imageset_dir).readlines():
        if line.strip() != '':
            video_list.append(line.strip())

    person_all = pickle_load(os.path.join(dataset_dir, 'PersonSearch',
                                          resolution, args.testset + '.pkl'),
                             encoding='latin')
    object_all = pickle_load(os.path.join(dataset_dir, 'ObjectSearch',
                                          resolution, args.testset + '.pkl'),
                             encoding='latin')
    category_all = pickle_load(os.path.join(dataset_dir, 'Class', resolution,
                                            args.testset + '.pkl'),
                               encoding='latin')
    frame_cnt = 0

    use_cache = False  #(args.cache != '')
    video_cnt = -1

    for video_dir in video_list:
        video_cnt += 1
        frame_dir = os.path.join(dataset_dir, 'JPEGImages', resolution,
                                 video_dir)
        frame_fr_dir = os.path.join(dataset_dir, 'JPEGImages',
                                    'Full-Resolution', video_dir)
        label_dir = os.path.join(dataset_dir, 'Annotations', resolution,
                                 video_dir)
        flow_dir = os.path.join(dataset_dir, 'Flow', resolution, video_dir)
        cache_dir = os.path.join(dataset_dir, 'Cache', resolution, args.cache,
                                 video_dir)
        frames_num = len(os.listdir(frame_dir))

        if (video_cnt % args.gpu_num != args.gpu):
            frame_cnt += frames_num
            continue

        frame_0 = cv2.imread(os.path.join(frame_dir, '%05d.jpg' % 0))
        #label_0 = cv2.imread(os.path.join(label_dir, '%05d.png' % 0), cv2.IMREAD_UNCHANGED)
        label_0 = cv2.imread(os.path.join(label_dir, '%05d.png' % 0),
                             cv2.IMREAD_GRAYSCALE)
        label_0 = convertColorMask2Ordinary(label_0)

        instance_num = label_0.max()

        frames = [None for _ in range(frames_num)]
        pred_prob = [None for _ in range(frames_num)]
        flow1 = [None for _ in range(frames_num)]
        flow2 = [None for _ in range(frames_num)]
        person_reid = [[None for _ in range(instance_num)]
                       for _ in range(frames_num)]
        object_reid = [[None for _ in range(instance_num)]
                       for _ in range(frames_num)]
        category = category_all[video_dir]
        orig_mask = [None for _ in range(instance_num)]

        frames[0] = cv2.imread(os.path.join(frame_fr_dir, '%05d.jpg' % 0))
        fr_h_r = float(frames[0].shape[0]) / float(frame_0.shape[0])
        fr_w_r = float(frames[0].shape[1]) / float(frame_0.shape[1])
        pred_prob[0] = label_to_prob(label_0, instance_num)
        person_reid[0] = person_all[frame_cnt]
        object_reid[0] = object_all[frame_cnt]

        save_frame(0, False, 'result', False)

        bbox = gen_bbox(label_0, range(instance_num), True)
        for i in range(instance_num):
            orig_mask[i] = pred_prob[0][bbox[i, 1]:bbox[i, 3],
                                        bbox[i, 0]:bbox[i, 2], i * 2 + 1]

        for th in range(1, frames_num):
            frames[th] = cv2.imread(os.path.join(frame_fr_dir,
                                                 '%05d.jpg' % th))
            pred_prob[th] = label_to_prob(np.zeros_like(label_0, np.uint8),
                                          instance_num)
            flow1[th - 1] = flo.readFlow(
                os.path.join(flow_dir, '%05d.flo' % (th - 1)))
            flow2[th] = flo.readFlow(os.path.join(flow_dir, '%05d.rflo' % th))
            person_reid[th] = person_all[frame_cnt + th]
            object_reid[th] = object_all[frame_cnt + th]

        bbox_cnt = -1000 * np.ones((frames_num, instance_num))
        bbox_cnt[0, :] = 0

        for th in range(frames_num):
            for i in range(instance_num):
                person_reid[th][i] = person_reid[th][i][:, [0, 1, 2, 3, 5]]
                object_reid[th][i] = object_reid[th][i][:, [0, 1, 2, 3, 5]]
        frame_cnt += frames_num

        cache_file = os.path.join(cache_dir, '%s.pkl' % video_dir)

        if (use_cache and os.path.exists(cache_file)):
            pred_prob, bbox_cnt = pickle_load(cache_file, encoding='latin')
        else:
            predict(1, frames_num, 1, range(instance_num))
            if use_cache:
                if not os.path.exists(os.path.split(cache_file)[0]):
                    os.makedirs(os.path.split(cache_file)[0])
                pickle_dump((pred_prob, bbox_cnt), cache_file)

        appear = np.zeros((frames_num, instance_num)).astype(int)
        location = np.zeros((frames_num, instance_num, 2)).astype(int)
        update_appear()

        for th in range(frames_num):
            save_frame(th, False, 'draft', True)

        for reid_target in ['person', 'object']:
            cache_file = os.path.join(cache_dir,
                                      '%s_%s.pkl' % (reid_target, video_dir))

            if (use_cache and os.path.exists(cache_file)):
                pred_prob, bbox_cnt = pickle_load(cache_file, encoding='latin')
            else:
                target_instance = []
                for i in range(instance_num):
                    if (reid_target == 'object'
                            or category[i][123] > 0.5):  # person is 123
                        target_instance.append(i)
                reid_score = person_reid if reid_target == 'person' else object_reid
                draft_cnt = 0
                while True:
                    max_score = 0
                    for i in range(1, frames_num - 1):
                        temp_label = prob_to_label(combine_prob(pred_prob[i]))
                        bbox_i = gen_bbox(temp_label, range(instance_num),
                                          False, 0.99)
                        for j in target_instance:
                            if bbox_cnt[
                                    i,
                                    j] != i and reid_score[i][j].shape[0] > 0:
                                bbox_id = np.argmax(reid_score[i][j][:, 4])
                                # retrieval
                                if (appear[i, j] == 0):
                                    x1, y1, x2, y2 = reid_score[i][j][bbox_id,
                                                                      0:4]
                                    if (reid_score[i][j][bbox_id,
                                                         4] > max_score
                                            and reid_score[i][j][bbox_id,
                                                                 4] > reid_th):

                                        bbox_now = reid_score[i][j][
                                            bbox_id, 0:4].astype(int)
                                        result = np.bincount(temp_label[
                                            bbox_now[1]:bbox_now[3] + 1,
                                            bbox_now[0]:bbox_now[2] +
                                            1].flatten(),
                                                             minlength=j + 2)
                                        flag = True

                                        if flag:
                                            for occ_instance in np.where(
                                                    result[1:] > 0)[0]:
                                                if (IoU(
                                                        bbox_now,
                                                        bbox_i[occ_instance]) >
                                                        bbox_occ_th):
                                                    flag = False

                                        if flag:
                                            for k in target_instance:
                                                if (k != j
                                                        and appear[i, k] == 0
                                                        and reid_score[i][k][
                                                            bbox_id, 4] >
                                                        reid_score[i][j][
                                                            bbox_id, 4]):
                                                    flag = False

                                        if flag:
                                            max_frame = i
                                            max_instance = j
                                            max_bbox = reid_score[i][j][
                                                bbox_id, 0:4]
                                            max_score = reid_score[i][j][
                                                bbox_id, 4]

                    if (max_score == 0):
                        break

                    bbox_cnt[max_frame, max_instance] = max_frame

                    predict_single(max_frame, max_instance, max_bbox,
                                   orig_mask[max_instance])
                    save_frame(max_frame, False,
                               '%s_%05d_checkpoint' % (reid_target, draft_cnt))

                    temp = 0
                    for i in range(max_frame - 1, -1, -1):
                        if appear[i, max_instance] != 0:
                            temp = i
                            break
                    predict(max_frame - 1, temp, -1, [max_instance])

                    temp = frames_num
                    for i in range(max_frame + 1, frames_num, 1):
                        if appear[i, max_instance] != 0:
                            temp = i
                            break
                    predict(max_frame + 1, temp, 1, [max_instance])
                    update_appear()

                    for th in range(frames_num):
                        save_frame(th, False,
                                   '%s_%05d' % (reid_target, draft_cnt))

                    draft_cnt = draft_cnt + 1

                for th in range(frames_num):
                    save_frame(th, False, '%s' % reid_target)

                if use_cache:
                    pickle_dump((pred_prob, bbox_cnt), cache_file)

        for th in range(frames_num):
            save_frame(th, False, 'result', False)
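
The probability maps handled throughout this example come from label_to_prob / prob_to_label helpers that are not shown. From the indexing used above (channel i * 2 + 1 holds instance i's foreground probability), a consistent reconstruction of label_to_prob would be as follows; this is a sketch, not the repository's code:

import numpy as np

def label_to_prob(label, instance_num):
    # (H, W) integer labels in {0..instance_num} -> (H, W, 2 * instance_num)
    # map holding a [background, foreground] probability pair per instance.
    prob = np.zeros(label.shape + (instance_num * 2,), np.float32)
    for i in range(instance_num):
        fg = (label == i + 1).astype(np.float32)
        prob[..., i * 2] = 1.0 - fg
        prob[..., i * 2 + 1] = fg
    return prob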
Example 7
def train():
    global args, cfg, device
    args = parse_args()
    cfg = Config.from_file(args.config)


    #======================================================================   
    #
    ### Set up training
    #
    #======================================================================

    # Set GPU/CPU device
    cuda = torch.cuda.is_available()
    if cuda:
        print('\033[1m\033[91m' + '# cuda available!' + '\033[0m')
        device = torch.device(f'cuda:{args.gpu}')
    else:
        device = 'cpu'

    # set start iteration
    iteration = 0

    # Set up networks to train
    num_class = len(cfg.train.dataset.class_list)
    gen = getattr(models, cfg.models.generator.model)(cfg.models.generator, num_class).to(device)
    dis = getattr(models, cfg.models.discriminator.model)(cfg.models.discriminator, cfg.train.dataset.frame_nums//cfg.train.dataset.frame_step, num_class).to(device)
    networks = {'gen': gen, 'dis': dis}

    
    # Load resume state_dict (to restart training)
    if args.resume:
        checkpoint_path = args.resume
        if os.path.isfile(checkpoint_path):
            print(f'loading checkpoint from {checkpoint_path}')
            checkpoint = torch.load(checkpoint_path, map_location=device)
            for name, model in networks.items():
                model.load_state_dict(checkpoint[f'{name}_state_dict'])
            iteration = checkpoint['iteration']


    # Set up an optimizer
    gen_lr = cfg.train.parameters.g_lr
    dis_lr = cfg.train.parameters.d_lr
    opts = {}
    opts['gen'] = torch.optim.Adam(gen.parameters(), lr=gen_lr, betas=(0.5, 0.999))
    opts['dis'] = torch.optim.Adam(dis.parameters(), lr=dis_lr, betas=(0.5, 0.999))

    # Load resume state_dict
    if args.resume:
        opts['gen'].load_state_dict(checkpoint['opt_gen_state_dict'])
        opts['dis'].load_state_dict(checkpoint['opt_dis_state_dict'])
           

    # Set up dataset
    train_dataset = BVHDataset(cfg.train.dataset, mode='train')
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=cfg.train.batchsize,
        num_workers=cfg.train.num_workers,
        shuffle=True,
        drop_last=True)
    print(f'Data root \033[1m\"{cfg.train.dataset.data_root}\"\033[0m contains \033[1m{len(train_dataset)}\033[0m samples.')


    # Save scripts and command
    if not os.path.exists(cfg.train.out):
        os.makedirs(cfg.train.out)
    shutil.copy(args.config, f'./{cfg.train.out}')
    shutil.copy('./core/models/MotionGAN.py', f'./{cfg.train.out}')
    shutil.copy('./train.py', f'./{cfg.train.out}')

    commands = sys.argv
    with open(f'./{cfg.train.out}/command.txt', 'w') as f:
        f.write(f'python {commands[0]} ')
        for command in commands[1:]:
            f.write(command + ' ') 

    # Set criterion
    if cfg.train.GAN_type == 'normal':
        GAN_criterion = torch.nn.BCELoss().to(device)
    elif cfg.train.GAN_type == 'ls':
        GAN_criterion = torch.nn.MSELoss().to(device)
    else:
        GAN_criterion = None
    BCE_criterion = torch.nn.BCELoss().to(device)
    base_criterion = torch.nn.MSELoss().to(device)


    # Tensorboard Summary Writer
    writer = tbx.SummaryWriter(log_dir=os.path.join(cfg.train.out, 'log'))


    # train
    print('\033[1m\033[93m### Start Training!! ###\033[0m')
    while iteration < cfg.train.total_iterations:
        iteration = train_loop(train_loader,
                               train_dataset,
                               networks,
                               opts,
                               iteration,
                               cfg.train.total_iterations,
                               GAN_criterion,
                               BCE_criterion,
                               base_criterion,
                               writer)

    # Save final model
    state = {'iteration': iteration, 'config': dict(cfg)}
    state['gen_state_dict'] = gen.state_dict()
    state['dis_state_dict'] = dis.state_dict()
    state['opt_gen_state_dict'] = opts['gen'].state_dict()
    state['opt_dis_state_dict'] = opts['dis'].state_dict()

    ckpt_dir = os.path.join(cfg.train.out, 'checkpoint')
    os.makedirs(ckpt_dir, exist_ok=True)
    torch.save(state, os.path.join(ckpt_dir, 'checkpoint.pth.tar'))
    torch.save(gen.state_dict(), os.path.join(cfg.train.out, 'gen.pth'))
    torch.save(dis.state_dict(), os.path.join(cfg.train.out, 'dis.pth'))
    print('trained model saved!')

    writer.close()
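
train_loop is defined elsewhere; the while loop above relies only on its contract: run at most one pass over the loader and return the updated global iteration count. A skeletal sketch of that contract (the body is an assumption):

def train_loop(train_loader, train_dataset, networks, opts, iteration,
               total_iterations, GAN_criterion, BCE_criterion, base_criterion,
               writer):
    # Hypothetical skeleton: one epoch over the loader with an early exit at
    # the iteration budget; the caller loops until the budget is reached.
    for batch in train_loader:
        # ... update networks['dis'] and networks['gen'] with opts ...
        iteration += 1
        if iteration >= total_iterations:
            break
    return iteration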
Example 8
def main():
    args = parse_args()
    cfg = Config.from_file(args.config)

    # Set GPU/CPU device
    cuda = torch.cuda.is_available()
    if cuda:
        print('\033[1m\033[91m' + '# cuda available!' + '\033[0m')
        device = torch.device(f'cuda:{args.gpu}')
    else:
        device = 'cpu'

    ## Set up generator network
    num_class = len(cfg.train.dataset.class_list)
    gen = getattr(models, cfg.models.generator.model)(cfg.models.generator,
                                                      num_class).to(device)

    ## Load weight
    if args.weight is not None:
        checkpoint_path = args.weight
    else:
        checkpoint_path = os.path.join(cfg.test.out, 'gen.pth')
        if not os.path.exists(checkpoint_path):
            checkpoint_path = sorted(
                glob.glob(
                    os.path.join(cfg.test.out, 'checkpoint',
                                 'iter_*.pth.tar')))[-1]

    if not os.path.exists(checkpoint_path):
        print('Generator weight not found!')
        sys.exit()
    else:
        print(f'Loading generator model from \033[1m{checkpoint_path}\033[0m')
        checkpoint = torch.load(checkpoint_path, map_location=device)
        if 'gen_state_dict' in checkpoint:
            gen.load_state_dict(checkpoint['gen_state_dict'])
            iteration = checkpoint['iteration']
        else:
            gen.load_state_dict(checkpoint)
            iteration = cfg.train.total_iterations
    gen.eval()

    ## Create output directory
    result_dir = f'{cfg.test.out}/statistics/iter_{iteration}'
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)

    ## Parse component list (used only by PCA and tSNE)
    components = []
    for pattern in re.findall(r'(\d+-\d+)', args.components):
        pair = pattern.split('-')
        components.append([int(pair[0]), int(pair[1])])

    ## Conduct analysis

    # Apply PCA
    if args.mode == 'pca':
        result_path = os.path.join(result_dir, f'PCA_{args.target}.png')
        apply_pca(cfg, gen, result_path, args.target, components, device)

    # Apply tSNE
    elif args.mode == 'tSNE':
        result_path = os.path.join(result_dir, f'tSNE_{args.target}.pdf')
        apply_tSNE(cfg, gen, result_path, args.target, components, device)

    # Calculate the distance between cluster centroids and visualize it as a heatmap
    elif args.mode == 'heatmap':
        assert args.target == 'w'
        draw_heatmap(cfg, gen, result_dir, device)

    else:
        raise ValueError('Invalid mode!')
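
The analysis helpers (apply_pca, apply_tSNE, draw_heatmap) are not shown. A minimal sketch of what apply_pca plausibly does with the per-class embeddings, assuming scikit-learn and a non-empty components list; everything below is a reconstruction, not the actual implementation:

import matplotlib.pyplot as plt
import torch
from sklearn.decomposition import PCA

def apply_pca(cfg, gen, result_path, target, components, device):
    # Hypothetical sketch: embed every class label, project with PCA, and
    # scatter-plot the first requested pair of principal components.
    class_list = cfg.train.dataset.class_list
    z = gen.make_hidden(1, 1).to(device) if cfg.models.generator.use_z else None
    with torch.no_grad():
        ws = [gen.latent_transform(z, torch.tensor([i], dtype=torch.long, device=device))
              .cpu().numpy().reshape(-1) for i in range(len(class_list))]
    # n_components must not exceed the number of classes (samples).
    proj = PCA(n_components=max(max(c) for c in components) + 1).fit_transform(ws)
    cx, cy = components[0]
    plt.figure()
    plt.scatter(proj[:, cx], proj[:, cy], s=25)
    for i, name in enumerate(class_list):
        plt.text(proj[i, cx], proj[i, cy], name, fontsize=8)
    plt.savefig(result_path)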
Example 9
def make_video():
    args = parse_args()
    cfg = Config.from_file(args.config)


    #======================================================================
    #
    ### Set up model
    # 
    #======================================================================

    ## Set GPU/CPU device
    cuda = torch.cuda.is_available()
    if cuda and args.gpu > -1:
        print('\033[1m' + '# cuda available!' + '\033[0m')
        device = torch.device(f'cuda:{args.gpu}')
    else:
        device = 'cpu'

    ## Define Generator
    gen = getattr(models, cfg.models.generator.model)(cfg.models.generator, len(cfg.train.dataset.class_list)).to(device)

    # Load weight
    if args.weight is None:
        checkpoint_path = os.path.join(cfg.test.out, 'gen.pth')
        if not os.path.exists(checkpoint_path):
            checkpoint_path = sorted(glob.glob(os.path.join(cfg.test.out, 'checkpoint', 'iter_*.pth.tar')))[-1]
    else:
        checkpoint_path = args.weight

    if not os.path.exists(checkpoint_path):
        print('\033[31m' + 'generator weight not found!' + '\033[0m')
        sys.exit()
    else:
        print('\033[33m' + 'loading generator model from ' + '\033[1m' + checkpoint_path + '\033[0m')
        checkpoint = torch.load(checkpoint_path, map_location=device)
        if 'gen_state_dict' in checkpoint:
            gen.load_state_dict(checkpoint['gen_state_dict'])
            iteration = checkpoint['iteration']
        else:
            gen.load_state_dict(checkpoint)
            iteration = cfg.train.total_iterations
    gen.eval()



    #======================================================================
    #
    ### Label Embedding 
    # 
    #======================================================================
   
    '''
    * Style Mode
       - single : inference with each individual style
       - random : inference with randomly chosen styles
       - mix : interpolation between two styles
       - operation : addition and subtraction of multiple styles
    '''

    ## Define label embed function
    if args.model in ['MotionGAN', 'TCN']:
        z = 0 * gen.make_hidden(1, 1).to(device) if cfg.models.generator.use_z else None
        LabelEmbedder = lambda style: gen.latent_transform(z, style_to_label(style))
    elif args.model == 'ACGAN':
        z = 0 * gen.make_hidden(1, 1).to(device) if cfg.train.generator.use_z else None
        LabelEmbedder = lambda style: gen.label_emb(style_to_label(style))
    elif args.model == 'LSTM':
        LabelEmbedder = lambda style: gen.label_emb(style_to_label(style))
    else:
        raise ValueError(f'Unsupported model: {args.model}')

    # Define style name convert function
    def style_to_label(style):
        return torch.Tensor([cfg.train.dataset.class_list.index(style)]).type(torch.LongTensor).to(device) 



    ## Create name and embed w pair
    inputs = [] 

    #  Mode :  Single style inference
    if args.style_mode == 'single':
        style_list = args.style.split(',') if args.style else cfg.train.dataset.class_list 
        for style in style_list:
            w = LabelEmbedder(style)
            inputs.append((style, w))

    #  Mode :  Random style inference
    elif args.style_mode == 'random':
        style_list = cfg.train.dataset.class_list 
        for style in random.sample(style_list, args.num_samples):
            w = LabelEmbedder(style)
            inputs.append((style, w))

    #  Mode :  Style Mix
    elif args.style_mode == 'mix':
        style1, style2 = args.style.split('-')
        w1 = LabelEmbedder(style1)
        w2 = LabelEmbedder(style2)
        inputs.append((style1, w1))
        for ratio in [0.5]:
            w = ratio * w1 + (1.0 - ratio) * w2
            inputs.append(('Mix'+str(ratio), w))
        inputs.append((style2, w2))

    #  Mode :  Style Operation
    elif args.style_mode == 'operation':
        def operation_parser(op_str):
            operands, operators = [], ['+']
            tail = 0 
            for i in range(len(op_str)):
                if op_str[i] in ['+', '-']:
                    operands.append(op_str[tail:i])
                    operators.append(op_str[i])
                    tail = i+1 
            operands.append(op_str[tail:])
            assert len(operands) == len(operators)
            return operands, operators
        operands, operators = operation_parser(args.style)
        # Embed each operand and accumulate according to its operator
        w_result = 0
        for operand, operator in zip(operands, operators):
            w = LabelEmbedder(operand)
            inputs.append((operand,w))
            if operator == '+':
                w_result += w
            elif operator == '-':
                w_result -= w
            else:
                raise ValueError(f'Invalid operator {operator}')
        inputs.append((args.style, w_result))
        


    #======================================================================
    #
    ### Define target data
    # 
    #======================================================================
   
    '''
       - dataset : Inference on test dataset specified in config
       - draw : Inference on draw curve (.pkl file)
    '''
    targets = []

    ### Mode : Inference on test dataset
    if args.target == 'dataset':
        print(f'\033[92mInference on  \033[1m{cfg.test.dataset.data_root}\033[0m')

        # Set up dataset
        test_dataset = BVHDataset(cfg.test.dataset, mode='test')
        # Prepare target data  
        for k in range(len(test_dataset)):
            x_data, control_data, _  = test_dataset[k]

            x_data = torch.from_numpy(x_data)
            control_data = torch.from_numpy(control_data) 

            # Convert tensor to batch
            x_data = x_data.unsqueeze(0).unsqueeze(1).type(torch.FloatTensor)
            control_data = control_data.unsqueeze(0).unsqueeze(1).type(torch.FloatTensor)
            original_curve = control_data[0,0,:,:].data.numpy() if not args.hide_curve else None
            # Generate input velocity spline
            v_control = control_data[:, :, 1:, :] - control_data[:, :, :-1, :]
            v_control = F.pad(v_control, (0,0,1,0), mode='reflect')
            v_control = v_control.to(device)
 
            targets.append({'name':f'{k:03d}', 'x_data':x_data, 'v_control':v_control, 'original_curve':original_curve})

    ### Mode : Inference on draw curve
    elif args.target == 'draw':
        print(f'\033[92mInference on  \033[1m{args.target_file}\033[0m')

        # Open .pkl file
        assert (args.target_file).endswith('.pkl')
        with open(args.target_file, mode='rb') as f:
            data = pickle.load(f)
            if 'trajectory' in data:
                trajectory_raw = data['trajectory']
                control_point_interval = len(trajectory_raw) // args.control_point if args.control_point is not None else 120
                control_data = convert_event_to_spline(trajectory_raw, control_point_interval=control_point_interval)
                data['control'] = control_data
            else:
                control_data = data['spline']
                trajectory_raw = []
            data_name = os.path.splitext(os.path.split(args.target_file)[1])[0]
            speed_modify = args.speed

        frame_step = int(cfg.train.frame_step // args.splinex * args.fps)
        control_data = torch.from_numpy(control_data).unsqueeze(0).unsqueeze(1).type(torch.FloatTensor)
        speed_modify /= cfg.test.dataset.scale  # assumed dataset-level scale factor

        batch_length = int(control_data.shape[2] / (frame_step * args.fps)) // 64 * 64
        control_data = F.interpolate(control_data, size=(batch_length, control_data.shape[3]), mode='bilinear', align_corners=True)
        control_data *= args.splinex / speed_modify
        if args.hide_curve:
            original_curve = None
        else:
            original_curve = (control_data[0,0,:,:] - control_data[0,0,0,:]).data.numpy()
            original_curve[:, 1] = 0.0  # flatten the vertical component

        # Convert position to velocity
        v_control = control_data[:, :, 1:, :] - control_data[:, :, :-1, :]
        v_control = F.pad(v_control, (0,0,1,0), mode='reflect')
        v_control = Variable(v_control).to(device)

        targets.append({'name': data_name, 'v_control':v_control, 'original_curve':original_curve})





    #======================================================================
    #
    ###   Test Start
    #
    #======================================================================

    ## Define output directory
    if args.target == 'dataset':
        test_dataset_name = os.path.split(cfg.test.dataset.data_root)[1]
    elif args.target == 'draw':
        test_dataset_name = 'Draw' 
    result_dir_top = f'{cfg.test.out}/test/iter_{iteration}/{test_dataset_name}' if args.out is None else os.path.join(args.out, test_dataset_name)
    
    if not os.path.exists(result_dir_top):
        os.makedirs(result_dir_top)

    
    ## Testing option
    standard_bvh = cfg.test.dataset.standard_bvh if hasattr(cfg.test.dataset, 'standard_bvh') else 'core/datasets/CMU_standard.bvh'
    # Prepare initial pose for lstm
    if args.model == 'LSTM':
        if 'initial_pose' in checkpoint:
            initial_pose = checkpoint['initial_pose']
        else:
            initial_pose = np.load('core/utils/initial_post_lstm.npy')
            initial_pose = torch.from_numpy(initial_pose).view(1,1,1,-1).type(torch.FloatTensor)
    
    ## Generate each sample
    for test_data in targets:
        v_control = test_data['v_control']
        original_curve = test_data['original_curve']

        result_list = []
        for name, w in inputs: 
            start_time = time.time()
            original_curve_j = original_curve.copy() if original_curve is not None else None
       
            #----------------------------------------
            #   Inference with model
            #----------------------------------------
            ## MotionGAN
            if args.model == 'MotionGAN':
                fake_v_trajectory, x_fake_motion = gen(v_control, w=w)
            ## ACGAN
            elif args.model == 'ACGAN':
                fake_v_trajectory, x_fake_motion = gen(v_control, z.repeat(1,1,v_control.shape[2],1), label_embed=w)
            ## TCN
            elif args.model == 'TCN':
                # Inference each 128 frames
                for t in range(0, v_control.shape[2], 128):
                    fake_v_trajectory_t, x_fake_motion_t = gen(v_control[:,:,t:t+128,:], w=w)
                    fake_v_trajectory = fake_v_trajectory_t if t==0 else torch.cat((fake_v_trajectory, fake_v_trajectory_t), dim=2)
                    x_fake_motion = x_fake_motion_t if t==0 else torch.cat((x_fake_motion, x_fake_motion_t), dim=2)
            ## LSTM
            elif args.model == 'LSTM':
                traj_t, pose_t = v_control[:,:,:1,:], initial_pose.to(device)
                for t in range(v_control.shape[2]):
                    traj_t, pose_t = gen(v_control[:,:,t,:], pose_t, traj_t, label_embed=w)
                    fake_v_trajectory = traj_t if t==0 else torch.cat((fake_v_trajectory, traj_t), dim=2)
                    x_fake_motion = pose_t if t==0 else torch.cat((x_fake_motion, pose_t), dim=2)

            #---------------------------------------------------
            #   Convert model output to viewable joint position
            #---------------------------------------------------
            if x_fake_motion.shape[2] > args.start_frame:
                x_fake_motion = x_fake_motion[:,:,args.start_frame:,:]
                fake_v_trajectory = fake_v_trajectory[:,:,args.start_frame:,:]
                if original_curve_j is not None:
                    original_curve_j = original_curve_j[args.start_frame:,:] - original_curve_j[args.start_frame,:]
            else:
                if original_curve_j is not None:
                    original_curve_j = original_curve_j - original_curve_j[0,:]



            # Root position at start frame
            start_position = torch.zeros(1,1,1,3)
            if re.search(r"OldMan|Sneaky|Scared|Chicken|Dinosaur", name) is not None:
                start_position[0,0,0,1] = 15.0 / cfg.test.dataset.scale
            else:
                start_position[0,0,0,1] = 17.0 / cfg.test.dataset.scale

            # Velocity to positon
            fake_trajectory = reconstruct_v_trajectory(fake_v_trajectory.data.cpu(), start_position)
            x_fake = torch.cat((fake_trajectory, x_fake_motion.cpu()), dim=3)
            result_list.append({'caption': name, 'motion': x_fake.detach()[0,:,:,:], 'control': original_curve_j}) 

 
            # Measure elapsed time for this sample
            avg_time = time.time() - start_time
    
            #------------------------------------------------
            #   Save each sample  
            #------------------------------------------------
            if args.save_separate:
                if args.style_mode == 'single':
                    result_dir = os.path.join(result_dir_top, name)
                else:
                    result_dir = os.path.join(result_dir_top, args.style)

                if not os.path.exists(result_dir):
                    os.mkdir(result_dir)
                result_path = os.path.join(result_dir, test_data['name']+'_'+name)
                print(f'\nInference : {result_path} {x_fake[0,0,:,:].shape}  Time: {avg_time:.05f}') 
                # Save result data (the last entry appended to result_list)
                with open(result_path + '.pkl', 'wb') as f:
                    pickle.dump(result_list[-1], f)
                # Save video
                save_video(result_path+'.'+args.save_format, result_list, cfg.test, camera_move='stand', elev=args.elev, azim=args.azim)
                if args.save_pics:
                    save_timelapse(result_path+'.png', result_list, cfg.test)
                result_list = []

        #------------------------------------------------
        #   Save all frame in one video
        #------------------------------------------------
        if not args.save_separate:
            result_dir = os.path.join(result_dir_top, args.style)
            if not os.path.exists(result_dir):
                os.mkdir(result_dir)
            result_path = os.path.join(result_dir, test_data['name'])
            print(f'\nInference : {result_path} {x_fake[0,0,:,:].shape}  Time: {avg_time:.05f}') 
            # Save video
            save_video(result_path+'.'+args.save_format, result_list, cfg.test, camera_move='stand', elev=args.elev, azim=args.azim)
            if args.save_pics:
                save_timelapse(result_path+'.png', result_list, cfg.test)
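
The 'operation' style mode parses an expression such as 'Angry+Old-Happy' into operands and operators; because the operator list is seeded with '+', the first operand is always added. A usage example (the style names are illustrative):

operands, operators = operation_parser('Angry+Old-Happy')
assert operands == ['Angry', 'Old', 'Happy']
assert operators == ['+', '+', '-']
# w_result = +w('Angry') + w('Old') - w('Happy')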
Example 10
def calcurate_style_meanstd():
    parser = argparse.ArgumentParser()
    parser.add_argument('config')
    parser.add_argument('--out', required=True)
    args = parser.parse_args()

    cfg = Config.from_file(args.config)
    npy_paths = collect_path(cfg.train.dataset)

    head, ext = os.path.splitext(cfg.train.dataset)
    head, data_name = os.path.split(head)

    # Prepare skeleton
    skelton, non_end_bones, joints_to_index, permute_xyz_order = btoj.get_standard_format(
        cfg.standard_bvh)
    _, non_zero_joint_to_index = btoj.cut_zero_length_bone(
        skelton, joints_to_index)

    Lul_offset = np.array(skelton['LeftUpLeg']['offsets'])
    Rul_offset = np.array(skelton['RightUpLeg']['offsets'])
    Lul_index = non_zero_joint_to_index['LeftUpLeg']
    Rul_index = non_zero_joint_to_index['RightUpLeg']

    standard_vector = Lul_offset - Rul_offset
    standard_norm = np.sqrt((Lul_offset[0] - Rul_offset[0])**2 +
                            (Lul_offset[2] - Rul_offset[2])**2)

    # Initialize
    result = {
        style: {joint: 0
                for joint in non_zero_joint_to_index}
        for style in cfg.train.class_list
    }

    for npy_path in npy_paths:
        motion = np.load(npy_path)
        # Cut zero from motion
        _, motion = btoj.cut_zero_length_bone_frames(motion, skelton,
                                                     joints_to_index)

        # Convert trajectory to velocity
        motion = motion[1:]
        trajectory = motion[:, :3]
        velocity = trajectory[1:, :] - trajectory[:-1, :]
        motion = np.concatenate((velocity, motion[1:, 3:]), axis=1)

        # Get orientation ('xz' only)
        motion_oriented = np.zeros_like(motion)
        leftupleg = motion[:, Lul_index * 3:Lul_index * 3 + 3]
        rightupleg = motion[:, Rul_index * 3:Rul_index * 3 + 3]
        vector = leftupleg - rightupleg
        norm = np.sqrt(vector[:, 0]**2 + vector[:, 2]**2)

        cos = (vector[:, 0] * standard_vector[0] +
               vector[:, 2] * standard_vector[2]) / (norm * standard_norm)
        cos = np.clip(cos, -1, 1)
        sin = np.sqrt(1 - cos**2)  # sin^2 + cos^2 = 1 (rotation sign ambiguity ignored)

        for t in range(motion.shape[0]):
            rotation_mat = np.array([[cos[t], 0., -sin[t]], [0., 1., 0.],
                                     [sin[t], 0., cos[t]]])
            motion_oriented[t, :] = np.dot(
                rotation_mat.T, motion[t, :].reshape(28, 3).T).T.reshape(-1, )

        # Set class
        npy_name = os.path.splitext(os.path.split(npy_path)[1])[0]
        style = npy_name.split('_')[0]

        mean_oriented = np.mean(motion_oriented, axis=0)
        std_oriented = np.std(motion_oriented, axis=0)

        # Write
        for joint in non_zero_joint_to_index:
            ji = non_zero_joint_to_index[joint]
            result[style][joint] = {
                'mean': mean_oriented[ji * 3:ji * 3 + 3],
                'std': std_oriented[ji * 3:ji * 3 + 3]
            }

    with open(args.out, 'wb') as f:
        pickle.dump(result, f)

    ## Visualize per-style statistics (scatter plots and PCA)
    mean_data, std_data, label_data = [], [], []
    for style in result.keys():
        data = result[style]
        mean_data.append(
            [data[joint]['mean'] for joint in non_zero_joint_to_index])
        std_data.append(
            [data[joint]['std'] for joint in non_zero_joint_to_index])
        label_data.append(cfg.train.class_list.index(style))
    mean_data = np.array(mean_data).reshape(len(mean_data), -1)
    std_data = np.array(std_data).reshape(len(mean_data), -1)
    label_data = np.array(label_data).reshape(len(mean_data), )

    plt.figure(figsize=(30, 30), dpi=72)

    # joint mean
    mean_velocity = np.stack((mean_data[:, 6], mean_data[:, 7]), axis=1)

    plt.subplot(331)
    plt.scatter(mean_velocity[:, 0], mean_velocity[:, 1], s=25)
    plt.title('mean_' + list(non_zero_joint_to_index.keys())[2])
    for i in range(mean_velocity.shape[0]):
        point = mean_velocity[i]
        label = cfg.train.class_list[label_data[i]]
        plt.text(point[0], point[1], label, fontsize=8)

    # joint std
    std_velocity = np.stack((std_data[:, 6], std_data[:, 7]), axis=1)

    plt.subplot(332)
    plt.scatter(std_velocity[:, 0], std_velocity[:, 1], s=25)
    plt.title('std_' + list(non_zero_joint_to_index.keys())[2])
    for i in range(std_velocity.shape[0]):
        point = std_velocity[i]
        label = cfg.train.class_list[label_data[i]]
        plt.text(point[0], point[1], label, fontsize=8)

    # PCA mean
    pca = PCA(n_components=2)
    mean_pca = pca.fit_transform(mean_data)

    plt.subplot(333)
    plt.scatter(mean_pca[:, 0], mean_pca[:, 1], s=25)
    plt.title('pca_mean')
    for i in range(mean_pca.shape[0]):
        point = mean_pca[i]
        label = cfg.train.class_list[label_data[i]]
        plt.text(point[0], point[1], label, fontsize=8)

    # joint mean
    mean_velocity = np.stack((mean_data[:, 36], mean_data[:, 37]), axis=1)

    plt.subplot(334)
    plt.scatter(mean_velocity[:, 0], mean_velocity[:, 1], s=25)
    plt.title('mean_' + list(non_zero_joint_to_index.keys())[12])
    for i in range(mean_velocity.shape[0]):
        point = mean_velocity[i]
        label = cfg.train.class_list[label_data[i]]
        plt.text(point[0], point[1], label, fontsize=8)

    # joint std
    std_velocity = np.stack((std_data[:, 36], std_data[:, 37]), axis=1)

    plt.subplot(335)
    plt.scatter(std_velocity[:, 0], std_velocity[:, 1], s=25)
    plt.title('std_' + list(non_zero_joint_to_index.keys())[12])
    for i in range(std_velocity.shape[0]):
        point = std_velocity[i]
        label = cfg.train.class_list[label_data[i]]
        plt.text(point[0], point[1], label, fontsize=8)

    # PCA mean
    pca = PCA(n_components=2)
    std_pca = pca.fit_transform(std_data)

    plt.subplot(336)
    plt.scatter(std_pca[:, 0], std_pca[:, 1], s=25)
    plt.title('pca_std')
    for i in range(std_pca.shape[0]):
        point = std_pca[i]
        label = cfg.train.class_list[label_data[i]]
        plt.text(point[0], point[1], label, fontsize=8)

    # joint mean
    mean_velocity = np.stack((mean_data[:, 60], mean_data[:, 61]), axis=1)

    plt.subplot(337)
    plt.scatter(mean_velocity[:, 0], mean_velocity[:, 1], s=25)
    plt.title('mean_' + list(non_zero_joint_to_index.keys())[20])
    for i in range(mean_velocity.shape[0]):
        point = mean_velocity[i]
        label = cfg.train.class_list[label_data[i]]
        plt.text(point[0], point[1], label, fontsize=8)

    # joint std
    std_velocity = np.stack((std_data[:, 60], std_data[:, 61]), axis=1)

    plt.subplot(338)
    plt.scatter(std_velocity[:, 0], std_velocity[:, 1], s=25)
    plt.title('std_' + list(non_zero_joint_to_index.keys())[20])
    for i in range(std_velocity.shape[0]):
        point = std_velocity[i]
        label = cfg.train.class_list[label_data[i]]
        plt.text(point[0], point[1], label, fontsize=8)

    # PCA mean and std
    pca = PCA(n_components=2)
    mean_std_pca = pca.fit_transform(
        np.concatenate((mean_data, std_data), axis=1))

    plt.subplot(339)
    plt.scatter(mean_std_pca[:, 0], mean_std_pca[:, 1], s=25)
    plt.title('pca_mean_std')
    for i in range(mean_std_pca.shape[0]):
        point = mean_std_pca[i]
        label = cfg.train.class_list[label_data[i]]
        plt.text(point[0], point[1], label, fontsize=8)

    plt.savefig(os.path.splitext(args.out)[0] + '_PCA.png')

    # 3D PCA mean and std
    fig = plt.figure(figsize=(10, 10), dpi=72)
    ax = Axes3D(fig)
    pca = PCA(n_components=3)
    mean_std_pca3d = pca.fit_transform(
        np.concatenate((mean_data, std_data), axis=1))

    ax.scatter3D(mean_std_pca3d[:, 0],
                 mean_std_pca3d[:, 1],
                 mean_std_pca3d[:, 2],
                 s=25)
    for i in range(mean_std_pca3d.shape[0]):
        point = mean_std_pca3d[i]
        label = cfg.train.class_list[label_data[i]]
        ax.text(point[0], point[1], point[2], label, fontsize=8)

    plt.savefig(os.path.splitext(args.out)[0] + '_PCA3D.png')
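# A minimal, self-contained sketch of the embedding step above: per-class
# feature vectors reduced to 2D with PCA and scattered with text labels.
# The class names and data here are illustrative assumptions, not values
# from the original config.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA

class_list = ['walk', 'run', 'jump']      # hypothetical class names
features = np.random.randn(30, 69)        # 30 samples x 69 joint features
labels = np.random.randint(0, len(class_list), size=30)

embedded = PCA(n_components=2).fit_transform(features)
plt.scatter(embedded[:, 0], embedded[:, 1], s=25)
for point, idx in zip(embedded, labels):
    plt.text(point[0], point[1], class_list[idx], fontsize=8)
plt.savefig('pca_sketch.png')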
Exemplo n.º 11
0
def main():
    args = parse_args()
    cfg = Config.from_file(args.config)

    ## Set GPU device
    cuda = torch.cuda.is_available()
    if cuda and args.gpu > -1:
        print('\033[1m\033[91m' + '# cuda available!' + '\033[0m')
        device = torch.device(f'cuda:{args.gpu}')
    else:
        device = 'cpu'

    # Set up generator network
    num_class = len(cfg.train.dataset.class_list)
    gen = getattr(models, cfg.models.generator.model)(cfg.models.generator,
                                                      num_class).to(device)

    # Load weight
    if args.weight is None:
        checkpoint_path = os.path.join(cfg.test.out, 'gen.pth')
        if not os.path.exists(checkpoint_path):
            checkpoint_path = sorted(
                glob.glob(
                    os.path.join(cfg.test.out, 'checkpoint',
                                 'iter_*.pth.tar')))[-1]
    else:
        checkpoint_path = args.weight

    if not os.path.exists(checkpoint_path):
        print('Generator weight not found!')
    else:
        print(f'Loading generator model from \033[1m{checkpoint_path}\033[0m')
        checkpoint = torch.load(checkpoint_path, map_location=device)
        if 'gen_state_dict' in checkpoint:
            gen.load_state_dict(checkpoint['gen_state_dict'])
            iteration = checkpoint['iteration']
        else:
            gen.load_state_dict(checkpoint)
            iteration = cfg.train.total_iterations
    gen.eval()

    ## Create (style name, latent code w) pairs
    inputs = []
    z = gen.make_hidden(1,
                        1).to(device) if cfg.models.generator.use_z else None
    for i, style in enumerate(cfg.train.dataset.class_list):
        label = torch.Tensor([i]).type(torch.LongTensor).to(device)
        w = gen.latent_transform(z, label)
        inputs.append((style, w))

    # Each style label corresponds to a row of the result table
    rows = ['Average'] + [s[0] for s in inputs]

    #======================================================================
    #
    ### Prepare target data
    #
    #======================================================================

    print(f'Inference on \033[1m{cfg.test.dataset.data_root}\033[0m')
    targets = []
    # Set up dataset
    test_dataset = BVHDataset(cfg.test.dataset, mode='test')
    # Prepare target data
    for k in range(len(test_dataset)):
        x_data, control_data, _ = test_dataset[k]

        x_data = torch.from_numpy(x_data)
        control_data = torch.from_numpy(control_data)

        # Convert tensor to batch
        x_data = x_data.unsqueeze(0).unsqueeze(1).type(torch.FloatTensor)
        control_data = control_data.unsqueeze(0).unsqueeze(1).type(
            torch.FloatTensor)
        original_curve = control_data[0, 0, :, :]
        # Generate input velocity spline
        v_control = control_data[:, :, 1:, :] - control_data[:, :, :-1, :]
        v_control = F.pad(v_control, (0, 0, 1, 0), mode='reflect')
        v_control = v_control.to(device)

        targets.append({
            'name': f'{k:03d}',
            'x_data': x_data,
            'v_control': v_control,
            'original_curve': original_curve
        })

    # Each target sample corresponds to a column of the result table
    columns = ['Average'] + [data['name'] for data in targets]

    #======================================================================
    #
    ###   Test Start
    #
    #======================================================================

    ## Define output directory
    test_dataset_name = os.path.split(cfg.test.dataset.data_root)[1]
    result_dir = f'{cfg.test.out}/eval/iter_{iteration}/{test_dataset_name}'
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)

    ## Testing option
    standard_bvh = cfg.test.dataset.standard_bvh if hasattr(
        cfg.test.dataset, 'standard_bvh') else 'core/datasets/CMU_standard.bvh'
    skeleton, _, joints_to_index, _ = get_standard_format(standard_bvh)
    _, non_zero_joint_to_index = cut_zero_length_bone(skeleton, joints_to_index)
    id_rightleg = non_zero_joint_to_index['RightToeBase']
    id_leftleg = non_zero_joint_to_index['LeftToeBase']

    ## Evaluate each sample
    trajectory_error_data = np.zeros((len(rows), len(columns)))
    footskate_data = np.zeros((len(rows), len(columns)))
    for i, test_data in enumerate(targets):
        v_control = test_data['v_control']
        original_curve = test_data['original_curve']
        original_curve = original_curve - original_curve[0, :]

        result_dic = {}
        for j, (name, w) in enumerate(inputs):
            start_time = time.time()

            #----------------------------------------
            #   Inference with model
            #----------------------------------------
            fake_v_trajectory, x_fake = gen(v_control, w=w)

            # Velocity to position
            fake_trajectory = reconstruct_v_trajectory(
                fake_v_trajectory.data.cpu(), torch.zeros(1, 1, 1, 3))
            x_fake = torch.cat((fake_trajectory, x_fake.cpu()), dim=3)

            # Denormalize
            x_fake *= cfg.test.dataset.scale

            #---------------------------------------------------
            #   Calculate metrics
            #---------------------------------------------------
            frame_length = x_fake.shape[2]
            # Calculate foot skating distance
            footskate_dist = calcurate_footskate(
                x_fake[0, 0, :, :3],
                x_fake[0, 0, :, id_rightleg * 3:id_rightleg * 3 + 3],
                x_fake[0, 0, :,
                       id_leftleg * 3:id_leftleg * 3 + 3]) / frame_length
            footskate_data[j + 1, i + 1] = round(footskate_dist, 6)
            # Calculate trajectory error
            error_dist = calcurate_trajectory_error(x_fake[0, 0, :, :3],
                                                    original_curve, 8, 32)
            trajectory_error_data[j + 1, i + 1] = round(error_dist, 6)

    #---------------------------------------------------
    #   Merge all results
    #---------------------------------------------------
    for name, data in [('trajectory_error_dist', trajectory_error_data),
                       ('footskate_dist', footskate_data)]:
        # Fill the 'Average' row and column
        data[:, 0] = np.sum(data[:, 1:], axis=1) / (len(columns) - 1)
        data[0, :] = np.sum(data[1:, :], axis=0) / (len(rows) - 1)
        data = data.tolist()

        # Save as csv
        df = pd.DataFrame(data, index=rows, columns=columns)
        print(name, '\n', df)
        df.to_csv(os.path.join(result_dir, f'{name}.csv'))
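# A minimal sketch of what reconstruct_v_trajectory plausibly computes
# (an assumption; the real helper in the repo may differ): integrate the
# per-frame root velocities back into absolute positions with a
# cumulative sum, starting from a given initial position.
import torch

def reconstruct_v_trajectory_sketch(v_traj, init_pos):
    # v_traj: (batch, channel, frames, 3) velocities
    # init_pos: (batch, channel, 1, 3) starting position, broadcast over frames
    return init_pos + torch.cumsum(v_traj, dim=2)

# Usage mirroring the call above, with a zero starting position:
# traj = reconstruct_v_trajectory_sketch(fake_v_trajectory.data.cpu(),
#                                        torch.zeros(1, 1, 1, 3))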
Exemplo n.º 12
0
def test():
    global args, cfg, device

    args = parse_args()
    cfg = Config.from_file(args.config)

    # Set GPU device
    cuda = torch.cuda.is_available()
    if cuda:
        print('\033[1m\033[91m' + '# cuda available!' + '\033[0m')
        device = torch.device(f'cuda:{args.gpu}')
    else:
        device = 'cpu'

    #####################################################
    ## Prepare for test
    #####################################################

    # Set up generator network
    num_class = len(cfg.train.dataset.class_list)
    gen = getattr(models, cfg.models.generator.model)(cfg.models.generator,
                                                      num_class).to(device)

    total_params = sum(p.numel() for p in gen.parameters() if p.requires_grad)
    print(f'Total parameter amount : \033[1m{total_params}\033[0m')

    # Load weight
    if args.weight is not None:
        checkpoint_path = args.weight
    else:
        checkpoint_path = os.path.join(cfg.test.out, 'gen.pth')
        if not os.path.exists(checkpoint_path):
            checkpoint_path = sorted(
                glob.glob(
                    os.path.join(cfg.test.out, 'checkpoint',
                                 'iter_*.pth.tar')))[-1]

    if not os.path.exists(checkpoint_path):
        print('Generator weight not found!')
    else:
        print(f'Loading generator model from \033[1m{checkpoint_path}\033[0m')
        checkpoint = torch.load(checkpoint_path, map_location=device)
        if 'gen_state_dict' in checkpoint:
            gen.load_state_dict(checkpoint['gen_state_dict'])
            iteration = checkpoint['iteration']
        else:
            gen.load_state_dict(checkpoint)
            iteration = cfg.train.total_iterations
    gen.eval()

    # Set up dataset
    test_dataset = BVHDataset(cfg.test.dataset, mode='test')
    test_dataset_name = os.path.split(
        cfg.test.dataset.data_root.replace('*', ''))[1]

    # Set standard bvh
    standard_bvh = cfg.test.dataset.standard_bvh if hasattr(
        cfg.test.dataset, 'standard_bvh') else 'core/datasets/CMU_standard.bvh'

    # Create output directory
    result_dir = f'{cfg.test.out}/test/iter_{iteration}/{test_dataset_name}'
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)

    #####################################################
    ## Test start
    #####################################################
    for i in range(len(test_dataset)):
        x_data, control_data, label = test_dataset[i]

        if x_data.shape[
                0] < cfg.train.dataset.frame_nums // cfg.train.dataset.frame_step:
            continue

        # Motion and control signal data
        x_data = torch.from_numpy(x_data).unsqueeze(0).unsqueeze(1).type(
            torch.FloatTensor)
        x_real = Variable(x_data).to(device)

        control_data = torch.from_numpy(control_data).unsqueeze(0).unsqueeze(
            1).type(torch.FloatTensor)
        control = control_data.to(device)

        # Convert root trajectory to velocity
        gt_trajectory = x_data[:, :, :, 0:3]
        gt_v_trajectory = gt_trajectory[:, :,
                                        1:, :] - gt_trajectory[:, :, :-1, :]
        gt_v_trajectory = F.pad(gt_v_trajectory, (0, 0, 1, 0), mode='reflect')
        gt_v_trajectory = Variable(gt_v_trajectory).to(device)

        # Convert control curve to velocity
        v_control = control[:, :, 1:, :] - control[:, :, :-1, :]
        v_control = F.pad(v_control, (0, 0, 1, 0), mode='reflect')
        v_control = Variable(v_control).to(device)

        results_list = []
        start_time = time.time()

        # Generate fake samples
        for k in range(args.num_samples):
            # Generate noise z
            z = gen.make_hidden(1, x_data.shape[2]).to(
                device) if cfg.models.generator.use_z else None
            fake_label = torch.randint(0,
                                       len(cfg.train.dataset.class_list),
                                       size=(1, )).type(
                                           torch.LongTensor).to(device)

            fake_v_trajectory, x_fake = gen(v_control, z, fake_label)
            fake_trajectory = reconstruct_v_trajectory(
                fake_v_trajectory.data.cpu(), torch.zeros(1, 1, 1, 3))

            caption = f'{cfg.train.dataset.class_list[fake_label]}_{k}'
            results_list.append({
                'caption':
                caption,
                'motion':
                torch.cat((fake_trajectory, x_fake.data.cpu()), dim=3),
                'control':
                control.data.cpu()
            })

        avg_time = (time.time() - start_time) / args.num_samples

        # Save results
        result_path = result_dir + f'/{i:03d}.avi'
        print(
            f'\nInference : {result_path} ({v_control.shape[2]} frames) Time: {avg_time:.05f}'
        )
        save_video(result_path, results_list, cfg.test)
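# A minimal sketch of the velocity preprocessing shared by the two test
# functions above: finite differences along the time axis, then a
# one-frame reflect pad so the sequence keeps its original length.
# Shapes here are illustrative.
import torch
import torch.nn.functional as F

control = torch.randn(1, 1, 64, 3)                  # (batch, channel, frames, xyz)
v = control[:, :, 1:, :] - control[:, :, :-1, :]    # per-frame velocity, 63 frames
v = F.pad(v, (0, 0, 1, 0), mode='reflect')          # pad the time dim back to 64
assert v.shape == control.shape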
Exemplo n.º 13
0
def main():
    global cfg, best_PSNR
    args = parse_args()
    cfg = Config.from_file(args.config)

    os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(
        str(gpu) for gpu in cfg.device)
    # os.environ["CUDA_VISIBLE_DEVICES"] = '0'
    cudnn.benchmark = True
    cudnn.fastest = True

    if hasattr(datasets, cfg.dataset):
        ds = getattr(datasets, cfg.dataset)
    else:
        raise ValueError('Unknown dataset ' + cfg.dataset)

    model = getattr(models, cfg.model.name)(cfg.model).cuda()
    cfg.train.input_mean = model.input_mean
    cfg.train.input_std = model.input_std
    cfg.test.input_mean = model.input_mean
    cfg.test.input_std = model.input_std

    # Data loading code
    # (the train loader is needed below to derive max_iter, even though
    # the training loop itself is currently disabled)
    train_loader = torch.utils.data.DataLoader(
        ds(cfg.train),
        batch_size=cfg.train.batch_size,
        shuffle=True,
        num_workers=0, #32,
        pin_memory=True,
        drop_last=True)

    val_loader = torch.utils.data.DataLoader(
        datasets.UCF101(cfg.test, False),
        batch_size=cfg.test.batch_size,
        shuffle=False,
        num_workers=0, #32,
        pin_memory=True)

    cfg.train.optimizer.args.max_iter = (
        cfg.train.optimizer.args.max_epoch * len(train_loader))

    policies = model.get_optim_policies()
    for group in policies:
        print(('group: {} has {} params, lr_mult: {}, decay_mult: {}'.format(
            group['name'],
            len(group['params']), group['lr_mult'], group['decay_mult'])))
    optimizer = Optim(policies, cfg.train.optimizer)

    if cfg.resume or cfg.weight:
        checkpoint_path = cfg.resume if cfg.resume else cfg.weight
        if os.path.isfile(checkpoint_path):
            print(("=> loading checkpoint '{}'".format(checkpoint_path)))
            checkpoint = torch.load(checkpoint_path)
            model.load_state_dict(checkpoint['state_dict'], False)
            if cfg.resume:
                optimizer.load_state_dict(checkpoint['grad_dict'])
        else:
            print(("=> no checkpoint found at '{}'".format(checkpoint_path)))

    model = DataParallelwithSyncBN(
        model, device_ids=range(len(cfg.device))).cuda()

    # define loss function (criterion) optimizer and evaluator
    criterion = torch.nn.MSELoss().cuda()
    evaluator = EvalPSNR(255.0 / np.mean(cfg.test.input_std))

    # PSNR = validate(val_loader, model, optimizer, criterion, evaluator)
    # return

    for epoch in range(cfg.train.optimizer.args.max_epoch):

        # train for one epoch
        # train(train_loader, model, optimizer, criterion, epoch)
        # evaluate on validation set
        if ((epoch + 1) % cfg.logging.eval_freq == 0
                or epoch == cfg.train.optimizer.args.max_epoch - 1):
            PSNR = validate(val_loader, model, optimizer, criterion, evaluator)
            # remember best PSNR and save checkpoint
            is_best = PSNR > best_PSNR
            best_PSNR = max(PSNR, best_PSNR)
            save_checkpoint({
                'epoch': epoch + 1,
                'arch': dict(cfg),
                'state_dict': model.module.state_dict(),
                'grad_dict': optimizer.state_dict(),
                'best_PSNR': best_PSNR,
            }, is_best)
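# A minimal sketch of the PSNR metric the evaluator above tracks. The
# EvalPSNR interface is not shown here, so this standalone function is an
# assumption about the underlying formula (20 * log10(MAX / RMSE)).
import numpy as np

def psnr(pred, target, max_val=255.0):
    # Mean squared error in float64 to avoid uint8 overflow
    mse = np.mean((pred.astype(np.float64) - target.astype(np.float64)) ** 2)
    return float('inf') if mse == 0 else 20 * np.log10(max_val / np.sqrt(mse))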