Example #1
def loop(args, exp_num):
    args_subset = ['exp', 'cpk', 'speaker', 'model']
    args_dict_update = {
        'render': args.render,
        'window_hop': 0,
        'sample_all_styles': args.sample_all_styles
    }
    args_dict_update.update(
        get_args_update_dict(args))  ## update all the input args

    ## Load Args
    book = BookKeeper(args,
                      args_subset,
                      args_dict_update=args_dict_update,
                      tensorboard=args.tb)
    args = book.args

    ## choose trainer
    Trainer = trainer_chooser(args)

    ## Init Trainer
    trainer = Trainer(args, args_subset, args_dict_update)

    trainer.book._set_seed()
    ## Sample
    trainer.sample(exp_num)

    ## Finish exp
    trainer.finish_exp()

    ## Print Experiment No.
    print(args.exp)
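
A note on the pattern above: every example in this collection builds an args_subset naming the keys that identify an experiment, plus an args_dict_update of overrides that BookKeeper merges into the stored args. A minimal self-contained sketch of that merge, with get_args_update_dict replaced by a hypothetical stand-in (BookKeeper itself is not shown):

from argparse import Namespace

def get_args_update_dict_sketch(args):
    ## hypothetical stand-in: treat every non-None CLI argument as an override
    return {k: v for k, v in vars(args).items() if v is not None}

args = Namespace(render=None, sample_all_styles=True)
args_dict_update = {'render': args.render,
                    'window_hop': 0,
                    'sample_all_styles': args.sample_all_styles}
args_dict_update.update(get_args_update_dict_sketch(args))
print(args_dict_update)  ## {'render': None, 'window_hop': 0, 'sample_all_styles': True}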
Example #2
def makeHTMLfile_loop(args, exp_num):
  assert args.load, 'Load file must be provided'
  assert os.path.exists(args.load), 'Load file must exist'
  
  args_subset = ['exp', 'cpk', 'speaker', 'model']
  book = BookKeeper(args, args_subset, args_dict_update={'render':args.render},
                    tensorboard=args.tb)
  args = book.args

  dir_name = book.name.dir(args.save_dir)

  makeHTMLfile(dir_name, idxs=args.render, outfile='videos')
  makeHTMLfile(dir_name, idxs=4, outfile='videos_subset')
Example #3
def train(args, exp_num, data=None):
    #assert args.load, 'Model name not provided'
    #assert os.path.isfile(args.load), 'Model file not found'
    if args.load and os.path.isfile(args.load):
        load_pretrained_model = True
    else:
        load_pretrained_model = False
    args_subset = ['exp', 'cpk', 'model', 'time', 'chunks']
    book = BookKeeper(args,
                      args_subset,
                      args_dict_update={
                          'chunks': args.chunks,
                          'batch_size': args.batch_size,
                          'model': args.model,
                          's2v': args.s2v,
                          'cuda': args.cuda,
                          'save_dir': args.save_dir,
                          'early_stopping': args.early_stopping,
                          'debug': args.debug,
                          'stop_thresh': args.stop_thresh,
                          'desc': args.desc,
                          'curriculum': args.curriculum,
                          'lr': args.lr
                      },
                      tensorboard=args.tb,
                      load_pretrained_model=load_pretrained_model)
    ## load_pretrained_model ensures the model weights are loaded, old save
    ## files are left untouched, and _new_exp is called to assign a new filename

    args = book.args

    ## Start Log
    book._start_log()

    ## Training parameters
    path2data = args.path2data
    dataset = args.dataset
    lmksSubset = args.lmksSubset
    desc = args.desc
    split = (args.train_frac, args.dev_frac)
    idx_dependent = args.idx_dependent
    batch_size = args.batch_size
    time = args.time
    global chunks
    chunks = args.chunks
    offset = args.offset
    mask = args.mask
    feats_kind = args.feats_kind
    s2v = args.s2v
    f_new = args.f_new
    curriculum = args.curriculum

    ## shuffle the data unless we are debugging (debugging overfits a fixed batch)
    shuffle = not args.debug

    ## Load data iterables
    if data is None:
        data = Data(path2data,
                    dataset,
                    lmksSubset,
                    desc,
                    split,
                    batch_size=batch_size,
                    time=time,
                    chunks=chunks,
                    offset=offset,
                    shuffle=shuffle,
                    mask=mask,
                    feats_kind=feats_kind,
                    s2v=s2v,
                    f_new=f_new)
        print('Data Loaded')
    else:
        print('Data already loaded! Yesss!!')

    train = data.train
    dev = data.dev
    test = data.test

    ## Create a model
    device = torch.device('cuda:{}'.format(
        args.cuda)) if args.cuda >= 0 else torch.device('cpu')
    input_shape = data.input_shape
    kwargs_keys = ['pose_size', 'trajectory_size']
    modelKwargs = {key: input_shape[key] for key in kwargs_keys}
    modelKwargs.update(args.modelKwargs)

    ## TODO input_size is hardcoded to the w2v input size. can be extracted from Data
    if args.s2v:
        input_size = 300
    elif args.desc:
        input_size = len(args.desc)
    else:
        input_size = 0
    model = eval(args.model)(chunks,
                             input_size=input_size,
                             Seq2SeqKwargs=modelKwargs,
                             load=args.load)
    model.to(device).double()
    book._copy_best_model(model)
    print('Model Created')

    ## would have to skip this way of loading model
    #if args.load:
    #  print('Loading Model')
    #  book._load_model(model)

    ## Loss function
    criterion = Loss(args.losses, args.lossKwargs)

    ## Optimizers
    optim = torch.optim.Adam(model.parameters(), lr=args.lr)

    ## LR scheduler
    scheduler = lr_scheduler.ExponentialLR(optim, gamma=0.99)

    ## Transforms
    columns = get_columns(feats_kind, data)
    pre = Transforms(args.transforms, columns, args.seed, mask, feats_kind,
                     dataset, f_new)

    def loop(model, data, pre, desc='train', epoch=0):
        running_loss = 0
        running_internal_loss = 0
        running_count = 0

        if desc == 'train':
            model.train(True)
        else:
            model.eval()

        Tqdm = tqdm(data,
                    desc=desc + ' {:.4f}'.format(0),
                    leave=False,
                    ncols=20)
        for count, batch in enumerate(Tqdm):
            model.zero_grad()
            optim.zero_grad()
            X, Y, s2v = batch['input'], batch['output'], batch['desc']
            pose, trajectory, start_trajectory = X
            pose_gt, trajectory_gt, start_trajectory_gt = Y

            x = torch.cat((trajectory, pose), dim=-1)
            y = torch.cat((trajectory_gt, pose_gt), dim=-1)

            x = x.to(device)
            y = y.to(device)
            if isinstance(s2v, torch.Tensor):
                s2v = s2v.to(device)

            ## Transform before the model
            x = pre.transform(x)
            y = pre.transform(y)

            if desc == 'train':
                y_cap, internal_losses = model(x, s2v, train=True)
            else:
                y_cap, internal_losses = model(x, s2v, train=False)

            loss = 0
            loss_ = 0
            if y_cap is not None:  ## if model returns only internal losses
                loss = criterion(y_cap, y)
                loss_ = loss.item()

            for i_loss in internal_losses:
                loss += i_loss
                loss_ += i_loss.item()
                running_internal_loss += i_loss.item()

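            ## normalise by the number of output elements so the running loss
            ## is comparable across sequence lengths and batch sizes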
            running_count += np.prod(y.shape)
            running_loss += loss_
            ## update tqdm
            Tqdm.set_description(desc + ' {:.4f} {:.4f}'.format(
                running_loss / running_count, running_internal_loss /
                running_count))
            Tqdm.refresh()

            if desc == 'train':
                loss.backward()
                optim.step()

            x = x.detach()
            y = y.detach()
            loss = loss.detach()
            if y_cap is not None:
                y_cap = y_cap.detach()
            internal_losses = [i.detach() for i in internal_losses]
            if count >= 0 and args.debug:  ## debugging by overfitting
                break

        return running_loss / running_count

    num_epochs = args.num_epochs

    ## set up curriculum learning for training
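    ## e.g., time=64 gives time_list = [2, 4, 8, 16, 32, 64]: training starts
    ## on 2-step sequences and the horizon doubles every time early stopping fires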
    time_list = []
    time_list_idx = 0
    if curriculum:
        for power in range(1, int(np.log2(time - 1)) + 1):
            time_list.append(2**power)
        data.update_dataloaders(time_list[0])
    time_list.append(time)
    tqdm.write('Training up to time: {}'.format(time_list[time_list_idx]))

    ## Training Loop
    for epoch in tqdm(range(num_epochs), ncols=20):
        train_loss = loop(model, train, pre, 'train', epoch)
        dev_loss = loop(model, dev, pre, 'dev', epoch)
        test_loss = loop(model, test, pre, 'test', epoch)
        scheduler.step()  ## Change the Learning Rate

        ## save results
        book.update_res({
            'train': train_loss,
            'dev': dev_loss,
            'test': test_loss
        })
        book._save_res()

        ## update tensorboard
        book.update_tb({
            'scalar': [[f'{args.cpk}/train', train_loss, epoch],
                       [f'{args.cpk}/dev', dev_loss, epoch],
                       [f'{args.cpk}/test', test_loss, epoch]]
        })

        # 'histogram':[[f'{args.cpk}/'+name, param.clone().cpu().detach().numpy(), epoch]
        #             for name, param in model.named_parameters()]})

        ## print results
        book.print_res(epoch,
                       key_order=['train', 'dev', 'test'],
                       exp=exp_num,
                       lr=scheduler.get_lr())

        if book.stop_training(model, epoch):
            ## if early_stopping criterion is met,
            ## start training with more time steps
            time_list_idx += 1
            book.stop_count = 0  ## reset the threshold counter
            book.best_dev_score = np.inf
            model.load_state_dict(copy.deepcopy(book.best_model))
            if len(time_list) > time_list_idx:
                time_ = time_list[time_list_idx]
                data.update_dataloaders(time_)
                tqdm.write('Training up to time: {}'.format(time_))
            else:
                break

    ## Sample
    print('Loading the best model and running the sample loop')
    args.__dict__.update({
        'load': book.name(book.weights_ext[0], book.weights_ext[1], args.save_dir)
    })
    sample(args, exp_num, data)

    ## Render (on a cpu only node)
    # feats_kind_dict = {'rifke':'fke'}
    # print('Rendering')
    # render = Slurm('render', slurm_kwargs={'partition':'cpu_long', 'time':'10-00:00', 'n':10})
    # python_cmd = ['source activate torch',
    #               'python render.py -dataset {} -load {} -feats_kind {} -render_list {}'.format(
    #                 args.dataset,
    #                 args.load,
    #                 feats_kind_dict[args.feats_kind],
    #                 args.render_list)]
    # render.run('\n'.join(python_cmd))

    ## Render new sentences
    print('Rendering New Sentences')
    render_new_sentences(args, exp_num, data)

    # End Log
    book._stop_log()
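
The curriculum schedule in train() is compact enough to check in isolation. A self-contained sketch (the function name here is ours, not the repository's):

import numpy as np

def curriculum_times(time):
    ## powers of two up to time - 1, then the full horizon, as in train() above
    times = [2 ** p for p in range(1, int(np.log2(time - 1)) + 1)]
    times.append(time)
    return times

print(curriculum_times(64))   ## [2, 4, 8, 16, 32, 64]
print(curriculum_times(100))  ## [2, 4, 8, 16, 32, 64, 100]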
Example #4
def sample(args, exp_num):
    assert args.load, '-load should not be None'
    assert os.path.exists(args.load), '-load should exist'

    args_subset = ['exp', 'cpk', 'model']
    book = BookKeeper(args,
                      args_subset,
                      args_dict_update={
                          'batch_size': 1,
                          'stft_window': None
                      },
                      tensorboard=args.tb)

    dir_name = book.name.dir(args.save_dir)

    args = book.args

    ## Training parameters
    path2data = args.path2data
    dataset = args.dataset
    split = (args.train_frac, args.dev_frac)
    batch_size = args.batch_size
    stft_window = args.stft_window
    stft_hop = args.stft_hop
    n_fft = args.n_fft
    hop_length = args.hop_length
    win_length = args.win_length

    ## Load data iterables
    data = Data(path2data,
                dataset,
                split,
                batch_size=batch_size,
                stft_window=stft_window,
                stft_hop=stft_hop,
                n_fft=n_fft,
                hop_length=hop_length,
                win_length=win_length,
                shuffle=False)

    train = data.train
    dev = data.dev
    test = data.test

    print('Data Loaded')

    ## Create a model
    device = torch.device('cuda:{}'.format(
        args.cuda)) if args.cuda >= 0 else torch.device('cpu')

    modelKwargs = {'feat_dim': data.shape}
    modelKwargs.update(args.modelKwargs)

    model = eval(args.model)(**modelKwargs)
    model.to(device).double()

    book._copy_best_model(model)
    print('Model Created')

    ## Load model
    if args.load:
        print('Loading Model')
        book._load_model(model)

    ## Loss function
    criterion = Loss(args.losses, args.lossKwargs)

    ## Transforms
    pre = Transforms(args.transforms, n_fft, hop_length, win_length, args)

    def loop(model, data_cls, data, pre, desc='train'):
        running_loss = 0
        running_count = 0

        model.eval()
        for count, batch in tqdm(enumerate(data),
                                 desc=desc,
                                 leave=False,
                                 ncols=20):
            x = batch.double()
            y = x.clone()
            batch_size = x.shape[0]

            x = x.to(device)
            y = y.to(device)

            ## Transform before the model
            x = pre.transform(x)
            y = pre.transform(y)

            #y_cap = x
            #internal_losses = []
            _, _, _, _, _, _, y_cap, internal_losses = model(x)

            ## save spectrogram as wavfiles
            save_path = Path(dir_name) / Path(desc) / Path(
                data.dataset.datasets[count].audiopath).relative_to(
                    args.path2data)
            os.makedirs(save_path.parent, exist_ok=True)
            save_path = save_path.as_posix()

            sr = data.dataset.datasets[count].sr
            time_shape = data.dataset.datasets[count].time_shape
            spectrogram = pre.inv_transform(y_cap).squeeze().transpose(
                1, 0).contiguous().cpu().detach().numpy()
            spec2wav(save_path,
                     sr,
                     spectrogram,
                     time_shape,
                     n_fft,
                     hop_length,
                     win_length,
                     num_iter=500)

            loss = criterion(y_cap, y)
            for i_loss in internal_losses:
                loss += i_loss

            running_loss += loss.item() * batch_size
            running_count += batch_size

            if count >= 0 and args.debug:  ## debugging by overfitting
                break

        return running_loss / running_count

    train_loss = loop(model, data, train, pre, 'train')
    dev_loss = loop(model, data, dev, pre, 'dev')
    test_loss = loop(model, data, test, pre, 'test')

    ## update results but not save them
    book.update_res({'train': train_loss, 'dev': dev_loss, 'test': test_loss})

    ## print results (this sampling function defines no optimizer,
    ## so there is no learning rate to report)
    book.print_res(0, key_order=['train', 'dev', 'test'], exp=exp_num)
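
The save-path construction inside loop() rebases each input audio path under the experiment directory before writing wavfiles. The same pathlib idiom in isolation (paths here are illustrative, not from the repository):

from pathlib import Path

audiopath = Path('dataset/raw/speaker1/clip001.wav')
path2data = Path('dataset/raw')
## dir_name / desc / audiopath.relative_to(path2data), as in the loop above
save_path = Path('save/exp1') / 'train' / audiopath.relative_to(path2data)
print(save_path.as_posix())  ## save/exp1/train/speaker1/clip001.wav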
Example #5
def train(args, exp_num):
  args_subset = ['exp', 'cpk', 'model', 'time']
  book = BookKeeper(args, args_subset, args_dict_update={},
                    tensorboard=args.tb)
  args = book.args
  global ARGS
  ARGS = args

  ## Start Log
  book._start_log()
  
  ## Training parameters
  path2data = args.path2data
  dataset = args.dataset
  lmksSubset = args.lmksSubset
  desc = args.desc
  split = (args.train_frac, args.dev_frac)
  idx_dependent = args.idx_dependent
  batch_size = args.batch_size
  time = args.time
  chunks = args.chunks
  offset = args.offset
  mask = args.mask
  feats_kind = args.feats_kind
  s2v = args.s2v
  f_new = args.f_new
  curriculum = args.curriculum
  kl_anneal = args.kl_anneal
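  ## (kl_anneal is read here but only referenced by the commented-out
  ##  annealing code inside loop() below)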
  
  ## Load data iterables
  data = Data(path2data, dataset, lmksSubset, desc,
              split, batch_size=batch_size,
              time=time,
              chunks=chunks,
              offset=offset,
              shuffle=True,
              mask=mask,
              feats_kind=feats_kind,
              s2v=s2v,
              f_new=f_new)

  print('Data Loaded')
  
  ## Create a model
  device = torch.device('cuda:{}'.format(args.cuda)) if args.cuda>=0 else torch.device('cpu')
  input_shape = data.input_shape
  kwargs_keys = ['pose_size', 'trajectory_size']
  modelKwargs = {key:input_shape[key] for key in kwargs_keys}
  modelKwargs.update(args.modelKwargs)

  model = eval(args.model)(**modelKwargs)
  model.to(device).double()

  book._copy_best_model(model)
  print('Model Created')
    
  ## Load model
  if args.load:
    print('Loading Model')
    book._load_model(model)

  ## Loss function
  criterion = Loss(args.losses, args.lossKwargs)
  
  ## Optimizers
  optim = torch.optim.Adam(model.parameters(), lr=args.lr)
  #optim = torch.optim.RMSprop(model.parameters(), lr=args.lr)
  ## LR scheduler
  scheduler = lr_scheduler.ExponentialLR(optim, gamma=0.99)
  
  ## Transforms
  columns = get_columns(feats_kind, data)
  pre = Transforms(args.transforms, columns, args.seed, mask, feats_kind, dataset, f_new)

  def loop(model, data, pre, desc='train', epoch=0):
    running_loss = 0
    running_internal_loss = 0
    running_count = 0
    # if kl_anneal > 0:
    #   kl_weight = lambda x: min((x+1)/(kl_anneal+1.), 2)
    # else:
    #   kl_weight = lambda x: 1
    count = 0
    if desc == 'train':
      model.train(True)
    else:
      model.eval()
    
    Tqdm = tqdm(data, desc=desc+' {:.4f}'.format(running_loss/(count+1.)), leave=False, ncols=20)
    for count, batch in enumerate(Tqdm):
      model.zero_grad()
      optim.zero_grad()
      
      X, Y = batch['input'], batch['output']
      pose, trajectory, start_trajectory = X
      pose_gt, trajectory_gt, start_trajectory_gt = Y

      x = torch.cat((trajectory, pose), dim=-1)
      y = torch.cat((trajectory_gt, pose_gt), dim=-1)
      
      x = x.to(device)
      y = y.to(device)

      ## Transform before the model
      x = pre.transform(x)
      y = pre.transform(y)

      if desc=='train':
        y_cap, internal_losses = model(x, train=True)
      else:
        y_cap, internal_losses = model(x, train=False)

      loss = 0
      loss_ = 0
      if y_cap is not None: ## if model returns only internal losses
        loss = criterion(y_cap, y)
        loss_ = loss.item()

      for i_loss in internal_losses:
        loss += i_loss
        loss_ += i_loss.item()
        running_internal_loss += i_loss.item()

      running_count +=  np.prod(y.shape)    
      running_loss += loss_
        
      # loss = criterion(y_cap, y)
      # loss_= loss.item()
      # #if count == 0 and desc == 'train':
      # #  pdb.set_trace()
      # for i_loss in internal_losses:
      #   loss += kl_weight(epoch) * i_loss
      #   loss_ += i_loss.item()
      #   running_internal_loss += i_loss
      
      # #running_loss += loss.item()
      # running_loss += loss_

      ## update tqdm
      Tqdm.set_description(desc+' {:.4f} {:.4f}'.format(running_loss/running_count, running_internal_loss/running_count))
      Tqdm.refresh()
      
      if desc == 'train':
        loss.backward()
        optim.step()
        # if kl_anneal == 0:
        #   y_cap, internal_losses = model(x, train=True, epoch=epoch)
        #   sum(internal_losses).backward()
        #   optim.step()

      x = x.detach()
      y = y.detach()
      loss = loss.detach()
      if y_cap is not None:
        y_cap = y_cap.detach()
      internal_losses = [i.detach() for i in internal_losses]

      if count>=0 and args.debug: ## debugging by overfitting
        break

    return running_loss/running_count

  num_epochs = args.num_epochs

  ## set up curriculum learning for training
  time_list = []
  time_list_idx = 0
  if curriculum:
    for power in range(1, int(np.log2(time-1)) + 1):
      time_list.append(2**power)
    data.update_dataloaders(time_list[0])
  time_list.append(time)
  
  ## Training Loop
  for epoch in tqdm(range(num_epochs), ncols=20):
    train_loss = loop(model, data.train, pre, 'train', epoch)
    dev_loss = loop(model, data.dev, pre, 'dev')
    test_loss = loop(model, data.test, pre, 'test')
    scheduler.step() ## Change the Learning Rate
    
    ## save results
    book.update_res({'train':train_loss,
                     'dev':dev_loss,
                     'test':test_loss})
    book._save_res()

    ## update tensorboard
    book.update_tb({'scalar':[[f'{args.cpk}/train', train_loss, epoch],
                              [f'{args.cpk}/dev', dev_loss, epoch],
                              [f'{args.cpk}/test', test_loss, epoch]]})
                   
    # 'histogram':[[f'{args.cpk}/'+name, param.clone().cpu().detach().numpy(), epoch]
    #              for name, param in model.named_parameters()]})

    ## print results
    book.print_res(epoch, key_order=['train','dev','test'], exp=exp_num, lr=scheduler.get_lr())

    ## ignore increasing dev loss till the annealing occurs
    # if epoch < kl_anneal:
    #   book.stop_count = 0
    
    if book.stop_training(model, epoch):
      ## if early_stopping criterion is met,
      ## start training with more time steps
      time_list_idx += 1
      book.stop_count = 0 ## reset the threshold counter
      book.best_dev_score = np.inf
      model.load_state_dict(copy.deepcopy(book.best_model))
      if len(time_list) > time_list_idx:
        time_ = time_list[time_list_idx]
        data.update_dataloaders(time_)
        tqdm.write('Training up to time: {}'.format(time_))
      else:
        break
    
  # End Log
  book._stop_log()

  # ## Sample
  print('Loading the best model and training with language input as well')
  args.__dict__.update({'load':book.name(book.weights_ext[0], book.weights_ext[1], args.save_dir),
                        'model':'Seq2SeqConditioned10'})
  train_wordConditioned(args, exp_num, data)
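
Both this example and Example #3 hand off to the next stage by mutating args in place. The same effect on a bare Namespace (the checkpoint path here is illustrative):

from argparse import Namespace

args = Namespace(model='Seq2Seq', load=None)
## point the next stage at the best checkpoint and a new model class,
## mirroring the args.__dict__.update(...) calls above
args.__dict__.update({'load': 'save/exp1/best_weights.p',
                      'model': 'Seq2SeqConditioned10'})
print(args.model, args.load)  ## Seq2SeqConditioned10 save/exp1/best_weights.p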
Example #6
def train(args, exp_num):
    args_subset = ['exp', 'cpk']
    book = BookKeeper(args,
                      args_subset,
                      args_dict_update={},
                      tensorboard=args.tb)
    args = book.args

    ## Start Log
    book._start_log()

    ## Training parameters
    num_epochs = args.num_epochs

    ## Load data iterables
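    ## (the actual data loading is elided in this snippet; the train/dev/test
    ##  iterables used by the training loop below are assumed to come from the
    ##  original file)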

    print('Data Loaded')

    ## Create a model

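    ## (num_channels, kernel_size, num_layers, num_lmks, sample_rate_lmks,
    ##  samples_per_frame, dilation_limit and mfccFlag are assumed to be
    ##  module-level hyperparameters in the original file)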
    model = Wavenet(num_channels,
                    kernel_size,
                    num_layers,
                    num_lmks,
                    sample_rate_lmks,
                    samples_per_frame,
                    dilation_limit,
                    mfccFlag=mfccFlag)
    if args.cuda >= 0:
        model.cuda(args.cuda)

    book._copy_best_model(model)
    print('Model Created')

    ## Load model
    if args.load:
        print('Loading Model')
        book._load_model(model)

    ## Loss function
    criterion = torch.nn.MSELoss()
    ## Optimizers
    optim = torch.optim.Adam(model.parameters(), lr=args.lr)

    def loop(model, data, desc='train', epoch=0):
        running_loss = 0
        running_loss_partial = 0
        if desc == 'train':
            model.train(True)
        else:
            model.eval()
        for count, batch in tqdm(enumerate(data),
                                 desc=desc,
                                 leave=False,
                                 ncols=20):
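            ## the per-batch step is unfinished in this snippet: the author
            ## drops into the debugger instead of computing a loss, so loop()
            ## returns None and the losses below are placeholders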
            pdb.set_trace()

    ## Training Loop
    for epoch in tqdm(range(num_epochs), ncols=20):
        train_loss = loop(model, train, 'train', epoch)
        dev_loss = loop(model, dev, 'dev')
        test_loss = loop(model, test, 'test')

        ## save results
        book.update_res({
            'train': train_loss,
            'dev': dev_loss,
            'test': test_loss
        })
        book._save_res()

        ## print results
        book.print_res(epoch, key_order=['train', 'dev', 'test'], exp=exp_num)

        if book.stop_training(model, epoch):
            break

    # End Log
    book._stop_log()
Example #7
def sample(args, exp_num, data=None):
    assert args.load, 'Model name not provided'
    assert os.path.isfile(args.load), 'Model file not found'

    args_subset = ['exp', 'cpk', 'model', 'time', 'chunks']
    book = BookKeeper(args, args_subset, args_dict_update={})
    args = book.args

    dir_name = book.name.dir(args.save_dir)

    ## Training parameters
    path2data = args.path2data
    dataset = args.dataset
    lmksSubset = args.lmksSubset
    desc = args.desc
    split = (args.train_frac, args.dev_frac)
    idx_dependent = args.idx_dependent
    f_new = args.f_new

    ## hardcoded for sampling
    batch_size = args.batch_size
    time = args.time
    offset = args.offset
    ## mask for delta
    mask = args.mask

    global feats_kind
    feats_kind = args.feats_kind

    ## Load data iterables
    if data is None:
        data = Data(path2data,
                    dataset,
                    lmksSubset,
                    desc,
                    split,
                    batch_size=batch_size,
                    time=time,
                    offset=offset,
                    shuffle=False,
                    mask=mask,
                    feats_kind=feats_kind,
                    f_new=f_new)

    train = data.train.dataset.datasets
    dev = data.dev.dataset.datasets
    test = data.test.dataset.datasets

    print('Data Loaded')

    ## Create a model
    device = torch.device('cuda:{}'.format(
        args.cuda)) if args.cuda >= 0 else torch.device('cpu')
    input_shape = data.input_shape
    modelKwargs = {}
    modelKwargs.update(input_shape)
    modelKwargs.update(args.modelKwargs)

    model = eval(args.model)(**modelKwargs)
    model.to(device).double()

    print('Model Created')

    ## Load model
    if args.load:
        print('Loading Model')
        book._load_model(model)

    ## Loss function
    criterion = Loss(args.losses, args.lossKwargs)

    ## Transforms
    global columns
    columns = get_columns(feats_kind, data)
    pre = Transforms(args.transforms, columns, args.seed, mask, feats_kind,
                     dataset, f_new)

    def loop(model, data, dataLoaders, pre, batch_size, desc='train'):
        running_loss = 0
        count = 0
        model.eval()

        Tqdm = tqdm(dataLoaders,
                    desc=desc + ' {:.4f}'.format(running_loss / (count + 1.)),
                    leave=False,
                    ncols=20)
        for count, loader in enumerate(Tqdm):
            loader = DataLoader(loader, batch_size=batch_size, shuffle=False)
            outputs_list = []
            start_trajectory_list = []
            for _, batch in enumerate(loader):
                model.zero_grad()

                X, Y = batch['input'], batch['output']
                pose, trajectory, start_trajectory = X
                pose_gt, trajectory_gt, start_trajectory_gt = Y

                x = torch.cat((trajectory, pose), dim=-1)
                y = torch.cat((trajectory_gt, pose_gt), dim=-1)

                x = x.to(device)
                y = y.to(device)
                start_trajectory_gt = start_trajectory_gt.to(device)

                ## Transform before the model
                x = pre.transform(x)
                y = pre.transform(y)

                if offset == 0:
                    y_cap, internal_losses = model(x, train=False)
                    #y_cap = x
                    #internal_losses = []
                else:
                    assert 0, 'offset = {}, it must be 0 for now'.format(
                        offset)

                input_shape = sum(
                    [data.input_shape[key] for key in data.input_shape])
                trajectory_size = data.input_shape['trajectory_size']

                #y_output = y_cap.repeat(1,1,loader.dataset.f_ratio).view(new_size)
                #mask = torch.Tensor(data.mask[:3]).to(y_cap.device).double().view(1,1,-1)
                #y_output[..., :3] = (y_output[..., :3] * mask) * 1./loader.dataset.f_ratio + y_output[..., :3] * (1-mask)
                outputs_list.append(pre.inv_transform(y_cap))
                start_trajectory_list.append(start_trajectory_gt)

                loss = criterion(y_cap, y)
                for i_loss in internal_losses:
                    loss += i_loss

                running_loss += loss.item()
                ## update tqdm
                Tqdm.set_description(desc + ' {:.4f}'.format(running_loss /
                                                             (count + 1.)))
                Tqdm.refresh()

                x = x.detach()
                y = y.detach()
                loss = loss.detach()
                y_cap = y_cap.detach()

            if outputs_list:
                ## Collect all outputs
                outputs = local2global(outputs_list, start_trajectory_list,
                                       input_shape, trajectory_size, data.mask)
                new_size = list(outputs.shape)
                new_size[0] *= loader.dataset.f_ratio
                outputs = outputs.repeat(1,
                                         loader.dataset.f_ratio).view(new_size)

                outputs = outputs.detach().cpu().numpy()

                ## copy outputs in the dataframe format
                mat_full_temp = pd.DataFrame(
                    data=np.zeros((outputs.shape[0], len(columns))),
                    columns=loader.dataset.mat_full.columns)

                ## copy all joints
                mat_full_temp.loc[:, columns] = outputs

                if dataset == 'KITMocap':
                    filename = Path(dir_name) / Path(desc) / Path(
                        loader.dataset.path2csv).relative_to(
                            path2data).with_suffix('.csv')
                    os.makedirs(filename.parent, exist_ok=True)
                    if feats_kind == 'quaternion':
                        data.raw_data.mat2csv(mat_full_temp.values, filename,
                                              columns)
                    elif feats_kind == 'rifke':
                        toFKE(mat_full_temp, data,
                              filename.with_suffix('.fke'))

                elif dataset == 'CMUMocap':
                    filename = Path(dir_name) / Path(desc) / Path(
                        loader.dataset.path2csv).relative_to(
                            path2data).with_suffix('.amc')
                    os.makedirs(filename.parent, exist_ok=True)
                    if feats_kind in {'quaternion'}:
                        data.raw_data.mat2csv(mat_full_temp.values, filename,
                                              columns)
                        mat_full = toEuler(M=mat_full_temp,
                                           joints=data.raw_data.joints,
                                           euler_columns=data.raw_data.columns)
                        data.raw_data.mat2amc(mat_full.values, filename)
                    elif feats_kind == 'rifke':
                        toFKE(mat_full_temp, data,
                              filename.with_suffix('.fke'))

            if count >= 0 and args.debug:  ## debugging by overfitting
                break

        return running_loss / (count + 1.)

    ## Sample
    train_loss = loop(model, data, train, pre, batch_size, 'train')
    dev_loss = loop(model, data, dev, pre, batch_size, 'dev')
    test_loss = loop(model, data, test, pre, batch_size, 'test')

    ## update results but not save them (just to print)
    book.update_res({'train': train_loss, 'dev': dev_loss, 'test': test_loss})

    ## print results
    book.print_res(0, key_order=['train', 'dev', 'test'], exp=exp_num)
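
The repeat/view pair used when collecting outputs (outputs.repeat(1, f_ratio).view(new_size)) upsamples the low-frame-rate predictions back to the original frame rate by repeating each frame f_ratio times. A self-contained sketch:

import torch

outputs = torch.arange(6.).view(3, 2)  ## 3 frames, 2 features
f_ratio = 2
new_size = list(outputs.shape)
new_size[0] *= f_ratio
upsampled = outputs.repeat(1, f_ratio).view(new_size)
print(upsampled)  ## each original frame now appears f_ratio times in a row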
Example #8
def loop(args, exp_num):
    # assert args.load, 'Model name not provided'
    # assert os.path.isfile(args.load), 'Model file not found'

    args_subset = ['exp', 'cpk', 'model', 'time', 'chunks']
    book = BookKeeper(args,
                      args_subset,
                      args_dict_update={
                          'feats_kind': args.feats_kind,
                          'render_list': args.render_list
                      })
    args = book.args

    if args.load:
        dir_name = book.name.dir(args.save_dir)
    else:
        dir_name = args.path2data

    if args.render_list is not None:
        with open(args.render_list, 'r') as f:
            render_list = f.readlines()
            render_list = {filename.strip() for filename in render_list}
    else:
        render_list = None

    dataset = args.dataset
    feats_kind = args.feats_kind
    if dataset == 'KITMocap':
        path2data = '../dataset/kit-mocap'
    elif dataset == 'CMUMocap':
        #path2data = '../dataset/cmu-pose/all_asfamc'
        raise NotImplementedError

    data = Data(path2data,
                dataset,
                lmksSubset=['all'],
                desc=None,
                load_data=False)

    ## Load Skeleton
    skel = pkl.load(
        open('dataProcessing/{}/skeleton.p'.format(args.dataset), 'rb'))
    filenames = []
    descriptions = []
    outputs = []

    feats_kind_dict = {'quaternion': 'csv', 'fke': 'fke', 'rifke': 'rifke'}

    for tup in os.walk(dir_name):
        for filename in tup[2]:
            if filename.split('.')[-1] == feats_kind_dict[feats_kind] and Path(
                    tup[0]).name != 'new':
                if render_list:  ## only render the files in render list
                    if filename.split('_')[0] not in render_list:
                        continue
                output = Path(tup[0]) / 'videos' / filename
                if not args.clean_render:  ## only render files whose output does not exist yet; useful if rendering was interrupted or incomplete
                    if output.with_suffix('.mp4').exists():
                        continue
                outputs.append(output.with_suffix('.mp4').as_posix())
                descriptions.append(
                    get_description(data.df, filename, path2data, feats_kind))
                os.makedirs(output.parent, exist_ok=True)
                filename = Path(tup[0]) / filename
                filenames.append(filename.as_posix())

    print('{} files'.format(len(filenames)))

    parallelRender(filenames, descriptions, outputs, skel, args.feats_kind)
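
The os.walk scan above selects files by extension while skipping the 'new' directory; a compact self-contained version of the same filter (directory and extension are illustrative):

import os
from pathlib import Path

def collect_files(dir_name, ext, skip_dir='new'):
    ## keep files whose extension matches, ignoring any directory named
    ## skip_dir, mirroring the walk in loop() above
    found = []
    for root, _, files in os.walk(dir_name):
        if Path(root).name == skip_dir:
            continue
        found.extend(Path(root) / f for f in files if f.split('.')[-1] == ext)
    return found

print(collect_files('save/exp1', 'fke'))  ## [] if the directory does not exist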
Example #9
def sample(args, exp_num, data=None):
    assert args.load, 'Model name not provided'
    assert os.path.isfile(args.load), 'Model file not found'

    args_subset = ['exp', 'cpk', 'model', 'time', 'chunks']
    book = BookKeeper(args, args_subset, args_dict_update={'view': args.view})
    args = book.args

    dir_name = book.name.dir(args.save_dir)

    ## Training parameters
    path2data = args.path2data
    dataset = args.dataset
    lmksSubset = args.lmksSubset
    desc = args.desc
    split = (args.train_frac, args.dev_frac)
    idx_dependent = args.idx_dependent

    ## hardcoded for sampling
    batch_size = args.batch_size
    time = args.time
    chunks = args.chunks
    offset = args.offset
    ## mask for delta
    mask = args.mask

    global feats_kind
    feats_kind = args.feats_kind
    global render_feats_kind
    render_feats_kind = {
        'rifke': 'fke',
        'quaternion': 'quaternion',
        'fke': 'fke'
    }
    s2v = args.s2v
    f_new = args.f_new
    curriculum = args.curriculum

    ## Load data iterables
    if data is None:
        data = Data(path2data,
                    dataset,
                    lmksSubset,
                    desc,
                    split,
                    batch_size=batch_size,
                    time=time,
                    chunks=chunks,
                    offset=offset,
                    shuffle=False,
                    mask=mask,
                    feats_kind=feats_kind,
                    s2v=s2v,
                    f_new=f_new)
        print('Data Loaded')
    else:
        print('Data already loaded!! Yessss!')

    train = data.train.dataset.datasets
    dev = data.dev.dataset.datasets
    test = data.test.dataset.datasets

    ## Create a model
    global device
    device = torch.device('cuda:{}'.format(
        args.cuda)) if args.cuda >= 0 else torch.device('cpu')
    input_shape = data.input_shape
    modelKwargs = {}
    modelKwargs.update(input_shape)
    modelKwargs.update(args.modelKwargs)

    ## getting the input_size
    if args.s2v:
        input_size = 300
    elif args.desc:
        input_size = len(args.desc)
    else:
        input_size = 0

    model = eval(args.model)(chunks,
                             input_size=input_size,
                             Seq2SeqKwargs=modelKwargs,
                             load=None)
    model.to(device).double()

    print('Model Created')

    ## Load model
    if args.load:
        print('Loading Model')
        book._load_model(model)

    ## Transforms
    global columns
    columns = get_columns(feats_kind, data)
    pre = Transforms(args.transforms, columns, args.seed, mask, feats_kind,
                     dataset, f_new)

    def loop(model, data, loader, sentences, pre, batch_size, desc='train'):
        filenames = []
        output_filenames = []
        descriptions = sentences
        model.eval()
        for count, s2v in enumerate(tqdm(sentences)):
            try:
                row = data.df[data.df['descriptions'] == s2v].iloc[0]
                minidata = MiniData(row[feats_kind],
                                    args.lmksSubset,
                                    args.time,
                                    sentence_vector=row['descriptions'],
                                    mask=args.mask,
                                    dataset=args.dataset,
                                    f_ratio=int(data.raw_data._get_f() /
                                                args.f_new),
                                    feats_kind=feats_kind)
                start = minidata[0]['input']
                start = np.concatenate([start[1], start[0]], axis=-1)
                start = pre.transform(
                    torch.from_numpy(start).to(device).double())[0, 0:1]
            except Exception:  ## fall back to a zero start pose if the sentence is not found in the dataset
                start = torch.zeros(
                    1, data.input_shape['pose_size'] +
                    data.input_shape['trajectory_size']).to(device).double()
            y_cap, internal_losses = model.sample([s2v],
                                                  time_steps=32,
                                                  start=start)
            #outputs_list.append(pre.inv_transform(y_cap))
            #outputs = torch.cat(outputs_list, dim=0)
            outputs = pre.inv_transform(y_cap).squeeze(0)
            new_size = list(outputs.shape)
            new_size[0] *= loader[0].f_ratio
            outputs = outputs.repeat(1, loader[0].f_ratio).view(new_size)
            outputs = outputs.detach().cpu().numpy()
            ## copy outputs in the dataframe format
            mat_full_temp = pd.DataFrame(data=np.zeros(
                (outputs.shape[0], len(columns))),
                                         columns=columns)

            if feats_kind == 'rifke':
                mat_full_temp['root_tx'] = 0
                mat_full_temp['root_tz'] = 0

            ## copy all joints
            mat_full_temp.loc[:, columns] = outputs
            if dataset == 'KITMocap':
                filename = Path(dir_name) / Path('new') / Path(
                    '{}.csv'.format(count))
                filenames.append(filename.with_suffix('.fke').as_posix())
                output_filenames.append(
                    filename.with_suffix('.mp4').as_posix())
                os.makedirs(filename.parent, exist_ok=True)
                if feats_kind == 'quaternion':
                    data.raw_data.mat2csv(mat_full_temp.values, filename,
                                          columns)
                elif feats_kind == 'rifke':
                    toFKE(mat_full_temp, data, filename.with_suffix('.fke'))

        ## Render
        #parallelRender(filenames, descriptions, output_filenames, data.raw_data.skel, render_feats_kind[feats_kind])
        ## Render on slurm (cpu-node only)
        # print('Rendering')
        # render = Slurm('render_new', slurm_kwargs={'partition':'cpu_long', 'time':'10-00:00', 'n':10})
        # python_cmd = ['source activate torch',
        #               'python render_newSentence.py -load {} -view {}'.format(
        #                 args.load,
        #                 args.view)]
        # render.run('\n'.join(python_cmd))

    ## Sample
    with open(args.view, 'r') as f:
        sentences = f.readlines()
    sentences = [s.strip() for s in sentences]
    loop(model, data, train, sentences, pre, batch_size, 'train')