Code example #1
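All five snippets below are excerpts built around the IndividualTF trajectory-prediction model and assume the imports of the surrounding scripts. A plausible preamble is sketched here; the transformer.* module paths and the TensorBoard writer are assumptions based on that repository's layout, not something the snippets confirm:

import argparse
import os

import cv2
import numpy as np
import scipy.io
import torch
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter  # or tensorboardX, depending on the setup

import baselineUtils
import individual_TF
from transformer.batch import subsequent_mask  # causal-mask helper (assumed location)
from transformer.noam_opt import NoamOpt       # warmup LR wrapper (assumed location)

This first example is an autoregressive inference helper: it loads a trained checkpoint on the CPU and decodes 12 future steps for a raw input array, returning the raw (non-integrated) predictions.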
def trajectory(inp):
    device = torch.device("cpu")
    import individual_TF
    model = individual_TF.IndividualTF(2,
                                       3,
                                       3,
                                       N=6,
                                       d_model=512,
                                       d_ff=2048,
                                       h=8,
                                       dropout=0.1,
                                       mean=[0, 0],
                                       std=[0, 0]).to(device)

    model.load_state_dict(
        torch.load('models/Individual/my_data_train/00099.pth',
                   map_location=device))  # map to CPU in case the checkpoint was saved on GPU
    model.eval()
    pr = []  # only the predictions are needed in this helper
    inp = np.array(inp, dtype=np.float32)
    inp = torch.from_numpy(inp)
    inp = inp.to(device)
    src_att = torch.ones((inp.shape[0], 1, inp.shape[1])).to(device)
    start_of_seq = torch.Tensor([0, 0, 1]).unsqueeze(0).unsqueeze(1).repeat(
        inp.shape[0], 1, 1).to(device)
    dec_inp = start_of_seq

    with torch.no_grad():  # pure inference; gradients are not needed
        for i in range(12):  # autoregressively decode 12 future steps
            trg_att = subsequent_mask(dec_inp.shape[1]).repeat(
                dec_inp.shape[0], 1, 1).to(device)
            out = model(inp, dec_inp, src_att, trg_att)
            dec_inp = torch.cat((dec_inp, out[:, -1:, :]), 1)

    # Unlike the other snippets, this helper returns the raw decoder outputs;
    # the cumsum integration and position offset are intentionally skipped.
    preds_tr_b = dec_inp[:, 1:, 0:2].cpu().detach().numpy()
    pr.append(preds_tr_b)
    pr = np.concatenate(pr, 0)
    return pr
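Every snippet calls subsequent_mask to build the decoder's causal attention mask, but its definition is not part of these excerpts. A minimal sketch, assuming the standard Annotated-Transformer formulation that this codebase appears to follow:

import numpy as np
import torch

def subsequent_mask(size):
    # Boolean mask of shape (1, size, size): position i may attend
    # only to positions <= i (the upper triangle is masked out).
    attn_shape = (1, size, size)
    mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
    return torch.from_numpy(mask) == 0

Each decoding iteration repeats this mask across the batch, so the model conditions only on the tokens generated so far.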
Code example #2
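Batch inference with stored normalization statistics: velocity increments are standardized with the saved mean/std, decoded autoregressively, de-normalized, integrated with cumsum, and shifted by the last observed position before MAD/FAD are computed.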
def main(dataset_name,
         model_layers,
         emb_size,
         heads,
         obs=8,
         preds=12,
         dropout=0.):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = individual_TF.IndividualTF(2,
                                       3,
                                       3,
                                       N=model_layers,
                                       d_model=emb_size,
                                       d_ff=2048,
                                       h=heads,
                                       dropout=dropout,
                                       mean=[0, 0],
                                       std=[0, 0]).to(device)
    model.load_state_dict(
        torch.load(f'models/Individual/{dataset_name}/model.pth',
                   map_location=device))
    mean_std = scipy.io.loadmat(f'models/Individual/{dataset_name}/norm.mat')

    model.eval()

    inference_set, _ = baselineUtils.create_dataset("datasets",
                                                    dataset_name,
                                                    0,
                                                    obs,
                                                    preds,
                                                    verbose=True,
                                                    inference=True)

    inference_dl = torch.utils.data.DataLoader(inference_set,
                                               batch_size=1,
                                               shuffle=False,
                                               num_workers=0)

    gt = []
    pr = []
    inp_ = []
    peds = []
    frames = []
    dt = []

    for id_b, batch in enumerate(inference_dl):
        inp_.append(batch['src'])
        gt.append(batch['trg'][:, :, 0:2])
        frames.append(batch['frames'])
        peds.append(batch['peds'])
        dt.append(batch['dataset'])

        inp = (batch['src'][:, 1:, 2:4].to(device) -
               torch.tensor(mean_std["mean"], device=device)) / torch.tensor(
                   mean_std["std"], device=device)
        src_att = torch.ones((inp.shape[0], 1, inp.shape[1])).to(device)
        start_of_seq = torch.Tensor([0, 0,
                                     1]).unsqueeze(0).unsqueeze(1).repeat(
                                         inp.shape[0], 1, 1).to(device)
        dec_inp = start_of_seq

        with torch.no_grad():  # pure inference
            for i in range(preds):  # decode the requested number of future steps
                trg_att = subsequent_mask(dec_inp.shape[1]).repeat(
                    dec_inp.shape[0], 1, 1).to(device)
                out = model(inp, dec_inp, src_att, trg_att)
                dec_inp = torch.cat((dec_inp, out[:, -1:, :]), 1)

        preds_tr_b = (dec_inp[:, 1:, 0:2] * torch.tensor(mean_std["std"], device=device)
                      + torch.tensor(mean_std["mean"], device=device)).cpu().detach().numpy().cumsum(1) + \
                     batch['src'][:, -1:, 0:2].cpu().numpy()
        pr.append(preds_tr_b)
        print("inference: batch %04i / %04i" % (id_b, len(inference_dl)))

    peds = np.concatenate(peds, 0)
    frames = np.concatenate(frames, 0)
    dt = np.concatenate(dt, 0)
    gt = np.concatenate(gt, 0)
    dt_names = inference_set.data['dataset_name']
    pr = np.concatenate(pr, 0)
    mad, fad, errs = baselineUtils.distance_metrics(gt, pr)
    print(gt[0], pr[0])
    print(mad, fad)
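The reconstruction at the end of the loop deserves a note: the model predicts normalized per-step displacements, so absolute positions are recovered by de-normalizing, cumulatively summing, and adding the last observed position. A tiny illustration with made-up numbers:

import numpy as np

incr = np.array([[[0.5, 0.0], [0.5, 0.5], [0.0, 0.5]]])  # (B=1, T=3, 2) predicted steps
last_obs = np.array([[[10.0, 20.0]]])                    # (B=1, 1, 2) last observed position
positions = incr.cumsum(1) + last_obs
# [[[10.5 20. ]
#   [11.  20.5]
#   [11.  21. ]]]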
Code example #3
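Full training script: builds train/val/test dataloaders, normalizes velocity increments with per-dataset statistics, trains with a Noam-scheduled Adam optimizer and a pairwise-distance loss, and evaluates MAD/FAD after every epoch.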
def main():
    parser = argparse.ArgumentParser(
        description='Train the individual Transformer model')
    parser.add_argument('--dataset_folder', type=str, default='datasets')
    parser.add_argument('--dataset_name', type=str, default='zara1')
    parser.add_argument('--obs', type=int, default=8)
    parser.add_argument('--preds', type=int, default=12)
    parser.add_argument('--emb_size', type=int, default=512)
    parser.add_argument('--heads', type=int, default=8)
    parser.add_argument('--layers', type=int, default=6)
    parser.add_argument('--dropout', type=float, default=0.1)
    parser.add_argument('--cpu', action='store_true')
    parser.add_argument('--val_size', type=int, default=0)
    parser.add_argument('--verbose', action='store_true')
    parser.add_argument('--max_epoch', type=int, default=1500)
    parser.add_argument('--batch_size', type=int, default=70)
    parser.add_argument('--validation_epoch_start', type=int, default=30)
    parser.add_argument('--resume_train', action='store_true')
    parser.add_argument('--delim', type=str, default='\t')
    parser.add_argument('--name', type=str, default="zara1")
    parser.add_argument('--factor', type=float, default=1.)
    parser.add_argument('--save_step', type=int, default=1)
    parser.add_argument('--warmup', type=int, default=10)
    parser.add_argument('--evaluate', type=bool, default=True)  # caveat: argparse's type=bool treats any non-empty string as True

    args = parser.parse_args()
    model_name = args.name

    # create the output/model folder hierarchy if it does not exist yet
    for d in ('models', 'output', 'output/Individual', 'models/Individual',
              f'output/Individual/{args.name}', f'models/Individual/{args.name}'):
        os.makedirs(d, exist_ok=True)

    log = SummaryWriter('logs/Ind_%s' % model_name)

    log.add_scalar('eval/mad', 0, 0)
    log.add_scalar('eval/fad', 0, 0)
    device = torch.device("cuda")

    if args.cpu or not torch.cuda.is_available():
        device = torch.device("cpu")

    args.verbose = True

    ## creation of the dataloaders for train and validation
    if args.val_size == 0:
        train_dataset, _ = baselineUtils.create_dataset(args.dataset_folder,
                                                        args.dataset_name,
                                                        0,
                                                        args.obs,
                                                        args.preds,
                                                        delim=args.delim,
                                                        train=True,
                                                        verbose=args.verbose)
        val_dataset, _ = baselineUtils.create_dataset(args.dataset_folder,
                                                      args.dataset_name,
                                                      0,
                                                      args.obs,
                                                      args.preds,
                                                      delim=args.delim,
                                                      train=False,
                                                      verbose=args.verbose)
    else:
        train_dataset, val_dataset = baselineUtils.create_dataset(
            args.dataset_folder,
            args.dataset_name,
            args.val_size,
            args.obs,
            args.preds,
            delim=args.delim,
            train=True,
            verbose=args.verbose)

    test_dataset, _ = baselineUtils.create_dataset(args.dataset_folder,
                                                   args.dataset_name,
                                                   0,
                                                   args.obs,
                                                   args.preds,
                                                   delim=args.delim,
                                                   train=False,
                                                   eval=True,
                                                   verbose=args.verbose)

    import individual_TF
    model = individual_TF.IndividualTF(2,
                                       3,
                                       3,
                                       N=args.layers,
                                       d_model=args.emb_size,
                                       d_ff=2048,
                                       h=args.heads,
                                       dropout=args.dropout,
                                       mean=[0, 0],
                                       std=[0, 0]).to(device)

    tr_dl = torch.utils.data.DataLoader(train_dataset,
                                        batch_size=args.batch_size,
                                        shuffle=True,
                                        num_workers=0)
    val_dl = torch.utils.data.DataLoader(val_dataset,
                                         batch_size=args.batch_size,
                                         shuffle=True,
                                         num_workers=0)
    test_dl = torch.utils.data.DataLoader(test_dataset,
                                          batch_size=args.batch_size,
                                          shuffle=False,
                                          num_workers=0)

    #optim = SGD(list(a.parameters())+list(model.parameters())+list(generator.parameters()),lr=0.01)
    #sched=torch.optim.lr_scheduler.StepLR(optim,0.0005)
    optim = NoamOpt(
        args.emb_size, args.factor,
        len(tr_dl) * args.warmup,
        torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98),
                         eps=1e-9))
    #optim=Adagrad(list(a.parameters())+list(model.parameters())+list(generator.parameters()),lr=0.01,lr_decay=0.001)
    epoch = 0

    # Normalization statistics for the velocity increments (src[:,1:,2:4] and
    # trg[:,:,2:4]): compute mean/std per source dataset, then average them.
    means = []
    stds = []
    for i in np.unique(train_dataset[:]['dataset']):
        ind = train_dataset[:]['dataset'] == i
        means.append(
            torch.cat((train_dataset[:]['src'][ind, 1:, 2:4],
                       train_dataset[:]['trg'][ind, :, 2:4]), 1).mean((0, 1)))
        stds.append(
            torch.cat((train_dataset[:]['src'][ind, 1:, 2:4],
                       train_dataset[:]['trg'][ind, :, 2:4]), 1).std((0, 1)))
    mean = torch.stack(means).mean(0)
    std = torch.stack(stds).mean(0)

    scipy.io.savemat(f'models/Individual/{args.name}/norm.mat', {
        'mean': mean.cpu().numpy(),
        'std': std.cpu().numpy()
    })

    while epoch < args.max_epoch:
        epoch_loss = 0
        model.train()

        for id_b, batch in enumerate(tr_dl):

            optim.optimizer.zero_grad()  # reset all parameter gradients to zero
            inp = (batch['src'][:, 1:, 2:4].to(device) -
                   mean.to(device)) / std.to(device)
            target = (batch['trg'][:, :-1, 2:4].to(device) -
                      mean.to(device)) / std.to(device)
            target_c = torch.zeros(
                (target.shape[0], target.shape[1], 1)).to(device)
            target = torch.cat((target, target_c), -1)
            start_of_seq = torch.Tensor([0, 0,
                                         1]).unsqueeze(0).unsqueeze(1).repeat(
                                             target.shape[0], 1, 1).to(device)

            dec_inp = torch.cat((start_of_seq, target), 1)

            src_att = torch.ones((inp.shape[0], 1, inp.shape[1])).to(device)
            trg_att = subsequent_mask(dec_inp.shape[1]).repeat(
                dec_inp.shape[0], 1, 1).to(device)

            pred = model(inp, dec_inp, src_att, trg_att)

            loss = F.pairwise_distance(
                pred[:, :, 0:2].contiguous().view(-1, 2),
                ((batch['trg'][:, :, 2:4].to(device) - mean.to(device)) /
                 std.to(device)).contiguous().view(
                     -1, 2).to(device)).mean() + torch.mean(
                         torch.abs(pred[:, :, 2]))
            loss.backward()
            optim.step()  # Noam wrapper: updates the learning rate, then the parameters
            print("train epoch %03i/%03i  batch %04i / %04i loss: %7.4f" %
                  (epoch, args.max_epoch, id_b, len(tr_dl), loss.item()))
            epoch_loss += loss.item()
        #sched.step()
        log.add_scalar('Loss/train', epoch_loss / len(tr_dl), epoch)
        with torch.no_grad():
            model.eval()  # eval mode: disables dropout, freezes batch-norm statistics

            val_loss = 0
            step = 0
            gt = []
            pr = []
            inp_ = []
            peds = []
            frames = []
            dt = []

            for id_b, batch in enumerate(val_dl):
                inp_.append(batch['src'])
                gt.append(batch['trg'][:, :, 0:2])
                frames.append(batch['frames'])
                peds.append(batch['peds'])
                dt.append(batch['dataset'])

                inp = (batch['src'][:, 1:, 2:4].to(device) -
                       mean.to(device)) / std.to(device)
                src_att = torch.ones(
                    (inp.shape[0], 1, inp.shape[1])).to(device)
                start_of_seq = torch.Tensor(
                    [0, 0,
                     1]).unsqueeze(0).unsqueeze(1).repeat(inp.shape[0], 1,
                                                          1).to(device)
                dec_inp = start_of_seq

                for i in range(args.preds):
                    trg_att = subsequent_mask(dec_inp.shape[1]).repeat(
                        dec_inp.shape[0], 1, 1).to(device)
                    out = model(inp, dec_inp, src_att, trg_att)
                    dec_inp = torch.cat((dec_inp, out[:, -1:, :]), 1)

                preds_tr_b = (dec_inp[:, 1:, 0:2] * std.to(device) +
                              mean.to(device)).cpu().numpy().cumsum(
                                  1) + batch['src'][:, -1:, 0:2].cpu().numpy()
                pr.append(preds_tr_b)
                print("val epoch %03i/%03i  batch %04i / %04i" %
                      (epoch, args.max_epoch, id_b, len(val_dl)))

            peds = np.concatenate(peds, 0)
            frames = np.concatenate(frames, 0)
            dt = np.concatenate(dt, 0)
            gt = np.concatenate(gt, 0)
            dt_names = test_dataset.data['dataset_name']
            pr = np.concatenate(pr, 0)
            mad, fad, errs = baselineUtils.distance_metrics(gt, pr)
            log.add_scalar('validation/MAD', mad, epoch)
            log.add_scalar('validation/FAD', fad, epoch)

            if args.evaluate:

                model.eval()
                gt = []
                pr = []
                inp_ = []
                peds = []
                frames = []
                dt = []

                for id_b, batch in enumerate(test_dl):
                    inp_.append(batch['src'])
                    gt.append(batch['trg'][:, :, 0:2])
                    frames.append(batch['frames'])
                    peds.append(batch['peds'])
                    dt.append(batch['dataset'])

                    inp = (batch['src'][:, 1:, 2:4].to(device) -
                           mean.to(device)) / std.to(device)
                    src_att = torch.ones(
                        (inp.shape[0], 1, inp.shape[1])).to(device)
                    start_of_seq = torch.Tensor([
                        0, 0, 1
                    ]).unsqueeze(0).unsqueeze(1).repeat(inp.shape[0], 1,
                                                        1).to(device)
                    dec_inp = start_of_seq

                    for i in range(args.preds):
                        trg_att = subsequent_mask(dec_inp.shape[1]).repeat(
                            dec_inp.shape[0], 1, 1).to(device)
                        out = model(inp, dec_inp, src_att, trg_att)
                        dec_inp = torch.cat((dec_inp, out[:, -1:, :]), 1)

                    preds_tr_b = (dec_inp[:, 1:, 0:2] * std.to(device) +
                                  mean.to(device)).cpu().numpy().cumsum(
                                      1) + batch['src'][:, -1:,
                                                        0:2].cpu().numpy()
                    pr.append(preds_tr_b)
                    print("test epoch %03i/%03i  batch %04i / %04i" %
                          (epoch, args.max_epoch, id_b, len(test_dl)))

                peds = np.concatenate(peds, 0)
                frames = np.concatenate(frames, 0)
                dt = np.concatenate(dt, 0)
                gt = np.concatenate(gt, 0)
                dt_names = test_dataset.data['dataset_name']
                pr = np.concatenate(pr, 0)
                mad, fad, errs = baselineUtils.distance_metrics(gt, pr)

                log.add_scalar('eval/DET_mad', mad, epoch)
                log.add_scalar('eval/DET_fad', fad, epoch)


                scipy.io.savemat(
                    f"output/Individual/{args.name}/det_{epoch}.mat", {
                        'input': np.concatenate(inp_, 0),  # stacked source tracks
                        'gt': gt,
                        'pr': pr,
                        'peds': peds,
                        'frames': frames,
                        'dt': dt,
                        'dt_names': dt_names
                    })

        if epoch % args.save_step == 0:

            torch.save(model.state_dict(),
                       f'models/Individual/{args.name}/{epoch:05d}.pth')

        epoch += 1
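The NoamOpt object above wraps Adam with the inverse-square-root warmup schedule from "Attention Is All You Need". Its definition is not included in these excerpts; a minimal sketch, assuming the Annotated-Transformer implementation this codebase appears to follow:

class NoamOpt:
    # lr = factor * d_model^-0.5 * min(step^-0.5, step * warmup^-1.5)
    def __init__(self, model_size, factor, warmup, optimizer):
        self.optimizer = optimizer
        self.model_size = model_size
        self.factor = factor
        self.warmup = warmup
        self._step = 0

    def rate(self, step=None):
        step = self._step if step is None else step
        return self.factor * (self.model_size ** (-0.5) *
                              min(step ** (-0.5), step * self.warmup ** (-1.5)))

    def step(self):
        # advance the schedule, set the new learning rate, then step Adam
        self._step += 1
        for p in self.optimizer.param_groups:
            p['lr'] = self.rate()
        self.optimizer.step()

Note that the script passes warmup as len(tr_dl) * args.warmup, i.e. the warmup length is given in epochs and converted to optimizer steps.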
Code example #4
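Evaluation-plus-visualization variant: loads a checkpoint, runs the test set without input normalization, reports MAD/FAD, and draws ground-truth (green) and predicted (red) points onto the corresponding video frames with OpenCV.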
def main():
    parser=argparse.ArgumentParser(description='Train the individual Transformer model')
    parser.add_argument('--dataset_folder',type=str,default='datasets')
    parser.add_argument('--dataset_name',type=str,default='zara1')
    parser.add_argument('--obs',type=int,default=8)
    parser.add_argument('--preds',type=int,default=12)
    parser.add_argument('--emb_size',type=int,default=512)
    parser.add_argument('--heads',type=int, default=8)
    parser.add_argument('--layers',type=int,default=6)
    parser.add_argument('--dropout',type=float,default=0.1)
    parser.add_argument('--cpu',action='store_true')
    parser.add_argument('--val_size',type=int, default=0)
    parser.add_argument('--verbose',action='store_true')
    parser.add_argument('--max_epoch',type=int, default=1500)
    parser.add_argument('--batch_size',type=int,default=70)
    parser.add_argument('--validation_epoch_start', type=int, default=30)
    parser.add_argument('--resume_train',action='store_true')
    parser.add_argument('--delim',type=str,default='\t')
    parser.add_argument('--name', type=str, default="zara1")
    parser.add_argument('--factor', type=float, default=1.)
    parser.add_argument('--save_step', type=int, default=1)
    parser.add_argument('--warmup', type=int, default=10)
    parser.add_argument('--evaluate', type=bool, default=True)
    parser.add_argument('--model_pth', type=str)

    args=parser.parse_args()
    model_name=args.name

    # create the output/model folder hierarchy if it does not exist yet
    for d in ('models', 'output', 'output/Individual', 'models/Individual',
              f'output/Individual/{args.name}', f'models/Individual/{args.name}'):
        os.makedirs(d, exist_ok=True)

    #log=SummaryWriter('logs/Ind_%s'%model_name)

    #log.add_scalar('eval/mad', 0, 0)
    #log.add_scalar('eval/fad', 0, 0)
    device=torch.device("cuda")

    if args.cpu or not torch.cuda.is_available():
        device=torch.device("cpu")

    args.verbose=True
    if args.val_size==0:
        train_dataset,_ = baselineUtils.create_dataset(args.dataset_folder,args.dataset_name,0,args.obs,args.preds,delim=args.delim,train=True,verbose=args.verbose)
        val_dataset, _ = baselineUtils.create_dataset(args.dataset_folder, args.dataset_name, 0, args.obs,
                                                                    args.preds, delim=args.delim, train=False,
                                                                    verbose=args.verbose)
    else:
        train_dataset, val_dataset = baselineUtils.create_dataset(args.dataset_folder, args.dataset_name, args.val_size,args.obs,
                                                              args.preds, delim=args.delim, train=True,
                                                              verbose=args.verbose)

    test_dataset,_ =  baselineUtils.create_dataset(args.dataset_folder,args.dataset_name,0,args.obs,args.preds,delim=args.delim,train=False,eval=True,verbose=args.verbose)




    import individual_TF
    model=individual_TF.IndividualTF(2, 3, 3, N=args.layers,
                   d_model=args.emb_size, d_ff=2048, h=args.heads, dropout=args.dropout,mean=[0,0],std=[0,0]).to(device)

    
    model.load_state_dict(torch.load('models/Individual/my_data_train/00600.pth',
                                     map_location=device))
    #tr_dl = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=0)
    #val_dl = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size, shuffle=True, num_workers=0)
    test_dl = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=0)



    # Normalization is intentionally disabled in this variant: the model is fed
    # raw velocity increments, so no mean/std statistics are computed here
    # (see code example #3 for the per-dataset statistics).

    model.eval()
    gt = []
    pr = []
    inp_ = []
    peds = []
    frames = []
    dt = []
                
    for id_b,batch in enumerate(test_dl):
        inp_.append(batch['src'])
        gt.append(batch['trg'][:,:,0:2])
        frames.append(batch['frames'])
        peds.append(batch['peds'])
        dt.append(batch['dataset'])

        inp = batch['src'][:, 1:, 2:4].to(device)  # raw increments, normalization disabled
        src_att = torch.ones((inp.shape[0], 1, inp.shape[1])).to(device)
        start_of_seq = torch.Tensor([0, 0, 1]).unsqueeze(0).unsqueeze(1).repeat(inp.shape[0], 1, 1).to(
                        device)
        dec_inp=start_of_seq

        with torch.no_grad():  # pure inference
            for i in range(args.preds):
                trg_att = subsequent_mask(dec_inp.shape[1]).repeat(dec_inp.shape[0], 1, 1).to(device)
                out = model(inp, dec_inp, src_att, trg_att)
                dec_inp = torch.cat((dec_inp, out[:, -1:, :]), 1)


        # no de-normalization needed; integrate the predicted increments and
        # shift by the last observed position
        preds_tr_b = dec_inp[:, 1:, 0:2].cpu().detach().numpy().cumsum(1) + \
                     batch['src'][:, -1:, 0:2].cpu().detach().numpy()
        pr.append(preds_tr_b)

    peds = np.concatenate(peds, 0)
    frames = np.concatenate(frames, 0)
    dt = np.concatenate(dt, 0)
    gt = np.concatenate(gt, 0)
    dt_names = test_dataset.data['dataset_name']
    pr = np.concatenate(pr, 0)
    mad, fad, errs = baselineUtils.distance_metrics(gt, pr)
    print("mad %f fad %f"%(mad,fad))
    # Overlay ground truth (green) and predictions (red) on the source frames.
    # Coordinates are normalized, so scale by the 1920x1080 frame size.
    for i in range(pr.shape[0]):
        pathin = 'c_1 frames/c_1_'
        pathout = '5_1 frames_out/'
        img = cv2.imread(pathin + str(frames[i][8]) + '.jpg')
        cg = (0, 255, 0)  # green: ground truth
        cp = (0, 0, 255)  # red: prediction
        for j in range(12):
            gp = (int(gt[i, j, 0] * 1920), int(gt[i, j, 1] * 1080))
            pp = (int(pr[i, j, 0] * 1920), int(pr[i, j, 1] * 1080))
            img = cv2.circle(img, gp, 3, cg, -1)
            img = cv2.circle(img, pp, 3, cp, -1)
        cv2.imwrite(pathout + str(frames[i][8]) + '.jpg', img)
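baselineUtils.distance_metrics returns the two standard trajectory-forecasting errors, mean and final displacement (often called ADE/FDE). Its source is not shown in these excerpts; a plausible sketch of what it computes:

import numpy as np

def distance_metrics(gt, preds):
    # gt, preds: (N, T, 2) arrays of ground-truth / predicted positions
    errs = np.linalg.norm(gt - preds, axis=-1)  # per-step Euclidean error, (N, T)
    mad = errs.mean()                           # mean displacement over all steps
    fad = errs[:, -1].mean()                    # displacement at the final step
    return mad, fad, errs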
Code example #5
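Debug variant of the evaluation script: CPU-only, batch_size=1, with shape-inspection prints left in; it computes MAD/FAD without the visualization step.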
def main():
    parser=argparse.ArgumentParser(description='Train the individual Transformer model')
    parser.add_argument('--dataset_folder',type=str,default='datasets')
    parser.add_argument('--dataset_name',type=str,default='zara1')
    parser.add_argument('--obs',type=int,default=8)
    parser.add_argument('--preds',type=int,default=12)
    parser.add_argument('--emb_size',type=int,default=512)
    parser.add_argument('--heads',type=int, default=8)
    parser.add_argument('--layers',type=int,default=6)
    parser.add_argument('--dropout',type=float,default=0.1)
    parser.add_argument('--cpu',action='store_true')
    parser.add_argument('--val_size',type=int, default=0)
    parser.add_argument('--verbose',action='store_true')
    parser.add_argument('--max_epoch',type=int, default=1500)
    parser.add_argument('--batch_size',type=int,default=1)
    parser.add_argument('--validation_epoch_start', type=int, default=30)
    parser.add_argument('--resume_train',action='store_true')
    parser.add_argument('--delim',type=str,default='\t')
    parser.add_argument('--name', type=str, default="zara1")
    parser.add_argument('--factor', type=float, default=1.)
    parser.add_argument('--save_step', type=int, default=1)
    parser.add_argument('--warmup', type=int, default=10)
    parser.add_argument('--evaluate', type=bool, default=True)
    parser.add_argument('--model_pth', type=str)

    args=parser.parse_args()
    model_name=args.name

    # CPU-only variant of the evaluation script
    device=torch.device("cpu")

    args.verbose=True

    test_dataset,_ =  baselineUtils.create_dataset(args.dataset_folder,args.dataset_name,0,args.obs,args.preds,delim=args.delim,train=False,eval=True,verbose=args.verbose)


    import individual_TF
    model=individual_TF.IndividualTF(2, 3, 3, N=args.layers,
                   d_model=args.emb_size, d_ff=2048, h=args.heads, dropout=args.dropout,mean=[0,0],std=[0,0]).to(device)

    
    model.load_state_dict(torch.load('models/Individual/my_data_train/00013.pth',
                                     map_location=device))  # map to CPU
    test_dl = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=0)

    model.eval()
    gt = []
    pr = []
    inp_ = []
    peds = []
    frames = []
    dt = []
                
    for id_b,batch in enumerate(test_dl):
        #print(batch['src'].shape)
        #inp_.append(batch['src'])
        gt.append(batch['trg'][:,:,0:2])
        #frames.append(batch['frames'])
        #peds.append(batch['peds'])
        #dt.append(batch['dataset'])

        inp = batch['src'][:, 1:, 2:4].to(device)  # raw increments, normalization disabled
        #print(inp.shape)
        src_att = torch.ones((inp.shape[0], 1, inp.shape[1])).to(device)
        start_of_seq = torch.Tensor([0, 0, 1]).unsqueeze(0).unsqueeze(1).repeat(inp.shape[0], 1, 1).to(
                        device)
        # print("start of seq")
        # print(start_of_seq[0])
        dec_inp=start_of_seq

        with torch.no_grad():  # pure inference
            for i in range(args.preds):
                # src_att stays (B, 1, obs-1); trg_att grows with dec_inp each step
                trg_att = subsequent_mask(dec_inp.shape[1]).repeat(dec_inp.shape[0], 1, 1).to(device)
                out = model(inp, dec_inp, src_att, trg_att)
                dec_inp=torch.cat((dec_inp,out[:,-1:,:]),1)


        print("batch['src']")
        print(batch['src'].shape)
        preds_tr_b=(dec_inp[:,1:,0:2]).cpu().detach().numpy().cumsum(1)+batch['src'][:,-1:,0:2].cpu().detach().numpy()
        #print(preds_tr_b[1])
        pr.append(preds_tr_b)
        # print("test epoch %03i/%03i  batch %04i / %04i" % (
        #         epoch, args.max_epoch, id_b, len(test_dl)))
    gt = np.concatenate(gt, 0)
    pr = np.concatenate(pr, 0)
    mad, fad, errs = baselineUtils.distance_metrics(gt, pr)
    print("mad %f fad %f" % (mad, fad))