Code example #1
def initialization(args):
    # parameters and flags
    input_batch_size = args.batch_size

    #camera_parameter=[450,180,225,225,225,90]
    #camera_parameter=[651,262,651,651,320,130]
    camera_parameter = [640, 180, 640, 640, 320, 90]
    image_size = (camera_parameter[1], camera_parameter[0])

    ################## init model###########################
    model = models.VONet.PADVONet(coor_layer_flag=args.coor_layer_flag)
    model = model.float()
    if args.use_gpu_flag:
        #model     = nn.DataParallel(model.cuda())
        model     = model.cuda()
        print(model)
    if args.finetune_flag:
        model.load_state_dict(torch.load(args.model_load))

    optimizer = optim.Adam(model.parameters(), lr=0.01)
    lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=LambdaLR(200, 0, 50).step)
    print(optimizer)
    ################### load data####################
    # training data
    motion_files_path = args.motion_path
    path_files_path = args.image_list_path
    print(motion_files_path)
    print(path_files_path)
    # transform
    transforms_ = [
                transforms.Resize(image_size),
                transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1),
                transforms.ToTensor(),
                transforms.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)) ]
    kitti_dataset = data.data_loader.SepeDataset(
        path_to_poses_files=motion_files_path,
        path_to_image_lists=path_files_path,
        transform_=transforms_,
        camera_parameter=camera_parameter,
        coor_layer_flag=args.coor_layer_flag)

    dataloader = DataLoader(kitti_dataset, batch_size=input_batch_size,
                            shuffle=True, num_workers=4, drop_last=True)
    dataloader_vis = DataLoader(kitti_dataset, batch_size=input_batch_size,
                                shuffle=False, num_workers=4, drop_last=True)
    # testing data
    motion_files_path_test = args.motion_path_test
    path_files_path_test = args.image_list_path_test
    print(motion_files_path_test)
    print(path_files_path_test)
    # transform
    transforms_ = [
                transforms.Resize(image_size),
                transforms.ToTensor(),
                transforms.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)) ]
    kitti_dataset_test = data.data_loader.SepeDataset(
        path_to_poses_files=motion_files_path_test,
        path_to_image_lists=path_files_path_test,
        transform_=transforms_,
        camera_parameter=camera_parameter,
        norm_flag=1,
        coor_layer_flag=args.coor_layer_flag)
    dataloader_vid = DataLoader(kitti_dataset_test, batch_size=input_batch_size,
                                shuffle=False, num_workers=4, drop_last=True)
    print(len(kitti_dataset), len(kitti_dataset_test))
    vis = visualizer.Visualizer(args.visdom_ip, args.visdom_port)

    return dataloader, dataloader_vis, dataloader_vid, model, vis, lr_scheduler, optimizer
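
Note: the `LambdaLR` class whose bound `.step` method is passed as `lr_lambda` is project code, not part of `torch.optim`, and its definition is not shown in these snippets. A minimal sketch under the usual linear-decay convention (factor 1.0 until `decay_start_epoch`, then a linear ramp to 0.0 at `n_epochs`), consistent with the call `LambdaLR(200, 0, 50)`:

class LambdaLR:
    # Sketch only; the project's actual helper may differ in details.
    def __init__(self, n_epochs, offset, decay_start_epoch):
        assert (n_epochs - decay_start_epoch) > 0, 'decay must finish within training'
        self.n_epochs = n_epochs
        self.offset = offset
        self.decay_start_epoch = decay_start_epoch

    def step(self, epoch):
        # Multiplicative LR factor: 1.0 until decay starts, then linear to 0.0.
        return 1.0 - max(0, epoch + self.offset - self.decay_start_epoch) / (
            self.n_epochs - self.decay_start_epoch)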
Code example #2
File: ssvo_feature.py Project: TimingSpace/PADVO2
    # transform
    camera_parameter = [640, 180, 640, 640, 320, 90]
    image_size = (camera_parameter[1], camera_parameter[0])
    transforms_ = [
                transforms.Resize(image_size),
                #transforms.Resize((180,651)),#robocar remap
                #transforms.Resize((262,651)),#robocar remap
                transforms.ToTensor(),
                transforms.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)) ]


    kitti_dataset_test = data.data_loader.SepeDataset(
        path_to_poses_files=motion_files_path_test,
        path_to_image_lists=path_files_path_test,
        transform_=transforms_,
        norm_flag=1,
        camera_parameter=camera_parameter,
        coor_layer_flag=coor_layer_flag)

    dataloader_vid = DataLoader(kitti_dataset_test, batch_size=input_batch_size,
                                shuffle=False, num_workers=1, drop_last=True)

    vis = visualizer.Visualizer(args.visdom_ip, args.visdom_port)
    ####Validation Path###############################################################
    model.eval()
    forward_visual_result = []
    backward_visual_result = []
    forward_visual_opti = []
    ground_truth = []
    sum_loss_epoch = 0
    all_patch_losses = []
    all_quats = []
    all_trans = []
    reliability_error = []
    for i_batch, sample_batched in enumerate(dataloader_vid):
        #print('************** i_batch',i_batch,'******************')
        #if(i_batch>2000):
        #    break
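
Aside on `camera_parameter`: the first two entries are width and height (note that `image_size` swaps them into the `(H, W)` order expected by `transforms.Resize`); the remaining four are presumably the pinhole intrinsics fx, fy, cx, cy, which fits the values (cx = 320 = W/2, cy = 90 = H/2). A small illustration of that assumed layout:

# Assumed layout of camera_parameter; only width/height are confirmed by the
# code above, the intrinsics interpretation is an inference from the values.
width, height, fx, fy, cx, cy = [640, 180, 640, 640, 320, 90]
image_size = (height, width)  # transforms.Resize expects (H, W)
assert cx == width / 2 and cy == height / 2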
Code example #3
File: dcvo_train.py Project: TimingSpace/PADVO2
def main():

    # parameters and flags
    args = parse()
    valid_period = 5
    visualize_training_period = 5
    save_visualize_training_period = 5
    input_batch_size = args.batch_size
    finetune_flag = False
    coor_layer_flag = False
    pad_flag = False
    with_attention_flag = False
    rpe_flag = True
    use_gpu_flag = True
    motion_flag = [0, 1, 2, 3, 4, 5]
    data_balance_flag = False
    no_motion_flag = [d not in motion_flag for d in range(0, 6)]
    print(motion_flag, no_motion_flag)
    #camera_parameter=[450,180,225,225,225,90]
    #camera_parameter=[651,262,651,651,320,130]
    camera_parameter = [640, 180, 640, 640, 320, 90]
    image_size = (camera_parameter[1], camera_parameter[0])

    ################## init model###########################
    vo_predictor = DCVO()
    #ego_pre = ep.EgomotionPrediction()
    ################### load data####################
    # training data
    motion_files_path = args.motion_path
    path_files_path = args.image_list_path
    print(motion_files_path)
    print(path_files_path)
    # transform
    transforms_ = [
        transforms.Resize(image_size),
        transforms.ColorJitter(brightness=0.1,
                               contrast=0.1,
                               saturation=0.1,
                               hue=0.1),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ]
    kitti_dataset = data.data_loader.SepeDataset(
        path_to_poses_files=motion_files_path,
        path_to_image_lists=path_files_path,
        transform_=transforms_,
        camera_parameter=camera_parameter,
        coor_layer_flag=coor_layer_flag)

    #dataloader = DataLoader(kitti_dataset, batch_size=input_batch_size,shuffle=False ,num_workers=4,drop_last=True,sampler=kitti_dataset.sampler)
    dataloader = DataLoader(kitti_dataset,
                            batch_size=input_batch_size,
                            shuffle=True,
                            num_workers=2,
                            drop_last=True)
    if data_balance_flag:
        print('data balance by prob')
        dataloader = DataLoader(kitti_dataset,
                                batch_size=input_batch_size,
                                shuffle=False,
                                num_workers=4,
                                drop_last=True,
                                sampler=kitti_dataset.sampler)
    else:
        print('no data balance')
    dataloader_vis = DataLoader(kitti_dataset,
                                batch_size=input_batch_size,
                                shuffle=False,
                                num_workers=4,
                                drop_last=True)
    # testing data
    motion_files_path_test = args.motion_path_test
    path_files_path_test = args.image_list_path_test
    print(motion_files_path_test)
    print(path_files_path_test)
    # transform
    transforms_ = [
        transforms.Resize(image_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ]

    kitti_dataset_test = data.data_loader.SepeDataset(
        path_to_poses_files=motion_files_path_test,
        path_to_image_lists=path_files_path_test,
        transform_=transforms_,
        camera_parameter=camera_parameter,
        norm_flag=1,
        coor_layer_flag=coor_layer_flag)

    dataloader_vid = DataLoader(kitti_dataset_test,
                                batch_size=input_batch_size,
                                shuffle=False,
                                num_workers=4,
                                drop_last=True)
    print(len(kitti_dataset), len(kitti_dataset_test))

    epoch_loss_visu_mean = 0
    epoch_loss_eval_mean = 0
    vis = visualizer.Visualizer(args.visdom_ip, args.visdom_port)
    print('vis', args.visdom_ip, args.visdom_port)
    training_loss_data = open(
        '../saved_new/' + args.model_name + '_training.loss', 'a')
    testing_loss_data = open(
        '../saved_new/' + args.model_name + '_testing.loss', 'a')
    training_ate_data = open(
        '../saved_new/' + args.model_name + '_training.ate', 'a')
    testing_ate_data = open('../saved_new/' + args.model_name + '_testing.ate',
                            'a')
    ################## training   #######################
    for epoch in range(101):
        epoch_loss = 0
        result = []
        result = np.array(result)
        vo_predictor.train()
        for i_batch, sample_batched in enumerate(dataloader):
            batch_loss, result = dc_update(
                vo_predictor,
                sample_batched,
                with_attention_flag=with_attention_flag,
                pad_flag=pad_flag,
                motion_flag=motion_flag)
            #att_0 = result[1]
            #vis.plot_heat_map(att_0[0,0,:,:])
            epoch_loss += batch_loss
            vis.plot_current_errors(
                epoch, i_batch * input_batch_size / len(kitti_dataset),
                batch_loss.data)
            print(epoch, '******', i_batch, '/', len(dataloader), '*******',
                  batch_loss.item())
            batch_loss.backward()
            vo_predictor.step()
        data_length = len(kitti_dataset) // input_batch_size * input_batch_size
        epoch_loss_mean = epoch_loss * input_batch_size / data_length
        vis.plot_epoch_current_errors(epoch, epoch_loss_mean.data)
        vo_predictor.lstep()

        ####Visualization Path###############################################################
        with torch.no_grad():
            if epoch % valid_period == 0:
                vo_predictor.eval()
                forward_visual_result = []
                ground_truth = []
                epoch_loss_visu = 0
                for i_batch, sample_batched in enumerate(dataloader_vis):
                    print('visu************** i_batch', i_batch,
                          '******************')
                    vo_predictor.zero_grad()
                    batch_loss, result = dc_update(
                        vo_predictor,
                        sample_batched,
                        with_attention_flag=with_attention_flag,
                        pad_flag=pad_flag,
                        motion_flag=motion_flag)

                    att_0 = result[1]
                    vis.plot_heat_map(att_0[0, 0, :, :])
                    #batch_loss.backward()
                    batch_loss.detach_()
                    training_loss_data.write(
                        str(batch_loss.cpu().data.tolist()) + '\n')
                    training_loss_data.flush()
                    epoch_loss_visu += batch_loss
                    temp_f = weighted_mean_motion(result, with_attention_flag)
                    gt_f_12 = sample_batched['motion_f_01'].numpy()
                    forward_visual_result = np.append(forward_visual_result,
                                                      temp_f)
                    ground_truth = np.append(ground_truth, gt_f_12)
                data_length = len(
                    kitti_dataset) // input_batch_size * input_batch_size
                epoch_loss_visu_mean = epoch_loss_visu * input_batch_size / data_length
                forward_visual_result = forward_visual_result.reshape(
                    data_length, 6) * kitti_dataset.motion_stds
                forward_visual_result[:, no_motion_flag] = 0
                if no_motion_flag[2]:
                    forward_visual_result[:, 2] = 1
                #ground_truth = ground_truth.reshape(data_length,6)*kitti_dataset.motion_stds+kitti_dataset.motion_means
                ground_truth = ground_truth.reshape(
                    data_length, 6) * kitti_dataset.motion_stds

                #forward_visual_result_m = tf.ses2poses(forward_visual_result)
                #ground_truth_m          = tf.ses2poses(ground_truth)
                forward_visual_result_m = tf.eular2pose2(
                    forward_visual_result, 1)
                ground_truth_m = tf.eular2pose2(ground_truth, 1)
                if rpe_flag:
                    rot_train, tra_train = evaluate.evaluate(
                        ground_truth_m, forward_visual_result_m)
                    training_ate_data.write(
                        str(np.mean(tra_train)) + ' ' +
                        str(np.mean(rot_train)) + '\n')
                    training_ate_data.flush()
                vis.plot_path_with_gt(forward_visual_result_m, ground_truth_m,
                                      5, 'training set forward')
                #torch.save(model.state_dict(), '../saved_model/model_'+args.model_name+'_'+str(epoch).zfill(3)+'.pt')
                vo_predictor.save(args.model_name, epoch)
                ####Validation Path###############################################################
                vo_predictor.eval()
                forward_visual_result = []
                backward_visual_result = []
                ground_truth = []
                epoch_loss_eval = 0
                forward_visual_opti = []
                for i_batch, sample_batched in enumerate(dataloader_vid):
                    #opti vis
                    print('test************** i_batch', i_batch,
                          '******************')
                    batch_loss_eval, predicted_result_eval = dc_update(
                        vo_predictor,
                        sample_batched,
                        with_attention_flag=with_attention_flag,
                        pad_flag=pad_flag,
                        motion_flag=motion_flag)
                    batch_loss_eval.detach_()
                    testing_loss_data.write(
                        str(batch_loss_eval.cpu().data.tolist()) + '\n')
                    testing_loss_data.flush()
                    epoch_loss_eval += batch_loss_eval
                    temp_f = weighted_mean_motion(predicted_result_eval,
                                                  with_attention_flag)
                    gt_f_12 = sample_batched['motion_f_01'].numpy()
                    forward_visual_result = np.append(forward_visual_result,
                                                      temp_f)
                    ground_truth = np.append(ground_truth, gt_f_12)
                data_length = len(
                    kitti_dataset_test) // input_batch_size * input_batch_size
                epoch_loss_eval_mean = epoch_loss_eval * input_batch_size / data_length
                forward_visual_result = forward_visual_result.reshape(
                    data_length, 6) * kitti_dataset_test.motion_stds
                forward_visual_result[:, no_motion_flag] = 0
                if no_motion_flag[2]:
                    forward_visual_result[:, 2] = 1
                #ground_truth = ground_truth.reshape(data_length,6)*kitti_dataset_test.motion_stds+kitti_dataset_test.motion_means
                ground_truth = ground_truth.reshape(
                    data_length, 6) * kitti_dataset_test.motion_stds

                #forward_visual_result_m = tf.ses2poses(forward_visual_result)
                #ground_truth_m          = tf.ses2poses(ground_truth)

                forward_visual_result_m = tf.eular2pose2(
                    forward_visual_result, 1)
                ground_truth_m = tf.eular2pose2(ground_truth, 1)
                vis.plot_two_path_with_gt(forward_visual_result_m,
                                          forward_visual_result_m,
                                          ground_truth_m, 10,
                                          'testing set forward')
                vis.plot_epoch_training_validing(
                    epoch,
                    epoch_loss_visu_mean.detach().cpu().numpy(),
                    epoch_loss_eval_mean.detach().cpu().numpy())
                if rpe_flag:
                    rot_eval, tra_eval = evaluate.evaluate(
                        ground_truth_m, forward_visual_result_m)
                    testing_ate_data.write(
                        str(np.mean(tra_eval)) + ' ' + str(np.mean(rot_eval)) +
                        '\n')
                    testing_ate_data.flush()
                    vis.plot_epoch_training_validing_2(epoch,
                                                       np.mean(tra_train),
                                                       np.mean(tra_eval), 22)
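
Both evaluation loops above de-normalize the predicted 6-DoF motions by multiplying with the dataset's per-component standard deviations (the commented-out lines show an older variant that also added the means back). A self-contained numpy sketch of that round trip, with made-up statistics:

import numpy as np

motion_stds = np.array([0.01, 0.02, 0.5, 0.03, 0.01, 0.02])  # hypothetical per-axis stds
motions = np.random.randn(8, 6)                               # a batch of ground-truth motions

normalized = motions / motion_stds     # what the network is trained to regress
recovered = normalized * motion_stds   # what the evaluation loops reconstruct
assert np.allclose(recovered, motions)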
Code example #4
File: padvo_train.py Project: TimingSpace/PADVO
def initialization(args):
    # parameters and flags
    input_batch_size = args.batch_size

    #camera_parameter=[450,180,225,225,225,90]
    #camera_parameter=[651,262,651,651,320,130]
    camera_parameter = [640, 180, 640, 640, 320, 90]
    image_size = (camera_parameter[1], camera_parameter[0])

    ################## init model###########################
    model = models.VONet.PADVONet(coor_layer_flag=args.coor_layer_flag).float()
    if args.use_gpu_flag:
        #model     = nn.DataParallel(model.cuda())
        model = model.cuda()
    if args.finetune_flag:
        model.load_state_dict(torch.load(args.model_load))
    ### init optimizer
    optimizer = optim.Adam(model.parameters(), lr=0.01)
    lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer,
                                               lr_lambda=LambdaLR(200, 0,
                                                                  50).step)
    opti = learning.LearningOptim(optimizer, lr_scheduler)
    ################### load data####################
    # training data
    motion_files_path = args.motion_path
    path_files_path = args.image_list_path
    # transform
    transforms_ = [
        transforms.Resize(image_size),
        transforms.ColorJitter(brightness=0.1,
                               contrast=0.1,
                               saturation=0.1,
                               hue=0.1),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ]
    kitti_dataset = data.data_loader.SepeDataset(
        path_to_poses_files=motion_files_path,
        path_to_image_lists=path_files_path,
        transform_=transforms_,
        camera_parameter=camera_parameter,
        coor_layer_flag=args.coor_layer_flag)

    learning_data = learning.LearningData()
    learning_data.input_label = 'image_f_01'
    learning_data.output_label = 'motion_f_01'
    learning_data.data_loader_train = DataLoader(kitti_dataset,
                                                 batch_size=input_batch_size,
                                                 shuffle=True,
                                                 num_workers=1,
                                                 drop_last=True)
    learning_data.data_loader_vis = DataLoader(kitti_dataset,
                                               batch_size=input_batch_size,
                                               shuffle=False,
                                               num_workers=1,
                                               drop_last=True)
    # testing data
    motion_files_path_test = args.motion_path_test
    path_files_path_test = args.image_list_path_test
    # transform
    transforms_ = [
        transforms.Resize(image_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ]
    kitti_dataset_test = data.data_loader.SepeDataset(
        path_to_poses_files=motion_files_path_test,
        path_to_image_lists=path_files_path_test,
        transform_=transforms_,
        camera_parameter=camera_parameter,
        norm_flag=1,
        coor_layer_flag=args.coor_layer_flag)
    learning_data.data_loader_vid = DataLoader(kitti_dataset_test,
                                               batch_size=input_batch_size,
                                               shuffle=False,
                                               num_workers=4,
                                               drop_last=True)

    #########################init visualizer##########################################
    vis = visualizer.Visualizer(args.visdom_ip, args.visdom_port)

    #########################loss function ##########################################
    loss_func = nn.MSELoss  # default; always overridden by the branches below
    if args.attention_flag:
        loss_func = loss.loss_functions.GroupWithATTLoss
    elif args.pad_flag:
        #loss_func = loss.loss_functions.GroupWithSSLoss
        loss_func = loss.loss_functions.SingleShotLoss
    else:
        loss_func = loss.loss_functions.GroupLoss

    return model, learning_data, loss_func, opti, vis
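
For context, a hypothetical driver for the tuple returned above. The attribute names assumed on `learning.LearningOptim` (`optimizer`, `lr_scheduler`) and the loss-class call convention are guesses, not confirmed by these snippets:

# Hypothetical caller, sketched from the fields set in initialization();
# the project's actual training driver may differ.
model, ld, loss_func, opti, vis = initialization(args)
criterion = loss_func()                  # assumes the losses instantiate like nn.MSELoss
for epoch in range(200):
    model.train()
    for batch in ld.data_loader_train:
        images = batch[ld.input_label].cuda()
        target = batch[ld.output_label].cuda()
        loss = criterion(model(images), target)
        opti.optimizer.zero_grad()       # guessed attribute on LearningOptim
        loss.backward()
        opti.optimizer.step()
    opti.lr_scheduler.step()             # guessed attribute on LearningOptim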
Code example #5
File: srvo_train.py Project: TimingSpace/PADVO2
def main():

    # parameters and flags
    args = parse()
    valid_period = 5
    visualize_training_period = 5
    save_visualize_training_period = 5
    input_batch_size = args.batch_size
    finetune_flag = False
    coor_layer_flag = False
    pad_flag = False
    with_attention_flag = False
    rpw_flag = False
    use_gpu_flag = True
    vis_flag = False
    motion_flag = [2, 4]
    data_balance_flag = False
    no_motion_flag = [d not in motion_flag for d in range(0, 6)]
    print(motion_flag, no_motion_flag)
    #camera_parameter=[450,180,225,225,225,90]
    #camera_parameter=[651,262,651,651,320,130]
    camera_parameter = [640, 180, 640, 640, 320, 90]
    image_size = (camera_parameter[1], camera_parameter[0])

    ################## init model###########################
    model = models.VONet.PADVONet(coor_layer_flag=coor_layer_flag)
    model = model.float()
    if use_gpu_flag:
        #model     = nn.DataParallel(model.cuda())
        model = model.cuda()
        print(model)
    if finetune_flag:
        model.load_state_dict(torch.load(args.model_load))
    # bn or gn, we can increase lr
    '''
    for name, child in model.named_children():
        if name[0:3]=='att':
           print(name + ' is unfrozen')
           for param in child.parameters():
               param.requires_grad = True
        else:
           print(name + ' is frozen')
           for param in child.parameters():
               param.requires_grad = False
    '''
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer,
                                               lr_lambda=LambdaLR(200, 0,
                                                                  50).step)
    print(optimizer)
    #ego_pre = ep.EgomotionPrediction()
    ################### load data####################
    # training data
    motion_files_path = args.motion_path
    path_files_path = args.image_list_path
    print(motion_files_path)
    print(path_files_path)
    # transform
    transforms_ = [
        transforms.Resize(image_size),
        transforms.ColorJitter(brightness=0.1,
                               contrast=0.1,
                               saturation=0.1,
                               hue=0.1),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ]
    kitti_dataset_t = dlr.SepeDataset(path_to_poses_files=motion_files_path,
                                      path_to_image_lists=path_files_path,
                                      transform_=transforms_,
                                      camera_parameter=camera_parameter,
                                      coor_layer_flag=coor_layer_flag)
    kitti_dataset_tv = dl.SepeDataset(path_to_poses_files=motion_files_path,
                                      path_to_image_lists=path_files_path,
                                      transform_=transforms_,
                                      camera_parameter=camera_parameter,
                                      coor_layer_flag=coor_layer_flag)

    #dataloader = DataLoader(kitti_dataset, batch_size=input_batch_size,shuffle=False ,num_workers=4,drop_last=True,sampler=kitti_dataset.sampler)
    dataloader = DataLoader(kitti_dataset_t,
                            batch_size=input_batch_size,
                            shuffle=True,
                            num_workers=4,
                            drop_last=True)
    if data_balance_flag:
        print('data balance by prob')
        dataloader = DataLoader(kitti_dataset_t,
                                batch_size=input_batch_size,
                                shuffle=False,
                                num_workers=4,
                                drop_last=True,
                                sampler=kitti_dataset_t.sampler)
    else:
        print('no data balance')
    dataloader_vis = DataLoader(kitti_dataset_tv,
                                batch_size=input_batch_size,
                                shuffle=False,
                                num_workers=4,
                                drop_last=True)
    # testing data
    motion_files_path_test = args.motion_path_test
    path_files_path_test = args.image_list_path_test
    print(motion_files_path_test)
    print(path_files_path_test)
    # transform
    transforms_ = [
        transforms.Resize(image_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ]

    kitti_dataset_test = dl.SepeDataset(
        path_to_poses_files=motion_files_path_test,
        path_to_image_lists=path_files_path_test,
        transform_=transforms_,
        camera_parameter=camera_parameter,
        norm_flag=1,
        coor_layer_flag=coor_layer_flag)

    dataloader_vid = DataLoader(kitti_dataset_test,
                                batch_size=input_batch_size,
                                shuffle=False,
                                num_workers=4,
                                drop_last=True)
    print(len(kitti_dataset_t), len(kitti_dataset_test))

    epoch_loss_visu_mean = 0
    epoch_loss_eval_mean = 0
    if vis_flag:
        vis = visualizer.Visualizer(args.visdom_ip, args.visdom_port)
        print('vis', args.visdom_ip, args.visdom_port)
    training_loss_data = open(
        '../saved_new/' + args.model_name + '_training.loss', 'a')
    testing_loss_data = open(
        '../saved_new/' + args.model_name + '_testing.loss', 'a')
    training_ate_data = open(
        '../saved_new/' + args.model_name + '_training.ate', 'a')
    testing_ate_data = open('../saved_new/' + args.model_name + '_testing.ate',
                            'a')
    ################## training   #######################
    for epoch in range(101):
        epoch_loss = 0
        result = []
        result = np.array(result)
        model.train()
        for i_batch, sample_batched in enumerate(dataloader):
            batch_loss, result = pad_update(
                model,
                sample_batched,
                with_attention_flag=with_attention_flag,
                pad_flag=pad_flag,
                motion_flag=motion_flag)
            #att_0 = result[1]
            #vis.plot_heat_map(att_0[0,0,:,:])
            epoch_loss += batch_loss
            if vis_flag:
                vis.plot_current_errors(
                    epoch, i_batch * input_batch_size / len(kitti_dataset_t),
                    batch_loss.data)
            print(epoch, '******', i_batch, '/', len(dataloader), '*******',
                  batch_loss.item())
            batch_loss.backward()
            optimizer.step()
        data_length = len(
            kitti_dataset_t) // input_batch_size * input_batch_size
        epoch_loss_mean = epoch_loss * input_batch_size / data_length
        if vis_flag:
            vis.plot_epoch_current_errors(epoch, epoch_loss_mean.data)
        lr_scheduler.step()

        ####Visualization Path###############################################################
        with torch.no_grad():
            if epoch % valid_period == 0:
                model.eval()
                forward_visual_result = []
                ground_truth = []
                epoch_loss_visu = 0
                for i_batch, sample_batched in enumerate(dataloader_vis):
                    print('visu************** i_batch', i_batch,
                          '******************')
                    model.zero_grad()
                    batch_loss, result = pad_update(
                        model,
                        sample_batched,
                        with_attention_flag=with_attention_flag,
                        pad_flag=pad_flag,
                        motion_flag=motion_flag)

                    att_0 = result[1]
                    if vis_flag:
                        vis.plot_heat_map(att_0[0, 0, :, :])
                    #batch_loss.backward()
                    batch_loss.detach_()
                    training_loss_data.write(
                        str(batch_loss.cpu().data.tolist()) + '\n')
                    training_loss_data.flush()
                    epoch_loss_visu += batch_loss
                    temp_f = weighted_mean_motion(result, with_attention_flag)
                    gt_f_12 = sample_batched['motion_f_01'].numpy()
                    forward_visual_result = np.append(forward_visual_result,
                                                      temp_f)
                    ground_truth = np.append(ground_truth, gt_f_12)
                data_length = len(
                    kitti_dataset_tv) // input_batch_size * input_batch_size
                epoch_loss_visu_mean = epoch_loss_visu * input_batch_size / data_length
                forward_visual_result = forward_visual_result.reshape(
                    data_length, 6) * kitti_dataset_t.motion_stds
                forward_visual_result[:, no_motion_flag] = 0
                #forward_visual_result[:,2]=1
                #ground_truth = ground_truth.reshape(data_length,6)*kitti_dataset.motion_stds+kitti_dataset.motion_means
                ground_truth = ground_truth.reshape(
                    data_length, 6) * kitti_dataset_t.motion_stds

                #forward_visual_result_m = tf.ses2poses(forward_visual_result)
                #ground_truth_m          = tf.ses2poses(ground_truth)
                forward_visual_result_m = tf.eular2pose(forward_visual_result)
                ground_truth_m = tf.eular2pose(ground_truth)
                if vis_flag:
                    vis.plot_path_with_gt(forward_visual_result_m,
                                          ground_truth_m, 5,
                                          'training set forward')

                rot_train, tra_train = evaluate.evaluate(
                    ground_truth_m, forward_visual_result_m)
                training_ate_data.write(
                    str(np.mean(tra_train)) + ' ' + str(np.mean(rot_train)) +
                    '\n')
                training_ate_data.flush()
                torch.save(
                    model.state_dict(), '../saved_model/model_' +
                    args.model_name + '_' + str(epoch).zfill(3) + '.pt')
                ####Validation Path###############################################################
                model.eval()
                forward_visual_result = []
                backward_visual_result = []
                ground_truth = []
                epoch_loss_eval = 0
                forward_visual_opti = []
                for i_batch, sample_batched in enumerate(dataloader_vid):
                    #opti vis
                    print('test************** i_batch', i_batch,
                          '******************')
                    batch_loss_eval, predicted_result_eval = pad_update(
                        model,
                        sample_batched,
                        with_attention_flag=with_attention_flag,
                        pad_flag=pad_flag,
                        motion_flag=motion_flag)
                    batch_loss_eval.detach_()
                    testing_loss_data.write(
                        str(batch_loss_eval.cpu().data.tolist()) + '\n')
                    testing_loss_data.flush()
                    epoch_loss_eval += batch_loss_eval
                    temp_f = weighted_mean_motion(predicted_result_eval,
                                                  with_attention_flag)
                    gt_f_12 = sample_batched['motion_f_01'].numpy()
                    forward_visual_result = np.append(forward_visual_result,
                                                      temp_f)
                    ground_truth = np.append(ground_truth, gt_f_12)
                data_length = len(
                    kitti_dataset_test) // input_batch_size * input_batch_size
                epoch_loss_eval_mean = epoch_loss_eval * input_batch_size / data_length
                forward_visual_result = forward_visual_result.reshape(
                    data_length, 6) * kitti_dataset_test.motion_stds
                forward_visual_result[:, no_motion_flag] = 0
                #forward_visual_result[:,2]=1
                #ground_truth = ground_truth.reshape(data_length,6)*kitti_dataset_test.motion_stds+kitti_dataset_test.motion_means
                ground_truth = ground_truth.reshape(
                    data_length, 6) * kitti_dataset_test.motion_stds

                #forward_visual_result_m = tf.ses2poses(forward_visual_result)
                #ground_truth_m          = tf.ses2poses(ground_truth)

                forward_visual_result_m = tf.eular2pose(forward_visual_result)
                ground_truth_m = tf.eular2pose(ground_truth)
                rot_eval, tra_eval = evaluate.evaluate(
                    ground_truth_m, forward_visual_result_m)

                testing_ate_data.write(
                    str(np.mean(tra_eval)) + ' ' + str(np.mean(rot_eval)) +
                    '\n')
                testing_ate_data.flush()
                if vis_flag:
                    vis.plot_two_path_with_gt(forward_visual_result_m,
                                              forward_visual_result_m,
                                              ground_truth_m, 10,
                                              'testing set forward')
                    vis.plot_epoch_training_validing(
                        epoch,
                        epoch_loss_visu_mean.detach().cpu().numpy(),
                        epoch_loss_eval_mean.detach().cpu().numpy())
                    vis.plot_epoch_training_validing_2(epoch,
                                                       np.mean(tra_train),
                                                       np.mean(tra_eval), 22)
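
The `no_motion_flag` list built at the top of `main` (`[d not in motion_flag for d in range(0, 6)]`) is used as a numpy boolean mask to zero out the motion components the model is not trained on (here everything except components 2 and 4). A minimal demo of that masking:

import numpy as np

motion_flag = [2, 4]                                  # components the model predicts
no_motion_flag = [d not in motion_flag for d in range(0, 6)]
# -> [True, True, False, True, False, True]

pred = np.random.randn(5, 6)                          # a batch of 6-DoF predictions
pred[:, no_motion_flag] = 0                           # boolean column mask
assert (pred[:, [0, 1, 3, 5]] == 0).all()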
Code example #6
File: ssvo_train.py Project: TimingSpace/PADVO2
def main():

    # parameters and flags
    args = parse()
    valid_period = 5
    visualize_training_period = 5
    save_visualize_training_period = 5
    input_batch_size = args.batch_size
    finetune_flag = False
    coor_layer_flag = args.coor_layer_flag
    pad_flag = args.pad_flag
    with_attention_flag = args.with_attention_flag
    print(coor_layer_flag, pad_flag, with_attention_flag)
    use_gpu_flag = True
    #motion_flag = [2,4]
    motion_flag = [0, 1, 2, 3, 4, 5]
    data_balance_flag = False
    color_flag = False
    vis_flag = False
    ate_flag = True
    debug_flag = False
    args.color_flag = color_flag
    args.data_balance_flag = data_balance_flag
    args.coor_layer_flag = coor_layer_flag
    no_motion_flag = [d not in motion_flag for d in range(0, 6)]
    print(motion_flag, no_motion_flag)
    #camera_parameter=[450,180,225,225,225,90]
    #camera_parameter=[651,262,651,651,320,130]
    camera_parameter = [640, 180, 640, 640, 320, 90]
    image_size = (camera_parameter[1], camera_parameter[0])
    args.camera_parameter = camera_parameter
    args.image_size = image_size

    ################## init model###########################
    model = models.VONet.PADVONet(coor_layer_flag=coor_layer_flag,
                                  color_flag=color_flag)
    model = model.float()
    if use_gpu_flag:
        #model     = nn.DataParallel(model.cuda())
        model = model.cuda()
    if finetune_flag:
        model.load_state_dict(torch.load(args.model_load))
    optimizer = optim.Adam(model.parameters(), lr=0.001, weight_decay=0)
    lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer,
                                               lr_lambda=LambdaLR(200, 0,
                                                                  50).step)
    print(optimizer)
    #ego_pre = ep.EgomotionPrediction()
    ################### load data####################
    dataloader, dataloader_vis, dataloader_vid = dataloader_init(args)

    epoch_loss_visu_mean = 0
    epoch_loss_eval_mean = 0
    if vis_flag:
        vis = visualizer.Visualizer(args.visdom_ip, args.visdom_port)
        print('vis', args.visdom_ip, args.visdom_port)
    training_loss_data = open(
        '../checkpoint/saved_result/' + args.model_name + '_training.loss',
        'a')
    testing_loss_data = open(
        '../checkpoint/saved_result/' + args.model_name + '_testing.loss', 'a')
    training_ate_data = open(
        '../checkpoint/saved_result/' + args.model_name + '_training.ate', 'a')
    testing_ate_data = open(
        '../checkpoint/saved_result/' + args.model_name + '_testing.ate', 'a')
    ################## training   #######################
    for epoch in range(101):
        epoch_loss = 0
        result = []
        result = np.array(result)
        model.train()
        for i_batch, sample_batched in enumerate(dataloader):
            batch_loss, result = pad_update(
                model,
                sample_batched,
                with_attention_flag=with_attention_flag,
                pad_flag=pad_flag,
                motion_flag=motion_flag)
            #att_0 = result[1]
            #vis.plot_heat_map(att_0[0,0,:,:])
            epoch_loss += batch_loss
            if vis_flag:
                vis.plot_current_errors(
                    epoch, i_batch * input_batch_size / len(dataloader.dataset),
                    batch_loss.data)
            print(epoch, '******', i_batch, '/', len(dataloader), '*******',
                  batch_loss.item())
            batch_loss.backward()
            optimizer.step()
            if debug_flag:
                if i_batch > 10:
                    break
        epoch_loss_mean = epoch_loss / len(dataloader)
        if vis_flag:
            vis.plot_epoch_current_errors(epoch, epoch_loss_mean.data)
        lr_scheduler.step()

        ####Visualization Path###############################################################
        with torch.no_grad():
            if epoch % valid_period == 0:
                model.eval()
                forward_visual_result = []
                ground_truth = []
                epoch_loss_visu = 0
                for i_batch, sample_batched in enumerate(dataloader_vis):
                    print('visu************** i_batch', i_batch,
                          '******************')
                    model.zero_grad()
                    batch_loss, result = pad_update(
                        model,
                        sample_batched,
                        with_attention_flag=with_attention_flag,
                        pad_flag=pad_flag,
                        motion_flag=motion_flag)

                    att_0 = result[1]
                    if vis_flag:
                        vis.plot_heat_map(att_0[0, 0, :, :])
                    #batch_loss.backward()
                    batch_loss.detach_()
                    training_loss_data.write(
                        str(batch_loss.cpu().data.tolist()) + '\n')
                    training_loss_data.flush()
                    epoch_loss_visu += batch_loss
                    temp_f = weighted_mean_motion(result, with_attention_flag)
                    gt_f_12 = sample_batched['motion_f_01'].numpy()
                    forward_visual_result = np.append(forward_visual_result,
                                                      temp_f)
                    ground_truth = np.append(ground_truth, gt_f_12)
                    if debug_flag:
                        if i_batch * input_batch_size > 1500:
                            break
                data_length = len(dataloader_vis) * input_batch_size
                epoch_loss_visu_mean = epoch_loss_visu / len(dataloader_vis)
                forward_visual_result = forward_visual_result.reshape(
                    -1, 6) * dataloader_vis.dataset.motion_stds
                forward_visual_result[:, no_motion_flag] = 0
                #ground_truth = ground_truth.reshape(data_length,6)*kitti_dataset.motion_stds+kitti_dataset.motion_means
                ground_truth = ground_truth.reshape(
                    -1, 6) * dataloader_vis.dataset.motion_stds

                forward_visual_result_m = tf.eular2pose(forward_visual_result)
                ground_truth_m = tf.eular2pose(ground_truth)
                #forward_visual_result_m = tf.ses2poses(forward_visual_result)
                #ground_truth_m          = tf.ses2poses(ground_truth)
                if ate_flag:
                    rot_train, tra_train = evaluate.evaluate(
                        ground_truth_m, forward_visual_result_m)
                    training_ate_data.write(
                        str(np.mean(tra_train)) + ' ' +
                        str(np.mean(rot_train)) + '\n')
                    training_ate_data.flush()
                if vis_flag:
                    vis.plot_path_with_gt(forward_visual_result_m,
                                          ground_truth_m, 5,
                                          'training set forward')
                torch.save(
                    model.state_dict(), '../checkpoint/saved_model/model_' +
                    args.model_name + '_' + str(epoch).zfill(3) + '.pt')
                ####Validation Path###############################################################
                model.eval()
                forward_visual_result = []
                backward_visual_result = []
                ground_truth = []
                epoch_loss_eval = 0
                forward_visual_opti = []
                for i_batch, sample_batched in enumerate(dataloader_vid):
                    #opti vis
                    print('test************** i_batch', i_batch,
                          '******************')
                    batch_loss_eval, predicted_result_eval = pad_update(
                        model,
                        sample_batched,
                        with_attention_flag=with_attention_flag,
                        pad_flag=pad_flag,
                        motion_flag=motion_flag)
                    batch_loss_eval.detach_()
                    testing_loss_data.write(
                        str(batch_loss_eval.cpu().data.tolist()) + '\n')
                    testing_loss_data.flush()
                    epoch_loss_eval += batch_loss_eval
                    temp_f = weighted_mean_motion(predicted_result_eval,
                                                  with_attention_flag)
                    gt_f_12 = sample_batched['motion_f_01'].numpy()
                    forward_visual_result = np.append(forward_visual_result,
                                                      temp_f)
                    ground_truth = np.append(ground_truth, gt_f_12)
                    if debug_flag:
                        if i_batch * input_batch_size > 1500:
                            break
                data_length = len(dataloader_vid) * input_batch_size
                epoch_loss_eval_mean = epoch_loss_eval / len(dataloader_vid)
                forward_visual_result = forward_visual_result.reshape(
                    -1, 6) * dataloader_vid.dataset.motion_stds
                forward_visual_result[:, no_motion_flag] = 0
                #ground_truth = ground_truth.reshape(data_length,6)*kitti_dataset_test.motion_stds+kitti_dataset_test.motion_means
                ground_truth = ground_truth.reshape(
                    -1, 6) * dataloader_vid.dataset.motion_stds

                forward_visual_result_m = tf.eular2pose(forward_visual_result)
                ground_truth_m = tf.eular2pose(ground_truth)
                #forward_visual_result_m = tf.ses2poses(forward_visual_result)
                #ground_truth_m          = tf.ses2poses(ground_truth)
                rpe = None
                if ate_flag:
                    rot_eval, tra_eval = evaluate.evaluate(
                        ground_truth_m, forward_visual_result_m)
                    testing_ate_data.write(
                        str(np.mean(tra_eval)) + ' ' + str(np.mean(rot_eval)) +
                        '\n')
                    testing_ate_data.flush()
                    rpe = str(np.mean(rot_eval)) + ' ' + str(np.mean(tra_eval))
                if vis_flag:
                    vis.plot_two_path_with_gt(forward_visual_result_m,
                                              forward_visual_result_m,
                                              ground_truth_m, 10,
                                              'testing set forward')
                    vis.plot_epoch_training_validing(
                        epoch,
                        epoch_loss_visu_mean.detach().cpu().numpy(),
                        epoch_loss_eval_mean.detach().cpu().numpy())
                    if ate_flag:
                        vis.plot_epoch_training_validing_2(
                            epoch, np.mean(tra_train), np.mean(tra_eval), 22)

                plot_path([ground_truth_m, forward_visual_result_m], epoch,
                          args, rpe)
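
Unlike the earlier scripts, this one delegates data loading to `dataloader_init(args)`, whose body is not shown. A plausible sketch, assuming it mirrors the inline setup of code examples #3 and #5 (augmented, shuffled training loader plus unshuffled visualization and test loaders); handling of `args.color_flag` and `args.data_balance_flag` is omitted:

def dataloader_init(args):
    # Sketch reconstructed from the inline versions in the other examples;
    # the project's actual implementation may differ in details.
    transforms_train = [
        transforms.Resize(args.image_size),
        transforms.ColorJitter(brightness=0.1, contrast=0.1,
                               saturation=0.1, hue=0.1),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
    transforms_test = [
        transforms.Resize(args.image_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
    dataset_train = data.data_loader.SepeDataset(
        path_to_poses_files=args.motion_path,
        path_to_image_lists=args.image_list_path,
        transform_=transforms_train,
        camera_parameter=args.camera_parameter,
        coor_layer_flag=args.coor_layer_flag)
    dataset_test = data.data_loader.SepeDataset(
        path_to_poses_files=args.motion_path_test,
        path_to_image_lists=args.image_list_path_test,
        transform_=transforms_test,
        camera_parameter=args.camera_parameter,
        norm_flag=1,
        coor_layer_flag=args.coor_layer_flag)
    dataloader = DataLoader(dataset_train, batch_size=args.batch_size,
                            shuffle=True, num_workers=4, drop_last=True)
    dataloader_vis = DataLoader(dataset_train, batch_size=args.batch_size,
                                shuffle=False, num_workers=4, drop_last=True)
    dataloader_vid = DataLoader(dataset_test, batch_size=args.batch_size,
                                shuffle=False, num_workers=4, drop_last=True)
    return dataloader, dataloader_vis, dataloader_vid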