Example #1
0
 def __init__(self,
              root_folder,
              type,
              input_type,
              split_filename='',
              transform_color=None,
              transform_depth=None,
              img_res=None,
              crop_res=None,
              for_autoencoding=False,
              fpa_subj_split=False,
              fpa_obj_split=False):
     """Initialise the dataset; build a split file if none is supplied.

     Args:
         root_folder: Dataset root directory, forwarded to the base class.
         type: Dataset partition selector (shadows the builtin ``type``;
             presumably 'train'/'valid'/'test' — confirm against base class).
         input_type: Forwarded to the base class (semantics defined there).
         split_filename: Pickled split file name; '' triggers creation of a
             new split file.
         transform_color / transform_depth: Optional per-modality transforms.
         img_res: Image resolution, forwarded to the base class.
         crop_res: Accepted but NOT forwarded to super() here, unlike the
             sibling tracking dataset — TODO confirm this is intentional.
         for_autoencoding: Forwarded to the base class.
         fpa_subj_split / fpa_obj_split: Cross-subject / cross-object split
             flags, forwarded to fpa_io.create_split_file.
     """
     super(FPADatasetPoseRegressionFromVQVAE,
           self).__init__(root_folder,
                          type,
                          input_type,
                          transform_color=transform_color,
                          transform_depth=transform_depth,
                          img_res=img_res,
                          split_filename=split_filename,
                          for_autoencoding=for_autoencoding)
     self.fpa_subj_split = fpa_subj_split
     self.fpa_obj_split = fpa_obj_split
     if split_filename == '':
         # NOTE(review): the created file is always named 'fpa_split_subj.p'
         # even when fpa_obj_split=True is requested — looks like a copy-paste
         # slip; verify against fpa_io.create_split_file.
         fpa_io.create_split_file(self.root_folder,
                                  self.video_folder,
                                  perc_train=0.7,
                                  perc_valid=0.15,
                                  only_with_obj_pose=False,
                                  fpa_subj_split=fpa_subj_split,
                                  fpa_obj_split=fpa_obj_split,
                                  split_filename='fpa_split_subj.p')
         # Fall back to the base-class default name for loading below —
         # presumably matches the hardcoded name above; TODO confirm.
         self.split_filename = self.default_split_filename
     self.dataset_split = fpa_io.load_split_file(self.root_folder,
                                                 self.split_filename)
Example #2
0
    def __init__(self,
                 root_folder,
                 type,
                 input_type,
                 transform_color=None,
                 transform_depth=None,
                 img_res=None,
                 crop_res=None,
                 split_filename='',
                 for_autoencoding=False):
        """Initialise the tracking dataset; create or load the split file.

        Args:
            root_folder: Dataset root directory, forwarded to the base class.
            type: Dataset partition selector (shadows the builtin ``type``;
                kept for interface compatibility with existing callers).
            input_type: Forwarded to the base class (semantics defined there).
            transform_color / transform_depth: Optional per-modality
                transforms, forwarded to the base class.
            img_res: Image resolution, forwarded to the base class.
            crop_res: Optional crop resolution; when given it is also stored
                directly on the instance.
            split_filename: Pickled split file name; '' triggers creation of
                a new split file instead of loading one.
            for_autoencoding: Forwarded to the base class.
        """
        super(FPADatasetTracking,
              self).__init__(root_folder,
                             type,
                             input_type,
                             transform_color=transform_color,
                             transform_depth=transform_depth,
                             img_res=img_res,
                             crop_res=crop_res,
                             split_filename=split_filename,
                             for_autoencoding=for_autoencoding)
        # PEP 8 idiom: `x is not None`, not `not x is None`.
        if crop_res is not None:
            self.crop_res = crop_res

        if self.split_filename == '':
            # NOTE(review): this branch creates the split file but never sets
            # self.dataset_split (the else-branch does) — verify whether the
            # base class loads it, or whether a load call is missing here.
            fpa_io.create_split_file(self.root_folder,
                                     self.video_folder,
                                     perc_train=0.7,
                                     perc_valid=0.15)
        else:
            self.dataset_split = fpa_io.load_split_file(
                self.root_folder, self.split_filename)
Example #3
0
    def __init__(self,
                 root_folder,
                 type,
                 transform=None,
                 img_res=None,
                 split_filename=''):
        """Store the constructor arguments and create or load the split.

        An empty ``split_filename`` means no split exists yet: a fresh split
        file is generated from the ground-truth folder. Otherwise the
        existing split is loaded into ``self.dataset_tuples``.
        """
        # Keep a copy of every argument on the instance.
        self.root_folder = root_folder
        self.type = type
        self.transform = transform
        self.img_res = img_res
        self.split_filename = split_filename

        if self.split_filename != '':
            # A split file was named explicitly — load it.
            self.dataset_tuples = fpa_io.load_split_file(
                self.root_folder, self.split_filename)
        else:
            # No split given — generate one from the ground-truth folder.
            fpa_io.create_split_file(self.root_folder,
                                     self.gt_folder,
                                     num_train_seq=2,
                                     actions=None)
Example #4
0
 def __init__(self, params_dict):
     """Initialise the RGB-reconstruction dataset from a parameter dict.

     Args:
         params_dict: Configuration dict; keys read here are 'root_folder',
             'split_filename', 'img_res', 'dataset_root_folder' and 'type'.
     """
     super(FPADatasetObjRGBReconstruction, self).\
         __init__(params_dict['root_folder'],
                  split_filename=params_dict['split_filename'],
                  img_res=params_dict['img_res'])
     self.params_dict = params_dict
     # create or load dataset split
     if params_dict['split_filename'] == '':
         # NOTE(review): two likely slips in this branch —
         # (1) it passes the root as 'dataset_root_folder' while super() used
         #     'root_folder'; confirm both keys exist and agree;
         # (2) split_filename is forwarded even though this branch only runs
         #     when it is '' — the file would be created with an empty name.
         fpa_io.create_split_file(
             params_dict['dataset_root_folder'],
             perc_train=0.8,
             perc_valid=0.,
             only_with_obj_pose=True,
             fpa_subj_split=False,
             fpa_obj_split=False,
             split_filename=params_dict['split_filename'])
     self.dataset_split = fpa_io.load_split_file(self.root_folder,
                                                 self.split_filename)
     self.type = params_dict['type']
     # Inverse of the last Normalize transform, used to recover viewable
     # images — assumes self.transform ends with a torchvision-style
     # Normalize exposing .mean/.std; TODO confirm in base class.
     self.unnormalize = UnNormalize(self.transform.transforms[-1].mean,
                                    self.transform.transforms[-1].std,
                                    img=True)
Example #5
0
# CLI flags selecting which dataset split(s) to generate.
parser.add_argument('--fpa-obj-split',
                    default=False,
                    action='store_true',
                    help='Whether to use the FPA paper cross-object split')
parser.add_argument('--all',
                    default=False,
                    action='store_true',
                    help='Create all splits')

args = parser.parse_args()

if args.all:
    # Cross-subject split (70% train / 15% valid).
    fpa_io.create_split_file(args.dataset_root_folder,
                             args.video_folder,
                             perc_train=0.7,
                             perc_valid=0.15,
                             only_with_obj_pose=False,
                             fpa_subj_split=True,
                             fpa_obj_split=False)
    # Cross-object split, same proportions.
    fpa_io.create_split_file(args.dataset_root_folder,
                             args.video_folder,
                             perc_train=0.7,
                             perc_valid=0.15,
                             only_with_obj_pose=False,
                             fpa_subj_split=False,
                             fpa_obj_split=True)
    # Third split (call continues beyond this chunk — left as-is).
    fpa_io.create_split_file(args.dataset_root_folder,
                             args.video_folder,
                             perc_train=0.7,
                             perc_valid=0.15,
                             only_with_obj_pose=False,
Example #6
0
    # Tail of a checkpoint-saving helper (its `def` lies outside this chunk):
    # log the event, then persist the state dict to disk.
    myprint("\tSaving a checkpoint...", log_filepath)
    torch.save(state, filename)


# Negative log-likelihood loss — presumably the model emits log-probabilities
# (log_softmax); confirm against LSTMBaseline's forward.
loss_function = nn.NLLLoss()

# Full action vocabulary available in the dataset.
actions = [
    'charge_cell_phone', 'clean_glasses', 'close_juice_bottle',
    'close_liquid_soap', 'close_milk', 'close_peanut_butter', 'drink_mug',
    'flip_pages', 'flip_sponge', 'give_card'
]

# NOTE(review): the `actions` list above is unused here — the call passes a
# literal 5-action subset instead; confirm whether `actions[:5]` was intended.
fpa_io.create_split_file(args.dataset_root_folder,
                         args.gt_folder,
                         num_train_seq=2,
                         actions=[
                             'charge_cell_phone', 'clean_glasses',
                             'close_juice_bottle', 'close_liquid_soap',
                             'close_milk'
                         ])

dataset_tuples = fpa_io.load_split_file(args.dataset_root_folder)

# 21 joints per hand frame; action count comes from the loaded split.
lstm_baseline = LSTMBaseline(num_joints=21,
                             num_actions=dataset_tuples['num_actions'],
                             use_cuda=args.use_cuda)

if args.use_cuda:
    lstm_baseline = lstm_baseline.cuda()

# Adadelta optimiser (call continues beyond this chunk — left as-is).
optimizer = optim.Adadelta(lstm_baseline.parameters(),
                           rho=0.9,