Code example #1 (score: 0) — file: inference.py, project: tf369/GEP_PAMI
def main():
    """Entry point: run evaluation with a fixed RNG seed and report wall time."""
    paths = config.Paths()
    started = time.time()
    # Seed NumPy so repeated runs produce identical results.
    np.random.seed(0)
    evaluate(paths)
    # test(paths)
    elapsed = time.time() - started
    print('Time elapsed: {}s'.format(elapsed))
Code example #2 (score: 0) — file: exp_config.py, project: tf369/GEP_PAMI
 def __init__(self, dataset='VCLA_GAZE'):
     """Select paths, metadata and a dataset factory for the named dataset.

     Args:
         dataset: one of 'WNP', 'VCLA_GAZE', 'CAD', 'Breakfast'
             (default 'VCLA_GAZE'); used as the lookup key below.
     """
     # Per-dataset path configuration objects.
     self.paths_dict = {
         'WNP': wnp_config.Paths(),
         'VCLA_GAZE': vcla_gaze_config.Paths(),
         'CAD': cad_config.Paths(),
         'Breakfast': breakfast_config.Paths(),
     }
     # Per-dataset metadata objects.
     self.metadata_dict = {
         'WNP': WNP_METADATA(),
         'VCLA_GAZE': VCLA_METADATA(),
         'CAD': CAD_METADATA(),
         'Breakfast': BREAKFAST_METADATA(),
     }
     # Factories deferring dataset construction until call time; each takes
     # (path, mode, task, subsample) positionally.
     self.dataset_dict = {
         'WNP': lambda path, mode, task, subsample: wnp.WNP(path, mode, task, subsample),
         'VCLA_GAZE': lambda path, mode, task, subsample: vcla_gaze.VCLA_GAZE(path, mode, task, subsample),
         'CAD': lambda path, mode, task, subsample: cad.CAD(path, mode, task, subsample),
         'Breakfast': lambda path, mode, task, subsample: breakfast.Breakfast(path, mode, task, subsample),
     }
     # Resolve the selection once; a bad key raises KeyError here.
     self.dataset = self.dataset_dict[dataset]
     self.paths = self.paths_dict[dataset]
     self.metadata = self.metadata_dict[dataset]
Code example #3 (score: 0) — file: dataparser.py, project: tf369/GEP_PAMI
def main():
    """Entry point: parse the dataset and report wall time."""
    paths = config.Paths()
    started = time.time()

    parse_data(paths)

    elapsed = time.time() - started
    print('Time elapsed: {}'.format(elapsed))
Code example #4 (score: 0) — file: grammarutils.py, project: tf369/GEP_PAMI
def main():
    """Entry point: run the grammar test routine and report wall time."""
    paths = config.Paths()
    started = time.time()
    # induce_activity_grammar(paths)
    # read_induced_grammar(paths)
    test(paths)
    elapsed = time.time() - started
    print('Time elapsed: {}'.format(elapsed))
Code example #5 (score: 0) — file: cad_finetune.py, project: tf369/GEP_PAMI
def main():
    """Smoke-test CAD_FEATURE loading for one hard-coded sequence id.

    Loads the pickled sequence-id index from paths.tmp_root, builds an
    affordance-task CAD_FEATURE set for a single sequence, and pulls the
    first (feature, label) pair to exercise __getitem__.
    """
    paths = config.Paths()
    with open(os.path.join(paths.tmp_root, 'label_list.p'), 'rb') as f:
        sequence_ids = pickle.load(f)
    # NOTE(review): removed dead code — a `train_num = 10` constant and a
    # shuffled copy of sequence_ids.keys() were computed here but never read;
    # the train id has always been hard-coded below.
    train_ids = ['1130144242$4']  # hard-coded sequence id for the smoke test
    train_set = CAD_FEATURE(paths, train_ids, 'affordance')
    feature, label = train_set[0]  # forces one sample to load end-to-end
    print('Finished')
Code example #6 (score: 0) — file: finetune.py, project: tf369/GEP_PAMI
def parse_args():
    """Parse command-line options for feature-extraction fine-tuning.

    Returns:
        argparse.Namespace with the parsed options plus three derived
        attributes: `paths` (cad_config.Paths), `save_path` and `resume`
        (checkpoint directory for the chosen model/task).
    """
    def restricted_float(x, inter):
        # Validate that x parses as a float inside [inter[0], inter[1]].
        x = float(x)
        if x < inter[0] or x > inter[1]:
            raise argparse.ArgumentTypeError("{} not in range [{}, {}]".format(
                x, inter[0], inter[1]))
        return x

    def str2bool(v):
        # BUG FIX: argparse `type=bool` treats ANY non-empty string as True
        # (bool('False') is True), so e.g. `--eval False` silently enabled
        # evaluation. Parse textual booleans explicitly instead.
        if isinstance(v, bool):
            return v
        if v.lower() in ('yes', 'true', 't', '1'):
            return True
        if v.lower() in ('no', 'false', 'f', '0'):
            return False
        raise argparse.ArgumentTypeError('Boolean value expected, got {!r}'.format(v))

    paths = cad_config.Paths()
    model_name = 'resnet'
    tasks = ['affordance', 'activity']
    task = tasks[0]

    parser = argparse.ArgumentParser(description='VCLA feature extraction')
    parser.add_argument('--task',
                        default=task,
                        type=str,
                        help='Default task for network training')
    parser.add_argument(
        '--cuda',
        default=torch.cuda.is_available(),
        type=str2bool,
        help='Option flag for using cuda training (default: cuda availability)')
    parser.add_argument(
        '--distributed',
        default=False,
        type=str2bool,
        help='Option flag for using distributed training (default: False)')
    parser.add_argument(
        '--model',
        default=model_name,
        type=str,
        help='model to use when extracting features (default: resnet)')
    parser.add_argument('--workers',
                        default=10,
                        type=int,
                        metavar='N',
                        help='number of data loading workers (default: 10)')
    parser.add_argument('--start_epoch',
                        default=0,
                        type=int,
                        metavar='N',
                        help='starting epoch of training (default: 0)')
    parser.add_argument('--epochs',
                        default=10,
                        type=int,
                        metavar='N',
                        help='number of epochs for training (default: 10)')
    parser.add_argument('--batch_size',
                        default=16,
                        type=int,
                        metavar='N',
                        help='batch size for training (default: 16)')
    parser.add_argument(
        '--lr',
        default=1e-3,
        type=float,
        help='learning rate for the feature extraction process (default: 1e-3)'
    )
    parser.add_argument(
        '--lr_decay',
        type=lambda x: restricted_float(x, [0.01, 1]),
        help='decay rate of learning rate (default: between 0.01 and 1)')
    parser.add_argument('--log_interval',
                        type=int,
                        default=50,
                        metavar='N',
                        help='Intervals for logging (default: 50 batch)')
    parser.add_argument(
        '--save_interval',
        type=int,
        default=1,
        metavar='N',
        help='Intervals for saving checkpoint (default: 1 epoch)')

    parser.add_argument(
        '--train_ratio',
        type=float,
        default=0.6,
        help='ratio of data for training purposes (default: 0.6)')
    parser.add_argument(
        '--val_ratio',
        type=float,
        default=0.1,
        help='ratio of data for validation purposes (default: 0.1)')

    parser.add_argument(
        '--eval',
        default=False,
        type=str2bool,
        help='indicates whether need to run evaluation on testing set')
    parser.add_argument('--save',
                        default=False,
                        type=str2bool,
                        help='flag for saving likelihood')
    args = parser.parse_args()
    # Attach derived, non-CLI attributes used downstream.
    args.paths = paths
    args.save_path = os.path.join(paths.inter_root, 'finetune', args.task)
    args.resume = os.path.join(paths.checkpoint_root, 'finetune',
                               '{}'.format(model_name), '{}'.format(args.task))
    return args