Code example #1
def exp_synset_to_word_cooccur():
    paths = [
        ['imagenet','gt_obj_hyp_cooccur'],
        ['genome_attributes','gt_attr_attr_cooccur'],
        ['genome_attributes','gt_obj_attr_cooccur'],
        ['genome_attributes','gt_context_cooccur'],
    ]
    for dataset,exp_name in paths:
        out_base_dir = os.path.join(
            os.getcwd(),
            f'symlinks/exp/{dataset}')
        exp_const = ExpConstants(exp_name,out_base_dir)

        data_const = Constants()
        data_const.synset_cooccur_json = os.path.join(
            exp_const.exp_dir,
            'synset_cooccur.json')
        data_const.word_cooccur_json = os.path.join(
            exp_const.exp_dir,
            'word_cooccur.json')

        synset_to_word_cooccur.main(exp_const,data_const)
Code example #2
File: run_template.py  Project: gqh1995/vico
def exp_eval():
    exp_name = 'EXP_NAME'
    out_base_dir = os.path.join(os.getcwd(), 'symlinks/exp/EXP_GROUP')
    exp_const = ExpConstants(exp_name, out_base_dir)
    exp_const.model_dir = os.path.join(exp_const.exp_dir, 'models')
    exp_const.log_dir = os.path.join(exp_const.exp_dir, 'log')
    exp_const.vis_dir = os.path.join(exp_const.exp_dir, 'vis')
    exp_const.batch_size = 32
    exp_const.num_workers = 5

    data_const = DATASET_CONSTANTS()
    data_const.subset = 'eval'

    model_const = Constants()
    model_const.model_num = None
    model_const.net = NET_CONSTANTS()
    model_const.net_path = os.path.join(exp_const.model_dir,
                                        f'net_{model_const.model_num}')

    evaluation.main(exp_const, data_const, model_const)
Code example #3
def exp_agg_results():
    exp_name = 'agg_results'
    out_base_dir = os.path.join(os.getcwd(), 'symlinks/exp/cifar100')
    exp_const = ExpConstants(exp_name, out_base_dir)
    exp_const.runs_prefix = os.path.join(out_base_dir, 'zero_shot_')
    exp_const.runs = [0, 1, 2, 3]
    exp_const.prefix = {
        'GloVe': 'glove_100_0_held_classes_',
        #'ViCo(linear,100)': 'vico_linear_100_100_held_classes_',
        'GloVe+ViCo(linear,100)': 'glove_vico_linear_100_100_held_classes_',
        #'GloVe+ViCo(select,200)': 'glove_vico_select_100_200_held_classes_'
    }

    held_out_classes_list = [20]
    for held_out_classes in held_out_classes_list:
        exp_const.held_out_classes = held_out_classes
        agg_results.main(exp_const)
Code example #4
def exp_combine_glove_with_visual_features():
    exp_name = 'concat_glove_and_ae_visual'
    out_base_dir = os.path.join(
        os.getcwd(),
        'symlinks/exp/google_images/' + \
        'normalized_resnet_embeddings_recon_loss_trained_on_google')
    exp_const = ExpConstants(exp_name, out_base_dir)

    visual_feat_dir = os.path.join(
        os.getcwd(),
        'symlinks/exp/google_images/' + \
        'normalized_resnet_features_recon_loss_trained_on_google/' + \
        'ae_visual_features')
    data_const = Constants()
    data_const.visual_features_idx = os.path.join(visual_feat_dir,
                                                  'word_to_idx.json')
    data_const.visual_features_h5py = os.path.join(visual_feat_dir,
                                                   'word_features.h5py')
    glove_const = GloveConstantsFactory.create()
    data_const.glove_idx = glove_const.word_to_idx_json
    data_const.glove_h5py = glove_const.embeddings_h5py

    combine_glove_with_visual_features.main(exp_const, data_const)
Code example #5
def exp_train():
    args = parser.parse_args()
    not_specified_args = manage_required_args(
        args,
        parser,
        required_args=['fappend', 'imgs_per_batch', 'fp_to_tp_ratio'],
        optional_args=[
            'verb_given_appearance', 'verb_given_human_appearance',
            'verb_given_object_appearance',
            'verb_given_boxes_and_object_label', 'rcnn_det_prob'
        ])

    exp_name = 'factors'
    exp_name += '_' + args.fappend

    out_base_dir = os.path.join(os.getcwd(),
                                'data_symlinks/hico_exp/hoi_classifier')
    exp_const = ExpConstants(exp_name=exp_name, out_base_dir=out_base_dir)
    exp_const.log_dir = os.path.join(exp_const.exp_dir, 'log')
    exp_const.model_dir = os.path.join(exp_const.exp_dir, 'models')
    exp_const.num_epochs = 10
    exp_const.imgs_per_batch = args.imgs_per_batch
    exp_const.lr = 1e-3

    data_const_train = FeatureConstants(subset='train')
    data_const_val = FeatureConstants(subset='val')

    model_const = Constants()
    model_const.hoi_classifier = HoiClassifierConstants()
    model_const.hoi_classifier.verb_given_appearance = args.verb_given_appearance
    model_const.hoi_classifier.verb_given_human_appearance = args.verb_given_human_appearance
    model_const.hoi_classifier.verb_given_object_appearance = args.verb_given_object_appearance
    model_const.hoi_classifier.verb_given_boxes_and_object_label = args.verb_given_boxes_and_object_label
    model_const.hoi_classifier.verb_given_human_pose = args.verb_given_human_pose
    model_const.hoi_classifier.rcnn_det_prob = args.rcnn_det_prob

    train.main(exp_const, data_const_train, data_const_val, model_const)
Code example #6
def exp_assign_pose_to_human_cand_hico():
    exp_name = 'hoi_candidates'
    exp_const = ExpConstants(exp_name=exp_name)
    data_const = HicoConstants()
    _exp_assign_pose_to_human_cand(exp_const, data_const)
Code example #7
File: run.py  Project: gqh1995/vico
def exp_syn_cooccur():
    exp_name = 'syn_cooccur'
    out_base_dir = os.path.join(os.getcwd(), 'symlinks/exp/wordnet')
    exp_const = ExpConstants(exp_name, out_base_dir)

    syn_cooccur.main(exp_const)
Code example #8
def main(**kwargs):
    exp_base_dir = coco_paths['exp_dir']
    if kwargs['dataset'] == 'flickr':
        exp_base_dir = flickr_paths['exp_dir']
    exp_const = ExpConstants(kwargs['exp_name'], exp_base_dir)
    exp_const.log_dir = os.path.join(exp_const.exp_dir, 'logs')
    exp_const.model_dir = os.path.join(exp_const.exp_dir, 'models')
    exp_const.vis_dir = os.path.join(exp_const.exp_dir, 'vis')
    exp_const.dataset = kwargs['dataset']
    exp_const.optimizer = 'Adam'
    exp_const.lr = kwargs['lr']
    exp_const.momentum = None
    exp_const.num_epochs = 10
    exp_const.log_step = 20
    # Save models approx. twice every epoch (4000 = 400000/(2*50) for batch size 50)
    exp_const.model_save_step = 400000 // (2 * kwargs['train_batch_size'])
    if exp_const.dataset == 'flickr':
        exp_const.model_save_step = 150000 // (2 * kwargs['train_batch_size'])
    val_freq_factor = 2
    if kwargs['val_frequently'] is True:
        val_freq_factor = 1
    exp_const.val_step = val_freq_factor * exp_const.model_save_step  # set to 1*model_save_step for plotting mi vs perf
    exp_const.num_val_samples = None
    exp_const.train_batch_size = kwargs['train_batch_size']
    exp_const.val_batch_size = 20
    exp_const.num_workers = 10
    exp_const.seed = 0
    exp_const.neg_noun_loss_wt = kwargs['neg_noun_loss_wt']
    exp_const.self_sup_loss_wt = kwargs['self_sup_loss_wt']
    exp_const.lang_sup_loss_wt = kwargs['lang_sup_loss_wt']
    exp_const.contextualize = not kwargs['no_context']
    exp_const.random_lang = kwargs['random_lang']

    DatasetConstants = CocoDatasetConstants
    if exp_const.dataset == 'flickr':
        DatasetConstants = FlickrDatasetConstants

    data_const = {
        'train': DatasetConstants('train'),
        'val': DatasetConstants('val'),
    }

    model_const = Constants()
    model_const.model_num = kwargs['model_num']
    model_const.object_encoder = ObjectEncoderConstants()
    model_const.object_encoder.context_layer.output_attentions = True
    model_const.object_encoder.object_feature_dim = 2048
    model_const.cap_encoder = CapEncoderConstants()
    model_const.cap_encoder.output_attentions = True
    model_const.cap_info_nce_layers = kwargs['cap_info_nce_layers']
    model_const.object_encoder_path = os.path.join(
        exp_const.model_dir, f'object_encoder_{model_const.model_num}')
    model_const.self_sup_criterion_path = os.path.join(
        exp_const.model_dir, f'self_sup_criterion_{model_const.model_num}')
    model_const.lang_sup_criterion_path = os.path.join(
        exp_const.model_dir, f'lang_sup_criterion_{model_const.model_num}')

    train(exp_const, data_const, model_const)
Code example #9
def main(**kwargs):
    exp_base_dir = coco_paths['exp_dir']
    if kwargs['dataset'] == 'flickr':
        exp_base_dir = flickr_paths['exp_dir']
    exp_const = ExpConstants(kwargs['exp_name'], exp_base_dir)
    exp_const.model_dir = os.path.join(exp_const.exp_dir, 'models')
    exp_const.seed = 0
    exp_const.contextualize = not kwargs['no_context']
    exp_const.random_lang = kwargs['random_lang']

    data_const = FlickrDatasetConstants(kwargs['subset'])

    model_const = Constants()
    model_const.object_encoder = ObjectEncoderConstants()
    model_const.object_encoder.context_layer.output_attentions = True
    model_const.object_encoder.object_feature_dim = 2048
    model_const.cap_encoder = CapEncoderConstants()
    model_const.cap_encoder.output_attentions = True
    model_const.cap_info_nce_layers = kwargs['cap_info_nce_layers']

    model_nums = find_all_model_numbers(exp_const.model_dir)
    for num in model_nums:
        # Skip early checkpoints
        if num <= 3000:
            continue

        model_const.model_num = num
        model_const.object_encoder_path = os.path.join(
            exp_const.model_dir, f'object_encoder_{model_const.model_num}')
        model_const.lang_sup_criterion_path = os.path.join(
            exp_const.model_dir, f'lang_sup_criterion_{model_const.model_num}')
        if exp_const.random_lang is True:
            model_const.cap_encoder_path = os.path.join(
                exp_const.model_dir, f'cap_encoder_{model_const.model_num}')

        filename = os.path.join(exp_const.exp_dir,
                                f'results_{data_const.subset}_{num}.json')

        if os.path.exists(filename):
            print(io.load_json_object(filename))
            continue

        eval_flickr_phrase_loc.main(exp_const, data_const, model_const)

    best_model_num = -1
    best_pt_recall = 0
    best_results = None
    for num in model_nums:
        filename = os.path.join(exp_const.exp_dir,
                                f'results_{data_const.subset}_{num}.json')

        if not os.path.exists(filename):
            continue

        results = io.load_json_object(filename)
        results['model_num'] = num
        print(results)
        if results['pt_recall'] >= best_pt_recall:
            best_results = results
            best_pt_recall = results['pt_recall']
            best_model_num = num

    print('-' * 80)
    best_results['model_num'] = best_model_num
    print(best_results)
    filename = os.path.join(exp_const.exp_dir,
                            f'results_{data_const.subset}_best.json')
    io.dump_json_object(best_results, filename)
Code example #10
            rpn_id = human_rpn_ids[i]
            if rpn_id in seen_rpn_ids:
                continue
            else:
                seen_rpn_ids.add(rpn_id)
        
            img = bbox_utils.vis_human_keypts(img,keypts[i],modify=True)

            img_out_path = os.path.join(
                exp_const.exp_dir,
                f'{global_id}.png')
            skio.imsave(img_out_path,img)


if __name__=='__main__':
    exp_const = ExpConstants(exp_name='vis_human_pose')
    exp_const.max_count = 100

    data_const = HicoConstants()
    hoi_cand_dir = os.path.join(
        os.getcwd(),
        'data_symlinks/hico_exp/hoi_candidates')
    data_const.human_pose_feats_h5py = os.path.join(
        hoi_cand_dir,
        'human_pose_feats_test.hdf5')
    data_const.hoi_cand_h5py = os.path.join(
        hoi_cand_dir,
        'hoi_candidates_test.hdf5')
    data_const.num_keypts = 18

    main(exp_const,data_const)
Code example #11
File: run_template.py  Project: gqh1995/vico
def exp_train():
    exp_name = 'EXP_NAME'
    out_base_dir = os.path.join(os.getcwd(), 'symlinks/exp/EXP_GROUP')
    exp_const = ExpConstants(exp_name, out_base_dir)
    exp_const.model_dir = os.path.join(exp_const.exp_dir, 'models')
    exp_const.log_dir = os.path.join(exp_const.exp_dir, 'log')
    exp_const.vis_dir = os.path.join(exp_const.exp_dir, 'vis')
    exp_const.log_step = 10
    exp_const.model_save_step = 1000
    exp_const.val_step = 1000
    exp_const.num_val_samples = 1000
    exp_const.batch_size = 32
    exp_const.num_epochs = 1000
    exp_const.lr = 0.01
    exp_const.momentum = 0.9
    exp_const.num_workers = 5
    exp_const.optimizer = 'SGD'
    exp_const.subset = {'training': 'train', 'validation': 'val'}

    data_const = DATASET_CONSTANTS()

    model_const = Constants()
    model_const.model_num = None
    model_const.net = NET_CONSTANTS()
    model_const.net_path = os.path.join(exp_const.model_dir,
                                        f'net_{model_const.model_num}')

    train.main(exp_const, data_const, model_const)
Code example #12
def exp_detect_coco_objects_in_vcoco():
    exp_const = ExpConstants(exp_name='detect_coco_objects_in_vcoco',
                             out_base_dir=os.path.join(
                                 os.getcwd(), 'data_symlinks/vcoco_exp'))
    data_const = VcocoConstants()
    _exp_detect_coco_objects(exp_const, data_const)
Code example #13
def exp_train():
    args = parser.parse_args()
    not_specified_args = manage_required_args(
        args,
        parser,
        required_args=[
            'embed_dim',
            'xform',
            'model_num',
            'syn'])

    exp_name = f'{args.xform}_{args.embed_dim}'
    out_base_dir = os.path.join(
        os.getcwd(),
        'symlinks/exp/multi_sense_cooccur')
    exp_const = ExpConstants(exp_name,out_base_dir)
    exp_const.model_dir = os.path.join(exp_const.exp_dir,'models')
    exp_const.log_dir = os.path.join(exp_const.exp_dir,'log')
    exp_const.vis_dir = os.path.join(exp_const.exp_dir,'vis')
    exp_const.log_step = 100
    exp_const.model_save_step = 10000
    exp_const.batch_size = 1000
    exp_const.num_epochs = 10
    exp_const.lr = 0.01
    exp_const.momentum = 0.9    # used only when optimizer is set to 'SGD'
    exp_const.num_workers = 5
    # First train with Adam then finetune with Adagrad
    if args.model_num==-1:
        exp_const.optimizer = 'Adam'
    else:
        exp_const.optimizer = 'Adagrad'
    exp_const.weight_decay = 0
    exp_const.cooccur_weights = {
        'syn': 1,
        'attr_attr': 1,
        'obj_attr': 1,
        'obj_hyp': 1,
        'context': 1,
    }
    if not args.syn:
        del exp_const.cooccur_weights['syn']

    exp_const.use_neg = True
    
    data_const = MultiSenseCooccurDatasetConstants()
    data_const.cooccur_csv = os.path.join(
        os.getcwd(),
        'symlinks/exp/multi_sense_cooccur/cooccurrences/merged_cooccur.csv')
    data_const.use_self_count = True

    model_const = Constants()
    if args.model_num==-1:
        model_const.model_num = None
    else:
        model_const.model_num = args.model_num
    model_const.net = LogBilinearConstants()
    model_const.net.num_words = 93553
    model_const.net.embed_dims = args.embed_dim
    model_const.net.two_embedding_layers = False
    model_const.net.xform_type = args.xform
    model_const.net.xform_num_layers = None
    model_const.net.use_bias = True
    model_const.net.use_fx = False
    model_const.net.cooccur_types = [
        'syn',
        'attr_attr',
        'obj_attr',
        'obj_hyp',
        'context'
    ]
    if not args.syn:
        model_const.net.cooccur_types = model_const.net.cooccur_types[1:]

    model_const.net_path = os.path.join(
        exp_const.model_dir,
        f'net_{model_const.model_num}')

    train.main(exp_const,data_const,model_const)
Code example #14
def exp_select_and_evaluate_confident_boxes_in_hico():
    exp_name = 'select_confident_boxes_in_hico'
    exp_const = ExpConstants(exp_name=exp_name)
    data_const = HicoConstants()
    _exp_select_and_evaluate_confident_boxes(exp_const, data_const)
Code example #15
def exp_supervised_partitioning():
    exp_name = 'supervised_partitioning'
    out_base_dir = os.path.join(
        os.getcwd(),
        'symlinks/exp/multi_sense_cooccur/analysis')
    exp_const = ExpConstants(exp_name,out_base_dir)

    data_const = Constants()

    glove_vico_linear_100 = os.path.join(
        os.getcwd(),
        'symlinks/exp/multi_sense_cooccur/linear_100')

    """
    Update data_const.embed_info dictionary to control which embeddings are 
    evaluated. To evaluate your own embeddings create a class object with the 
    following 2 attributes:
    - `word_to_idx_json`: path to your word_to_idx.json file
    - `word_vecs_h5py`: path to your word_vecs.h5py file
    The class should also have a `get_embedding` function that accepts the
    read embeddings as argument and returns a modified version of it (eg. 
    reading only some of the embedding dimensions). 
     
    Class `EmbedInfo` is an example of such a class which dynamically creates 
    the 2 attributes and get_embedding function based on certain arguments.

    However the simplest case could look like `SimpleEmbedInfo` above.
    """
    data_const.embed_info = {
        'GloVe': EmbedInfo(
            glove_vico_linear_100,
            False,
            'glove', # Only glove component
            vico_dim=100,
            glove_dim=300), 
        'ViCo(linear,100)': EmbedInfo(
            glove_vico_linear_100,
            False,
            'visual', # Only visual component
            vico_dim=100,
            glove_dim=300), 
        'GloVe+ViCo(linear,100)': EmbedInfo(
            glove_vico_linear_100,
            False,
            'both', # Concatenated
            vico_dim=100,
            glove_dim=300),
        # 'GloVe+ViCo(linear,100)[paper]': SimpleEmbedInfo(
        #     os.path.join(
        #         os.getcwd(),
        #         'symlinks/exp/multi_sense_cooccur/paper/linear_100/visual_word_vecs_idx.json'),
        #     os.path.join(
        #         os.getcwd(),
        #         'symlinks/exp/multi_sense_cooccur/paper/linear_100/visual_word_vecs.h5py'))
    }
    
    exp_const.fine = True
    supervised_partitioning.main(exp_const,data_const)

    exp_const.fine = False
    supervised_partitioning.main(exp_const,data_const)
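The docstring in the example above describes the interface an `embed_info` entry must satisfy. As a minimal sketch of the simplest such object (the project's `SimpleEmbedInfo` class is referenced but its definition is not shown in these examples, so the code below is an illustrative assumption rather than the actual implementation):

class SimpleEmbedInfo:
    """Minimal embed-info object: two path attributes plus get_embedding."""
    def __init__(self, word_to_idx_json, word_vecs_h5py):
        self.word_to_idx_json = word_to_idx_json  # path to your word_to_idx.json
        self.word_vecs_h5py = word_vecs_h5py      # path to your word_vecs.h5py

    def get_embedding(self, embeddings):
        # Simplest case: return the loaded embeddings unchanged. A custom
        # variant could instead slice out a subset of the dimensions here.
        return embeddings

With such a class, adding your own embeddings to the evaluation amounts to one more dictionary entry, e.g. 'MyEmbed': SimpleEmbedInfo(my_word_to_idx_json, my_word_vecs_h5py), where both paths are placeholders for your own files.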
Code example #16
def exp_train_concat_svm():
    args = parser.parse_args()
    not_specified_args = manage_required_args(args,
                                              parser,
                                              required_args=[
                                                  'lr',
                                                  'l2_weight',
                                                  'batch_size',
                                                  'glove_dim',
                                                  'embed_linear_feat',
                                                  'embed_quadratic_feat',
                                                  'distance_linear_feat',
                                                  'distance_quadratic_feat',
                                                  'visual_only',
                                              ],
                                              optional_args=[
                                                  'exp_name', 'out_base_dir',
                                                  'embeddings_h5py',
                                                  'word_to_idx_json'
                                              ])

    if args.exp_name is None:
        exp_name = 'trial'
    else:
        exp_name = args.exp_name

    if args.out_base_dir is None:
        out_base_dir = os.path.join(os.getcwd(),
                                    'symlinks/exp/semeval_2018_10/concat_svm')
    else:
        out_base_dir = args.out_base_dir

    exp_const = ExpConstants(exp_name=exp_name, out_base_dir=out_base_dir)
    exp_const.log_dir = os.path.join(exp_const.exp_dir, 'log')
    exp_const.model_dir = os.path.join(exp_const.exp_dir, 'models')
    exp_const.num_epochs = 20
    exp_const.batch_size = args.batch_size
    exp_const.lr = args.lr

    data_const = SemEval201810DatasetConstants()
    if args.embeddings_h5py is None:
        glove_const = GloveConstantsFactory.create()
        data_const.embeddings_h5py = glove_const.embeddings_h5py
        data_const.word_to_idx_json = glove_const.word_to_idx_json
    else:
        data_const.embeddings_h5py = args.embeddings_h5py
        data_const.word_to_idx_json = args.word_to_idx_json

    embed_dim = h5py.File(data_const.embeddings_h5py,
                          'r')['embeddings'].shape[1]

    model_const = Constants()
    model_const.concat_svm = ConcatSVMConstants()
    model_const.concat_svm.l2_weight = args.l2_weight
    model_const.concat_svm.embedding_dim = embed_dim
    model_const.concat_svm.glove_dim = args.glove_dim
    model_const.concat_svm.layer_units = []
    model_const.concat_svm.use_embedding_linear_feats = args.embed_linear_feat
    model_const.concat_svm.use_embedding_quadratic_feats = \
        args.embed_quadratic_feat
    model_const.concat_svm.use_distance_linear_feats = args.distance_linear_feat
    model_const.concat_svm.use_distance_quadratic_feats = \
        args.distance_quadratic_feat
    model_const.concat_svm.visual_only = args.visual_only

    train_concat_svm.main(exp_const, data_const, model_const)
Code example #17
def exp_eval_concat_svm():
    args = parser.parse_args()
    not_specified_args = manage_required_args(args,
                                              parser,
                                              required_args=[
                                                  'batch_size',
                                                  'glove_dim',
                                                  'embed_linear_feat',
                                                  'embed_quadratic_feat',
                                                  'distance_linear_feat',
                                                  'distance_quadratic_feat',
                                                  'visual_only',
                                                  'visual_vocab_json',
                                              ],
                                              optional_args=[
                                                  'exp_name', 'out_base_dir',
                                                  'embeddings_h5py',
                                                  'word_to_idx_json'
                                              ])

    if args.exp_name is None:
        exp_name = 'trial'
    else:
        exp_name = args.exp_name

    if args.out_base_dir is None:
        out_base_dir = os.path.join(os.getcwd(),
                                    'symlinks/exp/semeval_2018_10/concat_svm')
    else:
        out_base_dir = args.out_base_dir

    exp_const = ExpConstants(exp_name=exp_name, out_base_dir=out_base_dir)
    exp_const.model_dir = os.path.join(exp_const.exp_dir, 'models')
    exp_const.batch_size = 2560

    data_const = SemEval201810DatasetConstants()
    data_const.subset = 'test'
    if args.embeddings_h5py is None:
        glove_const = GloveConstantsFactory.create()
        data_const.embeddings_h5py = glove_const.embeddings_h5py
        data_const.word_to_idx_json = glove_const.word_to_idx_json
        data_const.visual_vocab_json = data_const.word_to_idx_json
    else:
        data_const.embeddings_h5py = args.embeddings_h5py
        data_const.word_to_idx_json = args.word_to_idx_json
        data_const.visual_vocab_json = args.visual_vocab_json
    # data_const.vocab_json = os.path.join(
    #     os.getcwd(),
    #     'symlinks/data/visualgenome/proc/all_word_freqs.json')
    # data_const.visual_vocab_json = os.path.join(
    #     os.getcwd(),
    #     'symlinks/exp/combine_glove_visual_reps/concat_glove_visual_avg_reps/visual_words.json')

    embed_dim = h5py.File(data_const.embeddings_h5py,
                          'r')['embeddings'].shape[1]

    model_const = Constants()
    model_const.concat_svm = ConcatSVMConstants()
    model_const.concat_svm.embedding_dim = embed_dim
    model_const.concat_svm.glove_dim = args.glove_dim
    model_const.concat_svm.layer_units = []
    model_const.concat_svm.use_embedding_linear_feats = args.embed_linear_feat
    model_const.concat_svm.use_embedding_quadratic_feats = \
        args.embed_quadratic_feat
    model_const.concat_svm.use_distance_linear_feats = args.distance_linear_feat
    model_const.concat_svm.use_distance_quadratic_feats = \
        args.distance_quadratic_feat
    model_const.concat_svm.visual_only = args.visual_only

    eval_concat_svm.main(exp_const, data_const, model_const)
Code example #18
def main(**kwargs):
    exp_const = ExpConstants(kwargs['exp_name'], kwargs['exp_base_dir'])
    exp_const.log_dir = os.path.join(exp_const.exp_dir, 'logs')
    exp_const.model_dir = os.path.join(exp_const.exp_dir, 'models')
    exp_const.vis_dir = os.path.join(exp_const.exp_dir, 'vis')
    exp_const.optimizer = 'Adam'
    exp_const.lr = 1e-3
    exp_const.momentum = None
    exp_const.num_epochs = 100
    exp_const.log_step = 100
    exp_const.model_save_step = 1000
    exp_const.val_step = 1000
    exp_const.num_val_samples = None

    data_const = {'train': Constants(), 'val': Constants()}

    model_const = Constants()
    model_const.model_num = kwargs['model_num']
    model_const.net = Constants()
    model_const.net_path = os.path.join(exp_const.model_dir,
                                        f'net_{model_const.model_num}')
Code example #19
def exp_cache_pose_feats_hico():
    exp_name = 'hoi_candidates'
    exp_const = ExpConstants(exp_name=exp_name)
    data_const = HicoConstants()
    _exp_cache_pose_feats(exp_const, data_const)
Code example #20
def exp_cache_pose_feats_vcoco():
    exp_name = 'hoi_candidates'
    exp_const = ExpConstants(exp_name=exp_name, out_base_dir=os.path.join(os.getcwd(), 'data_symlinks/vcoco_exp'))
    data_const = VcocoConstants()
    _exp_cache_pose_feats(exp_const, data_const)
Code example #21
def exp_train():
    args = parser.parse_args()
    not_specified_args = manage_required_args(args,
                                              parser,
                                              required_args=[
                                                  'held_classes', 'embed_type',
                                                  'glove_dim', 'vico_dim',
                                                  'run'
                                              ],
                                              optional_args=[])
    exp_name = \
        args.embed_type + '_' + \
        str(args.glove_dim) + '_' + \
        str(args.vico_dim) + '_' + \
        'held_classes_' + str(args.held_classes)
    out_base_dir = os.path.join(os.getcwd(),
                                f'symlinks/exp/cifar100/zero_shot_{args.run}')
    exp_const = ExpConstants(exp_name, out_base_dir)
    exp_const.model_dir = os.path.join(exp_const.exp_dir, 'models')
    exp_const.log_dir = os.path.join(exp_const.exp_dir, 'log')
    exp_const.vis_dir = os.path.join(exp_const.exp_dir, 'vis')
    exp_const.log_step = 200
    exp_const.model_save_step = 1000
    exp_const.val_step = 1000
    exp_const.batch_size = 128
    exp_const.num_epochs = 50  #100
    exp_const.lr = 0.01
    exp_const.momentum = 0.9
    exp_const.num_workers = 5
    exp_const.optimizer = 'Adam'
    exp_const.feedforward = False
    exp_const.subset = {'training': 'train', 'test': 'test'}

    data_const = Cifar100DatasetConstants()
    data_const.num_held_out_classes = args.held_classes

    model_const = Constants()
    model_const.model_num = None
    model_const.net = ResnetConstants()
    model_const.net.num_layers = 32
    model_const.net.num_classes = 100
    model_const.net.pretrained = False
    model_const.net_path = os.path.join(exp_const.model_dir,
                                        f'net_{model_const.model_num}')
    model_const.embed2class = Embed2ClassConstants()
    model_const.embed2class.linear = True
    model_const.embed2class_path = os.path.join(
        exp_const.model_dir, f'embed2class_{model_const.model_num}')
    model_const.embed2class.glove_dim = args.glove_dim

    # Dimensions
    if args.embed_type == 'glove':
        model_const.embed2class.embed_dims = args.glove_dim
        model_const.embed2class.embed_h5py = os.path.join(
            os.getcwd(),
            f'symlinks/data/glove/proc/glove_6B_{args.glove_dim}d.h5py')
        model_const.embed2class.embed_word_to_idx_json = os.path.join(
            os.getcwd(),
            f'symlinks/data/glove/proc/glove_6B_{args.glove_dim}d_word_to_idx.json'
        )
    elif args.embed_type == 'glove_vico_linear':
        model_const.embed2class.embed_dims = args.glove_dim + args.vico_dim
        embed_dir = os.path.join(
            os.getcwd(),
            'symlinks/exp/multi_sense_cooccur/' + \
            f'linear_100/concat_with_glove_{args.glove_dim}')
        model_const.embed2class.embed_h5py = os.path.join(
            embed_dir, 'visual_word_vecs.h5py')
        model_const.embed2class.embed_word_to_idx_json = os.path.join(
            embed_dir, 'visual_word_vecs_idx.json')
    elif args.embed_type == 'vico_linear':
        model_const.embed2class.no_glove = True  # Zero out the glove component
        model_const.embed2class.embed_dims = args.glove_dim + args.vico_dim
        embed_dir = os.path.join(
            os.getcwd(),
            'symlinks/exp/multi_sense_cooccur/' + \
            f'linear_100/concat_with_glove_{args.glove_dim}')
        model_const.embed2class.embed_h5py = os.path.join(
            embed_dir, 'visual_word_vecs.h5py')
        model_const.embed2class.embed_word_to_idx_json = os.path.join(
            embed_dir, 'visual_word_vecs_idx.json')
    elif args.embed_type == 'glove_vico_select':
        model_const.embed2class.embed_dims = args.glove_dim + args.vico_dim
        embed_dir = os.path.join(
            os.getcwd(),
            'symlinks/exp/multi_sense_cooccur/' + \
            f'select_200/concat_with_glove_{args.glove_dim}')
        model_const.embed2class.embed_h5py = os.path.join(
            embed_dir, 'visual_word_vecs.h5py')
        model_const.embed2class.embed_word_to_idx_json = os.path.join(
            embed_dir, 'visual_word_vecs_idx.json')
    else:
        err_str = f'{args.embed_type} is currently not implemented in the runner'
        assert (False), err_str

    train.main(exp_const, data_const, model_const)
Code example #22
def exp_detect_coco_objects_in_hico():
    exp_const = ExpConstants(exp_name='detect_coco_objects_in_hico')
    data_const = HicoConstants()
    _exp_detect_coco_objects(exp_const, data_const)
Code example #23
def exp_train():
    args = parser.parse_args()

    # create experiments directory and required folders
    out_base_dir = os.path.join(os.getcwd(), f'exp/{args.dataset_type}')
    exp_const = ExpConstants(args.run_name, out_base_dir)
    exp_const.model_dir = os.path.join(exp_const.exp_dir, 'models')
    exp_const.log_dir = os.path.join(exp_const.exp_dir, 'log')
    exp_const.vis_dir = os.path.join(exp_const.exp_dir, 'vis')

    use_cuda = torch.cuda.is_available()
    exp_const.device = "cuda:0" if use_cuda else "cpu"

    # training params
    exp_const.optimizer = args.optimizer
    exp_const.num_epochs = args.num_epochs
    exp_const.batch_size = args.batch_size
    exp_const.lr = args.lr
    exp_const.momentum = args.momentum
    exp_const.num_workers = args.num_workers

    # logging, saving
    exp_const.log_step = args.log_step
    exp_const.model_save_epoch = args.model_save_epoch
    exp_const.val_epoch = args.val_epoch
    exp_const.subset = {'training': 'train', 'test': 'test'}

    # dataset
    data_const = DatasetConstants(root=args.dataroot,
                                  download=args.download_dataset,
                                  train=True)
    data_const.dataset_type = args.dataset_type

    # model (resnet and attribute embeddings)
    model_const = Constants()
    model_const.model_num = None
    model_const.sim_loss = args.sim_loss
    model_const.ce_loss_warmup = args.ce_loss_warmup

    model_const.net = ResnetConstants()
    if args.dataset_type == 'Cifar100':
        model_const.net.num_layers = "cifar100"  # a custom resnet for cifar100, to adjust the dimensions of the feature maps
        model_const.net.num_classes = 100
    else:
        model_const.net.num_layers = args.num_layers
        if args.dataset_type == "Imagenet":
            model_const.net.num_classes = 1000
        elif args.dataset_type == "VOC":
            model_const.net.num_classes = 20
        elif args.dataset_type == "STL10":
            model_const.net.num_layers = 'cifar100'  # TODO: deeper resnets do not work on STL10.
            model_const.net.num_classes = 10

    model_const.net.pretrained = False
    model_const.net_path = os.path.join(exp_const.model_dir,
                                        f'net_{model_const.model_num}')

    model_const.attr_embed = AttributeEmbeddingsConstants()
    model_const.attr_embed_path = os.path.join(
        exp_const.model_dir, f'attr_embed_{model_const.model_num}')
    model_const.attr_embed.glove_dim = 300
    model_const.attr_embed.num_classes = model_const.net.num_classes

    # attribute embedding dimensions
    if args.embed_type == 'vico_linear':
        model_const.attr_embed.no_glove = True  # Zero out the glove component
        model_const.attr_embed.embed_dims = 300 + args.vico_dim
        embed_dir = os.path.join(
            os.getcwd(),
            'data/pretrained-embeddings/' + \
            f'glove_300_vico_linear_100/')
        model_const.attr_embed.embed_h5py = os.path.join(
            embed_dir, 'visual_word_vecs.h5py')
        model_const.attr_embed.embed_word_to_idx_json = os.path.join(
            embed_dir, 'visual_word_vecs_idx.json')
    elif args.embed_type == 'vico_select':
        model_const.attr_embed.no_glove = True  # Zero out the glove component
        model_const.attr_embed.hypernym = args.hypernym
        model_const.attr_embed.embed_dims = 300 + args.vico_dim

        embed_dir = os.path.join(
            os.getcwd(),
            'data/pretrained-embeddings/' + \
            f'glove_300_vico_select_200/')
        model_const.attr_embed.embed_h5py = os.path.join(
            embed_dir, 'visual_word_vecs.h5py')
        model_const.attr_embed.embed_word_to_idx_json = os.path.join(
            embed_dir, 'visual_word_vecs_idx.json')
    else:
        err_str = f'{args.embed_type} is currently not implemented in the runner'
        assert (False), err_str

    # pass all constants to training method
    train.main(exp_const, data_const, model_const)