Code Example #1
File: train.py  Project: ihaeyong/r2c
import argparse
import multiprocessing

import torch

from allennlp.common.params import Params
from dataloaders.vcr import VCR

# NOTE: the excerpt begins partway through the argparse setup. The parser and
# the opening of the '-folder' flag are filled in here so the fragment parses;
# the script also defines '-params' and '-rationale' flags (used below via
# args.params / args.rationale) that are not shown in this excerpt.
parser = argparse.ArgumentParser()
parser.add_argument(
    '-folder',
    dest='folder',
    help='folder location',
    type=str,
)
parser.add_argument(
    '-no_tqdm',
    dest='no_tqdm',
    action='store_true',
)

args = parser.parse_args()

params = Params.from_file(args.params)
train, val, test = VCR.splits(
    mode='rationale' if args.rationale else 'answer',
    embs_to_load=params['dataset_reader'].get('embs', 'bert_da'),
    only_use_relevant_dets=params['dataset_reader'].get(
        'only_use_relevant_dets', True))
NUM_GPUS = torch.cuda.device_count()
NUM_CPUS = multiprocessing.cpu_count()
if NUM_GPUS == 0:
    raise ValueError("you need gpus!")


def _to_gpu(td):
    # With more than one GPU the batch dict is returned unchanged; device
    # placement is left to the multi-GPU code path.
    if NUM_GPUS > 1:
        return td
    # Single GPU: move every tensor (including tensors nested one level deep
    # in sub-dicts) onto the device.
    for k in td:
        td[k] = {k2: v.cuda(non_blocking=True)
                 for k2, v in td[k].items()} if isinstance(
                     td[k], dict) else td[k].cuda(non_blocking=True)
    return td
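
As a usage note, here is a minimal sketch of how a helper like _to_gpu is typically applied to batches coming out of a loader. The names train_loader and model are assumptions for illustration and do not appear in the excerpt above.

# Hypothetical training-loop fragment (assumed names: train_loader, model).
# _to_gpu expects a dict of tensors, possibly with one level of nested dicts.
for batch in train_loader:
    batch = _to_gpu(batch)        # tensors now live on the GPU
    output_dict = model(**batch)  # model is assumed to accept the batch keys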
Code Example #2
from dataloaders.vcr import VCR
from utils.extractionKeyword import extractionKeyword
from utils.extractionKnowledge import extractionKnowledge
from utils.topKknowledge import topKknowledge
import time  # this is a test
import torch

train_answer = VCR('train', 'answer')
train_rationale = VCR('train', 'rationale')
val_answer = VCR('val', 'answer')
val_rationale = VCR('val', 'rationale')
# test_answer = VCR('test', 'answer')
# test_rationale = VCR('test', 'rationale')

# define the keyword extractor
keywordExtractor = extractionKeyword()
# define knowledge extractor
knowledgeExtractor = extractionKnowledge(5, 10)
# define topK extractor
topKExtractor = topKknowledge(50, 10)

print('start!!!')  # this is a test
start = time.time()  # this is a test
k = 0

for t_answer, t_rationale in zip(train_answer, train_rationale):
    answer_list = []
    rationale_list = []
    print('k : ', k)
    k += 1
    for i in range(4):
Code Example #3
# NOTE: the excerpt begins inside a _to_gpu-style helper; the function and
# loop headers are filled in here so the fragment parses (the helper name is
# inferred, and the surrounding imports/argparse setup are not shown).
def _to_gpu(td):
    for k in td:
        td[k] = {k2: torch.tensor(v).cuda()
                 for k2, v in td[k].items()} if isinstance(
                     td[k], dict) else td[k].cuda()
    return td


num_workers = (4 * NUM_GPUS if NUM_CPUS == 32 else 2 * NUM_GPUS) - 1
print(f"Using {num_workers} workers out of {NUM_CPUS} possible", flush=True)
loader_params = {
    'batch_size': 96 // NUM_GPUS,
    'num_gpus': NUM_GPUS,
    'num_workers': num_workers
}

vcr_modes = VCR.eval_splits(
    embs_to_load=params['dataset_reader'].get('embs', 'bert_da'),
    only_use_relevant_dets=params['dataset_reader'].get(
        'only_use_relevant_dets', True))
probs_grp = []
ids_grp = []
for (vcr_dataset,
     mode_long) in zip(vcr_modes,
                       ['answer'] + [f'rationale_{i}' for i in range(4)]):
    mode = mode_long.split('_')[0]

    test_loader = VCRLoader.from_dataset(vcr_dataset, **loader_params)

    # Load the params again because allennlp will delete them... ugh.
    params = Params.from_file(args.params)
    print("Loading {} for {}".format(params['model'].get('type', 'WTF?'),
                                     mode),
          flush=True)
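
The excerpt above cuts off before the collected results are used. As a hedged sketch (array shapes and the output filename are assumptions, not part of the excerpt), the per-mode probabilities gathered in probs_grp would typically be stacked and saved once the loop finishes:

# Hypothetical post-processing, assuming each entry of probs_grp is an
# (N, 4) array of per-choice probabilities with matching ids in ids_grp.
import numpy as np
stacked = np.stack(probs_grp, axis=1)      # shape (N, num_modes, 4)
np.save('leaderboard_probs.npy', stacked)  # output filename is illustrative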
Code Example #4
import h5py
import numpy as np
import torch
from utils.detector_101 import SimpleDetector
from config import VCR_IMAGES_DIR, VCR_ANNOTS_DIR
from dataloaders.vcr import VCR, VCRLoader

################################################
# this is for the VCR dataset
################################################

################################################
# data splits!
################################################
train, val = VCR.splits(mode='rationale',
                        embs_to_load='bert_da',
                        only_use_relevant_dets=False)

print('split is ok!')

################################################
# data loader!
################################################
loader_params = {'batch_size': 1, 'num_gpus': 1, 'num_workers': 1}
train_loader = VCRLoader.from_dataset(train, **loader_params)
val_loader = VCRLoader.from_dataset(val, **loader_params)

print('loader is ok!')
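
# A quick smoke test of the loaders can help before wiring up the detector.
# This is a hypothetical sketch: it assumes each batch is a dict mapping
# field names to tensors (the actual field names are not shown above).
batch = next(iter(train_loader))
for name, value in batch.items():
    if torch.is_tensor(value):
        print(name, tuple(value.shape))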

################################################
# define the detector!