Example #1
import copy
import os

import torch

# NOTE: get_parser, add_argument, from_config_file, set_seeds, check_parameters,
# main, __main__ and Trainer are project-specific helpers assumed to be defined
# or imported elsewhere in the repository.
def __clf__main__(trainer_class=Trainer):
    # generate parser / parse parameters
    parser = get_parser()
    parser = add_argument(parser)
    params = parser.parse_args()
    params = from_config_file(params)

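    # select the compute device: fall back to CUDA when available, otherwise CPU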
    if params.device not in ["cpu", "cuda"]:
        params.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')
    else:
        params.device = torch.device(params.device)

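    # seed the RNGs; if a pretraining config file exists, run the pretraining
    # entry point with a copy of the parameters before the main experiment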
    set_seeds(params.random_seed)
    params.pretrain = os.path.isfile(params.pretrain_config)
    if params.pretrain:
        params_pretrain = from_config_file(copy.deepcopy(params),
                                           config_file=params.pretrain_config)
        __main__(params_pretrain)
    else:
        params_pretrain = None

    # check parameters
    check_parameters(params)

    # run experiment
    main(params, params_pretrain, trainer_class=trainer_class)
Example #2
import os
import random
from sklearn.preprocessing import normalize
import numpy as np
import utils
import torchfile
from params import get_parser

parser = get_parser()
params = parser.parse_args()

random.seed(params.seed)
DATA_ROOT = params.test_feats
partition = params.partition

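# torch7 files holding the precomputed embeddings and ids for this partition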
img_embs = partition + '_img_embs.t7'
instr_embs = partition + '_instr_embs.t7'
test_ids = partition + '_ids.t7'

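# load image embeddings, instruction embeddings and sample ids as numpy arrays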
im_vecs = np.array(torchfile.load(os.path.join(DATA_ROOT, img_embs)))
instr_vecs = np.array(torchfile.load(os.path.join(DATA_ROOT, instr_embs)))
names = np.array(torchfile.load(os.path.join(DATA_ROOT, test_ids)))

# Sort based on names to always pick same samples for medr
names_str = []
for i in range(names.shape[0]):
    names_str.append(''.join(chr(k) for k in names[i]).split('\x00')[0])
names = np.array(names_str)
idxs = np.argsort(names)
names = names[idxs]
im_vecs = normalize(im_vecs)[idxs]
Example #3
import json
import os

import numpy as np
import torchfile
from PIL import Image

# NOTE: get_parser is a project-specific helper; as in Example #2, it is assumed
# to be importable from the params module.
from params import get_parser

IMPATH = '../../data/recipe800k/images/'

def read_image(impath):
    im = Image.open(impath)
    im = im.resize((224,224))
    return im

def load_layer(json_file):
    with open(json_file) as f_layer:
        return json.load(f_layer)


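# per-class buckets for a fixed set of dish categories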
ch = {'sushi':[],'pie':[],'pizza':[],'lasagna':[],'soup':[],'burger':[],
      'pasta':[],'salad':[],'smoothie':[],'cookie':[]}

parser = get_parser()
params = parser.parse_args()

#random.seed(params.seed)

DATA_ROOT = params.test_feats
IMPATH = os.path.join(params.dataset,'images')
partition = params.partition

img_embs = partition + '_img_embs.t7'
instr_embs = partition + '_instr_embs.t7'
test_ids = partition + '_ids.t7'

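# load the precomputed torch7 embeddings and sample ids for the selected partition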
im_vecs = np.array(torchfile.load(os.path.join(DATA_ROOT,img_embs)))
instr_vecs = np.array(torchfile.load(os.path.join(DATA_ROOT,instr_embs)))
names = np.array(torchfile.load(os.path.join(DATA_ROOT,test_ids)))
Example #4
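            # run the end-of-epoch evaluation hook for every evaluation task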
            for eval_task in params.eval_tasks:
                end_of_epoch(params=eval_trainers[eval_task]["params"],
                             logger=logger,
                             trainer=eval_trainers[eval_task]['trainer'],
                             evaluator=eval_trainers[eval_task]['evaluator'],
                             eval_task=eval_task)

        # our
        logger.info("============ garbage collector collecting %d ..." %
                    gc.collect())


if __name__ == '__main__':

    # generate parser / parse parameters
    params = get_parser().parse_args()

    params = from_config_file(params)

    set_seeds(params.random_seed)

    if params.device not in ["cpu", "cuda"]:
        params.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')
    else:
        params.device = torch.device(params.device)

    __main__(params)

    # run experiment
    main(params)
Example #5
import os
import numpy as np
import torch
from transformers import BertTokenizer, BertModel
import logging
import simplejson as json
from tqdm import tqdm
import pickle

# =============================================================================
import params
parser = params.get_parser()
opts = parser.parse_args()
data_path, results_path, logdir = params.show_bert_opts(opts)
# =============================================================================
if opts.view_emb:
    from torch.utils.tensorboard import SummaryWriter
    pre_writer = SummaryWriter(os.path.join(logdir, "pre"))
# =============================================================================
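# run on the GPU when available, otherwise fall back to the CPU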
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

### Load the model
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') # Load pre-trained model tokenizer (vocabulary)
model = BertModel.from_pretrained('bert-base-uncased', output_hidden_states = True) # Load pre-trained model (weights)
model.to(device)
model.eval() # Put the model in evaluation mode (disables dropout).
print("Model loaded")


### Load the JSON file
def load(file):