Example No. 1
def serve(settings, hpctl_logging, embeddings, datasets, unknown, **kwargs):
    hp_settings, mead_settings = get_settings(settings)
    load_user_modules({}, hp_settings)
    frontend_config, backend_config = get_ends(hp_settings, unknown)
    hp_logs, _ = get_logs(hp_settings, {}, hpctl_logging)
    xpctl_config = get_xpctl_settings(mead_settings)
    set_root(hp_settings)

    datasets = read_config_file_or_json(datasets)
    embeddings = read_config_file_or_json(embeddings)

    results = get_results({})
    backend = get_backend(backend_config)
    logs = get_log_server(hp_logs)

    xpctl = get_xpctl(xpctl_config)

    frontend_config['type'] = 'flask'
    frontend_config['datasets'] = index_by_label(datasets)
    frontend_config['embeddings'] = index_by_label(embeddings)
    frontend = get_frontend(frontend_config, results, xpctl)
    scheduler = RoundRobinScheduler()

    cache = mead_settings.get('datacache', '~/.bl-data')

    try:
        run_forever(results, backend, scheduler, frontend, logs, cache, xpctl_config, datasets, embeddings)
    except KeyboardInterrupt:
        pass
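
Every example on this page routes its index files through index_by_label (from mead.utils). Below is a minimal sketch of the assumed behavior, using a hypothetical two-entry datasets index shaped like the entries the snippets read ('label', 'train_file', 'valid_file', 'test_file'); the helper name with the _sketch suffix is illustrative only.

# Hypothetical datasets index; the entry keys mirror what the examples access.
datasets = [
    {"label": "sst2", "train_file": "sst2.train", "valid_file": "sst2.dev", "test_file": "sst2.test"},
    {"label": "trec", "train_file": "trec.train", "valid_file": "trec.dev", "test_file": "trec.test"},
]

# Assumed behavior of index_by_label: re-key the list by each entry's 'label'
# so later lookups such as datasets_set['sst2'] work.
def index_by_label_sketch(entries):
    return {entry["label"]: entry for entry in entries}

datasets_set = index_by_label_sketch(datasets)
assert datasets_set["sst2"]["train_file"] == "sst2.train"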
Example No. 2
    def initialize(self, embeddings):
        embeddings = read_config_file_or_json(embeddings, 'embeddings')
        embeddings_set = index_by_label(embeddings)
        self.dataset = DataDownloader(self.dataset,
                                      self.data_download_cache).download()
        print_dataset_info(self.dataset)
        vocab1, vocab2 = self.reader.build_vocabs(
            [
                self.dataset['train_file'], self.dataset['valid_file'],
                self.dataset['test_file']
            ],
            min_f=Task._get_min_f(self.config_params),
            vocab_file=self.dataset.get('vocab_file'))

        # To keep the config file simple, share a list between source and destination (tgt)
        features_src = []
        features_tgt = None
        for feature in self.config_params['features']:
            if feature['name'] == 'tgt':
                features_tgt = feature
            else:
                features_src += [feature]

        self.src_embeddings, self.feat2src = self._create_embeddings(
            embeddings_set, vocab1, features_src)
        # For now, don't allow multiple output vocabs
        baseline.save_vocabs(self.get_basedir(), self.feat2src)
        self.tgt_embeddings, self.feat2tgt = self._create_embeddings(
            embeddings_set, {'tgt': vocab2}, [features_tgt])
        baseline.save_vocabs(self.get_basedir(), self.feat2tgt)
        self.tgt_embeddings = self.tgt_embeddings['tgt']
        self.feat2tgt = self.feat2tgt['tgt']
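
For context on the feature-splitting loop above, here is a hedged sketch of the kind of features list assumed in config_params['features']; only the 'name' key matters to the loop, the embedding labels are placeholders, and the entry named 'tgt' becomes the target feature while everything else is treated as a source feature.

# Hypothetical seq2seq feature list.
features = [
    {"name": "word", "embeddings": {"label": "glove-6B-100"}},  # source feature
    {"name": "tgt", "embeddings": {"label": "glove-6B-100"}},   # reused as the target feature
]

features_src = [f for f in features if f["name"] != "tgt"]
features_tgt = next((f for f in features if f["name"] == "tgt"), None)
assert [f["name"] for f in features_src] == ["word"] and features_tgt["name"] == "tgt"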
Example No. 3
    def initialize(self, embeddings):
        embeddings = read_config_file_or_json(embeddings, 'embeddings')
        embeddings_set = index_by_label(embeddings)
        self.dataset = DataDownloader(self.dataset, self.data_download_cache).download()
        print_dataset_info(self.dataset)
        vocab1, vocab2 = self.reader.build_vocabs(
            [self.dataset['train_file'], self.dataset['valid_file'], self.dataset['test_file']],
            min_f=Task._get_min_f(self.config_params),
            vocab_file=self.dataset.get('vocab_file')
        )

        # To keep the config file simple, share a list between source and destination (tgt)
        features_src = []
        features_tgt = None
        for feature in self.config_params['features']:
            if feature['name'] == 'tgt':
                features_tgt = feature
            else:
                features_src += [feature]

        self.src_embeddings, self.feat2src = self._create_embeddings(embeddings_set, vocab1, features_src)
        # For now, don't allow multiple output vocabs
        baseline.save_vocabs(self.get_basedir(), self.feat2src)
        self.tgt_embeddings, self.feat2tgt = self._create_embeddings(embeddings_set, {'tgt': vocab2}, [features_tgt])
        baseline.save_vocabs(self.get_basedir(), self.feat2tgt)
        self.tgt_embeddings = self.tgt_embeddings['tgt']
        self.feat2tgt = self.feat2tgt['tgt']
Example No. 4
    def read_config(self, config_params, datasets_index, **kwargs):
        """
        Read the config file and the datasets index

        Between the config file and the dataset index, we have enough information
        to configure the backend and the models.  We can also initialize the data readers

        :param config_params: The config parameters
        :param datasets_index: The index of datasets
        :return:
        """
        datasets_index = read_config_file_or_json(datasets_index, 'datasets')
        datasets_set = index_by_label(datasets_index)
        self.config_params = config_params
        basedir = self.get_basedir()
        if basedir is not None and not os.path.exists(basedir):
            logger.info('Creating: %s', basedir)
            os.makedirs(basedir)
        self.config_params['train']['basedir'] = basedir
        # Read GPUs from environment variables now so that the reader has access
        if self.config_params['model'].get('gpus', -1) == -1:
            self.config_params['model']['gpus'] = len(get_env_gpus())
        self.config_file = kwargs.get('config_file')
        self._setup_task(**kwargs)
        self._load_user_modules()
        self._configure_reporting(config_params.get('reporting', {}), **kwargs)
        self.dataset = get_dataset_from_key(self.config_params['dataset'],
                                            datasets_set)
        self.reader = self._create_task_specific_reader()
Example No. 5
    def read_config(self, config_params, datasets_index, **kwargs):
        """
        Read the config file and the datasets index

        Between the config file and the dataset index, we have enough information
        to configure the backend and the models.  We can also initialize the data readers

        :param config_params: The config parameters
        :param datasets_index: The index of datasets
        :return:
        """
        datasets_index = read_config_file_or_json(datasets_index, 'datasets')
        datasets_set = index_by_label(datasets_index)
        self.config_params = config_params
        basedir = self.get_basedir()
        if basedir is not None and not os.path.exists(basedir):
            logger.info('Creating: %s', basedir)
            os.makedirs(basedir)
        self.config_params['train']['basedir'] = basedir
        # Read GPUs from environment variables now so that the reader has access
        if self.config_params['model'].get('gpus', -1) == -1:
            self.config_params['model']['gpus'] = len(get_env_gpus())
        self.config_file = kwargs.get('config_file')
        self._setup_task(**kwargs)
        self._load_user_modules()
        self._configure_reporting(config_params.get('reporting', {}), **kwargs)
        self.dataset = get_dataset_from_key(self.config_params['dataset'], datasets_set)
        self.reader = self._create_task_specific_reader()
Example No. 6
 def initialize(self, embeddings):
     embeddings = read_config_file_or_json(embeddings, 'embeddings')
     embeddings_set = index_by_label(embeddings)
     self.config_params['keep_unused'] = True
     features = self.config_params['features']
     self.embeddings, self.feat2index = self._create_embeddings(
         embeddings_set, defaultdict(dict), self.config_params['features'])
     save_vocabs(self.get_basedir(), self.feat2index)
Example No. 7
 def initialize(self, embeddings):
     self.dataset = DataDownloader(self.dataset, self.data_download_cache).download()
     print_dataset_info(self.dataset)
     embeddings = read_config_file_or_json(embeddings, 'embeddings')
     embeddings_set = index_by_label(embeddings)
     vocabs = self.reader.build_vocab(
         [self.dataset['train_file'], self.dataset['valid_file'], self.dataset['test_file']],
         min_f=Task._get_min_f(self.config_params),
     )
     self.embeddings, self.feat2index = self._create_embeddings(embeddings_set, vocabs, self.config_params['features'])
     baseline.save_vocabs(self.get_basedir(), self.feat2index)
Example No. 8
 def initialize(self, embeddings):
     embeddings = read_config_file_or_json(embeddings, 'embeddings')
     embeddings_set = index_by_label(embeddings)
     self.dataset = DataDownloader(self.dataset, self.data_download_cache).download()
     print_dataset_info(self.dataset)
     vocab_sources = [self.dataset['train_file'], self.dataset['valid_file']]
     # TODO: make this optional
     if 'test_file' in self.dataset:
         vocab_sources.append(self.dataset['test_file'])
     vocabs = self.reader.build_vocab(vocab_sources,
                                      min_f=Task._get_min_f(self.config_params),
                                      vocab_file=self.dataset.get('vocab_file'))
     self.embeddings, self.feat2index = self._create_embeddings(embeddings_set, vocabs, self.config_params['features'])
     baseline.save_vocabs(self.get_basedir(), self.feat2index)
Example No. 9
 def initialize(self, embeddings):
     embeddings = read_config_file_or_json(embeddings, 'embeddings')
     embeddings_set = index_by_label(embeddings)
     self.dataset = DataDownloader(self.dataset,
                                   self.data_download_cache).download()
     print("[train file]: {}\n[valid file]: {}\n[test file]: {}".format(
         self.dataset['train_file'], self.dataset['valid_file'],
         self.dataset['test_file']))
     vocab, self.num_elems = self.reader.build_vocab([
         self.dataset['train_file'], self.dataset['valid_file'],
         self.dataset['test_file']
     ])
     self.embeddings, self.feat2index = self._create_embeddings(
         embeddings_set, vocab)
Example No. 10
    def read_config(self, config_params, datasets_index, vecs_index, **kwargs):
        """
        Read the config file and the datasets index

        Between the config file and the dataset index, we have enough information
        to configure the backend and the models.  We can also initialize the data readers

        :param config_params: The config parameters
        :param datasets_index: The index of datasets
        :param vecs_index: The index of vectorizers
        :return:
        """
        datasets_index = read_config_file_or_json(datasets_index, 'datasets')
        datasets_set = index_by_label(datasets_index)
        vecs_index = read_config_file_or_json(vecs_index, 'vecs')
        vecs_set = index_by_label(vecs_index)
        self.config_params = config_params
        config_file = deepcopy(config_params)
        basedir = self.get_basedir()
        if basedir is not None and not os.path.exists(basedir):
            logger.info('Creating: %s', basedir)
            os.makedirs(basedir)
        progress_bar_type = self.config_params.get('progress_bar', os.getenv('MEAD_PROGRESS_BAR'))
        if progress_bar_type is not None:
            logger.info("Setting progress bar type %s", progress_bar_type)
            SET_DEFAULT_PROGRESS_BAR(progress_bar_type)
        self.config_params['train']['basedir'] = basedir
        # Read GPUs from environment variables now so that the reader has access
        if self.config_params['train'].get('gpus', -1) == -1:
            self.config_params['train']['gpus'] = len(get_env_gpus())
        self._setup_task(**kwargs)
        self._load_user_modules()
        self.dataset = get_dataset_from_key(self.config_params['dataset'], datasets_set)
        # Replace the dataset in the config file with the resolved dataset label; this is used by some reporting hooks
        config_file['dataset'] = self.dataset['label']
        self._configure_reporting(config_params.get('reporting', {}), config_file=config_file, **kwargs)
        self.reader = self._create_task_specific_reader(vecs_set)
Example No. 11
    def read_config(self, config_params, datasets_index, **kwargs):
        """
        Read the config file and the datasets index

        Between the config file and the dataset index, we have enough information
        to configure the backend and the models.  We can also initialize the data readers

        :param config_params: The config parameters
        :param datasets_index: The index of datasets
        :return:
        """
        datasets_index = read_config_file_or_json(datasets_index, 'datasets')
        datasets_set = index_by_label(datasets_index)
        self.config_params = config_params
        self.config_file = kwargs.get('config_file')
        self._setup_task()
        self._configure_reporting(config_params.get('reporting', {}),
                                  self.task_name, **kwargs)
        self.dataset = datasets_set[self.config_params['dataset']]
        self.reader = self._create_task_specific_reader()
Example No. 12
def putresult(task, config, log, dataset, user, label, cbase, cstore):
    """
    Puts the results in a database. Provide the task name, config file, the reporting log file, and the dataset index file
    used in the experiment. Optionally, the model files can be put in persistent storage.
    """
    logf = log.format(task)
    if not os.path.exists(logf):
        click.echo(click.style("the log file at {} doesn't exist, provide a valid location".format(logf), fg='red'))
        return
    if not os.path.exists(config):
        click.echo(click.style("the config file at {} doesn't exist, provide a valid location".format(config), fg='red'))
        return
    if not os.path.exists(dataset):
        click.echo(click.style("the dataset file at {} doesn't exist, provide a valid location".format(dataset), fg='red'))
        return
    config_obj = read_config_file(config)
    datasets_set = index_by_label(read_config_file(dataset))
    dataset_key = config_obj['dataset']
    dataset_key = get_dataset_from_key(dataset_key, datasets_set)
    config_obj['dataset'] = dataset_key['label']
    ServerManager.get()
    result = ServerManager.api.put_result(task, to_swagger_experiment(task, config_obj, log, username=user, label=label))
    if result.response_type == 'success':
        eid = result.message
        click.echo(click.style('results stored with experiment: {}'.format(result.message), fg='green'))
        if cbase is None:
            return
        result = store_model(checkpoint_base=cbase, config_sha1=hash_config(read_config_file(config)),
                             checkpoint_store=cstore, print_fn=click.echo)
        if result is not None:
            click.echo(click.style('model stored at {}'.format(result), fg='green'))
            update_result = ServerManager.api.update_property(task, eid, prop='checkpoint', value=result)
            if update_result.response_type == 'success':
                click.echo(click.style(update_result.message, fg='green'))
            else:
                click.echo(click.style(update_result.message, fg='red'))
        else:
            click.echo(click.style('failed to store model', fg='red'))
    else:
        click.echo(click.style(result.message, fg='red'))
Example No. 13
 def initialize(self, embeddings):
     embeddings = read_config_file_or_json(embeddings, 'embeddings')
     embeddings_set = index_by_label(embeddings)
     self.dataset = DataDownloader(self.dataset, self.data_download_cache,
                                   True).download()
     print(
         "[train file]: {}\n[valid file]: {}\n[test file]: {}\n[vocab file]: {}"
         .format(self.dataset['train_file'], self.dataset['valid_file'],
                 self.dataset['test_file'],
                 self.dataset.get('vocab_file', "None")))
     vocab_file = self.dataset.get('vocab_file', None)
     if vocab_file is not None:
         vocab1, vocab2 = self.reader.build_vocabs([vocab_file])
     else:
         vocab1, vocab2 = self.reader.build_vocabs([
             self.dataset['train_file'], self.dataset['valid_file'],
             self.dataset['test_file']
         ])
     self.embeddings1, self.feat2index1 = self._create_embeddings(
         embeddings_set, {'word': vocab1})
     self.embeddings2, self.feat2index2 = self._create_embeddings(
         embeddings_set, {'word': vocab2})
Example No. 14
import os
import argparse
from baseline.utils import read_json
from mead.utils import index_by_label, convert_path
from mead.downloader import EmbeddingDownloader, DataDownloader


parser = argparse.ArgumentParser(description="Download all data and embeddings.")
parser.add_argument("--cache", default="~/.bl-data", type=os.path.expanduser, help="Location of the data cache")
parser.add_argument('--datasets', help='json library of dataset labels', default='config/datasets.json', type=convert_path)
parser.add_argument('--embeddings', help='json library of embeddings', default='config/embeddings.json', type=convert_path)
args = parser.parse_args()


datasets = read_json(args.datasets)
datasets = index_by_label(datasets)

for name, d in datasets.items():
    print(name)
    try:
        DataDownloader(d, args.cache).download()
    except Exception as e:
        print(e)


emb = read_json(args.embeddings)
emb = index_by_label(emb)

for name, e in emb.items():
    print(name)
    try:
        # Assumed completion, mirroring the dataset loop above:
        EmbeddingDownloader(e['file'], e['dsz'], e.get('sha1'), args.cache).download()
    except Exception as e:
        print(e)
Example No. 15
parser.add_argument("--cache",
                    default="~/.bl-data",
                    type=os.path.expanduser,
                    help="Location of the data cache")
parser.add_argument('--datasets',
                    help='json library of dataset labels',
                    default='config/datasets.json',
                    type=convert_path)
parser.add_argument('--embeddings',
                    help='json library of embeddings',
                    default='config/embeddings.json',
                    type=convert_path)
args = parser.parse_args()

datasets = read_json(args.datasets)
datasets = index_by_label(datasets)

for name, d in datasets.items():
    print(name)
    try:
        DataDownloader(d, args.cache).download()
    except Exception as e:
        print(e)

emb = read_json(args.embeddings)
emb = index_by_label(emb)

for name, e in emb.items():
    print(name)
    try:
        # Assumed completion of the truncated call, mirroring the dataset loop above:
        EmbeddingDownloader(e['file'], e['dsz'], e.get('sha1'), args.cache).download()
    except Exception as e:
        print(e)
Example No. 16
    type=convert_path)
parser.add_argument('--cuda', type=baseline.str2bool, default=True)
parser.add_argument('--has_header', type=baseline.str2bool, default=True)
parser.add_argument('--sep', default='\t')

args = parser.parse_args()

args.embeddings = convert_path(
    DEFAULT_EMBEDDINGS_LOC) if args.embeddings is None else args.embeddings
args.embeddings = read_config_stream(args.embeddings)

args.vecs = convert_path(
    DEFAULT_VECTORIZERS_LOC) if args.vecs is None else args.vecs

vecs_index = read_config_stream(args.vecs)
vecs_set = index_by_label(vecs_index)
vec_params = vecs_set[args.vec_id]
vec_params['mxlen'] = args.nctx

if 'transform' in vec_params:
    vec_params['transform_fn'] = vec_params['transform']

if 'transform_fn' in vec_params and isinstance(vec_params['transform_fn'],
                                               str):
    vec_params['transform_fn'] = eval(vec_params['transform_fn'])

vectorizer = create_vectorizer(**vec_params)
if not isinstance(vectorizer, HasPredefinedVocab):
    raise Exception(
        "We currently require a vectorizer with a pre-defined vocab to run this script"
    )
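
The transform/transform_fn handling above expects a string-valued callable path in the vectorizer index that gets eval'd into a function. Here is a minimal sketch with a hypothetical index entry; the key names follow what the snippet reads, but the concrete index format is an assumption.

# Hypothetical vectorizers index entry: 'label' is the lookup key and
# 'transform' names a callable as a string inside the JSON index.
vec_entry = {"label": "bert-base-uncased", "type": "wordpiece1d", "transform": "str.lower"}

vec_params = dict(vec_entry)
vec_params["mxlen"] = 256
if "transform" in vec_params:
    vec_params["transform_fn"] = vec_params["transform"]
if isinstance(vec_params.get("transform_fn"), str):
    vec_params["transform_fn"] = eval(vec_params["transform_fn"])  # same eval pattern as above

assert vec_params["transform_fn"]("ABC") == "abc"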
Example No. 17
def main():
    parser = argparse.ArgumentParser(
        description='Encode a sentence as an embedding')
    parser.add_argument('--subword_model_file', help='Subword model file')
    parser.add_argument('--nctx', default=256, type=int)
    parser.add_argument('--batchsz', default=20, type=int)
    parser.add_argument('--vec_id',
                        default='bert-base-uncased',
                        help='Reference to a specific embedding type')
    parser.add_argument('--embed_id',
                        default='bert-base-uncased',
                        help='What type of embeddings to use')
    parser.add_argument('--file', required=True)
    parser.add_argument('--column', type=str)
    parser.add_argument('--output', default='embeddings.npz')
    parser.add_argument(
        '--pool',
        help="Should a reduction be applied on the embeddings? Only use if your embeddings aren't already pooled",
        type=str)
    parser.add_argument(
        '--embeddings',
        help='index of embeddings: local file, remote URL or mead-ml/hub ref',
        type=convert_path)
    parser.add_argument(
        '--vecs',
        help='index of vectorizers: local file, remote URL or mead-ml/hub ref',
        type=convert_path)
    parser.add_argument('--cuda', type=baseline.str2bool, default=True)
    parser.add_argument('--has_header', action="store_true")
    parser.add_argument(
        "--tokenizer_type",
        type=str,
        help="Optional tokenizer, default is to use string split")
    parser.add_argument(
        '--faiss_index',
        help="If provided, we will build a FAISS index and store it here")
    parser.add_argument(
        '--quoting',
        default=3,
        help='0=QUOTE_MINIMAL 1=QUOTE_ALL 2=QUOTE_NONNUMERIC 3=QUOTE_NONE',
        type=int)
    parser.add_argument('--sep', default='\t')
    parser.add_argument('--add_columns', nargs='+', default=[])

    args = parser.parse_args()

    if not args.has_header:
        if not args.column:
            args.column = 0
        if args.add_columns:
            args.add_columns = [int(c) for c in args.add_columns]
        column = int(args.column)

    else:
        column = args.column

    args.embeddings = convert_path(
        DEFAULT_EMBEDDINGS_LOC) if args.embeddings is None else args.embeddings
    args.embeddings = read_config_stream(args.embeddings)

    args.vecs = convert_path(
        DEFAULT_VECTORIZERS_LOC) if args.vecs is None else args.vecs

    vecs_index = read_config_stream(args.vecs)
    vecs_set = index_by_label(vecs_index)
    vec_params = vecs_set[args.vec_id]
    vec_params['mxlen'] = args.nctx

    if 'transform' in vec_params:
        vec_params['transform_fn'] = vec_params['transform']

    if 'transform_fn' in vec_params and isinstance(vec_params['transform_fn'],
                                                   str):
        vec_params['transform_fn'] = eval(vec_params['transform_fn'])
    tokenizer = create_tokenizer(args.tokenizer_type)
    vectorizer = create_vectorizer(**vec_params)
    if not isinstance(vectorizer, HasPredefinedVocab):
        raise Exception(
            "We currently require a vectorizer with a pre-defined vocab to run this script"
        )
    embeddings_index = read_config_stream(args.embeddings)
    embeddings_set = index_by_label(embeddings_index)
    embeddings_params = embeddings_set[args.embed_id]
    # If they don't want CUDA, try to get the embedding loader to use the CPU
    embeddings_params['cpu_placement'] = not args.cuda
    embeddings = load_embeddings_overlay(embeddings_set, embeddings_params,
                                         vectorizer.vocab)

    vocabs = {'x': embeddings['vocab']}
    embedder = embeddings['embeddings'].cpu()
    embedder.eval()
    if args.cuda:
        embedder = embedder.cuda()

    def _mean_pool(inputs, embeddings):
        mask = (inputs != 0)
        seq_lengths = mask.sum(1).unsqueeze(-1)
        return embeddings.sum(1) / seq_lengths

    def _zero_tok_pool(_, embeddings):
        pooled = embeddings[:, 0]
        return pooled

    def _max_pool(inputs, embeddings):
        mask = (inputs != 0)
        embeddings = embeddings.masked_fill(mask.unsqueeze(-1) == False, -1e8)
        return torch.max(embeddings, 1, False)[0]

    if args.pool:
        if args.pool == 'max':
            pool = _max_pool
        elif args.pool == 'zero' or args.pool == 'cls':
            pool = _zero_tok_pool
        else:
            pool = _mean_pool
    else:
        pool = lambda x, y: y

    def chunks(lst, n):
        """Yield successive n-sized chunks from lst."""
        for i in range(0, len(lst), n):
            yield lst[i:i + n]

    df = pd.read_csv(args.file,
                     header='infer' if args.has_header else None,
                     sep=args.sep)
    col = df[column]
    batches = []
    as_list = col.tolist()
    extra_col_map = {}
    for extra_col in args.add_columns:
        if isinstance(extra_col, int):
            key = f'column_{extra_col}'
        else:
            key = extra_col
        extra_col_map[key] = df[extra_col].tolist()
    num_batches = math.ceil(len(as_list) / args.batchsz)
    pg = baseline.create_progress_bar(num_batches, name='tqdm')
    for i, batch in enumerate(chunks(as_list, args.batchsz)):
        pg.update()
        with torch.no_grad():
            vecs = []
            for line in batch:
                tokenized = tokenizer(line)
                vec, l = vectorizer.run(tokenized, vocabs['x'])
                vecs.append(vec)
            vecs = torch.tensor(np.stack(vecs))
            if args.cuda:
                vecs = vecs.cuda()
            embedding = embedder(vecs)
            pooled_batch = pool(vecs, embedding).cpu().numpy()
            batches += [x for x in pooled_batch]

    np.savez(args.output, embeddings=batches, text=as_list, **extra_col_map)
    if args.faiss_index:
        import faiss
        index = faiss.IndexFlatIP(batches[0].shape[-1])
        batches = np.stack(batches)
        faiss.normalize_L2(batches)
        index.add(batches)
        faiss.write_index(index, args.faiss_index)
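
For reference, a short sketch of reading back the archive the script above writes; embeddings.npz is the script's default output name, and the array keys follow the np.savez call ('embeddings', 'text', plus one array per extra column when --add_columns is used).

import numpy as np

archive = np.load("embeddings.npz", allow_pickle=True)
vectors = archive["embeddings"]  # one row per input line, one column per embedding dimension
texts = archive["text"]
print(vectors.shape, texts[:2])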
Example No. 18
def download_dataset(dataset: str, datasets_index: str,
                     cache: str) -> Dict[str, str]:
    dataset = index_by_label(read_config_file_or_json(datasets_index))[dataset]
    return DataDownloader(dataset, cache).download()
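
A hedged usage sketch for the helper above; the dataset label and the index path are hypothetical and must exist in the index that is passed in.

# Hypothetical call: resolve the 'sst2' entry in config/datasets.json,
# download it into the cache, and return the resolved file paths.
files = download_dataset("sst2", "config/datasets.json", "~/.bl-data")
print(files.get("train_file"), files.get("valid_file"), files.get("test_file"))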
Example No. 19
def main():
    parser = argparse.ArgumentParser(description='Run senteval harness')
    parser.add_argument('--nctx', default=512, type=int)
    parser.add_argument("--module", default=None, help="Module containing custom tokenizers")
    parser.add_argument('--tasks', nargs="+", default=['sts', 'class', 'probe'])
    parser.add_argument('--batchsz', default=20, type=int)
    parser.add_argument('--tok', help='Optional tokenizer, e.g. "gpt2" or "basic". These can be defined in an extra module')
    parser.add_argument('--pool', help="Should a reduction be applied on the embeddings? Only use if your embeddings aren't already pooled", type=str)
    parser.add_argument('--vec_id', help='Reference to a specific embedding type')
    parser.add_argument('--embed_id', help='What type of embeddings to use')
    parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu", help="Device (cuda or cpu)")
    parser.add_argument('--max_len1d', type=int, default=100)
    parser.add_argument('--embeddings', help='index of embeddings: local file, remote URL or mead-ml/hub ref', type=convert_path)
    parser.add_argument('--vecs', help='index of vectorizers: local file, remote URL or mead-ml/hub ref', type=convert_path)
    parser.add_argument('--fast', help="Run fast, but not necessarily as accurate", action='store_true')
    parser.add_argument('--data', help="Path to senteval data",
                        default=os.path.expanduser("~/dev/work/SentEval/data"))
    args = parser.parse_args()

    if args.module:
        logger.warning("Loading custom user module %s for masking rules and tokenizers", args.module)
        baseline.import_user_module(args.module)


    tokenizer = create_tokenizer(args.tok) if args.tok else None

    args.embeddings = convert_path(DEFAULT_EMBEDDINGS_LOC) if args.embeddings is None else args.embeddings
    args.embeddings = read_config_stream(args.embeddings)

    args.vecs = convert_path(DEFAULT_VECTORIZERS_LOC) if args.vecs is None else args.vecs

    vecs_index = read_config_stream(args.vecs)
    vecs_set = index_by_label(vecs_index)
    vec_params = vecs_set[args.vec_id]
    vec_params['mxlen'] = args.nctx

    if 'transform' in vec_params:
        vec_params['transform_fn'] = vec_params['transform']

    if 'transform_fn' in vec_params and isinstance(vec_params['transform_fn'], str):
        vec_params['transform_fn'] = eval(vec_params['transform_fn'])

    vectorizer = create_vectorizer(**vec_params)
    if not isinstance(vectorizer, HasPredefinedVocab):
        raise Exception("We currently require a vectorizer with a pre-defined vocab to run this script")
    embeddings_index = read_config_stream(args.embeddings)
    embeddings_set = index_by_label(embeddings_index)
    embeddings_params = embeddings_set[args.embed_id]
    embeddings = load_embeddings_overlay(embeddings_set, embeddings_params, vectorizer.vocab)

    embedder = embeddings['embeddings']
    embedder.to(args.device).eval()

    def _mean_pool(inputs, embeddings):
        mask = (inputs != 0)
        seq_lengths = mask.sum(1).unsqueeze(-1)
        return embeddings.sum(1)/seq_lengths

    def _zero_tok_pool(_, embeddings):
        pooled = embeddings[:, 0]
        return pooled

    def _max_pool(inputs, embeddings):
        mask = (inputs != 0)
        embeddings = embeddings.masked_fill(mask.unsqueeze(-1) == False, -1e8)
        return torch.max(embeddings, 1, False)[0]

    if args.pool:
        if args.pool == 'max':
            pool = _max_pool
        elif args.pool == 'zero' or args.pool == 'cls':
            pool = _zero_tok_pool
        else:
            pool = _mean_pool
    else:
        pool = lambda x, y: y

    params_senteval = {'task_path': args.data, 'usepytorch': True, 'kfold': 10}
    params_senteval['classifier'] = {'nhid': 0, 'optim': 'adam', 'batch_size': 64,
                                     'tenacity': 5, 'epoch_size': 4}
    if args.fast:
        logging.info("Setting fast params")
        params_senteval['kfold'] = 5
        params_senteval['classifier']['epoch_size'] = 2
        params_senteval['classifier']['tenacity'] = 3
        params_senteval['classifier']['batch_size'] = 128

    # SentEval prepare and batcher
    def prepare(params, samples):
        max_sample = max(len(s) for s in samples)
        vectorizer.mxlen = min(args.nctx, max_sample + SUBWORD_EXTRA)
        logging.info('num_samples %d, mxlen set to %d', max_sample, vectorizer.mxlen)

    def batcher(params, batch):
        if not tokenizer:
            batch = [sent if sent != [] else ['.'] for sent in batch]
        else:
            batch = [tokenizer(' '.join(sent)) for sent in batch]

        vs = []
        for sent in batch:
            v, l = vectorizer.run(sent, vectorizer.vocab)
            vs.append(v)
        vs = np.stack(vs)
        with torch.no_grad():
            inputs = torch.tensor(vs, device=args.device)
            encoding = embedder(inputs)
            encoding = pool(inputs, encoding)
            encoding = encoding.cpu().numpy()
        return encoding

    se = senteval.engine.SE(params_senteval, batcher, prepare)
    transfer_tasks = []
    if 'sts' in args.tasks:
        transfer_tasks += ['STS12', 'STS13', 'STS14', 'STS15', 'STS16', 'SICKRelatedness', 'STSBenchmark']
    if 'class' in args.tasks:
        transfer_tasks += ['MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'SST5', 'TREC', 'MRPC',
                           'SICKEntailment']
    if 'probe' in args.tasks:
        transfer_tasks += ['Length', 'WordContent', 'Depth', 'TopConstituents',
                           'BigramShift', 'Tense', 'SubjNumber', 'ObjNumber',
                           'OddManOut', 'CoordinationInversion']

    results = se.eval(transfer_tasks)
    print(results)