Example No. 1
    def _load_model(self, path: str) -> SequentialRNN:
        path_to_model = os.path.join(path, WEIGHTS_FILE)
        logger.debug(f"Loading model from: {path_to_model} ...")

        # Override the default fastai AWD-LSTM config with this model's dimensions.
        awd_lstm_lm_config.update({
            'emb_sz': EMB_SZ,
            'n_hid': N_HID,
            'n_layers': N_LAYERS,
            'out_bias': False
        })
        model = get_language_model(AWD_LSTM, VOCAB_SIZE, awd_lstm_lm_config)
        if self.force_use_cpu:
            # Remap every tensor to CPU, regardless of the device it was saved from.
            map_location = lambda storage, loc: storage
            logger.debug("Using CPU for inference")
        elif cuda.is_available():
            map_location = None  # keep tensors on the device they were saved from
            model.cuda()
            logger.debug("Using GPU for inference")
        else:
            map_location = lambda storage, loc: storage
            logger.info("Cuda not available. Falling back to using CPU.")

        state_dict = torch.load(path_to_model, map_location=map_location)
        model.load_state_dict(weights_to_v1(state_dict), strict=True)

        return model
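This method and the examples below all rely on the same `map_location` idiom to force a checkpoint onto the CPU. A minimal, self-contained sketch of that pattern (the tiny linear module and the model.pth path are placeholders, not part of the snippet above):

import torch
import torch.nn as nn

model = nn.Linear(4, 2)  # stand-in for the AWD-LSTM above
torch.save(model.state_dict(), "model.pth")

# The lambda returns each deserialized storage unchanged, i.e. on the CPU;
# in current PyTorch, map_location="cpu" is the equivalent shorthand.
state_dict = torch.load("model.pth", map_location=lambda storage, loc: storage)
model.load_state_dict(state_dict, strict=True)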
Example No. 2
import pickle

import numpy as np
import torch
from fastai.text import *  # fastai v1-era API: untar_data, URLs, get_language_model

def get_model():
    # Piece the pretrained WikiText-103 model together: download the
    # archive, then locate the weights (.pth) and itos vocab (.pkl) files.
    model_path = untar_data(URLs.WT103, data=False)
    fnames = [list(model_path.glob(f'*.{ext}'))[0] for ext in ['pth', 'pkl']]
    wgts_fname, itos_fname = fnames
    with open(itos_fname, 'rb') as f:
        itos = pickle.load(f)
    # Deserialize the weights onto the CPU.
    wgts = torch.load(wgts_fname, map_location=lambda storage, loc: storage)

    # Default dropout schedules: [input_p, output_p, weight_p, embed_p, hidden_p].
    default_dropout = {
        'language': np.array([0.25, 0.1, 0.2, 0.02, 0.15]),
        'classifier': np.array([0.4, 0.5, 0.05, 0.3, 0.4])
    }
    drop_mult = 1.
    tie_weights = True
    bias = True
    qrnn = False
    dps = default_dropout['language'] * drop_mult
    bptt = 70  # kept from the original training setup; unused in this function
    vocab_size = len(itos)
    emb_sz = 400
    nh = 1150
    nl = 3
    pad_token = 1

    model = get_language_model(vocab_size,
                               emb_sz,
                               nh,
                               nl,
                               pad_token,
                               input_p=dps[0],
                               output_p=dps[1],
                               weight_p=dps[2],
                               embed_p=dps[3],
                               hidden_p=dps[4],
                               tie_weights=tie_weights,
                               bias=bias,
                               qrnn=qrnn)

    # Load the pretrained weights; the encoder's embedding matrix doubles
    # as a table of pretrained word vectors.
    model.load_state_dict(wgts)
    embedding_vectors = wgts['0.encoder.weight'].cpu().numpy()

    # model.cuda()  # uncomment to move the model to the GPU

    return model, itos, embedding_vectors
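A hedged usage sketch for `get_model` (it assumes the WT103 download succeeds and that the query word is present in the pretrained vocabulary):

model, itos, embedding_vectors = get_model()

# itos is a list mapping id -> token; build the reverse map to look up
# the pretrained 400-dimensional vector for a single word.
stoi = {token: i for i, token in enumerate(itos)}
vector = embedding_vectors[stoi['language']]
print(vector.shape)  # (400,), matching emb_sz above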
Example No. 3
import pickle

import pydash as _
import torch
from fastai.text import SequentialRNN, convert_weights, get_language_model  # fastai v1

def get_doc_encoder_and_embeddings(document_token_lookup,
                                   only_use_last_out=False):
    emb_sz = 400
    n_hid = 1150
    n_layers = 3
    pad_token = 1
    model = get_language_model(len(document_token_lookup), emb_sz, n_hid,
                               n_layers, pad_token)
    # Deserialize the pretrained WT103 weights onto the CPU.
    wgts = torch.load('lstm_wt103.pth',
                      map_location=lambda storage, loc: storage)
    with open('./itos_wt103.pkl', 'rb') as fh:
        old_itos = pickle.load(fh)
    # Invert both vocabularies (pydash's _.invert) so pretrained rows can be
    # matched up: old token -> old index, and new index -> token string.
    old_stoi = _.invert(old_itos)
    string_lookup = _.invert(document_token_lookup)
    wgts = convert_weights(
        wgts, old_stoi,
        [string_lookup[i] for i in range(len(document_token_lookup))])
    model.load_state_dict(wgts)
    rnn_enc = model[0]  # the AWD-LSTM encoder half of the language model
    embedding = rnn_enc.encoder
    # OutPooler is project-specific (not a fastai class); given the flag name,
    # it presumably pools encoder outputs or keeps only the last timestep.
    return SequentialRNN(rnn_enc, OutPooler(only_use_last_out)), embedding
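A sketch of how the function above might be called (the toy vocabulary is an assumption for illustration; `lstm_wt103.pth`, `itos_wt103.pkl`, and the project's `OutPooler` class must all be available):

# Toy token -> id mapping; real callers pass their corpus vocabulary.
document_token_lookup = {'xxpad': 0, 'the': 1, 'model': 2}

doc_encoder, embedding = get_doc_encoder_and_embeddings(document_token_lookup)
print(embedding.weight.shape)  # (len(document_token_lookup), 400)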
Example No. 4
    def _load_model(self, path: str, custom_vocab: Optional[Vocab] = None) -> Tuple[SequentialRNN, Vocab]:
        path_to_model = os.path.join(path, BEST_MODEL_FILE_NAME)
        logger.debug(f"Loading model from: {path_to_model} ...")

        vocab = custom_vocab if custom_vocab else self._original_vocab
        model = get_language_model(self._config.arch.get_module(), len(vocab.itos), create_custom_config(self._config))
        map_location = get_map_location(self._force_use_cpu)
        if cuda.is_available():
            model.cuda()
        state_dict = torch.load(path_to_model, map_location=map_location)

        # A simpler alternative would be fastai's load_learner, but it does not
        # work out of the box with the customizations we have made. Loading the
        # raw weights directly turned out to be straightforward, so a full
        # Learner is not needed here.
        # The checkpoint may wrap the weights under a 'model' key (as fastai
        # Learner checkpoints do) or be a bare state dict.
        weights = OrderedDict(state_dict['model'] if ('model' in state_dict) else state_dict)
        if custom_vocab:
            # Remap pretrained embedding rows from the original vocab to the custom one.
            weights = convert_weights(weights, self._original_vocab.stoi, custom_vocab.itos)
        model.load_state_dict(weights, strict=True)

        return model, vocab
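Examples 3 and 4 both lean on fastai v1's `convert_weights` to transplant pretrained embedding rows into a new vocabulary. A hand-rolled sketch of the idea it implements (illustrative only, not fastai's code): tokens present in the old vocab keep their pretrained row, and unseen tokens are initialized to the mean embedding.

import numpy as np

def remap_embedding_rows(old_matrix, old_stoi, new_itos):
    # The mean of all pretrained rows serves as the init for unseen tokens.
    mean_row = old_matrix.mean(axis=0)
    return np.stack([
        old_matrix[old_stoi[token]] if token in old_stoi else mean_row
        for token in new_itos
    ])

old_matrix = np.random.randn(3, 4)           # 3 old tokens, 4-dim embeddings
old_stoi = {'the': 0, 'model': 1, 'xxpad': 2}
new_itos = ['xxpad', 'model', 'brand_new']   # 'brand_new' gets the mean row
print(remap_embedding_rows(old_matrix, old_stoi, new_itos).shape)  # (3, 4)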