Beispiel #1
0
def main():
    """Load a saved encoder/attention-decoder pair and run evaluation on the
    held-out tail of the debug data set.

    Relies on module-level globals `hidden_size` and `device`, and on the
    checkpoint files under model/.
    """
    input_lang, output_lang, pairs = prepare_data('ques',
                                                  'ans',
                                                  '../debug.json',
                                                  reverse=False)

    encoder = EncoderRNN(input_lang.n_words, hidden_size).to(device)
    attn_decoder = AttnDecoderRNN(hidden_size,
                                  output_lang.n_words,
                                  dropout_p=0.1,
                                  max_length=1000).to(device)

    # 90/10 train/test split; only the test slice is used below.
    rate = 0.9
    split_at = int(len(pairs) * rate)
    train_pairs, test_pairs = pairs[:split_at], pairs[split_at:]

    # Restore trained weights and switch to inference mode.
    encoder.load_state_dict(torch.load('model/encoder-0.model'))
    encoder.eval()
    attn_decoder.load_state_dict(torch.load('model/decoder-0.model'))
    attn_decoder.eval()

    evaluate_all(encoder,
                 attn_decoder,
                 test_pairs,
                 max_length=1000,
                 input_lang=input_lang,
                 output_lang=output_lang,
                 n=len(test_pairs))
    # show_plot(loss_history)
    print('done test')
Beispiel #2
0
def inference(sentence, language, MODEL_DIR, codersum):
    """Greedy-decode a single sentence with a saved encoder/decoder pair.

    Args:
        sentence: raw input sentence (tokenization handled by
            indexes_from_sentence).
        language: Lang-like vocabulary object with n_words / index2word.
        MODEL_DIR: directory containing the checkpoint files.
        codersum: checkpoint identifier embedded in the file names.

    Returns:
        The decoded output as a single string ("".join of the predicted
        tokens, so suited to character-level vocabularies).
    """
    encoder = EncoderRNN(language.n_words,
                         config.HIDDEN_SIZE,
                         config.NUM_LAYER,
                         max_length=config.MAX_LENGTH + 1)
    decoder = AttnDecoderRNN(config.ATT_MODEL,
                             config.HIDDEN_SIZE,
                             language.n_words,
                             config.NUM_LAYER,
                             dropout_p=config.DROPOUT)

    encoder_path = os.path.join(MODEL_DIR, "encoder_" + str(codersum) + ".pth")
    decoder_path = os.path.join(MODEL_DIR, "decoder_" + str(codersum) + ".pth")
    encoder.load_state_dict(torch.load(encoder_path, map_location="cpu"))
    decoder.load_state_dict(torch.load(decoder_path, map_location="cpu"))
    encoder.eval()
    decoder.eval()

    # BUGFIX: the original moved only decoder_input/decoder_context to the
    # GPU, leaving the models, the encoder input, and the hidden/cell states
    # on the CPU — a guaranteed device mismatch when config.USE_CUDA is set.
    # Move the models once here and every tensor as it is created.
    if config.USE_CUDA:
        encoder = encoder.cuda()
        decoder = decoder.cuda()

    batch_size = 1

    # no_grad: pure inference — avoids building autograd graphs.
    with torch.no_grad():
        input_index = indexes_from_sentence(language, sentence)
        input_index = pad_sentence(input_index)  # pad to fixed length
        input_variable = torch.LongTensor([input_index])
        if config.USE_CUDA:
            input_variable = input_variable.cuda()

        encoder_hidden, encoder_cell = encoder.init_hidden(batch_size)
        encoder_outputs, encoder_hidden, encoder_cell = encoder(
            input_variable, encoder_hidden, encoder_cell)

        # Start token appears to be index 0 — TODO confirm against training code.
        decoder_input = torch.zeros(batch_size, 1).long()
        decoder_context = torch.zeros(batch_size, decoder.hidden_size)
        decoder_hidden = encoder_hidden
        decoder_cell = encoder_cell
        if config.USE_CUDA:
            decoder_input = decoder_input.cuda()
            decoder_context = decoder_context.cuda()

        decoded_words = []

        # Run through decoder, one greedy step at a time.
        for di in range(config.MAX_LENGTH):
            decoder_output, decoder_context, decoder_hidden, decoder_cell, _ = decoder(
                decoder_input, decoder_context, decoder_hidden, decoder_cell,
                encoder_outputs)

            # Choose top word from output.
            topv, topi = decoder_output.data.topk(1)
            ni = topi[0][0]
            if ni == 0:
                # Index 0 doubles as the end-of-sequence marker here.
                break
            else:
                decoded_words.append(language.index2word[ni.item()])

            # Feed the prediction back in as the next input.
            decoder_input = torch.LongTensor([[ni]])
            if config.USE_CUDA:
                decoder_input = decoder_input.cuda()

    return "".join(decoded_words)
Beispiel #3
0
def load_model_param(language, model_dir):
    """Build an encoder/decoder pair and load trained weights from model_dir.

    Args:
        language: Lang-like vocabulary object providing n_words.
        model_dir: directory containing "encoder.pth" and "decoder.pth".

    Returns:
        (encoder, decoder), both in eval mode with weights loaded on CPU.
    """
    encoder = EncoderRNN(language.n_words,
                         config.HIDDEN_SIZE,
                         config.NUM_LAYER,
                         # NOTE(review): hard-coded 17 + 1 here, while the
                         # sibling inference() uses config.MAX_LENGTH + 1 —
                         # confirm which value the checkpoints were trained with.
                         max_length=17 + 1)
    decoder = AttnDecoderRNN(config.ATT_MODEL,
                             config.HIDDEN_SIZE,
                             language.n_words,
                             config.NUM_LAYER,
                             dropout_p=config.DROPOUT)

    # BUGFIX: the original ignored the model_dir parameter and always read
    # from config.MODEL_DIR; honor the argument the caller passed in.
    encoder_path = os.path.join(model_dir, "encoder.pth")
    decoder_path = os.path.join(model_dir, "decoder.pth")

    encoder.load_state_dict(torch.load(encoder_path, map_location="cpu"))
    decoder.load_state_dict(torch.load(decoder_path, map_location="cpu"))
    encoder.eval()
    decoder.eval()
    return encoder, decoder
Beispiel #4
0
def main():
    """Restore a seq2seq checkpoint (weights + vocabularies) and run both
    sample-based and batched decoding on the test pairs."""
    nIters = 50000
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    loadFilename = os.path.join('checkpoints',
                                '{}_{}.tar'.format(nIters, 'checkpoint'))
    # map_location lets a GPU-trained checkpoint load on a CPU-only machine.
    checkpoint = torch.load(loadFilename, map_location=device)

    # FIX: removed the no-op bare expressions `encoder_sd` / `decoder_sd`
    # and the commented-out code that followed them.
    encoder_sd = checkpoint['en']
    decoder_sd = checkpoint['de']
    hidden_size = 512

    # Vocabularies are restored by overwriting the instance __dict__ with
    # the state saved in the checkpoint.
    input_lang = Lang('fra')
    output_lang = Lang('eng')
    input_lang.__dict__ = checkpoint['input_lang']
    output_lang.__dict__ = checkpoint['output_lang']

    encoder = EncoderRNN(input_lang.n_words, hidden_size).to(device)
    decoder = AttnDecoderRNN(hidden_size, output_lang.n_words,
                             dropout_p=0).to(device)
    encoder.load_state_dict(encoder_sd)
    decoder.load_state_dict(decoder_sd)
    encoder.eval()
    decoder.eval()

    _, _, test_pairs = prepareData('eng',
                                   'fra',
                                   True,
                                   dir='test',
                                   filter=False)
    evaluateRandomly(device, test_pairs, encoder, decoder, input_lang,
                     output_lang)
    decode_batch(device,
                 test_pairs,
                 encoder,
                 decoder,
                 input_lang,
                 output_lang,
                 batch_size=64)
Beispiel #5
0
# define color generator related model
encoder = EncoderRNN(hidden_size=150, n_layers=1, dropout_p=0)
decoder = AttnDecoderRNN(hidden_size=150, n_layers=1, dropout_p=0)
sen2vec = Sentence2Vec()

# FIX: the original deserialized the same checkpoint file twice (once per
# load_state_dict call); load it once and index into it.
# map_location keeps GPU-saved tensors on CPU.
_ckpt = torch.load("./ckpt/ckpt_666.pt", map_location=lambda storage, loc: storage)
# NOTE(review): asymmetric keys ("encoder" vs "decoder_state_dict") mirror
# how the checkpoint was written — confirm against the training script.
encoder.load_state_dict(_ckpt["encoder"])
decoder.load_state_dict(_ckpt["decoder_state_dict"])
encoder.eval()
decoder.eval()


def lab2rgb_1d(in_lab, clip=True):
    """Convert one 1-D Lab triple to an RGB triple (D50 illuminant).

    The input is promoted to a 1x1 "image" so lab2rgb accepts it, then the
    result is flattened back to shape (3,). When `clip` is true the RGB
    values are clamped to the valid [0, 1] range.
    """
    rgb = lab2rgb(in_lab[np.newaxis, np.newaxis, :], illuminant="D50").flatten()
    return np.clip(rgb, 0, 1) if clip else rgb


class InputText(BaseModel):
    """Request body: the text prompt to generate colors from."""

    # NOTE(review): annotated `str` but defaults to None — pydantic treats
    # this as an optional field; should likely be Optional[str]. Confirm.
    input_text: str = None


class OutputColor(BaseModel):
    """Response body: the generated colors."""

    # List of color strings (presumably hex codes — confirm with the
    # endpoint that fills this). Pydantic copies the [] default per instance.
    colors: List[str] = []