# Inference pipeline: CNN feature extractor -> LSTM encoder -> attention decoder.
# NOTE(review): depends on names defined elsewhere in this file/project:
# CNNModel, EncoderLSTM, ATTNDecoder, test, batch_size, enc_hidden_size,
# enc_layers, vocab_size, dec_hidden_size, input_tensor, target_tens, torch.

# Checkpoint directory hoisted so the path is edited in exactly one place.
_MODEL_DIR = '/Users/thomasstruble/Documents/GitHub/chem-ie/omrPY/im_smiles/src/model'

conv = CNNModel().cuda()
# EncoderLSTM(self, batch_size, inputs_size, img_w, hidden_size, num_layers)
# NOTE(review): this call passes an extra positional (46) compared to the
# __main__ block below — confirm which matches EncoderLSTM's signature.
enc = EncoderLSTM(batch_size, 512, 46, enc_hidden_size, enc_layers,
                  gpu=True).cuda().eval()
# ATTNDecoder(self, batch_size, inputs_size, vocab_size, hidden_size,
#             max_decoder_l, dropout_p=0.01)
dec = ATTNDecoder(batch_size, enc_hidden_size, 1, vocab_size, dec_hidden_size,
                  gpu=True).cuda().eval()

conv.load_state_dict(torch.load(_MODEL_DIR + '/model_conv'))
enc.load_state_dict(torch.load(_MODEL_DIR + '/model_enc'))
dec.load_state_dict(torch.load(_MODEL_DIR + '/model_dec'))

# BUG FIX: conv was left in training mode while enc/dec were put in eval();
# any dropout/batch-norm layers in the CNN would behave stochastically
# during inference. Switch it to eval mode before running the test pass.
conv.eval()

pred = test(input_tensor, target_tens, conv, enc, dec, gpu=True)
print(pred)
# return top_p

if __name__ == '__main__':
    # Entry point: build the CNN -> LSTM encoder -> attention decoder stack,
    # restore the trained weights from the cluster scratch directory, and
    # run the full evaluation loop.
    # NOTE(review): depends on names defined elsewhere in this file/project:
    # CNNModel, EncoderLSTM, ATTNDecoder, test_iters, BATCH_SIZE,
    # enc_hidden_size, enc_layers, vocab_size, dec_hidden_size, torch.

    # Checkpoint directory hoisted so the path is edited in exactly one place.
    _CKPT_DIR = '/scratch2/sophiat/chem-ie-TJS_omrPY/omrPY/im_smiles/src/model'

    conv = CNNModel().cuda()
    # EncoderLSTM(self, batch_size, inputs_size, img_w, hidden_size, num_layers)
    # NOTE(review): one fewer positional arg than the call outside this guard —
    # confirm which arity matches EncoderLSTM's actual signature.
    enc = EncoderLSTM(BATCH_SIZE, 512, enc_hidden_size, enc_layers,
                      gpu=True).cuda().eval()
    # ATTNDecoder(self, batch_size, inputs_size, vocab_size, hidden_size,
    #             max_decoder_l, dropout_p=0.01)
    dec = ATTNDecoder(BATCH_SIZE, enc_hidden_size, vocab_size, dec_hidden_size,
                      gpu=True).cuda().eval()

    conv.load_state_dict(torch.load(_CKPT_DIR + '/model_conv'))
    enc.load_state_dict(torch.load(_CKPT_DIR + '/model_enc'))
    dec.load_state_dict(torch.load(_CKPT_DIR + '/model_dec'))

    # BUG FIX: conv was left in training mode while enc/dec were put in
    # eval(); dropout/batch-norm in the CNN would behave stochastically
    # during inference. Switch it to eval mode before evaluation.
    conv.eval()

    pred = test_iters()
    print(pred)