def generate_text(session, model, encode, decode, description, d_len,
                  stop_length=25, stop_tokens=[STOP, NONE], temperature=1.0):
    # Encode the (left-padded) description and run it through the encoder
    # to obtain the initial decoder state and the encoder outputs.
    init_state = session.run(model["init_state"])
    encoder_inputs = data_reader.pad(encode(description), d_len, 'left')
    state, encoder_outputs = get_encoder_outputs(session, model, init_state,
                                                 encoder_inputs)
    # First token id used to prime the decoder.
    start = encode("")[0]

    # state, predictions = predict(session, model,
    #                              encoding, encoder_outputs,
    #                              [[start]])
    # output_ids = [data_reader.sample(predictions[0], temperature=temperature)]
    # inputs = encode(decode(output_ids))

    inputs = [start]
    output_ids = []
    # Decode one token at a time, feeding each sampled token back in,
    # until a stop token is produced or stop_length is reached.
    for i in range(stop_length):
        x = inputs[-1]
        state, predictions = predict(session, model, state, encoder_outputs,
                                     [[x]])
        next_id = data_reader.sample(predictions[0], temperature=temperature)
        output_ids.append(next_id)
        output = decode([next_id])
        inputs.append(next_id)
        if stop_tokens and output in stop_tokens:
            break

    # Prepend the description in brackets, then the decoded sample.
    output_text = "[" + description + "]\n"
    output_text += decode(output_ids)
    return output_text
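# A minimal usage sketch. It assumes a running TensorFlow session `sess`, a
# `model` dict built elsewhere in this file, and `encode`/`decode` functions
# from the training vocabulary; the description string, d_len value, and
# temperature below are hypothetical examples, not values from the source.
#
# sample = generate_text(sess, model, encode, decode,
#                        "a short input description", d_len=50,
#                        temperature=0.8)
# print(sample)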
import numpy as np
import data_reader as dr

c9 = dr.sample(
    r"C:\Users\Scott Reid\Documents\Masters\Research\Data\TbTe3_AB2")
c9.book_files()
import numpy as np
import data_reader as dr

c9 = dr.sample(r"C:\Users\Scott Reid\Documents\Masters\Research\Data\ErTe3 C9")
c9.book_files()
import numpy as np
import data_reader as dr

c9 = dr.sample(r"C:\Users\Scott Reid\Documents\Masters\Research\Data\In_Situ_3-09-18")
c9.book_files()