Example 1
def run():
    text_in = generate_text_input()
    NUM_EPOCS = 500
    #train(full_layer_learner,text_in,text_in,NUM_EPOCS)
    state_pred = full_layer_learner.get_stateful_predict()
    # Predict on the first 1000 characters and print a readable sample.
    [outs] = state_pred(text_in[:1000])
    #np.set_printoptions(threshold=np.inf)
    print(outs)
    print(string_processing.out_list_to_str(outs))
    # Run the full input through the model and save the decoded text.
    [full_outs] = state_pred(text_in)
    outstr = string_processing.out_list_to_str(full_outs)
    save_text("sampled_outputs/deep_learner.txt", outstr)
Example 2
def run():
    text_in = generate_text_input()
    NUM_EPOCS = 500
    train(full_layer_learner, text_in, text_in, NUM_EPOCS)
    [outs] = full_layer_learner.get_stateful_predict()(text_in[:1000])
    #np.set_printoptions(threshold=np.inf)
    print(outs)
    print(string_processing.out_list_to_str(outs))
Example 3
def test():
    text_in = generate_text_input()
    expected = text_in[:200]
    [actual] = full_layer_learner.get_stateful_predict()(expected)
    act_text = string_processing.out_list_to_str(actual)
    my_error_fn = error_fn(calc_error_squared)
    errors = []
    skip = 1
    # actual[i] is the model's prediction of the *next* character, so
    # compare it against expected shifted forward by one position.
    for i in range(0, 198, skip):
        [err] = my_error_fn(expected[i + 1:i + skip + 1], actual[i:i + skip])
        errors.append(err)
        print(err, act_text[i:i + skip])
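error_fn and calc_error_squared are likewise external. A plausible sketch, assuming calc_error_squared is a summed squared difference per character and error_fn wraps a metric so that it returns the total in a one-element list, matching the [err] = my_error_fn(...) unpacking above:

import numpy as np

def calc_error_squared(expected_vec, actual_vec):
    # Hypothetical per-character metric: summed squared difference
    # between the expected and predicted output vectors.
    return float(np.sum((np.asarray(expected_vec) - np.asarray(actual_vec)) ** 2))

def error_fn(metric):
    # Hypothetical wrapper: apply the metric pairwise over two aligned
    # slices and return the total in a one-element list.
    def apply(expected_seq, actual_seq):
        return [sum(metric(e, a) for e, a in zip(expected_seq, actual_seq))]
    return apply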
Example 4
def run_cumulative_lstm():
    in_base_stack = np.load(lstm_framework.base_input_filename)
    #in_first_stack = np.load(lstm_framework.first_stage_cell_filename)
    in_second_stack = np.load(lstm_framework.second_stage_output_filename)

    # Stage-3 input: the base character stack concatenated with the
    # stage-2 output stack along the feature axis.
    in_stack3 = np.concatenate((in_base_stack, in_second_stack), axis=1)

    lstm_stage3 = lstm_framework.gen_lstm_with("huck_fin_stage3outsub_fixed",
                                               in_stack3, in_base_stack, 200)

    #lstm_stage3.train(in_stack3,in_base_stack,100)
    [out_vals] = lstm_stage3.state_predict(in_stack3[:3000])
    outstr = string_processing.out_list_to_str(out_vals)
    save_text("sampled_outputs/stacked_output_full.txt", outstr)
    # Decode the character indexes yielded by the model's stateful generator.
    all_idxs = lstm_stage3.stateful_gen
    outstr = "".join(string_processing.GOOD_CHARS[idx] for idx in all_idxs)
    print(outstr)
    save_text("sampled_outputs/basic_generated_text.txt", outstr)

    print(compare_text(outstr))
Example 5
def stateful_predict():
    [outs] = my_lstm.state_predict(in_stack)
    return string_processing.out_list_to_str(outs)
Example 6
def stateful_predict(input_list):
    # Run the hand-rolled NumPy LSTM over a list of input vectors,
    # collecting every intermediate cell state and output.
    cell_state = np.zeros(CELL_STATE_LEN)
    output_vec = np.zeros(OUT_LEN)
    np_cell_forget = NP_WeightBias(cell_forget_fn)
    np_add_barrier = NP_WeightBias(add_barrier_fn)
    np_add_cell = NP_WeightBias(add_cell_fn)
    np_new_output = NP_WeightBias(to_new_output_fn)
    np_full_output = NP_WeightBias(full_output_fn)
    all_cell_states = []
    all_outputs = []
    for i, inp_vec in enumerate(input_list):
        # Advance the LSTM one step, carrying cell state and output forward.
        cell_state, output_vec = np_calc_output(inp_vec, cell_state,
                                                output_vec, np_cell_forget,
                                                np_add_barrier, np_add_cell,
                                                np_new_output)
        all_cell_states.append(cell_state)
        all_outputs.append(np.tanh(np_full_output.calc_output(output_vec)))
        # Progress indicator every 100 characters.
        if i % 100 == 0:
            print(i, flush=True)
    return all_cell_states, all_outputs


train_str = string_processing.get_str("data/huck_fin.txt")[:1000]
in_vec_list = string_processing.in_vec(train_str)
out_cells, out_outputs = stateful_predict(in_vec_list)

outtxt = string_processing.out_list_to_str(out_outputs)

print(outtxt)
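np_calc_output itself is not shown. A minimal sketch of the step it presumably performs, assuming each NP_WeightBias.calc_output applies an affine map and the four gate objects play the standard LSTM roles (forget gate, input gate, candidate cell content, output gate):

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def np_calc_output(inp_vec, cell_state, output_vec,
                   np_cell_forget, np_add_barrier, np_add_cell, np_new_output):
    # Hypothetical LSTM step: every gate sees the previous output
    # concatenated with the current input vector.
    combined = np.concatenate((output_vec, inp_vec))
    forget = sigmoid(np_cell_forget.calc_output(combined))    # keep fraction of old cell
    barrier = sigmoid(np_add_barrier.calc_output(combined))   # input gate
    candidate = np.tanh(np_add_cell.calc_output(combined))    # proposed cell content
    new_cell = forget * cell_state + barrier * candidate
    out_gate = sigmoid(np_new_output.calc_output(combined))   # output gate
    new_output = out_gate * np.tanh(new_cell)
    return new_cell, new_output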
Example 7
def save_full_prediction():
    inp1, _ = get_inputs_unbatched(text_in)
    pred_fn = full_layer_learner1.get_stateful_predict()
    [outputs] = pred_fn(inp1)
    str1 = string_processing.out_list_to_str(outputs)
    save_text("sampled_outputs/model2_full.txt", str1)
Example 8
def predict_model():
    inp1, _ = get_inputs_unbatched(text_in[:1000])
    pred_fn = full_layer_learner1.get_stateful_predict()
    [outputs] = pred_fn(inp1)
    print(string_processing.out_list_to_str(outputs))