Example no. 1
    def choose_action(self, observation, load_checkpoint, Testing):
        self.action_space = [i for i in range(self.n_actions)]
        # Explore with probability epsilon, unless loading a checkpoint or testing
        if np.random.random() < self.epsilon and not load_checkpoint and not Testing:
            Void = np.array(self.env.VoidCheck)
            BC = np.array(
                np.reshape(self.env.BC_state, (1, (self.EX * self.EY))))
            LC = np.array(
                np.reshape(self.env.LC_state, (1, (self.EX * self.EY))))
            Clear_List = np.where(Void == 0)[0]
            BC_List = np.where(BC == 1)[0]
            LC_List = np.where(LC == 1)[0]
            # Remove void elements and boundary/load-condition elements from
            # the pool of random actions before sampling
            invalid = set(Clear_List) | set(BC_List) | set(LC_List)
            self.action_space = [
                ele for ele in self.action_space if ele not in invalid
            ]
            action = np.random.choice(self.action_space)
        else:
            # Exploit: query the evaluation network and take the greedy action
            state = observation.reshape(-1, self.EX, self.EY, 3)
            actions = self.q_eval(state)
            action = argmax(actions, axis=1).numpy()[0]

        return action
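The exploration branch above filters the action space by membership tests. The same masked epsilon-greedy pattern can be written as a small standalone helper; a minimal sketch assuming only NumPy, where the function name, q_values, and invalid set are hypothetical placeholders, not part of the original code:

import numpy as np

def masked_epsilon_greedy(q_values, invalid, epsilon, rng=None):
    """Pick an action epsilon-greedily, never choosing an index in `invalid`."""
    rng = rng or np.random.default_rng()
    valid = [a for a in range(len(q_values)) if a not in invalid]
    if rng.random() < epsilon:
        return int(rng.choice(valid))      # explore among valid actions only
    q = np.array(q_values, dtype=float)
    q[list(invalid)] = -np.inf             # mask invalid actions for the greedy pick
    return int(np.argmax(q))

# Example: six actions, indices 0 and 3 are invalid
print(masked_epsilon_greedy([0.1, 0.9, 0.3, 2.0, 0.5, 0.2], invalid={0, 3}, epsilon=0.1))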
Example no. 2
def matriz_confusion(y_test, y_predecida):
    # argmax undoes the to_categorical one-hot encoding
    matriz_conf = confusion_matrix(labels=argmax(y_test, axis=1),
                                   predictions=argmax(y_predecida,
                                                      axis=1)).numpy()
    # Row-normalize so each true class sums to 1, rounded to two decimals
    matriz_normalizada = np.around(matriz_conf.astype('float') /
                                   matriz_conf.sum(axis=1)[:, np.newaxis],
                                   decimals=2)

    clases = list(range(NUM_CLASES))
    dataframe_matriz = pd.DataFrame(matriz_normalizada,
                                    index=clases,
                                    columns=clases)

    figure = plt.figure(figsize=(8, 8))
    sns.heatmap(dataframe_matriz, annot=True, cmap=plt.cm.Blues)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show()
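Hypothetical usage of the helper above, together with the imports its calls imply; model, x_test, y_test, and the value of NUM_CLASES are assumed from the surrounding script and are not defined in the original snippet:

# Imports this helper relies on (inferred from the calls above):
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from tensorflow.math import confusion_matrix, argmax

NUM_CLASES = 10  # hypothetical class count; replace with the real value

# Hypothetical call with a trained Keras model and one-hot test labels:
# y_pred = model.predict(x_test)
# matriz_confusion(y_test, y_pred)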
Example no. 3
def results():

    global model, tokenizer, maxlen, TEST_STRING

    sentences = nltk.sent_tokenize(TEST_STRING)

    text_seq = tokenizer.texts_to_sequences(sentences)
    text_seq_padded = pad_sequences(text_seq,
                                    maxlen=maxlen,
                                    padding='post',
                                    truncating='post')

    predictions = model.predict(text_seq_padded)

    # Convert per-class probabilities to class indices, then to label strings
    class_num = tfmath.argmax(predictions, axis=1)
    class_num = tfbackend.eval(class_num)
    labels = decode_onehot_labels(class_num)

    specialForm = special_form(labels)
    selects = [
        getattr(specialForm, f"special_{i}")
        for i in range(specialForm.n_attrs)
    ]

    data = list(zip(sentences, roundoff(predictions), labels, selects))
    bin_data = loadTSVfromBin()
    print("\n\nBIN LEN:", len(bin_data), '\n\n')

    if specialForm.validate_on_submit():
        corrected_labels = [sel.data for sel in selects]

        appendTSVtoBin(corrected_labels, sentences)

        flash(
            f"Added {len(corrected_labels)} rows to the bin; it now holds "
            f"{len(bin_data) + len(corrected_labels)} rows in total.", "success")
        return redirect(url_for("proceed"))

    return render_template("results.html",
                           data=data,
                           len_data=len(data),
                           bin_data=bin_data,
                           len_bin_data=len(bin_data),
                           class_colors=class_colors,
                           specialForm=specialForm)
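Examples no. 3 and no. 4 share the same tokenize, pad, predict, argmax flow. A minimal standalone sketch of that pipeline, assuming a fitted Tokenizer and a trained Keras model; all names here are placeholders:

import numpy as np
from tensorflow.keras.preprocessing.sequence import pad_sequences

def classify_sentences(sentences, tokenizer, model, maxlen):
    # Text -> integer sequences -> fixed-length padded batch -> class indices
    seqs = tokenizer.texts_to_sequences(sentences)
    padded = pad_sequences(seqs, maxlen=maxlen, padding='post', truncating='post')
    probs = model.predict(padded)
    return np.argmax(probs, axis=1)  # one class index per sentence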
Example no. 4
def classified():
    text = request.json['mytext']
    global model, tokenizer, maxlen
    sentences = nltk.sent_tokenize(text)
    text_seq = tokenizer.texts_to_sequences(sentences)
    text_seq_padded = pad_sequences(text_seq,
                                    maxlen=maxlen,
                                    padding='post',
                                    truncating='post')
    predictions = model.predict(text_seq_padded)
    # argmax returns the index with the largest value across the given axis
    class_num = tfmath.argmax(predictions, axis=1)
    class_num = tfbackend.eval(class_num)
    labels = decode_onehot_labels(class_num)

    # Map each sentence to its predicted label (avoids shadowing the dict builtin)
    labeled = dict(zip(sentences, labels))
    return json.dumps(labeled)
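A hypothetical client for the view above, assuming it is routed at POST /classified on a local development server; the URL and port are assumptions, not taken from the original code:

import requests

resp = requests.post("http://127.0.0.1:5000/classified",
                     json={"mytext": "First sentence. Second sentence."})
print(resp.json())  # e.g. {"First sentence.": "<label>", "Second sentence.": "<label>"}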
Example no. 5
    def learn(self):

        # Not enough transitions in the replay buffer yet; return a placeholder loss
        if self.memory.mem_cntr < self.batch_size:
            return 0.5

        # Periodically copy the online network's weights into the target network
        if self.learn_step_counter % self.replace == 0 and self.learn_step_counter > 0:
            self.q_next.set_weights(self.q_eval.get_weights())

        states, actions, rewards, states_, dones = \
            self.memory.sample_buffer(self.batch_size)
        q_pred = self.q_eval(states)
        self.q_pred = q_pred
        q_next = self.q_next(states_)
        q_target = q_pred.numpy()
        # Double DQN: the online network selects the next action, the target
        # network evaluates it
        max_actions = argmax(self.q_eval(states_), axis=1)
        # Update only the Q-value of the action actually taken; terminal
        # transitions contribute the reward alone
        for idx in range(len(dones)):
            q_target[idx, actions[idx]] = rewards[idx] + \
                self.gamma * q_next[idx, max_actions[idx]] * (1 - int(dones[idx]))

        # MSE between targets and current predictions, computed before the update
        Loss = np.mean(np.square(q_target - q_pred.numpy()))
        self.q_eval.train_on_batch(states, q_target)

        # Linear epsilon decay, clamped at eps_min
        self.epsilon = max(self.epsilon - self.eps_dec, self.eps_min)

        self.learn_step_counter += 1
        # Step the learning rate down as training progresses (self.lr is
        # presumably re-applied to the optimizer elsewhere)
        if self.learn_step_counter > 5000:
            self.lr = 2.5e-3
        if self.learn_step_counter > 7500:
            self.lr = 1e-3
        return Loss
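The per-sample loop in learn() can be vectorized. A minimal NumPy sketch of the Double DQN target it implements, target = r + gamma * Q_target(s', argmax_a Q_online(s', a)) for non-terminal transitions; all names are placeholders:

import numpy as np

def double_dqn_targets(q_pred, q_next_online, q_next_target,
                       actions, rewards, dones, gamma):
    # Copy predictions, then overwrite the entry for the action actually taken
    targets = q_pred.copy()
    max_actions = np.argmax(q_next_online, axis=1)   # online net picks the action
    batch = np.arange(len(actions))
    targets[batch, actions] = rewards + gamma * \
        q_next_target[batch, max_actions] * (1.0 - dones.astype(float))
    return targets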