Example #1
from network import Network   # assumed local modules; the snippet
from datasets import Batch    # does not show its own imports

def cost_accuracy_string(net: Network, training: Batch, validation: Batch):
    Y = net.evaluate(training.images)
    accuracy = net.accuracy(training.one_hot_labels, outputs=Y)
    cost = net.cost(training.one_hot_labels, outputs=Y)
    res = 'Training:\n' + \
          '- accuracy: {:.3%}\n'.format(accuracy) + \
          '- cost:     {:.3f}\n'.format(cost)

    Y = net.evaluate(validation.images)
    accuracy = net.accuracy(validation.one_hot_labels, outputs=Y)
    cost = net.cost(validation.one_hot_labels, outputs=Y)
    res += 'Validation:\n' + \
           '- accuracy: {:.3%}\n'.format(accuracy) + \
           '- cost:     {:.3f}\n'.format(cost)
    return res
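
A minimal usage sketch for the helper above, assuming the CIFAR10/Network setup shown in Example #6 (the batch names are illustrative):

# Hypothetical driver; cifar and net are constructed as in Example #6.
training = cifar.get_named_batches('data_batch_1')
validation = cifar.get_named_batches('data_batch_2')
print(cost_accuracy_string(net, training, validation))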
Example #2
    def load_population(self, data):
        pop = []
        for n in data:
            new_network = Network(n['nn_param_choices'])
            new_network.network['nb_neurons'] = list(n['network']['nb_neurons'])
            new_network.network['nb_layers'] = n['network']['nb_layers']
            new_network.network['activation'] = list(n['network']['activation'])
            new_network.network['optimizer'] = n['network']['optimizer']
            new_network.accuracy = n['accuracy']

            pop.append(new_network)

        for c in pop:
            if not self.check_network(c.network):
                raise ValueError('Network out of balance')

        return pop
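
For reference, the structure of data that load_population expects, reconstructed from the field accesses above (all keys appear in the code; the concrete values are illustrative):

data = [
    {
        'nn_param_choices': {},            # passed straight to Network()
        'accuracy': 0.87,                  # illustrative value
        'network': {
            'nb_neurons': [64, 128],
            'nb_layers': 2,
            'activation': ['relu', 'relu'],
            'optimizer': 'adam',
        },
    },
]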
Example #3
import sys
import time

import numpy as np
import tensorflow as tf

from dataset import DataSet        # assumed local modules; the snippet
from network import Network        # does not show its own imports
from metrics import evaluate, patk


class Model(object):
    def __init__(self, config):
        self.epoch_count = 0
        self.config = config
        self.data = DataSet(config)
        self.add_placeholders()
        self.summarizer = tf.summary
        self.net = Network(config, self.summarizer)
        self.optimizer = self.config.solver.optimizer
        self.y_pred = self.net.prediction(self.x, self.keep_prob)
        # Build the sigmoid op once; creating it inside sess.run() would add
        # a new node to the graph on every call.
        self.y_sigmoid = tf.nn.sigmoid(self.y_pred)
        self.loss = self.net.loss(self.y_pred, self.y)
        self.accuracy = self.net.accuracy(self.y_sigmoid, self.y)
        self.summarizer.scalar("accuracy", self.accuracy)
        self.summarizer.scalar("loss", self.loss)
        self.train = self.net.train_step(self.loss)
        self.saver = tf.train.Saver()
        self.init = tf.global_variables_initializer()
        self.local_init = tf.local_variables_initializer()

    def add_placeholders(self):
        self.x = tf.placeholder(tf.float32, shape=[None, self.config.features_dim])
        self.y = tf.placeholder(tf.float32, shape=[None, self.config.labels_dim])
        self.keep_prob = tf.placeholder(tf.float32)

    def run_epoch(self, sess, data, summarizer, epoch):
        err = list()
        i, step = 0, 0
        merged_summary = self.summarizer.merge_all()
        for X, Y, tot in self.data.next_batch(data):
            feed_dict = {self.x: X, self.y: Y, self.keep_prob: self.config.solver.dropout}
            if not self.config.load:
                summ, _, loss, Y_pred = sess.run([merged_summary, self.train, self.loss, self.y_sigmoid], feed_dict=feed_dict)
                err.append(loss)
                output = "Epoch ({}) Batch({}) : Loss = {}".format(self.epoch_count, i, loss)
                with open("../stdout/{}_train.log".format(self.config.project_name), "a+") as log:
                    log.write(output + "\n")
                print("   {}".format(output), end='\r')
                # summ is only defined in this branch, so the summary is logged here
                step = int(epoch * tot + i)
                summarizer.add_summary(summ, step)
            i += 1
        err = np.asarray(err)
        return np.mean(err), step

    def run_eval(self, sess, data, summary_writer=None, step=0):
        loss = 0.0
        merged_summary = self.summarizer.merge_all()
        i = 0
        for X, Y, tot in self.data.next_batch(data):
            feed_dict = {self.x: X, self.y: Y, self.keep_prob: 1.0}
            if i == tot - 1 and summary_writer is not None:
                # write a summary only for the last batch
                summ, loss_ = sess.run([merged_summary, self.loss], feed_dict=feed_dict)
                summary_writer.add_summary(summ, step)
            else:
                loss_ = sess.run(self.loss, feed_dict=feed_dict)
            loss += loss_
            i += 1
        return loss / i
    
    def get_metrics(self, sess, data):
        accuracy, metrics, p_k, i = 0.0, None, None, 0
        for X, Y, tot in self.data.next_batch(data):
            feed_dict = {self.x: X, self.y: Y, self.keep_prob: 1.0}
            Y_pred, accuracy_val = sess.run([self.y_sigmoid, self.accuracy], feed_dict=feed_dict)
            metrics = evaluate(predictions=Y_pred, labels=Y)
            p_k = patk(predictions=Y_pred, labels=Y)
            accuracy += accuracy_val
            i += 1
        return metrics, accuracy / i, p_k

    def add_summaries(self, sess):
        if self.config.load or self.config.debug:
            path_ = "../results/tensorboard"
        else:
            path_ = "../bin/results/tensorboard"
        summary_writer_train = tf.summary.FileWriter(path_ + "/train", sess.graph)
        summary_writer_val = tf.summary.FileWriter(path_ + "/val", sess.graph)
        summary_writer_test = tf.summary.FileWriter(path_+ "/test", sess.graph)
        summary_writers = {'train': summary_writer_train, 'val': summary_writer_val, 'test': summary_writer_test}
        return summary_writers

    def fit(self, sess, summarizer):
        '''
        Patience method:
         + Train for a set number of epochs and, at the configured frequency, evaluate on validation data.
         + If the validation loss increases, decrease the patience counter.
         + When patience runs out, divide the learning rate by 10 and restore the best saved model.
         + If the learning rate is already at its minimum, stop training.
        '''
        sess.run(self.init)
        sess.run(self.local_init)
        max_epochs = self.config.max_epochs
        patience = self.config.patience
        patience_increase = self.config.patience_increase
        improvement_threshold = self.config.improvement_threshold
        best_validation_loss = 1e6
        self.epoch_count = 0
        best_step, losses, learning_rate = -1, list(), self.config.solver.learning_rate
        tr_step = 0  # keeps the final evaluation below valid even when loading a saved model
        while self.epoch_count < max_epochs:
            if self.config.load:
                break
            start_time = time.time()
            average_loss, tr_step = self.run_epoch(sess, "train", summarizer['train'], self.epoch_count)
            duration = time.time() - start_time
            if not self.config.debug:
                if self.epoch_count % self.config.epoch_freq == 0:
                    val_loss = self.run_eval(sess, "validation", summarizer['val'], tr_step)
                    test_loss = self.run_eval(sess, "test", summarizer['test'], tr_step)
                    metrics, _, _ = self.get_metrics(sess, "test")
                    output =  "=> Training : Loss = {:.3f} | Validation : Loss = {:.3f} | Test : Loss = {:.3f}".format(average_loss, val_loss, test_loss)
                    with open("../stdout/validation.log", "a+") as f:
                        output_ = output + "\n=> Test : Coverage = {}, Average Precision = {}, Micro Precision = {}, Micro Recall = {}, Micro F Score = {}".format(metrics['coverage'], metrics['average_precision'], metrics['micro_precision'], metrics['micro_recall'], metrics['micro_f1'])
                        output_ += "\n=> Test : Macro Precision = {}, Macro Recall = {}, Macro F Score = {}\n\n\n".format(metrics['macro_precision'], metrics['macro_recall'], metrics['macro_f1'])
                        f.write(output_)
                    print(output)
                    if self.config.have_patience:
                        if val_loss < best_validation_loss:
                            if val_loss < best_validation_loss * improvement_threshold:
                                self.saver.save(sess, self.config.ckptdir_path + "/model_best.ckpt")
                                best_validation_loss = val_loss
                                best_step = self.epoch_count
                        else:
                            if patience < 1:
                                self.saver.restore(sess, self.config.ckptdir_path + "/model_best.ckpt")
                                if learning_rate <= 0.00001:
                                    print("=> Breaking by Patience Method")
                                    break
                                else:
                                    learning_rate /= 10
                                    patience = self.config.patience
                                    print("\033[91m=> Learning rate dropped to {}\033[0m".format(learning_rate))
                            else:
                                patience -= 1
            self.epoch_count += 1
        print("=> Best epoch : {}".format(best_step))
        if self.config.debug:
            sys.exit()
        test_loss = self.run_eval(sess, "test", summarizer['test'], tr_step)
        test_metrics, test_accuracy, p_k = self.get_metrics(sess, "test")
        returnDict = {"test_loss": test_loss, "test_accuracy": test_accuracy, "test_metrics": test_metrics, "test_pak": p_k}
        if not self.config.debug:
            returnDict["train"] = best_validation_loss
        self.saver.save(sess, self.config.ckptdir_path)
        return returnDict
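
A minimal driver sketch for this Model, assuming a fully populated config object (TF1 session API, matching the class above):

# Hypothetical entry point; config must provide every field the class reads.
model = Model(config)
with tf.Session() as sess:
    # add_summaries() returns the {'train', 'val', 'test'} writer dict fit() expects
    summary_writers = model.add_summaries(sess)
    results = model.fit(sess, summary_writers)
    print(results["test_loss"], results["test_accuracy"])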
Example #4
from random import randint

import pandas as pd
from sklearn import preprocessing
from sklearn.model_selection import train_test_split

from network import Network  # assumed local module; the snippet omits its imports


class Gen():
    def __init__(self, value: list, max_count: int, max_layers: int,
                 min_layers: int, k: int) -> None:
        self.max_count = max_count
        self.max_layers = max_layers
        self.min_layers = min_layers
        self.k = k
        if value:
            self.value = value
        else:
            # If no network structure was passed in, generate one
            self.value = [10]
            self.value += [
                randint(1, self.max_count)
                for _ in range(randint(min_layers - 2, max_layers - 2))
            ]
            self.value += [1]

    # Create and train the neural network
    def create(self, epochs: int) -> None:
        dataset = pd.read_csv('housepricedata.csv').values
        X = dataset[:, 0:10]
        Y = dataset[:, 10]
        min_max_scaler = preprocessing.MinMaxScaler()
        X_scale = min_max_scaler.fit_transform(X)

        X_train, X_val_and_test, Y_train, Y_val_and_test = train_test_split(
            X_scale, Y, test_size=0.3)
        X_val, self.X_test, Y_val, self.Y_test = train_test_split(
            X_val_and_test, Y_val_and_test, test_size=0.5)
        self.network = Network(self.value)
        self.network.compile()
        self.network.fit(X_train, Y_train, self.value[0], epochs,
                         (X_val, Y_val))

    # Compute the fitness value used for genome selection
    def check(self) -> float:
        return self.network.accuracy() * self.k / sum(self.value)

    # Mutate the gene
    def mutation(self) -> None:
        operator = randint(0, 2)
        if operator == 0 and len(self.value) > 2:  # Change a value
            self.value[randint(1,
                               len(self.value) - 2)] = randint(
                                   1, self.max_count)
        elif operator == 1 and len(
                self.value) < self.max_layers:  # Add a layer
            self.value.insert(randint(1,
                                      len(self.value) - 1),
                              randint(1, self.max_count))
        elif operator == 2 and len(
                self.value) > self.min_layers:  # Remove a layer
            del self.value[randint(1, len(self.value) - 2)]

    # Get the network's accuracy
    def accuracy(self) -> float:
        return self.network.accuracy()

    # Get the network's error on the test set
    def mse(self) -> float:
        return self.network.mse(self.X_test, self.Y_test)
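
A minimal evolution-loop sketch around Gen; the population size, epoch counts, and selection scheme are illustrative assumptions, only the Gen API is taken from the class above:

# Hypothetical GA driver: rank genomes by check() and mutate the survivors.
population = [Gen([], max_count=64, max_layers=6, min_layers=3, k=100)
              for _ in range(10)]
for generation in range(5):
    for genome in population:
        genome.create(epochs=10)                # build and train the network
    population.sort(key=lambda g: g.check(), reverse=True)
    survivors = population[:5]                  # keep the fitter half
    for genome in survivors[1:]:
        genome.mutation()                       # mutate all but the elite
    # refill with fresh random genomes instead of crossover
    population = survivors + [Gen([], 64, 6, 3, 100) for _ in range(5)]
print('best accuracy:', population[0].accuracy())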
Example #5
import random

import numpy as np
import pygame
from pygame.locals import *

# Assumed local imports; the snippet omits them. Constants such as AI_TYPE,
# TRAINING, TRAINING_COMPLETE, FIELDWIDTH, FIELDHEIGHT, WINDOWWIDTH, FPS,
# LEFT_CLICK and RIGHT_CLICK are assumed to be defined in these modules.
from minesweeper import Minesweeper
from network import Network


def main():
    tries = 0
    plays = 0
    db_good = []
    db_bad = []
    
    minesweeper = Minesweeper()
    
    # stores XY of mouse events
    mouse_x = 0
    mouse_y = 0
    
    while True:
        has_game_ended = False
        game_over = False

        minesweeper.new_game()
        
        tries += 1
        print('Game', tries)

        # For random training
        chosen_squares = []
        chosen_squares_length = 1

        # Main game loop
        while not has_game_ended:
            # Initialize variables
            mouse_clicked = False
            safe_squares = []
            flagged_squares = []
            new_position = False # For random training

            # Draw screen
            minesweeper.draw_field()

            # Get Fred's AI input
            if AI_TYPE == 'FRED':
                info = minesweeper.available_info()
                safe_squares, flagged_squares = minesweeper.get_AI_input(info)

            # Get random position
            if AI_TYPE == 'RANDOM':
                while not new_position:
                    choice = [[random.randrange(FIELDWIDTH), random.randrange(FIELDHEIGHT)]]
                    if not minesweeper.revealed_boxes[choice[0][0]][choice[0][1]]:
                        new_position = True
                safe_squares = choice

            # Get player input
            for event in pygame.event.get():
                if event.type == QUIT or (event.type == KEYDOWN and (event.key == K_ESCAPE or event.key == K_q)):
                    minesweeper.terminate()
                elif event.type == MOUSEMOTION:
                    mouse_x, mouse_y = event.pos
                elif event.type == MOUSEBUTTONDOWN:
                    if event.button == LEFT_CLICK:
                        mouse_x, mouse_y = event.pos
                        mouse_clicked = True
                        box_x, box_y = minesweeper.get_box_at_pixel(mouse_x, mouse_y)
                        if box_x is not None and box_y is not None:
                            safe_squares = [(box_x, box_y)]
                    if event.button == RIGHT_CLICK:
                        mouse_x, mouse_y = event.pos
                        box_x, box_y = minesweeper.get_box_at_pixel(mouse_x, mouse_y)
                        if box_x is not None and box_y is not None:
                            flagged_squares = [(box_x, box_y)]

            # Keeps track of chosen squares
            if len(safe_squares) > 0:
                chosen_squares.append(safe_squares)

            # Checks if game is over for AI
            if AI_TYPE != 'HUMAN':
                if game_over:
                    has_game_ended = True

            # Saves turn
            if TRAINING and chosen_squares_length == len(chosen_squares):
                turn = minesweeper.save_turn()

            # Apply game changes
            if not game_over:
                for x, y in flagged_squares:
                    minesweeper.toggle_flag_box(x, y)

                for x, y in safe_squares:
                    minesweeper._RESET_SURF, minesweeper._RESET_RECT = minesweeper.draw_smiley(WINDOWWIDTH/2, 50, 'check.png')
                    game_over = minesweeper.reveal_box(x, y)

            # Add play to DB
            if TRAINING and chosen_squares_length == len(chosen_squares) and len(safe_squares) > 0:
                chosen_squares_length += 1
                turn_chosen_square = minesweeper.save_chosen_square(safe_squares)
                if minesweeper.mine_field[safe_squares[0][0]][safe_squares[0][1]] != 'X':
                    db_good.append((np.asarray(turn), np.asarray(turn_chosen_square)))
                    #print('GOOD', db_good[-1])
                else:
                    db_bad.append((np.asarray(turn), np.asarray(turn_chosen_square)))
                    #print('BAD', db_bad[-1])
                
                plays += 1            
                if plays > TRAINING_COMPLETE:
                    db_good = minesweeper.data_treat(db_good)
                    db_bad = minesweeper.data_treat(db_bad)
                    print('GOOD', db_good[0])
                    print('BAD', db_bad[0])
                    div = int(len(db_good) * .8)
                    training_data = db_good[:div]
                    test_data = db_good[div:]
                    net = Network([FIELDHEIGHT*FIELDWIDTH, 32, 16, FIELDHEIGHT*FIELDWIDTH])
                    print('Network training started...\n')
                    net.SGD(training_data, 30, 10, 0.1, lmbda=5.0)
                    net.save('ANN_test')

                    print('score = {:.2f} %'.format(net.accuracy(test_data)/100))
                    minesweeper.terminate()

            # Check if reset box is clicked
            if minesweeper._RESET_RECT.collidepoint(mouse_x, mouse_y):
                minesweeper.highlight_button(minesweeper._RESET_RECT)
                if mouse_clicked:
                    minesweeper.new_game()
                    has_game_ended = True

            # Highlight unrevealed box
            box_x, box_y = minesweeper.get_box_at_pixel(mouse_x, mouse_y)
            if box_x is not None and box_y is not None and not minesweeper.revealed_boxes[box_x][box_y]:
                minesweeper.highlight_box(box_x, box_y)

            # Update screen, wait clock tick
            pygame.display.update()
            minesweeper.clock.tick(FPS)
Example #6
import matplotlib.pyplot as plt
import numpy as np

from datasets import CIFAR10  # assumed local module; the snippet omits this import
from initializers import Xavier
from layers import *
from network import Network

cifar = CIFAR10()
training = cifar.get_named_batches('data_batch_1').subset(10)

net = Network()
net.add_layer(Linear(CIFAR10.input_size, 50, 0, Xavier()))
net.add_layer(ReLU(50))
net.add_layer(Linear(50, CIFAR10.output_size, 0, Xavier()))
net.add_layer(Softmax(CIFAR10.output_size))

Y = net.evaluate(training.images)
print('Cost:', net.cost(training.one_hot_labels, None, Y))
print('Accuracy: {:.2%}'.format(net.accuracy(training.one_hot_labels, None,
                                             Y)))

plt.subplot(1, 3, 1)
plt.imshow(Y)
plt.yticks(range(10), cifar.labels)
plt.xlabel('Image number')
plt.title('Probabilities')

plt.subplot(1, 3, 2)
plt.imshow(cifar.label_encoder.transform(np.argmax(Y, axis=0)).T)
plt.yticks([])
plt.xlabel('Image number')
plt.title('Predicted classes')

plt.subplot(1, 3, 3)
plt.imshow(training.one_hot_labels)
print("")
print("")
print("=========================================")
print("====   Start training")
print("=========================================")
for i in range(iters_num):
    # Calculate gradients.
    grad = network.gradient(x_train[i], t_train[i])
    # Update parameters.
    for key in network.params:
        network.params[key] -= learning_rate * grad[key]
    if i % iter_per_epoch == 0:
        print("iteration: " + str(i), end='\t')
        i_list.append(i)
        train_acc = network.accuracy(x_train, t_train)
        test_acc = network.accuracy(x_test, t_test)
        test_acc_list.append(test_acc)
        print("Accuracy of training data: {0:.5f}".format(train_acc), end='\t')
        print("test data: {0:.5f}".format(test_acc))

print("")
print("")
print("=========================================")
print("====   Report result")
print("=========================================")


def show_image(img):
    # Render the image as ASCII art, one character per pixel
    # (the original snippet is truncated here; this completion is assumed)
    for i in img:
        for j in i:
            print('#' if j > 0.5 else '.', end='')
        print()
Example #8
import pickle
from mnist_loader import load_data_wrapper
from network import Network

if __name__ == '__main__':
    # Load weights and biases
    with open("temp", "rb") as f:
        W, B = pickle.load(f)

    net = Network([784, 50, 10])
    # Use the weights and biases from the previously trained network
    net.W = W
    net.B = B

    test_data = load_data_wrapper()[2]

    accuracy = net.accuracy(test_data)
    print("Nework accuracy is: %f%%" % (accuracy * 100))
Example #9
    Dense(size=10, input_len=2048), Softmax())

optimizer = Adam(network.trainables,
                 learning_rate=lambda n: 0.0001,
                 beta_1=0.9,
                 beta_2=0.999)

avg = IncrementalAverage()
for epoch in range(STARTING_EPOCH, STARTING_EPOCH + EPOCHS):
    batch = 1
    for x, y in make_batch(training_data, training_labels, BATCH_SIZE):
        out = network(x)
        avg.add(np.sum(VectorCrossEntropy.error(out, y)))
        network.backward(VectorCrossEntropy.gradient(out, y), update=True)
        if batch % LOG_FREQ == 0:
            print(
                f"epoch {epoch}/{EPOCHS} | batch {batch} - loss: {avg.get()}")
        batch += 1
    # Testing
    testacc = IncrementalAverage()
    # Split the test data into 10 batches in order to fit in RAM.
    for testbatch in range(0, 10000, 1000):
        testacc.add(
            network.accuracy(test_data[testbatch:testbatch + 1000],
                             test_labels[testbatch:testbatch + 1000]))
    accuracy = testacc.get()
    print(f"Test accuracy : {accuracy}")
    # network.save_weights("path-to-savefile")
    print(f"Epoch {epoch} done =================")
    avg.reset()
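
IncrementalAverage is used but not shown; a minimal stand-in consistent with the three calls above (the original class may differ):

class IncrementalAverage:
    # Running mean over the values passed to add().
    def __init__(self):
        self.reset()

    def add(self, value):
        self.total += value
        self.count += 1

    def get(self):
        return self.total / self.count if self.count else 0.0

    def reset(self):
        self.total = 0.0
        self.count = 0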
Example #10
net = Network(model_path, training_dropout=0.5)

# debugging to keep track of accuracy even through checkpoints
graph = []

i = 0
while True:
    print('step {:,}'.format(i), end='\r')
    # load up to 10 data points with random distribution of
    #   true/false data in random proportions (averaging at 50/50 True/False)
    batch = rearrange_batch(training_data.load(10))

    # debugging accuracy printout
    if i % print_time == 0:
        # evaluate on a separate, larger batch so the small training
        # batch loaded above is not overwritten before net.train() below
        eval_batch = rearrange_batch(training_data.load(1000000))
        train_accuracy = net.accuracy(eval_batch[0], eval_batch[1], eval_batch[2])
        print('step {:,}, training accuracy {:,.6f}'.format(i, train_accuracy))
        graph.append(train_accuracy)
    # save graph
    if i and i % save_time == 0:
        print('step {:,}        saving (average = {:,.5f})'.format(i, sum(graph)/len(graph)))
        net.save()
        with open(model_path + '.csv', 'a') as f:
            # Save the accuracies so that we can check them later on
            f.write('\n'.join(str(s) for s in graph) + '\n')
            graph = []

    # training
    net.train(batch[0], batch[1], batch[2])
    i += 1
Example #11
import os

import numpy as np
import tensorflow as tf
from sklearn.model_selection import KFold

from dataset import DataSet   # assumed local modules; the snippet
from network import Network   # does not show its own imports
from metrics import evaluate


class Model(object):
    def __init__(self, config):
        self.epoch_count = 0
        self.config = config
        self.data = DataSet(config)
        self.add_placeholders()
        self.summarizer = tf.summary
        self.net = Network(config, self.summarizer)
        self.optimizer = self.config.solver.optimizer
        self.y_pred = self.net.prediction(self.x, self.keep_prob)
        self.loss = self.net.loss_function(self.x, self.y, self.keep_prob)
        self.accuracy = self.net.accuracy(self.y_pred, self.y)
        self.summarizer.scalar("accuracy", self.accuracy)
        self.summarizer.scalar("loss", self.loss)
        self.train = self.net.train_step(self.loss)
        self.B = self.net.B
        self.A = self.net.A
        self.n_epoch_to_decay = list(range(800, 20000, 1000))[::-1]
        self.next_epoch_to_decay = self.n_epoch_to_decay.pop()
        self.saver = tf.train.Saver()
        self.init = tf.global_variables_initializer()
        self.local_init = tf.local_variables_initializer()
        self.kf = KFold(n_splits=10, random_state=0, shuffle=True)

    def add_placeholders(self):
        self.x = tf.placeholder(tf.float32, shape=[None, self.config.features_dim])
        self.y = tf.placeholder(tf.float32, shape=[None, self.config.labels_dim])
        self.keep_prob = tf.placeholder(tf.float32)

    def train_epoch(self, sess, summarizer):
        merged_summary = self.summarizer.merge_all()
        err, accuracy = list(), list()
        X, Y = self.data.get_train()
        for train, val in self.kf.split(X, y=Y):
            feed_dict = {self.x: X[train], self.y: Y[train], self.keep_prob: self.config.solver.dropout}
            # note: one optimizer step per K-fold training split
            summ, _, loss_, accuracy_ = sess.run([merged_summary, self.train,
                                                  self.loss, self.accuracy], feed_dict=feed_dict)
            summarizer.add_summary(summ)
            err.append(loss_)
            accuracy.append(accuracy_)
        return np.mean(err), np.mean(accuracy)

    def do_eval(self, sess, data):
        if data == "validation":
            err, accuracy = list(), list()
            X, Y = self.data.get_validation()
            for train, val in self.kf.split(X, y=Y):
                feed_dict = {self.x: X[val], self.y: Y[val], self.keep_prob: 1}
                loss_, Y_pred, accuracy_ = sess.run([self.loss, self.y_pred, self.accuracy], feed_dict=feed_dict)
                metrics = evaluate(predictions=Y_pred, labels=Y[val])
                err.append(loss_)
                accuracy.append(accuracy_)
            return np.mean(err), np.mean(accuracy), metrics

        if data == "test":
            X, Y = self.data.get_test()
            feed_dict = {self.x: X, self.y: Y, self.keep_prob: 1}
            loss_, Y_pred, accuracy_ = sess.run([self.loss, self.y_pred, self.accuracy], feed_dict=feed_dict)
            metrics = evaluate(predictions=Y_pred, labels=Y)
            return loss_, accuracy_, metrics

    def fit(self, sess, summarizer):
        sess.run(self.init)
        sess.run(self.local_init)
        max_epochs = self.config.max_epochs
        self.epoch_count = 0
        max_micro_f1 = 0
        max_macro_f1 = 0
        # initialized so returnDict below is valid even when loading a saved model
        loss_train = loss_val = accuracy_train = accuracy_val = 0.0
        while self.epoch_count < max_epochs:
            if self.config.load:
                break
            loss_train, accuracy_train = self.train_epoch(sess, summarizer['train'])
            loss_val, accuracy_val, metrics_val = self.do_eval(sess, "validation")
            if self.epoch_count == self.next_epoch_to_decay:
                if len(self.n_epoch_to_decay) == 0:
                    self.next_epoch_to_decay = -1
                else:
                    self.next_epoch_to_decay = self.n_epoch_to_decay.pop()
                self.config.learning_rate *= self.config.lr_decay_factor
                print('Decaying learning rate ...')
                print(self.config.learning_rate)
            
            if max_micro_f1 < metrics_val['micro_f1'] and max_macro_f1 < metrics_val['macro_f1']:
                print(self.config.ckptdir_path)
                print("cur_max_Mi-F1 = %g, cur_max_Ma-F1 = %g, cur_epoch = %g." % (
                    metrics_val['micro_f1'], metrics_val['macro_f1'], self.epoch_count))
                self.saver.save(sess, self.config.ckptdir_path + "model.ckpt")
            max_micro_f1 = max(max_micro_f1, metrics_val['micro_f1'])
            max_macro_f1 = max(max_macro_f1, metrics_val['macro_f1'])

            if self.epoch_count % 5 == 0:
                print("After %d training epoch(s), Training : Loss = %g, Validation : Loss = %g." % (
                self.epoch_count, loss_train, loss_val))
                print("train_accuracy = %g, val_accuracy = %g." % (accuracy_train, accuracy_val))
                print("Micro-F1 = %g, Macro-F1 = %g." % (metrics_val['micro_f1'], metrics_val['macro_f1']))
            self.epoch_count += 1
        returnDict = {"train_loss": loss_train, "val_loss": loss_val, "train_accuracy": accuracy_train,
                      "val_accuracy": accuracy_val}
        return returnDict

    def add_summaries(self, sess):
        if self.config.load or self.config.debug:
            path_ = os.path.join("../results/tensorboard", self.config.dataset_name)
        else:
            path_ = os.path.join("../bin/results/tensorboard", self.config.dataset_name)
        summary_writer_train = tf.summary.FileWriter(path_ + "/train", sess.graph)
        summary_writer_val = tf.summary.FileWriter(path_ + "/val", sess.graph)
        summary_writer_test = tf.summary.FileWriter(path_ + "/test", sess.graph)
        summary_writers = {'train': summary_writer_train, 'val': summary_writer_val, 'test': summary_writer_test}
        return summary_writers