Ejemplo n.º 1
0
    # NOTE(review): truncated scrape fragment — the enclosing function is not
    # visible here. Names such as `config`, `inputs`, `targets`, `learning_rate`,
    # `keep_prob`, `loss`, `optimizer`, `train_data`, `train_labels`, `utils`,
    # and `lstm_graph` are defined outside this view; confirm against the caller.
    # Runs the training loop: one TF1 session over `lstm_graph`, iterating
    # `config.max_epoch` epochs of mini-batches from utils.generate_one_epoch.
    with tf.Session(graph=lstm_graph) as sess:
        # Initialize all graph variables before any training step.
        tf.global_variables_initializer().run()
        train_score = []       # per-epoch training scores (populated later, not shown here)
        validation_score = []  # per-epoch validation scores (populated later, not shown here)
        for epoch in range(config.max_epoch):
            print(
                "epoch ****************************************************************************",
                epoch)
            # Dead code kept as-is: a per-epoch learning-rate decay schedule that
            # was disabled in favor of the fixed config.learning_rate fed below.
            # lr_decay = config.learing_rate_decay ** max(float(epoch + 1 - config.init_epoch), 0.0)
            # current_lr = config.init_learning_rate * lr_decay
            # print("learning_rate............................", current_lr)

            # batch_index = 0
            # One full pass over the training set in (batch_X, batch_Y) pairs.
            for batch_X, batch_Y in utils.generate_one_epoch(
                    train_data, train_labels, config.batch_size,
                    config.num_steps):
                # Feed dict maps graph placeholders to this batch; dropout is
                # active during training via config.keep_prob.
                train_data_feed = {
                    inputs: batch_X,
                    targets: batch_Y,
                    learning_rate: config.learning_rate,
                    keep_prob: config.keep_prob
                }
                # Run one optimization step; `_` discards the optimizer op's output.
                train_loss, _ = sess.run([loss, optimizer], train_data_feed)
                # batch_index += 1
                # print accuracy of train model for each 50 batch
                # if batch_index % 1000 == 0:
                #     print("train_loss: ", train_loss)

            # output training score and validation score for each epoch
Ejemplo n.º 2
0
 ],
              [
                  validation_data, validation_labels, "validation",
                  validation_reader
              ]]:
     # NOTE(review): truncated scrape fragment — the `for each in [[...` header
     # and the first list element begin before this view; presumably the first
     # element is the training split mirroring the validation entry above.
     # Unpack the [data, labels, split-name, reader] entry for this split.
     data = each[0]
     labels = each[1]
     which_data = each[2]
     reader = each[3]
     final_predict = np.array([])
     total_loss = 0   # running sum of per-batch losses for this split
     num_loss = 0     # number of batches, used for the average loss
     print(
         "Data.....................................................................",
         which_data)
     # Evaluate one full epoch without shuffling so predictions stay in order.
     for batch_X, batch_Y in utils.generate_one_epoch(
             data, labels, config.batch_size, shuffle=False):
         # keep_prob=1.0 disables dropout at evaluation time.
         data_feed = {
             inputs: batch_X,
             targets: batch_Y,
             keep_prob: 1.0
         }
         loss_p, predict = sess.run([loss, prediction], data_feed)
         total_loss += loss_p
         num_loss += 1
         # predict = predict[0]
         # Accumulate batch predictions into one flat array.
         final_predict = np.append(final_predict, predict)
     # print "losssssssssssssssssssssssssssssssssssssssssssssssssssssssssss", which_data, total_loss/num_loss
     # Reshape flat predictions to a column vector (N, 1) for downstream use.
     final_predict = np.array(final_predict).reshape(
         len(final_predict), 1)
     # Map each raw prediction to a discrete stage label via utils.get_state.
     stages = [utils.get_state(x) for x in final_predict]
     non_eruption_stage = [