Example #1
import os
import time
from datetime import datetime

import tensorflow as tf  # written against the old TF 0.x API used below

import data_input
import model

FLAGS = tf.app.flags.FLAGS  # assumes the flags used below (train, batch_size, ...) are defined elsewhere


def main(ckpt=None):
    # Build everything in a fresh default graph (the original created a
    # throwaway Session just to reach its graph).
    with tf.Graph().as_default():
        keep_prob = tf.placeholder("float")

        # Prepare the input data
        images, labels, _ = data_input.load_data([FLAGS.train], FLAGS.batch_size, shuffle=True, distored=True)  # 'distored' [sic] matches the data_input API
        # Build the model
        logits = model.inference_deep(images, keep_prob, data_input.DST_LONG_SIZE,data_input.DST_SHORT_SIZE, data_input.NUM_CLASS)
        loss_value = model.loss(logits, labels)
        train_op = model.training(loss_value, FLAGS.learning_rate)
        acc = model.accuracy(logits, labels)

        saver = tf.train.Saver(max_to_keep=0)
        sess = tf.Session()
        sess.run(tf.initialize_all_variables())
        if ckpt:
            print('restore ckpt %s' % ckpt)
            saver.restore(sess, ckpt)
        tf.train.start_queue_runners(sess)

        summary_op = tf.merge_all_summaries()
        summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph_def)

        # Training loop: monitor the model as it trains
        for step in range(FLAGS.max_steps):
            start_time = time.time()
            _, loss_result, acc_res = sess.run([train_op, loss_value, acc], feed_dict={keep_prob: 0.99})
            duration = time.time() - start_time

            if step % 10 == 0:
                num_examples_per_step = FLAGS.batch_size
                examples_per_sec = num_examples_per_step / duration
                sec_per_batch = float(duration)
                format_str = '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f sec/batch)'
                print(format_str % (datetime.now(), step, loss_result, examples_per_sec, sec_per_batch))
                print('acc_res %s' % acc_res)

            if step % 100 == 0:
                summary_str = sess.run(summary_op, feed_dict={keep_prob: 1.0})
                summary_writer.add_summary(summary_str, step)

            if step % 1000 == 0 or (step + 1) == FLAGS.max_steps or loss_result == 0:
                checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
                save_path = saver.save(sess, checkpoint_path, global_step=step)
                print('%s saved' % save_path)

            if loss_result == 0:
                print('loss is zero')
                break
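
The snippet never shows how main() is invoked; a minimal entry point might look like this (the command-line handling is an assumption, not part of the original):

if __name__ == '__main__':
    import sys
    # Optionally pass a checkpoint path to resume from as the first argument.
    main(sys.argv[1] if len(sys.argv) > 1 else None)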
Example #2
import os

import tensorflow as tf

import data_input
import fc  # project module providing the FC model class

# TRN_FILE, VAL_FILE, ENS_N, RES_DIR, ARCHITECTURE, HYPERPARAMS, MAX_ITER and
# MAX_EPOCHS are module-level constants not shown in this snippet.


def main():
    data = data_input.load_data(TRN_FILE, VAL_FILE, skip_rows=1)
    for i in range(ENS_N):
        MODEL_DIR = RES_DIR + '/model' + str(i)
        METAGRAPH_DIR = MODEL_DIR + '/out'
        LOG_DIR = MODEL_DIR + '/log'
        for DIR in (MODEL_DIR, LOG_DIR, METAGRAPH_DIR):
            try:
                os.mkdir(DIR)
            except FileExistsError:
                pass
        print('Architecture: {}'.format(ARCHITECTURE), flush=True)
        m = fc.FC(ARCHITECTURE, HYPERPARAMS, log_dir=LOG_DIR, save_graph_def=True)
        print('Resetting training epochs', flush=True)
        data.train.reset_epochs()
        print('Resetting validation epochs', flush=True)
        data.validation.reset_epochs()
        m.train(data, max_iter=MAX_ITER, max_epochs=MAX_EPOCHS, cross_validate=True,
                verbose=True, save=True, save_log=True, outdir=METAGRAPH_DIR)
        print("Trained!", flush=True)
        del m
        tf.reset_default_graph()
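
For reference, the constants above might be configured like this (all values are illustrative assumptions; the original configuration is not shown):

TRN_FILE = 'train.csv'
VAL_FILE = 'validation.csv'
RES_DIR = './results'
ENS_N = 5                           # number of ensemble members
ARCHITECTURE = [784, 256, 64, 10]   # assumed layer-size format consumed by fc.FC
HYPERPARAMS = {'learning_rate': 1e-3, 'batch_size': 128}
MAX_ITER = 20000
MAX_EPOCHS = 100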
Example #3
import sys

import numpy as np
from keras.preprocessing.image import ImageDataGenerator  # used below but missing from the original imports

sys.path.append("../process")
import data_input
from train_network import Lenet

def train(aug, model, train_x, train_y, test_x, test_y):
    # batch_size and epochs are module-level globals set in the __main__ block below
    model.compile(loss="categorical_crossentropy", optimizer="Adam", metrics=["accuracy"])
    model.fit(aug.flow(train_x, train_y, batch_size=batch_size),
              validation_data=(test_x, test_y),
              steps_per_epoch=len(train_x) // batch_size,
              epochs=epochs,
              verbose=1)
    model.save("../predict/item_model.h5")

if __name__ == "__main__":
    channel = 3
    height = 32
    width = 32
    class_num = 41
    norm_size = 32
    batch_size = 32
    epochs = 40
    model = Lenet.neural(channel=channel, height=height, width=width, classes=class_num)
    train_x, train_y = data_input.load_data("../data/item_train", norm_size, class_num)
    test_x, test_y = data_input.load_data("../data/item_val", norm_size, class_num)
    aug = ImageDataGenerator(rotation_range=30, width_shift_range=0.1, height_shift_range=0.1, shear_range=0.2, zoom_range=0.2, horizontal_flip=True, fill_mode="nearest")
    train(aug, model, train_x, train_y, test_x, test_y)
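
Lenet.neural itself is not shown; a minimal LeNet-style Keras model with the same signature might look like this (an illustrative sketch, not the project's actual network):

from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

def lenet_sketch(channel, height, width, classes):
    # Two conv/pool blocks followed by dense layers, in the classic LeNet pattern.
    return Sequential([
        Conv2D(20, (5, 5), padding="same", activation="relu",
               input_shape=(height, width, channel)),
        MaxPooling2D(pool_size=(2, 2)),
        Conv2D(50, (5, 5), padding="same", activation="relu"),
        MaxPooling2D(pool_size=(2, 2)),
        Flatten(),
        Dense(500, activation="relu"),
        Dense(classes, activation="softmax"),
    ])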
Example #4
import os

import tensorflow as tf

from data_input import load_data
from helper_files.data3d_augment_reformat import augment_n_reformat
import hyperparameters  # project module; the model() function used below is likewise assumed to come from project code not shown

# Get hyperparameters
hparams = hyperparameters.create_hparams()

# Set data input path
train_data_dir = hparams.input_dir +'/train'

# Set path to save images to during training
current_results_dir_base = os.path.join(hparams.results_dir, 'during_training')
images_output_path = os.path.join(current_results_dir_base, hparams.model_subdir)

# Load preprocessed data
images, labels = load_data(train_data_dir)
# For the first epoch, do not do data augmentation
images_aug, labels_aug = augment_n_reformat(images, labels, hparams)

# Set up input data placeholder variables
x = tf.placeholder(dtype=tf.float32, shape=images_aug.shape)
y = tf.placeholder(dtype=tf.float32, shape=labels_aug.shape)
input_images = tf.Variable(x, trainable=False, collections=[])
input_labels = tf.Variable(y, trainable=False, collections=[])
image, label = tf.train.slice_input_producer([input_images, input_labels])
images_batch, labels_batch = tf.train.batch([image, label], batch_size=hparams.batch_size)

# Feed input data batch into neural network architecture
logits, _ = model(images_batch, hparams)

# Calculate loss of current network
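
The example is truncated after this comment; a typical continuation under these names would be (hypothetical, not from the original):

loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=labels_batch, logits=logits))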
Example #5
def load_data(self):
    (self.x_train, self.y_train, self.x_valid, self.y_valid,
     self.x_test, self.y_test, self.question, self.word_embedding,
     self.max_q_len, self.max_sentences, self.max_sen_len,
     self.vocab_size, self.question_num) = data_input.load_data(self.config)
    self.encoding = _position_encoding(self.max_sen_len,
                                       self.config.embedding_size)
    # Materialize with list(...): zip() is a one-shot iterator in Python 3,
    # and these pairs are presumably iterated more than once during training.
    self.train = list(zip(self.question, self.x_train, self.y_train))
    self.valid = list(zip(self.question, self.x_valid, self.y_valid))
    self.test = list(zip(self.question, self.x_test, self.y_test))
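
_position_encoding is not shown here; the position-encoding scheme of Sukhbaatar et al. (2015) is a common choice in such models, and a sketch of it looks like this (an assumption, not confirmed by the example):

import numpy as np

def _position_encoding(sentence_size, embedding_size):
    # Weight word j of a sentence by its position, separately per dimension i.
    encoding = np.ones((embedding_size, sentence_size), dtype=np.float32)
    for i in range(1, embedding_size + 1):
        for j in range(1, sentence_size + 1):
            encoding[i - 1, j - 1] = ((i - (embedding_size + 1) / 2) *
                                      (j - (sentence_size + 1) / 2))
    encoding = 1 + 4 * encoding / embedding_size / sentence_size
    return np.transpose(encoding)  # shape: (sentence_size, embedding_size)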
Example #6
import numpy as np
import matplotlib.pyplot as plt

import data_input
from keras.preprocessing.image import ImageDataGenerator

# EPOCHS, NORM_SIZE and CLASS_NUM are module-level constants not shown here.


def train(aug, train_x, train_y, test_x, test_y):
    # ... model construction and fitting are elided in the original snippet;
    # `_history` below is assumed to be the History object returned by model.fit ...
    N = EPOCHS
    plt.plot(np.arange(0, N), _history.history["loss"], label="train_loss")
    plt.plot(np.arange(0, N), _history.history["val_loss"], label="val_loss")
    plt.plot(np.arange(0, N), _history.history["accuracy"], label="train_acc")
    plt.plot(np.arange(0, N),
             _history.history["val_accuracy"],
             label="val_acc")
    plt.title("loss and accuracy")
    plt.xlabel("epoch")
    plt.ylabel("loss/acc")
    plt.legend(loc="best")
    plt.savefig("./result/result.png")
    plt.show()


if __name__ == "__main__":

    train_x, train_y = data_input.load_data("./data/train", NORM_SIZE,
                                            CLASS_NUM)
    test_x, test_y = data_input.load_data("./data/test", NORM_SIZE, CLASS_NUM)

    aug = ImageDataGenerator(rotation_range=30,
                             width_shift_range=0.1,
                             height_shift_range=0.1,
                             shear_range=0.2,
                             zoom_range=0.2,
                             horizontal_flip=True,
                             fill_mode="nearest")

    train(aug, train_x, train_y, test_x, test_y)
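
Illustrative values for the constants above (assumptions; the original values are not shown):

EPOCHS = 40
NORM_SIZE = 32
CLASS_NUM = 62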
Example #7
    # ... the earlier body of train() (training and plotting setup) is elided in the original ...
    plt.savefig("../result/result.png")
    plt.show()


if __name__ == "__main__":
    channel = 3
    height = 32
    width = 32
    class_num = 62
    norm_size = 32  # parameters
    batch_size = 32
    epochs = 40
    model = Lenet.neural(channel=channel,
                         height=height,
                         width=width,
                         classes=class_num)  # network
    train_x, train_y = data_input.load_data("../data/train", norm_size,
                                            class_num)
    test_x, test_y = data_input.load_data("../data/test", norm_size,
                                          class_num)  # load the data

    aug = ImageDataGenerator(rotation_range=30,
                             width_shift_range=0.1,
                             height_shift_range=0.1,
                             shear_range=0.2,
                             zoom_range=0.2,
                             horizontal_flip=True,
                             fill_mode="nearest")  # data augmentation; creates an iterator

    train(aug, model, train_x, train_y, test_x, test_y)  # train
Example #8
import numpy as np  # used below but missing from the original imports

from knn import knn
from neural_network import Network
from bayes import Bayes
from svm_class import Svm
from data_input import load_data, load_data_test, data_to_libsvm, testdata_to_libsvm
from progress_until import Progress

numToTrain = 60000
numToClassfy = 10000
model_num = 5
model_now = 0

results = np.zeros([model_num, numToClassfy], dtype=int)

print("开始读取MNIST数据:")
images, label = load_data()
test_images, test_label = load_data_test()
# read the data in libsvm format
svm_label, svm_images = data_to_libsvm(images, label)
svm_test_label, svm_test_images = testdata_to_libsvm(test_images, test_label)

# binarize the pixel data
images = np.where(images >= 128, 1, 0)
test_images = np.where(test_images >= 128, 1, 0)

network = Network(784, 2, [200, 10], 10)
network.startLearing(numToTrain, images, label, 1)
results[model_now] = network.test(test_images, test_label, numToClassfy)
model_now += 1

print("SVM 1训练中...")