Example #1
def main(args):
    checkArgs()
    if FLAGS.testing:
        model.test(FLAGS)
    elif FLAGS.finetune:
        model.training(FLAGS, is_finetune=True)
    else:
        model.training(FLAGS, is_finetune=False)
Example #3
def main(args):
    checkArgs()
    if FLAGS.testing:
        model.test(FLAGS)
    elif FLAGS.finetune:
        model.training(FLAGS, is_finetune=True)
    else:
        model.training(FLAGS, is_finetune=False)

    if FLAGS.testing:
        path = FLAGS.test_dir
        fd = open(path)
        image_filenames = []
        for i in fd:
            i = i.strip().split(" ")
            image_filenames.append(i[0])
        count = 0
        for image_path in image_filenames:
            orig_image = cv2.imread(image_path)
            
            img_seg = cv2.imread(os.getcwd() + "/out_image/" + str(image_filenames[count]).split('/')[-1])
            img_seg = cv2.resize(img_seg, (orig_image.shape[1], orig_image.shape[0]))
            img_seg = np.array(img_seg)
            #print(img_seg.shape)
            cv2.imwrite("out_image/" + str(image_filenames[count]).split('/')[-1], img_seg)
            #cv2.imshow("segmented resized", img_seg)
            #cv2.waitKey(0)
            points = []
            count_red = 1
            count_green = 1
            for i in range(img_seg.shape[0]):
                for j in range(img_seg.shape[1]):
                    if img_seg[i, j, 0] == 2 and img_seg[i, j, 1] == 2 and img_seg[i, j, 2] == 2:
                        points.append([j, i])
                        count_green += 1
                    if img_seg[i, j, 0] == 9 and img_seg[i, j, 1] == 9 and img_seg[i, j, 2] == 9:
                        points.append([j, i])
                        count_red += 1

            points = np.array(points)
            x, y, w, h = cv2.boundingRect(points)
            modified_image = orig_image[y:y+h, x:x+w]
            #modified_image = cv2.resize(modified_image, Size(960, 720))
            #cv2.imshow("cropped", modified_image)
            #cv2.waitKey(0)
            cv2.imwrite("modified_image.jpg", modified_image)
            modified_image, no_box = rd.pothole_detect(os.getcwd()+ "/modified_image.jpg")
            print("Image: "+ image_path)
            print("The quality factor is " + str(sigmoid(float(count_red)/(count_red + count_green) + 0.15*no_box)))
            orig_image[y:y+h, x:x+w] = modified_image
            name = "road_damage/ans" + str(count) + ".jpg"
            cv2.imwrite(name, orig_image)
            count = count + 1
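Note: the quality-factor line above assumes a scalar sigmoid helper that is not shown in the example. A minimal sketch of what it would have to look like (the real definition may differ):

import math

def sigmoid(x):
    # Assumed helper for the quality factor above: the standard logistic
    # function, mapping any real number into (0, 1).
    return 1.0 / (1.0 + math.exp(-x))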
Example #4
def train_and_eval(model_type, directory, SUB, batch_size, epoch, model_dir):
    # Model training and prediction
    if not (os.path.exists(directory + "train_div.csv")
            or os.path.exists(directory + "sub_all.csv")):
        data_preprocess(directory)

    if (SUB):
        print("use all train data")
        submitting(model_type, directory, model_dir)
    else:
        print("use only 80% train data")
        training(model_type, directory, batch_size, epoch, model_dir)
Example #5
def main():
    print("SEGNET")
    args = checkArgs()
    print("SAVE TO "+args.log_dir)
    args.use_weights = str2bool(args.use_weights)
    if args.infere:
        model.infere(args)
    elif args.testing:
        model.test(args)
    elif args.finetune:
        model.training(args, is_finetune=True)
    else:
        model.training(args, is_finetune=False)
Example #6
def train():
    tr, va, te = read_dataset('../mnist.pkl.gz')
    binarizer = LabelBinarizer().fit(range(10))

    x = tf.placeholder(tf.float32, [None, 784])
    y = tf.placeholder(tf.float32, [None, 10])
    keep_prob = tf.placeholder(tf.float32)
    preds = model.inference(x, keep_prob)
    loss, total_loss = model.loss(preds, y)
    acc = model.evaluation(preds, y)
    # learning rate: 0.1
    train_op = model.training(total_loss, 0.1)

    init = tf.initialize_all_variables()
    sess = tf.Session()
    sess.run(init)
    for i in range(10000):
        batch_xs, batch_ys = tr.next_batch(50)
        if i % 100 == 0:
            train_acc = acc.eval(feed_dict={
                x:batch_xs, y:binarizer.transform(batch_ys),
                keep_prob: 1.0}, session=sess)
            print "step: {0}, training accuracy {1}".format(i, train_acc)
            validation_accuracy = getAccuracy(x, y, keep_prob, binarizer, acc, va, sess)
            print("Validation accuracy : {0}".format(validation_accuracy))
        train_op.run(feed_dict={
            x:batch_xs, y:binarizer.transform(batch_ys), keep_prob: 0.5},
                     session=sess)

    test_accuracy = getAccuracy(x, y, keep_prob, binarizer, acc, te, sess)
    print("Test accuracy : ", test_accuracy)
Example #7
def run_training():
    # Path where the model parameters are saved
    logs_train_dir = 'logs/train/'

    # Read the training and testing datasets
    data_resource = input_data.DataProcessor('images', 'image_labels_dir',
                                             'labels.txt')
    training_data_set = data_resource.get_dataset(IMG_W, IMG_H, BATCH_SIZE,
                                                  'training')
    training_iterator = training_data_set.make_initializable_iterator()
    testing_data_set = data_resource.get_dataset(IMG_W, IMG_H, BATCH_SIZE,
                                                 'testing')
    testing_iterator = testing_data_set.make_initializable_iterator()

    # Generate batches
    test_batch, test_label_batch = testing_iterator.get_next()
    train_batch, train_label_batch = training_iterator.get_next()

    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.training(train_loss, learning_rate)
    train__acc = model.evaluation(train_logits, train_label_batch)

    # Merge all summaries
    summary_op = tf.summary.merge_all()
    # Create a session
    sess = tf.Session()
    # Create a writer for the log files
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    # Create a saver to store the trained model
    saver = tf.train.Saver()
    # Initialize all variables
    sess.run(tf.global_variables_initializer())
    sess.run(training_iterator.initializer)

    for step in np.arange(MAX_STEP):
        # Run MAX_STEP training steps, one batch per step
        try:
            _, tra_loss, tra_acc, summary_str = sess.run(
                [train_op, train_loss, train__acc, summary_op])
        except tf.errors.OutOfRangeError:  # reached at the end of every epoch
            sess.run(training_iterator.initializer)
            _, tra_loss, tra_acc, summary_str = sess.run(
                [train_op, train_loss, train__acc, summary_op])
        if step % 20 == 0:
            print('Step %d, train loss = %.6f, train accuracy = %.2f%%' %
                  (step, tra_loss, tra_acc * 100.0))
            train_writer.add_summary(summary_str, step)
        if step % 2000 == 0 or (step + 1) == MAX_STEP:
            checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
            saver.save(sess, checkpoint_path, global_step=step)

    # Evaluate the model (only a single batch is tested)
    test_logits = model.inference(test_batch, BATCH_SIZE, N_CLASSES, True)
    test_loss = model.losses(test_logits, test_label_batch)
    test__acc = model.evaluation(test_logits, test_label_batch)
    sess.run(testing_iterator.initializer)
    print(sess.run([test_loss, test__acc]))

    sess.close()
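Note: the try/except above re-initializes the iterator at each epoch boundary. With the same TF 1.x tf.data pipeline, an equivalent sketch (assuming the dataset needs no explicit initialization) lets the dataset repeat so sess.run never raises OutOfRangeError:

# Alternative epoch handling for the loop above (sketch):
training_data_set = training_data_set.repeat()   # repeat indefinitely
training_iterator = training_data_set.make_one_shot_iterator()
train_batch, train_label_batch = training_iterator.get_next()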
Example #8
def run_training():
    traindir = 'data/train/'
    logs_train_dir = 'logs/train/'

    train_image, train_label = input_data.get_files(traindir)
    train_batch, train_label_batch = input_data.get_batch(
        train_image, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)

    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.training(train_loss, learning_rate)
    train_acc = model.evaluation(train_logits, train_label_batch)

    sess = tf.Session()
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in range(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])

            if step % 20 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()
Example #9
def run_training(args):
    with tf.Graph().as_default():

        # data_train, data_validation, im_size = data.load_data_random(args.n_images, im_size=(256,256), light_size=(8,8))
        # data_train, data_validation, im_size = data.load_data_smooth(args.n_images, im_size=(256,256), light_size=(8,8))
        # data_train, data_validation, im_size = data.load_data_grid(args.n_images, im_size=(256,256), light_size=(8,8))
        # data_train, data_validation = data.load_Tgray_mat(args.n_images)
        data_train, data_validation, im_size = data.load_Green_mat(
            args.n_images)

        X_tensor = tf.placeholder(tf.float32,
                                  shape=(None, data.INPUT_DIM),
                                  name="input")
        yt_tensor = tf.placeholder(tf.float32,
                                   shape=(None, data.OUTPUT_DIM),
                                   name="output")

        y_tensor = model.inference(X_tensor,
                                   n_units=15,
                                   output_dim=data.OUTPUT_DIM)
        loss_tensor = model.loss(y_tensor, yt_tensor)
        error_tensor = model.training_error(loss_tensor, yt_tensor)
        train_op = model.training(loss_tensor, args.learning_rate)

        config = tf.ConfigProto(device_count={'GPU': 0})
        if args.gpu:
            config = tf.ConfigProto()
        init = tf.initialize_all_variables()
        saver = tf.train.Saver()
        sess = tf.Session(config=config)
        sess.run(init)

        # show_image(data_train[0,...,-2], im_size)
        show_image(data_train[0, ..., -1], im_size)
        # y_ = run_inference(sess, X_tensor, y_tensor, data_train[0,...,:-1])
        # show_image(y_[:,0], im_size)

        for step in range(args.max_steps):
            X_data, yt_data = data.split_input_output(
                data.next_batch_images(data_train, args.batch_size))
            # print(X_data.min(axis=0))
            # print(X_data.max(axis=0))
            # print(yt_data.min(axis=0))
            # print(yt_data.max(axis=0))
            feed_dict = {X_tensor: X_data, yt_tensor: yt_data}
            _, loss_value, error = sess.run(
                [train_op, loss_tensor, error_tensor], feed_dict=feed_dict)

            if step % 5 == 0:
                epoch = step * args.batch_size / data_train.shape[0]
                print('Step %d (epoch %.2f): loss = %.2f (error = %.3f)' %
                      (step, epoch, loss_value, error))
                # y_ = run_inference(sess, X, y_tensor, (0.5, 0.5), data.TGRAY_SIZE)
                # show_image(y_[:,0], data.TGRAY_SIZE)

            if (step + 1) % 5 == 0:
                y_ = run_inference(sess, X_tensor, y_tensor,
                                   data_train[0, ..., :-1])
                # y_ = run_inference(sess, X_tensor, y_tensor, X_data[:im_size[0]*im_size[1]])
                # show_image(y_[:,0], im_size)
                write_image(y_[:, 0], im_size, 'results/green-%i.jpg' % step)
Example #10
def train():
    train_batch, label_batch = id.read_and_save()
    train_batch = tf.cast(train_batch, dtype=tf.float32)
    label_batch = tf.cast(label_batch, dtype=tf.int64)

    keep_prob = tf.placeholder(tf.float32)
    logits = model.inference(train_batch, keep_prob)
    loss = model.losses(logits, label_batch)
    op = model.training(loss=loss)
    accuracy = model.evaluation(logits, label_batch)
    saver = tf.train.Saver()
    summary_op = tf.summary.merge_all()

    with tf.Session() as sess:
        train_writer = tf.summary.FileWriter(LOG_DIR, graph=sess.graph)
        sess.run(tf.global_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            for step in range(MAX_STEP):
                _, train_loss, train_acc = sess.run([op, loss, accuracy], feed_dict={keep_prob: 0.75})
                if step % 50 == 0:
                    print('Step %d, train loss = %.2f, train accuracy = %.2f' % (step, train_loss, train_acc))
                    summary_str = sess.run(summary_op)
                    train_writer.add_summary(summary_str, step)
                if step % 2000 == 0 or (step + 1) == MAX_STEP:
                    checkpoint_path = os.path.join(LOG_DIR, "model.ckpt")
                    saver.save(sess, checkpoint_path)
        except tf.errors.OutOfRangeError:
            print('An error occurred')
        finally:
            coord.request_stop()
        coord.join(threads=threads)
Example #11
def run_training():
    data_dir = 'D:/WCsPy/data/train/'
    log_dir = 'saves'
    image, label = inputData.get_files(data_dir)
    image_batches, label_batches = inputData.get_batches(
        image, label, 32, 32, 16, 20)
    print(image_batches.shape)
    p = model.mmodel(image_batches, 16)
    cost = model.loss(p, label_batches)
    train_op = model.training(cost, 0.001)
    acc = model.get_accuracy(p, label_batches)

    sess = tf.Session()
    init = tf.global_variables_initializer()
    sess.run(init)
    saver = tf.train.Saver()
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(1000):
            print(step)
            if coord.should_stop():
                break
            _, train_acc, train_loss = sess.run([train_op, acc, cost])
            print("loss:{} accuracy:{}".format(train_loss, train_acc))
            if step % 100 == 0:
                check = os.path.join(log_dir, "model.ckpt")
                saver.save(sess, check, global_step=step)
    except tf.errors.OutOfRangeError:
        print("Done!!!")
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()
Example #12
def run_training(train_data, target_data, load=False, load_session=None):
    with tf.Graph().as_default():
        print("Starting building graph " + str(datetime.datetime.now()))
        batch_placeholders = tf.placeholder(tf.float32, shape=(None, params.number_of_steps, params.num_of_features))
        target_batch_placeholders = tf.placeholder(tf.int32, shape=(None, params.number_of_steps))
        loss, probabilities = model.inference(batch_placeholders, target_batch_placeholders)
        training = model.training(loss, params.learning_rate)
        sess = tf.Session()
        init = tf.initialize_all_variables()
        sess.run(init)
        saver = tf.train.Saver(tf.trainable_variables())
        if load is True:
            saver.restore(sess, load_session)
            print("Restored!!!")
        for k in range(1, params.num_iters):
            print("Starting iter " + str(k) + " " + str(datetime.datetime.now()))
            data_batch, target_batch = get_next_batch(train_data, target_data)
            print(target_batch)
            feed_dict = {batch_placeholders: data_batch, target_batch_placeholders: target_batch}
            _, loss_value, prob = sess.run([training, loss, probabilities], feed_dict=feed_dict)
            print([("{0:.3f}".format(np.argmax(prob[i][0])), "{0:.3f}".format(np.max(prob[i][0]))) for i in range(params.number_of_steps)])
            print([("{0:.3f}".format(np.argsort(prob[i])[0][-2]), "{0:.3f}".format(np.sort(prob[i])[0][-2])) for i in range(params.number_of_steps)])
            print([("{0:.3f}".format(np.argsort(prob[i])[0][-3]), "{0:.3f}".format(np.sort(prob[i])[0][-3])) for i in range(params.number_of_steps)])
            print(loss_value)
            if k % params.save_per_iter == 0 or k == 10:
                saver.save(sess, params.output_path + str(k) + '.sess')
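Note: get_next_batch is not shown in this example. A hypothetical stand-in consistent with the placeholders above (the real batching logic may differ):

import numpy as np

def get_next_batch(train_data, target_data, batch_size=32):
    # Assumed helper: draw a random batch of sequences and their targets.
    # Assumes numpy arrays of shape (num_examples, params.number_of_steps,
    # params.num_of_features) and (num_examples, params.number_of_steps).
    idx = np.random.randint(0, train_data.shape[0], size=batch_size)
    return train_data[idx], target_data[idx]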
Example #13
def main(data, model):
    print("Reading images...")
    x_train = data.read_images('./data/trainImage256.txt', TRAIN_DATA_SIZE)
    x_test = data.read_images('./data/testImage256.txt', TEST_DATA_SIZE)
    y_train = data.read_labels('./data/trainLABEL256.txt', TRAIN_DATA_SIZE)
    y_test = data.read_labels('./data/testLABEL256.txt', TEST_DATA_SIZE)
    
    print("Creating model...")
    model.create_model(multi_gpu=False)

    print("Now training...")
    history = model.training(x_train, y_train, x_test, y_test)
    accuracy = history.history["accuracy"]
    loss = history.history["loss"]
    evaluation = model.evaluate(x_test, y_test)

    print("accuracy = " + str(evaluation))
    model.save('./model.h5')

    if not os.path.exists('./result_keras'):
        os.mkdir('./result_keras')
    for i in range(TEST_DATA_SIZE):
        ret = model.predict(x_test[i, :, :, 0].reshape([1, IMG_SIZE, IMG_SIZE, 1]), 1)
        np.savetxt('./result_keras/' + str(i) + '.txt', ret[0, :, :, 0])
    
    with open("training_log.txt", "w") as f:
        for i in range(training_epochs):
            f.write(str(loss[i]) + "," + str(accuracy[i]) + "\n")
    ax1 = plt.subplot()
    ax1.plot(loss, color="blue")
    ax2 = ax1.twinx()
    ax2.plot(accuracy, color="orange")
    plt.show()
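Note: this example expects model.training to return a Keras History whose history dict has "loss" and "accuracy" keys. A minimal sketch of such a wrapper, assuming a compiled tf.keras model stored on self.model with metrics=["accuracy"] (the batch size here is an assumption):

def training(self, x_train, y_train, x_test, y_test):
    # Assumed wrapper: fit the underlying Keras model and return the
    # History object that main() reads loss/accuracy from.
    return self.model.fit(x_train, y_train,
                          batch_size=8,
                          epochs=training_epochs,
                          validation_data=(x_test, y_test))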
Example #14
def setUp(self):
    self.data, self.config_map = read_input('Input/test_data.csv',
                                            'Input/data_config.json')
    self.estimator = linear_model.LinearRegression()
    self.data = data_preprocessor(self.data, self.config_map, 5, 'string')
    self.X_train, self.y_train, self.X_validation, self.y_validation, self.X_test, self.y_test = split_train_test_validation_data(
        self.data, self.config_map, 0.25, 0.2, 5)
    self.X_scaler, self.y_scaler, self.model, self.training_rmse = training(
        self.estimator, self.X_train, self.y_train, self.config_map)
Example #15
def run_training():
    train_dir = "D:\新建文件夹\python foot/train/"
    log_train_dir = "D:\新建文件夹\python foot/train_savenet/"
    vadiation_dir = 'D:\新建文件夹\python foot/valiation/'
    train, train_labels = pre_process.get_files(train_dir)
    train_batch, train_label_batch = pre_process.get_batch(
        train, train_labels, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.loss(train_logits, train_label_batch)
    train_op = model.training(train_loss, LEARNING_RATE)
    train_acc = model.evalution(train_logits, train_label_batch)
    # merge_all saves every summary to disk so TensorBoard can display it;
    # this single line is usually enough to expose the training metrics.
    summary_op = tf.summary.merge_all()
    #vadiation, vadiation_labels = pre_process.get_files(vadiation_dir)
    #vadiation_batch, vadiation_label_batch = pre_process.get_batch(vadiation, vadiation_labels, IMG_W,IMG_H,BATCH_SIZE, CAPACITY)
    #vadiation_logits = model.inference(vadiation_batch, BATCH_SIZE, N_CLASSES)
    #vadiation_loss = model.loss(vadiation_logits, vadiation_label_batch)
    #vadiation_acc = model.evalution(vadiation_logits, vadiation_label_batch)
    sess = tf.Session()
    # Specify a file used to save the graph
    train_writer = tf.summary.FileWriter(log_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    # Coordinator and start_queue_runners monitor the queue state,
    # continuously enqueueing and dequeueing
    coord = tf.train.Coordinator()  # https://blog.csdn.net/weixin_42052460/article/details/80714539
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])

            if step % 50 == 0:  # %.2f prints a float with two decimals; %% prints a literal %
                print("step %d, train loss = %.2f, train accuracy  = %.2f%%" %
                      (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == STEP:
                # Save the model every 2000 steps to checkpoint_path.
                # (The print below needs the validation ops that are
                # commented out above, so it is disabled to avoid a NameError.)
                #print("step %d, vadiation loss = %.2f, vadiation accuracy = %.2f%%"
                #      % (step, vadiation_loss, vadiation_acc * 100.0))
                checkpoint_path = os.path.join(log_train_dir, "model.ckpt")
                saver.save(sess, checkpoint_path, global_step=step)
    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')

    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()
Example #16
def run_fractional_stratification_model(
        estimator=None,
        data_path=None,
        config_path=None,
        num_iter=1,
        seed=None):
    '''
    Fractional Stratification Model Analysis
    '''
    if estimator is None or data_path is None or config_path is None:
        raise ValueError('Need Estimator, Data path and Config Path as arguments!')
    data, config_map = read_input(data_path, config_path)
    data = data_preprocessor(data, config_map, 5, 'string')
    training_map = {}
    for _ in range(0, num_iter):
        training_data, validation_data, testing_data = fractional_stratification(
                data, data.columns, 4, [0.6, 0.2, 0.2], config_map, seed)
        X_train, y_train = split_data(training_data, config_map)
        X_validation, y_validation = split_data(validation_data, config_map)
        X_test, y_test = split_data(testing_data, config_map)
        X_scaler, y_scaler, model, training_rmse = training(
                estimator, X_train, y_train, config_map)
        validation_rmse = calculate_rmse(
                X_validation,
                y_validation,
                X_scaler,
                y_scaler,
                model,
                config_map)
        testing_rmse = calculate_rmse(
                X_test, y_test, X_scaler, y_scaler, model, config_map)
        if training_rmse < validation_rmse:
            model_properties = {}
            model_properties['estimator'] = estimator
            model_properties['config_map'] = config_map
            model_properties['X_train'] = X_train
            model_properties['y_train'] = y_train
            model_properties['X_validation'] = X_validation
            model_properties['y_validation'] = y_validation
            model_properties['X_test'] = X_test
            model_properties['y_test'] = y_test
            model_properties['X_scaler'] = X_scaler
            model_properties['y_scaler'] = y_scaler
            model_properties['model'] = model
            model_properties['training_rmse'] = training_rmse
            model_properties['validation_rmse'] = validation_rmse
            model_properties['testing_rmse'] = testing_rmse
            training_map[validation_rmse] = model_properties
    if len(training_map) > 0:
        best_model_properties = training_map[min(training_map)]
        print('Best Model train error: {} | Best Model validation error: {} | Best Model test error: {}'.format(
            round(best_model_properties['training_rmse'], 7),
            round(best_model_properties['validation_rmse'], 7),
            round(best_model_properties['testing_rmse'], 7)))
        return best_model_properties
    return None
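Note: examples 14, 16, 17 and 30 all rely on training(...) returning (X_scaler, y_scaler, model, training_rmse) and on calculate_rmse(...), neither of which is shown. A hedged scikit-learn sketch of both (the scaling choice and the unused config_map handling are assumptions):

import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error

def calculate_rmse(X, y, X_scaler, y_scaler, model, config_map):
    # Assumed helper: predict in scaled space, undo the target scaling,
    # and compare against the raw targets. config_map is unused here.
    y_pred_scaled = model.predict(X_scaler.transform(X))
    y_pred = y_scaler.inverse_transform(y_pred_scaled.reshape(-1, 1)).ravel()
    return np.sqrt(mean_squared_error(y, y_pred))

def training(estimator, X_train, y_train, config_map):
    # Assumed helper: scale features and target, fit the estimator, and
    # report the RMSE on the training data itself.
    y_column = np.asarray(y_train).reshape(-1, 1)
    X_scaler = StandardScaler().fit(X_train)
    y_scaler = StandardScaler().fit(y_column)
    model = estimator.fit(X_scaler.transform(X_train),
                          y_scaler.transform(y_column).ravel())
    training_rmse = calculate_rmse(X_train, y_train, X_scaler, y_scaler,
                                   model, config_map)
    return X_scaler, y_scaler, model, training_rmse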
Example #17
def run_model_tuning(model_properties=None):
    '''
    Tunes the Model based on Training and Validation error
    '''
    if model_properties is None:
        raise ValueError('Need Model Properties as argument!')
    alphas = np.logspace(-10, 1, 400)
    config_map = model_properties['config_map']
    X_train = model_properties['X_train']
    y_train = model_properties['y_train']
    X_validation = model_properties['X_validation']
    y_validation = model_properties['y_validation']
    X_test = model_properties['X_test']
    y_test = model_properties['y_test']
    tuning_map = {}
    for alpha in alphas:
        estimator = Ridge(alpha=alpha)
        X_scaler, y_scaler, model, training_rmse = training(
                estimator, X_train, y_train, config_map)
        validation_rmse = calculate_rmse(
                X_validation,
                y_validation,
                X_scaler,
                y_scaler,
                model,
                config_map)
        testing_rmse = calculate_rmse(
                X_test, y_test, X_scaler, y_scaler, model, config_map)
        tuning_properties = {}
        tuning_properties['estimator'] = estimator
        tuning_properties['config_map'] = config_map
        tuning_properties['X_scaler'] = X_scaler
        tuning_properties['y_scaler'] = y_scaler
        tuning_properties['model'] = model
        tuning_properties['training_rmse'] = training_rmse
        tuning_properties['validation_rmse'] = validation_rmse
        tuning_properties['testing_rmse'] = testing_rmse
        tuning_map[validation_rmse] = tuning_properties
    if len(tuning_map) > 0:
        best_model_properties = tuning_map[min(tuning_map)]
        best_model_properties['config_map'] = config_map
        best_model_properties['X_train'] = X_train
        best_model_properties['y_train'] = y_train
        best_model_properties['X_validation'] = X_validation
        best_model_properties['y_validation'] = y_validation
        best_model_properties['X_test'] = X_test
        best_model_properties['y_test'] = y_test
        print('Best Model train error: {} | Best Model validation error: {} | Best Model test error: {}'.format(
            round(best_model_properties['training_rmse'], 7),
            round(best_model_properties['validation_rmse'], 7),
            round(best_model_properties['testing_rmse'], 7)))
        return best_model_properties
    return None
Example #18
def training_preload(model_name, labelNames, t_batch_train, t_batch_evalu,
                     log_train_dir, max_step, learning_rate):
    # Getting all training required operations
    t_op_logits, t_op_pred, classes = model.classification_inference(
        t_batch_train[0], labelNames, model_name)
    t_op_loss = model.losses(t_op_logits, t_batch_train[1])
    t_op_acc = model.evaluation(t_op_logits, t_batch_train[1])
    t_op_train = model.training(t_op_loss, learning_rate)

    model.writeSummaries(t_op_acc, t_op_loss, scope='training')
    t_op_summary = tf.summary.merge_all()

    with tf.Session() as sess:
        # Summary for tensorboard
        summary_writer = tf.summary.FileWriter(logdir=log_train_dir,
                                               graph=sess.graph,
                                               filename_suffix='training')
        # Saver for saving model
        saver = tf.train.Saver()
        # Initialize variables
        sess.run(tf.global_variables_initializer())
        # Tensorflow Thread control
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            for step in range(1, max_step + 1):
                if coord.should_stop():
                    break

                _, tra_loss, tra_acc, summary_str = sess.run(
                    [t_op_train, t_op_loss, t_op_acc, t_op_summary])
                summary_writer.add_summary(summary_str, step)

                if step % 100 == 0 or step == 1:
                    print('\n')
                    print(
                        'Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                        (step, tra_loss, tra_acc * 100.0))
                    print('', end='', flush=True)

                if step % 200 == 0 or step == max_step:
                    checkpoint_path = os.path.join(log_train_dir, 'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)

                print('.', end='', flush=True)

        except tf.errors.OutOfRangeError:
            print('Done training -- step limit reached')
        finally:
            coord.request_stop()
        coord.join(threads)
Example #19
def run_training1():
    train_dir = 'G:/Python/dogorcat/data/train'
    tfrecords_dir = 'tfrecords/'
    tfrecords_file = 'test.tfrecords'
    logs_train_dir = 'logs/recordstrain/'
    # images, labels = cr.get_files(train_dir)
    # cr.convert_to_tfrecord(images, labels, tfrecords_dir, tfrecords_file)

    train_batch, train_label_batch = cr.read_and_decode(tfrecords_dir +
                                                        tfrecords_file,
                                                        batch_size=BATCH_SIZE)
    train_batch = tf.cast(train_batch, dtype=tf.float32)
    train_label_batch = tf.cast(train_label_batch, dtype=tf.int64)

    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.training(train_loss, learning_rate)
    train_acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])

            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                      (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
Example #20
def main(ckpt=None):
    #with tf.Graph().as_default():
    with tf.Session().graph.as_default():
        keep_prob = tf.placeholder("float")

        # Prepare the data
        images, labels, _ = data_input.load_data([FLAGS.train], FLAGS.batch_size, shuffle = True, distored = True)
        # Build the model
        logits = model.inference_deep(images, keep_prob, data_input.DST_LONG_SIZE,data_input.DST_SHORT_SIZE, data_input.NUM_CLASS)
        loss_value = model.loss(logits, labels)
        train_op = model.training(loss_value, FLAGS.learning_rate)
        acc = model.accuracy(logits, labels)

        saver = tf.train.Saver(max_to_keep = 0)
        sess = tf.Session()
        sess.run(tf.initialize_all_variables())
        if ckpt:
            print('restore ckpt', ckpt)
            saver.restore(sess, ckpt)
        tf.train.start_queue_runners(sess)

        summary_op = tf.merge_all_summaries()
        summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph_def)

        # Monitor the model during training
        for step in range(FLAGS.max_steps):
            start_time = time.time()
            _, loss_result, acc_res = sess.run([train_op, loss_value, acc], feed_dict={keep_prob: 0.99})
            duration = time.time() - start_time

            if step % 10 == 0:
                num_examples_per_step = FLAGS.batch_size
                examples_per_sec = num_examples_per_step / duration
                sec_per_batch = float(duration)
                format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f sec/batch)')
                print (format_str % (datetime.now(), step, loss_result, examples_per_sec, sec_per_batch))
                print('acc_res', acc_res)

            if step % 100 == 0:
                summary_str = sess.run(summary_op,feed_dict={keep_prob: 1.0})
                summary_writer.add_summary(summary_str, step)

            if step % 1000 == 0 or (step + 1) == FLAGS.max_steps or loss_result == 0:
                checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
                save_path = saver.save(sess, checkpoint_path, global_step=step)
                print('%s saved' % save_path)

            if loss_result == 0:
                print('loss is zero')
                break
Example #21
def run_training():
    train_dir = 'C://Users/Sizhe/Desktop/CatsvsDogs/data/train/'
    logs_train_dir = 'C://Users/Sizhe/Desktop/CatsvsDogs/data/logs/train/'

    train, train_label = input_data.get_files(train_dir)
    train_batch, train_label_batch = input_data.get_batch(train,
                                               train_label,
                                               image_width,
                                               image_height,
                                               batch_size,
                                               capacity)
    train_logits = model.inference(train_batch, batch_size, n_class)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.training(train_loss, learning_rate)
    train_acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_train_dir,sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(max_step):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])

            # report every 50 steps during training
            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                      (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or step == max_step - 1:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Training finished -- epoch limit reached')
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
Example #22
def run_training():
    """train model for a number of steps"""
    print(time.strftime("%Y-%m-%d %H:%M:%S") + "  start reading data")
    data_sets = input_data.read_data("invited_info_trainoutput.txt")
    print(time.strftime("%Y-%m-%d %H:%M:%S") + "  end reading data")
    with tf.Graph().as_default():
        docs_placeholder, labels_placeholder, keep_prob_placeholder = placeholder_inputs(
            FLAGS.batch_size)
        logits = model.inference(docs_placeholder, FLAGS.hidden1,
                                 FLAGS.hidden2, keep_prob_placeholder)
        loss = model.loss(logits, labels_placeholder)
        train_op = model.training(loss, FLAGS.learning_rate)
        eval_correct = model.evaluation(logits, labels_placeholder)
        summary_op = tf.merge_all_summaries()
        init = tf.initialize_all_variables()
        saver = tf.train.Saver()
        sess = tf.Session()
        summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)
        sess.run(init)
        for step in range(FLAGS.max_steps):
            start_time = time.time()
            feed_dict = fill_feed_dict(data_sets.train, docs_placeholder,
                                       labels_placeholder,
                                       keep_prob_placeholder, 0.5)
            _, loss_value = sess.run([train_op, loss], feed_dict)
            duration = time.time() - start_time

            if step % 100 == 0:
                print('Step %d: loss = %.2f (%.3f sec)' %
                      (step, loss_value, duration))
                # Update the events file.
                summary_str = sess.run(summary_op, feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, step)
                summary_writer.flush()
            if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_file = os.path.join(FLAGS.train_dir, 'checkpoint')
                saver.save(sess, checkpoint_file, global_step=step)
                # Evaluate against the training set.
                print('Training Data Eval:')
                do_eval(sess, eval_correct, docs_placeholder,
                        labels_placeholder, keep_prob_placeholder,
                        data_sets.train)
                # Evaluate against the validation set.
                print('Validation Data Eval:')
                do_eval(sess, eval_correct, docs_placeholder,
                        labels_placeholder, keep_prob_placeholder,
                        data_sets.validation)
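Note: fill_feed_dict is used above without being shown. In the spirit of the TensorFlow MNIST tutorial this example follows, it would look roughly like this (a sketch, not copied from the project):

def fill_feed_dict(data_set, docs_pl, labels_pl, keep_prob_pl, keep_prob):
    # Assumed helper: pull the next batch and map it onto the placeholders.
    docs_feed, labels_feed = data_set.next_batch(FLAGS.batch_size)
    return {docs_pl: docs_feed,
            labels_pl: labels_feed,
            keep_prob_pl: keep_prob}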
Example #23
def run_training():
    train_dir = "/Users/yuwhuawang/tensorflow/catsordogs/train/"
    logs_train_dir = "/Users/yuwhuawang/tensorflow/catsordogs/logs/train/"

    train, train_label = input_data.get_files(train_dir)

    train_batch, train_label_batch = input_data.get_batch(
        train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)

    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.training(train_loss, LEARNING_RATE)
    train_acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess, coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, trn_loss, trn_acc = sess.run([train_op, train_loss, train_acc])

            if step % 50 == 0:
                print("Step {}, train loss = {:.2f}, train accuracy = {:.2f}".
                      format(step, trn_loss, trn_acc))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
Example #24
def run_training(logs_train_dir, tfrecords_file):
    train_batch, train_label_batch = cr.read_and_decode(tfrecords_file,
                                                        batch_size=BATCH_SIZE)
    train_batch = tf.cast(train_batch, dtype=tf.float32)  # convert data type
    train_label_batch = tf.cast(train_label_batch, dtype=tf.int64)

    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.training(train_loss, learning_rate)
    train__acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()  #get all monitored operations
    sess = tf.Session()
    train_writer = tf.summary.FileWriter(
        logs_train_dir, sess.graph)  # create a log writer and write the current graph to the log
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train__acc])
            if step % 10 == 0:
                print("**********************")
                print("Step %d, train loss = %.5f, train accuracy = %.2f%%" %
                      (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)  #get monitoring results
                train_writer.add_summary(summary_str, step)  #write file

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, "model.ckpt")
                saver.save(sess, checkpoint_path, global_step=step)
                #saver.restore(session, checkpoint_filepath)  # continue training

    except tf.errors.OutOfRangeError:
        print("Done training -- epoch limit reached")

    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
Example #25
def run_training():
    train_dir = 'C:/datasets/emnist/train/'
    logs_summary_dir = './log/summary/train/'
    check_point_path = './log/model/'

    train, train_labels = data_loader.get_files(train_dir)
    train_batch, train_label_batch = data_loader.get_batch(
        train, train_labels, IMG_H, IMG_W, BATCH_SIZE, CAPACITY)
    train_logits, _ = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.training(train_loss, LEARNING_RATE)
    train_acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()
    with tf.Session() as sess:
        train_writer = tf.summary.FileWriter(logs_summary_dir,
                                             graph=sess.graph,
                                             session=sess)
        saver = tf.train.Saver(max_to_keep=1)
        if os.path.exists(os.path.join(check_point_path, 'checkpoint')):
            saver.restore(sess, tf.train.latest_checkpoint(check_point_path))
        else:
            sess.run(tf.global_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            for step in range(MAX_STEPS):
                if coord.should_stop():
                    break
                _, tra_loss, tra_acc = sess.run(
                    [train_op, train_loss, train_acc])

                if step % 50 == 0:
                    print('Step %d: training loss = %.2f, training accuracy = %.2f' %
                          (step, tra_loss, tra_acc))
                    summary_total = sess.run(summary_op)
                    train_writer.add_summary(summary_total, global_step=step)

                if step % 2000 == 0 or (step + 1) == MAX_STEPS:
                    saver.save(sess, check_point_path, global_step=step)

        except tf.errors.OutOfRangeError:
            print('training done!')
        finally:
            coord.request_stop()
    coord.join(threads)
Example #26
def run_training():
    train_dir = 'E:/Code/Dog vs Cat/train/'
    logs_train_dir = 'E:/Code/Dog vs Cat/log/'

    train, train_label = input_data.get_file(train_dir)
    train_batch, train_label_batch = input_data.get_batch(
        train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.training(train_loss, learning_rate)
    train_acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    time_start = time.time()
    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])
            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                      (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 2000 == 0:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
    time_end = time.time()
    train_time = time_end - time_start
    print("train time:", train_time)
    coord.join(threads)
    sess.close()
Example #27
def run_training():

    train_image, train_label = input_data.read_tfrecords('train.tfrecords')
    train_batch, train_label_batch = input_data.get_batch(
        train_image, train_label, BATCH_SIZE)

    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES, 1)
    train_loss = model.softmax_loss(train_logits, train_label_batch)
    train_op = model.training(train_loss, learning_rate)
    train_acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    train_writer = tf.summary.FileWriter(LOGDIR)
    train_writer.add_graph(sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])
            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                      (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 2000 == 0:
                checkpoint_path = os.path.join(LOGDIR, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
Example #28
def run_training(file_tfRecord):
    log_dir = './log/'
    image, label = rdData.read_tfRecord(file_tfRecord)
    image_batches, label_batches = tf.train.batch([image, label],
                                                  batch_size=16,
                                                  capacity=20)
    p = model.mmodel(image_batches, 16)
    cost = model.loss(p, label_batches)
    train_op = model.training(cost, 0.001)
    acc = model.get_accuracy(p, label_batches)

    sess = tf.Session()
    init = tf.global_variables_initializer()
    #merge all summary
    summary_op = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(log_dir, sess.graph)

    sess.run(init)
    saver = tf.train.Saver()

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(1000):
            if coord.should_stop():
                break
            _, train_acc, train_loss = sess.run([train_op, acc, cost])
            print("{} step:{} loss:{} accuracy:{}".format(
                datetime.now(), step, train_loss, train_acc))
            if step % 250 == 0:
                #record the summary
                summary = sess.run(summary_op)
                train_writer.add_summary(summary, step)

                check = os.path.join(log_dir, "mmodel.ckpt")
                saver.save(sess, check, global_step=step)
    except tf.errors.OutOfRangeError:
        print("Done!!!")
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()
Example #29
def run_training():
    train_dir = 'G:/python/cats vs dogs/data/train/train/'
    logs_train_dir = 'G:/python/cats vs dogs/data/train/log/'
    train, train_label = input_data.get_files(train_dir)
    # print(len(train))
    # print(len(train_label))
    train_batch, train_label_batch = input_data.get_batch(
        train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
    # print(train_batch.shape)
    # (16, 208, 208, 3) (batch_size,img_w,img_h,nchannels)
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    # print(train_logits.shape)
    # (16, 2)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.training(train_loss, learning_rate)
    train_acc = model.evaluation(train_logits, train_label_batch)
    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])
            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                      (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)
            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
    except tf.errors.OutOfRangeError:
        print('Done training!')
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()
Example #30
def run_train_test_model(
        estimator=None,
        data_path=None,
        config_path=None,
        num_iter=1,
        seed=None):
    '''
    Train/test Model Analysis
    '''
    if estimator is None or data_path is None or config_path is None:
        raise ValueError('Need Estimator, Data path and Config Path as arguments!')
    data, config_map = read_input(data_path, config_path)
    data = data_preprocessor(data, config_map, 5, 'string')
    training_map = {}
    for _ in range(0, num_iter):
        X_train, y_train, X_test, y_test = split_train_test_data(
                data, config_map, 0.3, seed)
        X_scaler, y_scaler, model, training_rmse = training(
                estimator, X_train, y_train, config_map)
        testing_rmse = calculate_rmse(
                X_test, y_test, X_scaler, y_scaler, model, config_map)
        if training_rmse < testing_rmse:
            model_properties = {}
            model_properties['estimator'] = estimator
            model_properties['config_map'] = config_map
            model_properties['X_train'] = X_train
            model_properties['y_train'] = y_train
            model_properties['X_test'] = X_test
            model_properties['y_test'] = y_test
            model_properties['X_scaler'] = X_scaler
            model_properties['y_scaler'] = y_scaler
            model_properties['model'] = model
            model_properties['training_rmse'] = training_rmse
            model_properties['testing_rmse'] = testing_rmse
            training_map[testing_rmse] = model_properties
    if len(training_map) > 0:
        best_model_properties = training_map[min(training_map)]
        print('Best Model train error: {} | Best Model test error: {}'.format(
            round(best_model_properties['training_rmse'], 7),
            round(best_model_properties['testing_rmse'], 7)))
        return best_model_properties
    return None
Example #31
def training_add():
    if request.method == 'POST':
        data = json.loads(request.get_json())

        red_c_gs = data['red_c_gs']
        green_c_gs = data['green_c_gs']
        blue_c_gs = data['blue_c_gs']
        color_c = data['color_c']
        background_type = data['background_type']
        num_layers = data['num_layers']

        t = training(red_c_gs, green_c_gs, blue_c_gs, color_c,
                     background_type, num_layers)
        q = t.add(t)
        return "hi"
    else:
        return "no"
Example #32
def run_training():
    train_dir = "/home/tcd/PycharmProject/Study_tensorflow/cats_vs_dogs/data/train"
    logs_train_dir = "/home/tcd/PycharmProject/Study_tensorflow/cats_vs_dogs/logs"
    train, train_label = input_data.get_files(train_dir)
    train_batch, train_label_batch = input_data.get_batch(
        train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.training(train_loss, learning_rate)
    train_acc = model.evaluation(train_logits, train_label_batch)
    summary_op = tf.summary.merge_all()
    with tf.Session() as sess:
        train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
        saver = tf.train.Saver()
        sess.run(tf.global_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            for step in np.arange(MAX_STEP):
                if coord.should_stop():
                    break
                _, tra_loss, tra_acc = sess.run(
                    [train_op, train_loss, train_acc])
                if step % 100 == 0:
                    print(
                        'Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                        (step, tra_loss, tra_acc * 100.0))
                    summary_str = sess.run(summary_op)
                    train_writer.add_summary(summary_str, step)
                if step % 500 == 0 or (step + 1) == MAX_STEP:
                    checkpoint_path = os.path.join(logs_train_dir,
                                                   "model.ckpt")
                    saver.save(sess, checkpoint_path, global_step=step)

        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')
        finally:
            coord.request_stop()
        coord.join(threads)
    sess.close()
Example #33
    images_placeholder = tf.placeholder("float", shape=(None, IMAGE_PIXELS))

    # Tensor for the labels (holds any number (None) of NUM_CLASSES-dimensional labels)
    labels_placeholder = tf.placeholder("float", shape=(None, NUM_CLASSES))

    # Placeholder tensor for the dropout keep probability
    keep_prob = tf.placeholder("float")

    # Call inference() to build the model
    logits = model.inference(images_placeholder, keep_prob)

    # Call loss() to compute the loss
    loss_value = model.loss(logits, labels_placeholder)

    # Call training() to train and tune the model parameters
    train_op = model.training(loss_value, FLAGS.learning_rate)

    # Compute the accuracy
    acc = model.accuracy(logits, labels_placeholder)

    # Prepare for saving
    saver = tf.train.Saver()

    # Create a session (TensorFlow computations must run inside a session)
    sess = tf.Session()

    # Initialize the variables (do this first once the session starts)
    sess.run(tf.global_variables_initializer())

    # Set up the TensorBoard summaries
    summary_op = tf.summary.merge_all()
def run_training():
  """Train MNIST for a number of steps."""
  # Get the sets of images and labels for training, validation, and
  # test on MNIST.
  data_sets = tf_data.read_data_sets(FLAGS.train_dir, FLAGS.fake_data)

  # Tell TensorFlow that the model will be built into the default Graph.
  with tf.Graph().as_default():
    # Generate placeholders for the images and labels.
    images_placeholder, labels_placeholder = placeholder_inputs(
        FLAGS.batch_size)

    # Build a Graph that computes predictions from the inference model.
    logits = model.inference(images_placeholder,
                             FLAGS.hidden1,
                             FLAGS.hidden2)

    # Add to the Graph the Ops for loss calculation.
    loss = model.loss(logits, labels_placeholder)

    # Add to the Graph the Ops that calculate and apply gradients.
    train_op = model.training(loss, FLAGS.learning_rate)

    # Add the Op to compare the logits to the labels during evaluation.
    eval_correct = model.evaluation(logits, labels_placeholder)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.merge_all_summaries()

    # Create a saver for writing training checkpoints.
    saver = tf.train.Saver()

    # Create a session for running Ops on the Graph.
    sess = tf.Session()

    # Run the Op to initialize the variables.
    init = tf.initialize_all_variables()
    sess.run(init)

    # Instantiate a SummaryWriter to output summaries and the Graph.
    summary_writer = tf.train.SummaryWriter(FLAGS.train_dir,
                                            graph_def=sess.graph_def)

    # And then after everything is built, start the training loop.
    for step in xrange(FLAGS.max_steps):
      start_time = time.time()

      # Fill a feed dictionary with the actual set of images and labels
      # for this particular training step.
      feed_dict = fill_feed_dict(data_sets.train,
                                 images_placeholder,
                                 labels_placeholder)

      # Run one step of the model.  The return values are the activations
      # from the `train_op` (which is discarded) and the `loss` Op.  To
      # inspect the values of your Ops or variables, you may include them
      # in the list passed to sess.run() and the value tensors will be
      # returned in the tuple from the call.
      _, loss_value = sess.run([train_op, loss],
                               feed_dict=feed_dict)

      duration = time.time() - start_time

      # Write the summaries and print an overview fairly often.
      if step % 100 == 0:
        # Print status to stdout.
        print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
        # Update the events file.
        summary_str = sess.run(summary_op, feed_dict=feed_dict)
        summary_writer.add_summary(summary_str, step)

      # Save a checkpoint and evaluate the model periodically.
      if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
        saver.save(sess, FLAGS.train_dir, global_step=step)
        # Evaluate against the training set.
        print('Training Data Eval:')
        do_eval(sess,
                eval_correct,
                images_placeholder,
                labels_placeholder,
                data_sets.train)
        # Evaluate against the validation set.
        print('Validation Data Eval:')
        do_eval(sess,
                eval_correct,
                images_placeholder,
                labels_placeholder,
                data_sets.validation)
        # Evaluate against the test set.
        print('Test Data Eval:')
        do_eval(sess,
                eval_correct,
                images_placeholder,
                labels_placeholder,
                data_sets.test)
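Note: do_eval is called here but not shown. In the classic TensorFlow MNIST tutorial this example follows, it looks roughly like this (a sketch matching this example's call signature; example 22 above uses a variant that also feeds keep_prob):

def do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_set):
    # Count correct predictions over one epoch of data_set.
    true_count = 0
    steps_per_epoch = data_set.num_examples // FLAGS.batch_size
    num_examples = steps_per_epoch * FLAGS.batch_size
    for _ in xrange(steps_per_epoch):
        feed_dict = fill_feed_dict(data_set, images_placeholder,
                                   labels_placeholder)
        true_count += sess.run(eval_correct, feed_dict=feed_dict)
    precision = float(true_count) / num_examples
    print('Num examples: %d  Num correct: %d  Precision @ 1: %0.04f' %
          (num_examples, true_count, precision))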