Example #1
def train(x,
          training,
          reconstruction_loss,
          training_op,
          n_epochs=10,
          batch_size=150,
          check_interval=100,
          model_name='chap_15_5.ckpt'):
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()
    writer = tf.summary.FileWriter(logdir=get_logdir(),
                                   graph=tf.get_default_graph())
    loss_summary = tf.summary.scalar('loss', reconstruction_loss)
    validation_dict = {x: mnist.validation.images, training: False}  # evaluate without training-time noise

    with tf.Session() as sess:
        init.run()
        n_batches = mnist.train.num_examples // batch_size

        for epoch in range(n_epochs):
            for batch_index in range(n_batches):
                x_batch, _ = mnist.train.next_batch(batch_size)
                sess.run(training_op, feed_dict={x: x_batch, training: True})

                if batch_index % check_interval == 0:
                    print("loss :",
                          reconstruction_loss.eval(feed_dict=validation_dict),
                          flush=True)
                    writer.add_summary(
                        loss_summary.eval(feed_dict=validation_dict),
                        epoch * n_batches + batch_index)

        saver.save(sess, model_name)
        writer.close()
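
# --- Hypothetical usage sketch (not part of the original example) ---
# Example #1 expects the graph to be built before train() is called. The graph below
# is only an assumption for illustration: a small dropout/denoising autoencoder over
# flattened 28x28 MNIST images, matching the x / training / reconstruction_loss /
# training_op names the function takes as arguments.
x = tf.placeholder(tf.float32, shape=[None, 28 * 28], name='x')
training = tf.placeholder_with_default(False, shape=(), name='training')

dropped = tf.layers.dropout(x, rate=0.3, training=training)      # corrupt inputs only while training
hidden = tf.layers.dense(dropped, 300, activation=tf.nn.relu)
outputs = tf.layers.dense(hidden, 28 * 28)

reconstruction_loss = tf.reduce_mean(tf.square(outputs - x))     # MSE between input and reconstruction
training_op = tf.train.AdamOptimizer().minimize(reconstruction_loss)

train(x, training, reconstruction_loss, training_op)
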
Example #2
def train(n_epochs=5,
          batch_size=150,
          save_model_name='models/chap_15_3_2.ckpt'):
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()
    writer = tf.summary.FileWriter(logdir=get_logdir(),
                                   graph=tf.get_default_graph())
    loss_summary = tf.summary.scalar('loss', loss)
    n_batches = mnist.train.num_examples // batch_size

    with tf.Session() as sess:
        init.run()
        for epoch in range(n_epochs):
            for iteration in range(n_batches):
                x_batch, _ = mnist.train.next_batch(batch_size)
                sess.run(training_op, feed_dict={x: x_batch})

                if iteration % 100 == 0:
                    summary_str = loss_summary.eval(feed_dict={x: x_batch})  # serialized summary protobuf, not a float
                    writer.add_summary(summary_str, epoch * n_batches + iteration)
                    print(
                        "step: ", epoch * n_batches + iteration, "loss : ",
                        loss.eval(feed_dict={
                            x: mnist.test.next_batch(batch_size)[0]
                        }))

        saver.save(sess, save_model_name)
        writer.close()
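
# --- Hypothetical graph sketch (not part of the original example) ---
# Unlike Example #1, this variant reads x, loss and training_op from module-level
# globals instead of taking them as arguments; an assumed minimal graph would be a
# plain (non-denoising) autoencoder:
x = tf.placeholder(tf.float32, shape=[None, 28 * 28], name='x')
hidden = tf.layers.dense(x, 300, activation=tf.nn.relu)
outputs = tf.layers.dense(hidden, 28 * 28)
loss = tf.reduce_mean(tf.square(outputs - x))
training_op = tf.train.AdamOptimizer().minimize(loss)
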
Example #3
def train(n_epochs=None,
          batch_size=None,
          save_model_name='models/chap_15_3_3.ckpt'):
    # Defaults are one entry per training phase.
    if n_epochs is None:
        n_epochs = [5, 5]

    if batch_size is None:
        batch_size = [150, 150]

    init = tf.global_variables_initializer()
    saver = tf.train.Saver()
    writer = tf.summary.FileWriter(logdir=get_logdir(), graph=tf.get_default_graph())

    training_op = [phase1_training_op, phase2_training_op]
    reconstruction_loss = [phase1_reconstruct_loss, phase2_reconstruct_loss]
    test_loss_summary = tf.summary.scalar('loss', eval_reconstruction_loss)

    with tf.Session() as sess:
        init.run()
        for phase in range(2):
            # Per-phase initialization
            n_batches = mnist.train.num_examples // batch_size[phase]
            print(f'phase {phase+1} start')
            if phase == 1:
                hidden1_cache = hidden1.eval(feed_dict={x: mnist.train.images})  # cache the frozen hidden1 codings for phase 2

            for epoch in range(n_epochs[phase]):
                for iteration in range(n_batches):
                    if phase == 1:
                        indices = np.random.permutation(mnist.train.num_examples)  # shuffle indices; a random batch is sliced below
                        hidden1_batch = hidden1_cache[indices[:batch_size[phase]]]
                        feed_dict = {hidden1: hidden1_batch}

                    else:
                        x_batch, _ = mnist.train.next_batch(batch_size[phase])
                        feed_dict = {x: x_batch}

                    sess.run(training_op[phase], feed_dict=feed_dict)

                loss_train = reconstruction_loss[phase].eval(feed_dict=feed_dict)
                print(epoch, " reconstruction loss:", loss_train)

                # Evaluate the full-stack test loss and its summary in a single run.
                summary_str, loss_test = sess.run(
                    [test_loss_summary, eval_reconstruction_loss],
                    feed_dict={x: mnist.test.images})
                writer.add_summary(summary_str,
                                   global_step=sum(n_epochs[:phase]) + epoch)
                print("TestMSE: ", loss_test)

        saver.save(sess, save_model_name)
        writer.close()
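
# --- Hypothetical graph sketch (not part of the original example) ---
# The globals used above (hidden1, phase1_training_op, phase2_training_op,
# phase1_reconstruct_loss, phase2_reconstruct_loss, eval_reconstruction_loss) are
# assumed to come from a stacked autoencoder trained one layer pair at a time,
# roughly like this:
n_inputs, n_hidden1, n_hidden2 = 28 * 28, 300, 150

x = tf.placeholder(tf.float32, shape=[None, n_inputs], name='x')

hidden1 = tf.layers.dense(x, n_hidden1, activation=tf.nn.relu, name='hidden1')
hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=tf.nn.relu, name='hidden2')
hidden3 = tf.layers.dense(hidden2, n_hidden1, activation=tf.nn.relu, name='hidden3')
outputs = tf.layers.dense(hidden3, n_inputs, name='outputs')

# Phase 1 trains hidden1 plus the (shared) output layer to reconstruct x directly
# from hidden1; phase 2 trains hidden2/hidden3 to reproduce the frozen hidden1 codings.
phase1_outputs = tf.layers.dense(hidden1, n_inputs, name='outputs', reuse=True)
phase1_reconstruct_loss = tf.reduce_mean(tf.square(phase1_outputs - x))
phase2_reconstruct_loss = tf.reduce_mean(tf.square(hidden3 - hidden1))
eval_reconstruction_loss = tf.reduce_mean(tf.square(outputs - x))   # full-stack test MSE

phase1_vars = (tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='hidden1')
               + tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='outputs'))
phase2_vars = (tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='hidden2')
               + tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='hidden3'))

phase1_training_op = tf.train.AdamOptimizer().minimize(phase1_reconstruct_loss,
                                                       var_list=phase1_vars)
phase2_training_op = tf.train.AdamOptimizer().minimize(phase2_reconstruct_loss,
                                                       var_list=phase2_vars)
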
Example #4
def train(n_epochs=None,
          batch_size=None,
          save_model_name='models/chap_15_4.ckpt'):
    if n_epochs is None:
        n_epochs = [1, 1]

    if batch_size is None:
        batch_size = [150, 150]

    init = tf.global_variables_initializer()
    saver = tf.train.Saver()
    writer = tf.summary.FileWriter(logdir=get_logdir(),
                                   graph=tf.get_default_graph())

    with tf.Session() as sess:
        init.run()
        for phase in range(2):
            print(f"phase {phase} start")
            n_batches = mnist.train.num_examples // batch_size[phase]
            if phase == 1:
                hidden2_cache = hidden2.eval({x: mnist.train.images})  # cache the frozen hidden2 codings for phase 2

            for epoch in range(n_epochs[phase]):
                for iteration in range(n_batches):
                    if phase == 0:
                        x_batch, _ = mnist.train.next_batch(batch_size[phase])
                        feed_dict = {x: x_batch}
                    else:
                        indices = np.random.permutation(
                            mnist.train.num_examples)[:batch_size[phase]]
                        hidden2_batch = hidden2_cache[indices]
                        y_batch = mnist.train.labels[indices]
                        feed_dict = {hidden2: hidden2_batch, y: y_batch}

                    sess.run(training_op[phase], feed_dict=feed_dict)

                print(
                    f'{epoch} epoch classify loss :'
                    f'{classify_loss.eval(feed_dict={x: mnist.train.images, y: mnist.train.labels})}'
                )
        print(
            f"accuracy: {accuracy.eval(feed_dict={x: mnist.test.images, y: mnist.test.labels})}"
        )
        saver.save(sess, save_model_name)
        writer.close()
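
# --- Hypothetical graph sketch (not part of the original example) ---
# The globals used above (hidden2, y, training_op, classify_loss, accuracy) are assumed
# to come from an unsupervised-pretraining setup: phase 0 trains an autoencoder, phase 1
# trains only a softmax output layer on top of the frozen (cached) hidden2 codings.
# Integer (non one-hot) MNIST labels are assumed.
n_inputs, n_hidden1, n_hidden2, n_classes = 28 * 28, 300, 150, 10

x = tf.placeholder(tf.float32, shape=[None, n_inputs], name='x')
y = tf.placeholder(tf.int32, shape=[None], name='y')

hidden1 = tf.layers.dense(x, n_hidden1, activation=tf.nn.relu, name='hidden1')
hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=tf.nn.relu, name='hidden2')
decoded = tf.layers.dense(hidden2, n_inputs, name='decoded')
logits = tf.layers.dense(hidden2, n_classes, name='logits')

reconstruct_loss = tf.reduce_mean(tf.square(decoded - x))
classify_loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits))
accuracy = tf.reduce_mean(tf.cast(tf.nn.in_top_k(logits, y, 1), tf.float32))

logits_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='logits')
training_op = [
    tf.train.AdamOptimizer().minimize(reconstruct_loss),                     # phase 0: autoencoder
    tf.train.AdamOptimizer().minimize(classify_loss, var_list=logits_vars),  # phase 1: classifier head only
]
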
Example #5
def inception_net(
        ckpt="inception_v3_2016_08_28/inception_v3.ckpt",
        n_epochs=1000,
        n_batches=20,
        batch_size=32 * 16,
        validation_batch_size=32 * 16,
        check_batch_interval=5,
        max_checks_without_progress=10,
        save_path="models/chap13_Q9.ckpt"):
    x = tf.placeholder(tf.float32, shape=[None, 299, 299, 3], name='x')
    y = tf.placeholder(tf.int32, shape=[None], name='y')

    with slim.arg_scope(inception.inception_v3_arg_scope()):
        # Returns (logits, end_points dict of intermediate layers); the return value is not used directly.
        inception.inception_v3(x, num_classes=1001, is_training=False)

        # Create the Saver used to restore the pretrained weights *before* adding new layers:
        # a Saver captures the set of variables it will restore at the moment it is constructed.
        inception_saver = tf.train.Saver()

        pre_logits = tf.get_default_graph().get_tensor_by_name('InceptionV3/Logits/Dropout_1b/Identity:0')
        # Freeze the pretrained layers: stop_gradient blocks backprop below this point.
        pre_logits_stop = tf.squeeze(tf.stop_gradient(pre_logits), [1, 2])
        logits = tf.layers.dense(pre_logits_stop, 5,
                                 kernel_initializer=tf.keras.initializers.he_uniform(seed),
                                 name='flower_logits')

    with tf.name_scope('loss'):
        xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y)
        loss = tf.reduce_mean(xentropy)
        optimizer = tf.train.AdamOptimizer()
        train_op = optimizer.minimize(loss)

    with tf.name_scope('eval'):
        correct = tf.nn.in_top_k(logits, y, 1)
        accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name='accuracy')

    # Execution phase
    saver = tf.train.Saver()
    loss_summary = tf.summary.scalar('loss', loss)

    init = tf.global_variables_initializer()
    writer = tf.summary.FileWriter(get_logdir(), tf.get_default_graph())

    # Variables for early stopping
    best_loss_val = np.infty
    checks_since_last_progress = 0
    best_model_params = None

    # Build the flower image dataset
    data_set = FlowerDataSet(batch_size=batch_size, validate_batch_size=validation_batch_size)

    with tf.Session() as sess:
        init.run()
        inception_saver.restore(sess, ckpt)

        print('train start')
        for epoch in range(n_epochs):
            for batch_index in range(n_batches):
                x_batch, y_batch = next(data_set.train_data_generator)
                sess.run(train_op, feed_dict={x: x_batch, y: y_batch})

                if batch_index % check_batch_interval == 0:
                    print(epoch * n_batches + batch_index, ' step', ' best loss :', best_loss_val)
                    x_validate, y_validate = next(data_set.validate_data_generator)
                    validate_feed_dict = {x: x_validate, y: y_validate}
                    # Evaluate the validation loss and its summary in a single run.
                    summary_str, loss_val = sess.run([loss_summary, loss],
                                                     feed_dict=validate_feed_dict)
                    writer.add_summary(summary_str, epoch * n_batches + batch_index)

                    # update
                    if loss_val < best_loss_val:
                        best_loss_val = loss_val
                        best_model_params = get_model_params()
                        checks_since_last_progress = 0
                    else:
                        checks_since_last_progress += 1

            # End-of-epoch evaluation step
            x_validate, y_validate = next(data_set.validate_data_generator)
            validate_feed_dict = {x: x_validate, y: y_validate}
            print(epoch, ' epoch accuracy :', accuracy.eval(feed_dict=validate_feed_dict))
            print('test data accuracy :', accuracy.eval(feed_dict={x: data_set.x_test, y: data_set.y_test}))

            if checks_since_last_progress > max_checks_without_progress:
                print('early stopping')
                break

        if best_model_params:
            restore_model_params(best_model_params)

        print('test data accuracy :', accuracy.eval(feed_dict={x: data_set.x_test, y: data_set.y_test}))
        saver.save(sess, save_path)
    graph = tf.get_default_graph()

    layers = [
        op.name for op in graph.get_operations()
        if op.type == 'Conv2D' and 'import/' in op.name
    ]
    feature_nums = [
        int(graph.get_tensor_by_name(name + ':0').get_shape()[-1])
        for name in layers
    ]

    # Picking some internal layer. Note that we use outputs before applying the ReLU nonlinearity
    # to have non-zero gradients for features with negative initial activations.
    # The pre_relu tensor is used so that features the ReLU would zero out can still be extracted.
    def render(
            layer='mixed4d_3x3_bottleneck_pre_relu',  # name of the layer to visualize
            channel=139  # index of the channel to visualize
    ):
        t_layer_channel = T(layer)
        img0 = PIL.Image.open('mountain.jpeg')
        img0 = np.float32(img0)
        render_lapnorm(tf.square(T('mixed4c')))
        render_deepdream(tf.square(T('mixed4c')), img0=img0)

    with tf.Session() as sess:
        render()

    writer = tf.summary.FileWriter(get_logdir(), tf.get_default_graph())
    writer.close()
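
# --- Hypothetical helper sketch (not part of the original examples) ---
# inception_net() above calls get_model_params() and restore_model_params() without
# defining them; their definitions are assumed to be the usual "keep the best weights
# in memory for early stopping" helpers, roughly:
def get_model_params():
    # Snapshot every global variable's current value, keyed by variable name.
    gvars = tf.get_default_graph().get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
    return {gvar.op.name: value
            for gvar, value in zip(gvars, tf.get_default_session().run(gvars))}

def restore_model_params(model_params):
    # Push the snapshotted values back by feeding each variable's initializer (Assign) op.
    gvar_names = list(model_params.keys())
    assign_ops = {name: tf.get_default_graph().get_operation_by_name(name + '/Assign')
                  for name in gvar_names}
    init_values = {name: assign_op.inputs[1] for name, assign_op in assign_ops.items()}
    feed_dict = {init_values[name]: model_params[name] for name in gvar_names}
    tf.get_default_session().run(list(assign_ops.values()), feed_dict=feed_dict)
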
Example #7
def dnn(train_x, train_y,
        validate_x, validate_y,
        n_epochs=5,
        batch_size=25,
        n_inputs=28 * 28,
        n_outputs=5,
        save_path='./models/chap_11_Q8_num_0to4.ckpt'):
    # Construction phase
    x = tf.placeholder(tf.float32, shape=(None, n_inputs), name='x')
    y = tf.placeholder(tf.int32, shape=(None,), name='y')

    # When creating layers that contain variables, variable_scope may be preferable to name_scope
    with tf.variable_scope('network'):
        init_he = tf.contrib.layers.variance_scaling_initializer()
        my_hidden_layer = partial(tf.layers.dense, activation=tf.nn.elu, kernel_initializer=init_he)
        hidden1 = my_hidden_layer(x, 100, name='hidden1')
        hidden2 = my_hidden_layer(hidden1, 100, name='hidden2')
        hidden3 = my_hidden_layer(hidden2, 100, name='hidden3')
        hidden4 = my_hidden_layer(hidden3, 100, name='hidden4')
        hidden5 = my_hidden_layer(hidden4, 100, name='hidden5')
        logit = tf.layers.dense(hidden5, n_outputs, name='outputs')

    with tf.name_scope('loss'):
        xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logit)
        loss = tf.reduce_mean(xentropy, name='loss')

    with tf.name_scope('optimize'):
        optimizer = tf.train.AdamOptimizer()
        training_op = optimizer.minimize(loss)

    with tf.name_scope('eval'):
        correct = tf.nn.in_top_k(logit, y, 1)
        accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name='accuracy')

    data_length = train_y.shape[0]

    def fetch_batch(epoch, batch_index, batch_size):
        np.random.seed(epoch * n_batches + batch_index)  # unique seed per training step
        indices = np.random.randint(data_length, size=batch_size)
        x_batch = train_x[indices]
        y_batch = train_y[indices]
        return x_batch, y_batch

    saver = tf.train.Saver()
    loss_summary = tf.summary.scalar('loss', loss)
    writer = tf.summary.FileWriter(get_logdir(), graph=tf.get_default_graph())

    init = tf.global_variables_initializer()
    n_batches = data_length // batch_size

    with tf.Session() as sess:
        init.run()
        validate_dict = {x: validate_x, y: validate_y}

        for epoch in range(n_epochs):
            for batch_index in range(n_batches):
                x_batch, y_batch = fetch_batch(epoch, batch_index, batch_size)
                sess.run(training_op, feed_dict={x: x_batch, y: y_batch})
                writer.add_summary(loss_summary.eval(feed_dict=validate_dict), epoch * n_batches + batch_index)
            print(epoch, ' epoch accuracy:', accuracy.eval(feed_dict=validate_dict))
        saver.save(sess, save_path)
    writer.close()
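
# --- Hypothetical usage sketch (not part of the original example) ---
# The checkpoint name suggests MNIST digits 0-4; assuming integer (non one-hot) labels,
# a call might look like this:
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('/tmp/data/')
train_mask = mnist.train.labels < 5
valid_mask = mnist.validation.labels < 5

dnn(mnist.train.images[train_mask], mnist.train.labels[train_mask],
    mnist.validation.images[valid_mask], mnist.validation.labels[valid_mask])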