def train(self,
           X,
           Y,
           testX,
           testY,
           ckpt_dir=None,
           n_epoch=10,
           log_path='./log_dir'):
     ckpt_path = os.path.join(ckpt_dir, "wavenet.ckpt")
     best_ckpt_path = os.path.join(ckpt_dir, "best_wavenet.ckpt")
     trainop = tflearn.TrainOp(loss=self.cost,
                               optimizer=self.optimizer_op,
                               metric=self.accuracy,
                               batch_size=self.batch_size)
     trainer = tflearn.Trainer(train_ops=trainop,
                               best_checkpoint_path=best_ckpt_path,
                               checkpoint_path=ckpt_path,
                               keep_checkpoint_every_n_hours=0.5,
                               tensorboard_dir=log_path,
                               tensorboard_verbose=0,
                               session=self.sess)
     self.sess.run(tf.global_variables_initializer())
     trainer.fit({
         self.input_: X,
         self.targets: Y
     },
                 val_feed_dicts={
                     self.input_: testX,
                     self.targets: testY
                 },
                 n_epoch=n_epoch,
                 show_metric=True)
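# A minimal usage sketch (the enclosing class is not shown in this snippet;
# the wrapper name and data arrays below are assumptions, not original code):
#
#   model = WaveNetModel(...)   # hypothetical class exposing train()
#   model.train(trainX, trainY, testX, testY,
#               ckpt_dir='./checkpoints', n_epoch=10)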
Example #2
    def return_trainer(self, input_x, optimizer, batch_size):
        # encode
        self.mu, self.logvar = self.encode(input_x)
        # sampling
        z = self.sample(self.mu, self.logvar)
        # decode
        self.x_hat = self.decode(z)

        # calculate loss
        regularization_loss = self.calculate_regularization_loss(
            self.mu, self.logvar)
        reconstruction_loss = self.calculate_reconstruction_loss(
            self.x_hat, input_x)
        target = tf.reduce_mean(
            tf.add(regularization_loss, reconstruction_loss))

        # define trainer
        trainop = tflearn.TrainOp(loss=target,
                                  optimizer=optimizer,
                                  batch_size=batch_size,
                                  name='vae_trainer')

        trainer = tflearn.Trainer(train_ops=trainop,
                                  tensorboard_dir=TENSORBOARD_DIR,
                                  tensorboard_verbose=3,
                                  checkpoint_path=CHECKPOINT_PATH,
                                  max_checkpoints=1)
        return trainer
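    # Minimal sketches of the loss helpers called above; they are not part of
    # the original snippet and show one standard choice of VAE terms only.
    def calculate_regularization_loss(self, mu, logvar):
        # KL divergence between N(mu, sigma^2) and N(0, 1), summed per sample
        return -0.5 * tf.reduce_sum(
            1.0 + logvar - tf.square(mu) - tf.exp(logvar), axis=1)

    def calculate_reconstruction_loss(self, x_hat, input_x):
        # Squared-error reconstruction term summed over features (a sigmoid
        # cross-entropy term is an equally common choice)
        return tf.reduce_sum(tf.square(x_hat - input_x), axis=1)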
Example #3
    def init_trainer(self, tensorboard_dir="", save_ckpt_path=""):
        if tensorboard_dir == "":
            tensorboard_dir = self.tensorboard_dir
        if save_ckpt_path == "":
            save_ckpt_path = self.checkpoint_path
        train_op = self.graph_ops["train_op"]

        self.trainer = tflearn.Trainer(train_ops=train_op,
                                       tensorboard_dir=tensorboard_dir,
                                       checkpoint_path=save_ckpt_path,
                                       max_checkpoints=3,
                                       tensorboard_verbose=2)
Example #4
def define_trainer(target, optimizer):
    trainop = tflearn.TrainOp(loss=target,
                              optimizer=optimizer,
                              batch_size=batch_size,
                              metric=None,
                              name='vae_trainer')

    trainer = tflearn.Trainer(train_ops=trainop,
                              tensorboard_dir=TENSORBOARD_DIR,
                              tensorboard_verbose=3,
                              checkpoint_path=CHECKPOINT_PATH,
                              max_checkpoints=1)
    return trainer
Example #5
    def _build_training_model(self):
        self.train_data = tflearn.input_data(shape=[None, *self.input_dim], name='train_data')
        self.train_data_ref = tflearn.input_data(shape=[None, self.ref_dim], name='train_data_ref')
        self.curr_batch_size = tf.shape(self.train_data)[0]
        cmb_train_data = tf.concat([self.train_data, self.train_data_ref], 1)
        z_mean, z_std = self._encode(cmb_train_data, True)
        z_sampled = self._sample_z(z_mean, z_std)
        cmb_z_ref = tf.concat([z_sampled, self.train_data_ref], 1)
        recon_data = self._decode(cmb_z_ref, True)

        loss = self._compute_latent_loss(z_mean, z_std) + self._compute_recon_loss(recon_data, self.train_data)
        optimizer = tflearn.optimizers.Adam(self.learning_rate).get_tensor()
        trainop = tflearn.TrainOp(loss=loss, optimizer=optimizer, batch_size=self.batch_size, name='VAE_trainer')
        self.training_model = tflearn.Trainer(train_ops=trainop, tensorboard_dir=self.log_dir)
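    # A minimal sketch of the reparameterization step used above; _sample_z is
    # not shown in this snippet, so this is a hypothetical implementation.
    def _sample_z(self, z_mean, z_std):
        # z = mean + std * eps with eps ~ N(0, 1), which keeps the sampling
        # differentiable with respect to the encoder outputs
        eps = tf.random_normal(tf.shape(z_mean))
        return z_mean + z_std * eps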
Example #6
def build_gan_trainer():

    target = None

    # Place Holder
    input_x = tflearn.input_data(shape=(None, X_SIZE), name="input_x")
    input_z = tflearn.input_data(shape=(None, Z_SIZE), name="input_z")

    # Generator
    G_sample = build_generator(input_z, scope=G_SCOPE)
    target = G_sample

    # Discriminator
    D_origin = build_discriminator(input_x, scope=D_SCOPE)
    D_fake = build_discriminator(G_sample, scope=D_SCOPE, reuse=True)

    # Loss
    D_loss = -tf.reduce_mean(tf.log(D_origin) + tf.log(1. - D_fake))
    G_loss = -tf.reduce_mean(tf.log(D_fake))

    # Optimizer
    G_opt = tflearn.Adam(learning_rate=0.001).get_tensor()
    D_opt = tflearn.Adam(learning_rate=0.001).get_tensor()

    # Vars
    G_vars = get_trainable_variables(G_SCOPE)
    D_vars = get_trainable_variables(D_SCOPE)

    # TrainOp
    G_train_op = tflearn.TrainOp(loss=G_loss,
                                 optimizer=G_opt,
                                 batch_size=BATCH_SIZE,
                                 trainable_vars=G_vars,
                                 name="Generator")

    D_train_op = tflearn.TrainOp(loss=D_loss,
                                 optimizer=D_opt,
                                 batch_size=BATCH_SIZE,
                                 trainable_vars=D_vars,
                                 name="Discriminator")

    # Trainer
    gan_trainer = tflearn.Trainer(
        [D_train_op, G_train_op],
        tensorboard_dir=TENSORBOARD_DIR,
        #                                  checkpoint_path=CHECKPOINT_DIR,  # raises an error
        max_checkpoints=1)

    return gan_trainer, target
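# A minimal driving sketch, under explicit assumptions: build_gan_trainer() is
# extended to also return its two placeholders, X_data is a NumPy array of
# real samples, and Trainer.fit receives one feed dict per TrainOp in the
# order they were passed ([D_train_op, G_train_op]).
import numpy as np

gan_trainer, G_sample, input_x, input_z = build_gan_trainer()  # hypothetical extended return
Z_noise = np.random.uniform(-1., 1., size=(X_data.shape[0], Z_SIZE))
gan_trainer.fit([{input_x: X_data, input_z: Z_noise},   # Discriminator op
                 {input_z: Z_noise}],                    # Generator op
                n_epoch=100)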
Example #7
def run():
    # model variables
    X = tf.placeholder('float', [None, 784])
    Y = tf.placeholder('float', [None, 10])

    W1 = tf.Variable(tf.random_normal([784, 256]))
    W2 = tf.Variable(tf.random_normal([256, 256]))
    W3 = tf.Variable(tf.random_normal([256, 10]))
    b1 = tf.Variable(tf.random_normal([256]))
    b2 = tf.Variable(tf.random_normal([256]))
    b3 = tf.Variable(tf.random_normal([10]))

    def dnn(x):
        # using tflearn PReLU activation ops
        x = tflearn.prelu(tf.add(tf.matmul(x, W1), b1))
        tflearn.summaries.monitor_activation(x)  # Monitor activation
        x = tflearn.prelu(tf.add(tf.matmul(x, W2), b2))
        tflearn.summaries.monitor_activation(x)  # Monitor activation
        x = tf.nn.softmax(tf.add(tf.matmul(x, W3), b3))
        return x

    net = dnn(X)

    # use objective ops from TFLearn
    loss = tflearn.categorical_crossentropy(net, Y)
    # use metric ops from TFLearn
    acc = tflearn.metrics.accuracy_op(net, Y)
    # use SGD Optimizer class from TFLearn
    optimizer = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=200)
    # Because of lr decay, it is required to first build the Optimizer with
    # the step tensor that will monitor training step.
    # (Note: When using TFLearn estimators wrapper, build is self managed,
    # so only using above `Optimizer` class as `DNN` optimizer arg is enough).
    step = tflearn.variable('step', initializer='zeros', shape=[])
    optimizer.build(step_tensor=step)
    optim_tensor = optimizer.get_tensor()

    # Use TFLearn Trainer
    # def training op for backprop
    trainop = tflearn.TrainOp(loss=loss, optimizer=optim_tensor,
                              metric=acc, batch_size=128,
                              step_tensor=step)

    trainer = tflearn.Trainer(train_ops=trainop, tensorboard_verbose=3)
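    # trainX/trainY/testX/testY are not defined in this snippet; a minimal
    # sketch, assuming MNIST loaded with tflearn's bundled helper:
    import tflearn.datasets.mnist as mnist
    trainX, trainY, testX, testY = mnist.load_data(one_hot=True)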
    trainer.fit({X: trainX, Y: trainY}, val_feed_dicts={
                X: testX, Y: testY}, n_epoch=2, show_metric=True)
Example #8
            name="acc")

    # construct two variables to add as additional "validation monitors"
    # these variables are evaluated each time validation happens (e.g. at a snapshot)
    # and the results are summarized and output to the tensorboard events file,
    # together with the accuracy and loss plots.
    #
    # Here, we generate a dummy variable given by the sum over the current
    # network tensor, and a constant variable.  In practice, the validation
    # monitor may present useful information, like confusion matrix
    # entries, or an AUC metric.
    with tf.name_scope('CustomMonitor'):
        test_var = tf.reduce_sum(tf.cast(net, tf.float32), name="test_var")
        test_const = tf.constant(32.0, name="custom_constant")
        # Define a train op
    trainop = tflearn.TrainOp(loss=loss, optimizer=optimizer,
                            validation_monitors=[test_var, test_const],
                            metric=accuracy, batch_size=128)

    # Tensorboard logs stored in /tmp/tflearn_logs/. Using verbose level 2.
    trainer = tflearn.Trainer(train_ops=trainop,
                              tensorboard_dir='/tmp/tflearn_logs/',
                              tensorboard_verbose=2)
    # Training for 10 epochs.
    trainer.fit({X: trainX, Y: trainY}, val_feed_dicts={X: testX, Y: testY},
                n_epoch=10, show_metric=True, run_id='Summaries_example')

    # Run the following command to start tensorboard:
    # >> tensorboard --logdir=/tmp/tflearn_logs/
    # Navigate with your web browser to http://0.0.0.0:6006/
Example #9
    # the step tensor that will monitor training step.
    # (Note: When using TFLearn estimators wrapper, build is self managed,
    # so only using above 'Optimizer' class as 'DNN' optimizer arg is enough).
    step = tflearn.variable("step", initializer='zeros', shape=[])
    optimizer.build(step_tensor=step)
    optim_tensor = optimizer.get_tensor()

    # Using TFLearn Trainer
    # Define a training op (op for backprop, only need 1 in this model)
    trainop = tflearn.TrainOp(loss=loss,
                              optimizer=optim_tensor,
                              metric=acc,
                              batch_size=128,
                              step_tensor=step)

    # Create Trainer, providing all training ops. Tensorboard logs stored
    # in /tmp/tflearn_logs/. It is possible to change the verbose level for
    # more detailed logs about gradients, variables, etc.
    trainer = tflearn.Trainer(train_ops=trainop, tensorboard_verbose=0)
    # Training for 10 epochs.
    trainer.fit({
        X: trainX,
        Y: trainY
    },
                val_feed_dicts={
                    X: testX,
                    Y: testY
                },
                n_epoch=10,
                show_metric=True)
Example #10
    # together with the accuracy and loss plots.
    #
    # Here, we generate a dummy variable given by the mean over the current
    # network tensor, and a constant variable. In practice, the validation
    # monitor may present useful information, like confusion matrix
    # entries, or an AUC metric.
    with tf.name_scope('CustomMonitor'):
        test_var = tf.reduce_mean(tf.cast(net, tf.float32), name='test_var')
        test_const = tf.constant(32, name='custom_constant')

    trainOp = tflearn.TrainOp(loss=loss,
                              optimizer=optimizer,
                              validation_monitors=[test_var, test_const],
                              metric=accuracy,
                              batch_size=128)

    trainer = tflearn.Trainer(train_ops=trainOp,
                              tensorboard_dir="./tmp/tflearn_logs/",
                              tensorboard_verbose=2)
    trainer.fit({
        X: train_X,
        Y: train_Y
    },
                val_feed_dicts={
                    X: test_X,
                    Y: test_Y
                },
                n_epoch=10,
                show_metric=True,
                run_id='Summaries_example')
Example #11
def run():
    X = tf.placeholder(shape=(None, 784), dtype=tf.float32)
    Y = tf.placeholder(shape=(None, 10), dtype=tf.float32)

    net = tf.reshape(X, [-1, 28, 28, 1])  # batch, height, width, channel

    # 32 filters, each of size 3(x3)
    net = tflearn.conv_2d(net, 32, 3, activation='relu')
    # pool kernel size 2, stride defaults to the kernel size
    net = tflearn.max_pool_2d(net, 2)
    # local response normalization: encourages some inhibition and boosts
    # neurons with relatively larger activations
    net = tflearn.local_response_normalization(net)
    # Dropout is introduced to prevent overfitting: at each training stage,
    # individual nodes are either dropped with probability 1-p or kept with
    # probability p, so that a reduced network is trained.
    # keep_prob=0.8
    net = tflearn.dropout(net, 0.8)

    # 64 filters
    net = tflearn.conv_2d(net, 64, 3, activation='relu')
    net = tflearn.max_pool_2d(net, 2)
    net = tflearn.local_response_normalization(net)
    net = tflearn.dropout(net, 0.8)

    # FC
    net = tflearn.fully_connected(net, 128, activation='tanh')
    net = tflearn.dropout(net, 0.8)
    net = tflearn.fully_connected(net, 256, activation='tanh')
    net = tflearn.dropout(net, 0.8)
    net = tflearn.fully_connected(net, 10, activation='softmax')

    # --------------------------------------
    # really manual tf way
    # # Defining other ops using Tensorflow
    # loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=net, labels=Y))
    # optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
    # optimizer_minop = optimizer.minimize(loss)

    # # start
    # init = tf.initialize_all_variables()

    # with tf.Session() as sess:
    #     sess.run(init)
    #     batch_size = 128
    #     for epoch in range(2):
    #         avg_cost = 0.
    #         total_batch = int(mnist_data.train.num_examples/batch_size)
    #         for i in range(total_batch):
    #             batch_xs, batch_ys = mnist_data.train.next_batch(batch_size)
    #             sess.run(optimizer_minop, feed_dict={X: batch_xs, Y: batch_ys})
    #             cost = sess.run(loss, feed_dict={X: batch_xs, Y: batch_ys})
    #             avg_cost += cost/total_batch
    #             if i % 20 == 0:
    #                 print("Epoch:", '%03d' % (epoch+1), "Step:", '%03d' % i,
    #                       "Loss:", str(cost))

    # --------------------------------------
    # use trainer class
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=net, labels=Y))
    optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
    accuracy = tf.reduce_mean(tf.cast(
        tf.equal(tf.argmax(net, 1), tf.argmax(Y, 1)), tf.float32),
                              name='acc')

    trainop = tflearn.TrainOp(loss=loss,
                              optimizer=optimizer,
                              metric=accuracy,
                              batch_size=128)

    trainer = tflearn.Trainer(train_ops=trainop, tensorboard_verbose=0)
    trainer.fit({
        X: trainX,
        Y: trainY
    },
                val_feed_dicts={
                    X: testX,
                    Y: testY
                },
                n_epoch=2,
                show_metric=True)
    trainer.save('models/mnist_cnn.tfl')
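    # The saved weights can later be pulled back into a Trainer built over the
    # same graph; restoring right after saving here is purely an API sketch.
    trainer.restore('models/mnist_cnn.tfl')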
Example #12
def test_run():
    # X, Y = get_data(FLAGS.dataset)
    # np.save('Xdatas.npy', X)
    # np.save('Ylabels.npy', Y)
    X = np.array(np.load('Xdatas.npy'), dtype=np.float32)
    Y = np.array(np.load('Ylabels.npy'), dtype=np.float32)
    with tf.Graph().as_default(), tf.Session() as sess:
        embeddings = vggish_slim.define_vggish_slim(FLAGS.train_vggish)
        with tf.variable_scope('mymodel'):
            num_units = 100
            fc = slim.fully_connected(embeddings, num_units)

            logits = slim.fully_connected(fc,
                                          _NUM_CLASSES,
                                          activation_fn=None,
                                          scope='logits')
            tf.sigmoid(logits, name='prediction')

            with tf.variable_scope('train'):
                global_step = tf.Variable(0,
                                          name='global_step',
                                          trainable=False,
                                          collections=[
                                              tf.GraphKeys.GLOBAL_VARIABLES,
                                              tf.GraphKeys.GLOBAL_STEP
                                          ])

                labels = tf.placeholder(tf.float32,
                                        shape=(None, _NUM_CLASSES),
                                        name='labels')

                # Cross-entropy label loss
                xent = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits,
                                                               labels=labels,
                                                               name='xent')
                loss = tf.reduce_mean(xent, name='loss_op')
                tf.summary.scalar('loss', loss)

                optimizer = tf.train.AdamOptimizer(
                    learning_rate=vggish_params.LEARNING_RATE,
                    epsilon=vggish_params.ADAM_EPSILON)
                optimizer.minimize(loss,
                                   global_step=global_step,
                                   name='train_op')

        features_tensor = sess.graph.get_tensor_by_name(
            vggish_params.INPUT_TENSOR_NAME)
        labels_tensor = sess.graph.get_tensor_by_name('mymodel/train/labels:0')

        accuracy = tf.reduce_mean(tf.cast(
            tf.equal(tf.argmax(logits, 1), tf.argmax(labels_tensor, 1)),
            tf.float32),
                                  name='acc')

        trainOP = tflearn.TrainOp(loss=loss,
                                  optimizer=optimizer,
                                  metric=accuracy,
                                  batch_size=128)
        trainer = tflearn.Trainer(
            train_ops=trainOP,
            tensorboard_verbose=0,
            tensorboard_dir='./logs',
            best_checkpoint_path='./out_model/vggish_model',
            session=sess)
        trainer.fit({
            features_tensor: X,
            labels_tensor: Y
        },
                    n_epoch=1000,
                    val_feed_dicts=0.1,
                    shuffle_all=True)
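        # Note: val_feed_dicts is given as a float here; presumably the
        # trainer holds out that fraction (10%) of the training feed for
        # validation instead of using an explicit validation feed dict.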
Example #13
    with tf.name_scope("optimizer"):
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)

    with tf.name_scope("accuracy"):
        predict_op = tf.argmax(pred, 1)
        correct_prediction = tf.equal(tf.argmax(Y, 1), predict_op)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    trainop = tflearn.TrainOp(
        loss=cost, optimizer=optimizer, metric=accuracy, batch_size=128
    )  # see http://tflearn.org/helpers/trainer/ for more details

    trainer = tflearn.Trainer(train_ops=trainop,
                              tensorboard_verbose=0,
                              tensorboard_dir="/tmp/tflearn_logs/example1",
                              checkpoint_path="model.lstm",
                              max_checkpoints=2)

    trainer.fit({
        X: trX,
        Y: trY
    },
                val_feed_dicts={
                    X: valX,
                    Y: valY
                },
                n_epoch=2,
                show_metric=True)

    #tensorboard --logdir=name1:/path/to/logs/1,name2:/path/to/logs/2