Example #1
    def _compute_accuracy(self, inputs, outputs, dataset):
        nc = 0
        num_left = dataset.get_num_examples()
        set_is_training(outputs, False)
        loss = 0
        while num_left > 0:
            X_batch, y_batch = dataset.next_batch(self.batch_size)
            X = tf.convert_to_tensor(X_batch, np.float32)  #.gpu()
            y = tf.convert_to_tensor(y_batch, np.float32)  #.gpu()

            co.forward({inputs['in']: X})
            logits = outputs['out'].val

            correct_prediction = tf.equal(tf.argmax(logits, 1),
                                          tf.argmax(y, 1))
            num_correct = tf.reduce_sum(tf.cast(correct_prediction, "float"))
            loss += tf.reduce_sum(
                tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                        labels=y))
            nc += num_correct

            # update the number of examples left.
            eff_batch_size = y_batch.shape[0]
            num_left -= eff_batch_size
        acc = float(nc) / dataset.get_num_examples()
        loss = float(loss) / dataset.get_num_examples()

        return acc, loss
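The bookkeeping in _compute_accuracy above is framework-independent: it accumulates per-batch correct counts and losses while num_left tracks how many examples remain, and divides by the dataset size only once at the end, so a smaller final batch is weighted correctly. A self-contained numpy sketch of the same pattern (the names here are illustrative, not from the example):

    import numpy as np

    def batched_accuracy(batches, num_examples):
        # batches yields (logits, one_hot_labels) pairs; accumulate the
        # number of correct predictions and normalize once at the end.
        num_correct = 0
        for logits, y in batches:
            num_correct += np.sum(np.argmax(logits, 1) == np.argmax(y, 1))
        return num_correct / num_examples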
Example #2
    def evaluate(self, inputs, outputs):
        params = M.get_collection()
        optimizer = dy.SimpleSGDTrainer(params, self.learning_rate)
        num_batches = int(len(self.train_dataset) / self.batch_size)
        for epoch in range(self.max_num_training_epochs):
            random.shuffle(self.train_dataset)
            i = 0
            total_loss = 0
            while i < len(self.train_dataset):
                dy.renew_cg()
                mbsize = min(self.batch_size, len(self.train_dataset) - i)
                minibatch = self.train_dataset[i:i + mbsize]
                losses = []
                for (label, img) in minibatch:
                    x = dy.inputVector(img)
                    co.forward({inputs['in']: x})
                    logits = outputs['out'].val
                    loss = dy.pickneglogsoftmax(logits, label)
                    losses.append(loss)
                mbloss = dy.esum(losses) / mbsize
                mbloss.backward()
                optimizer.update()
                total_loss += mbloss.scalar_value()
                i += mbsize

            val_acc = self.compute_accuracy(inputs, outputs)
            if self.log_output_to_terminal and epoch % self.display_step == 0:
                print("epoch:", '%d' % (epoch + 1), "loss:",
                      "{:.9f}".format(total_loss / num_batches),
                      "validation_accuracy:", "%.5f" % val_acc)

        val_acc = self.compute_accuracy(inputs, outputs)
        return {'val_acc': val_acc}
Example #3
    def evaluate(self, inputs, outputs):
        tf.keras.backend.clear_session()
        tf.reset_default_graph()

        (x_train, y_train) = self.train_dataset

        X = tf.keras.layers.Input(x_train[0].shape)
        co.forward({inputs['in']: X})
        logits = outputs['out'].val
        probs = tf.keras.layers.Softmax()(logits)
        model = tf.keras.models.Model(inputs=[inputs['in'].val],
                                      outputs=[probs])
        optimizer = tf.keras.optimizers.Adam(lr=self.learning_rate)
        model.compile(optimizer=optimizer,
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])
        model.summary()
        history = model.fit(x_train,
                            y_train,
                            batch_size=self.batch_size,
                            epochs=self.max_num_training_epochs,
                            validation_split=self.val_split)

        results = {'val_acc': history.history['val_acc'][-1]}
        return results
Example #4
    def _compute_loss(self, inputs, outputs, X, y, loss_metric):
        X = tf.constant(X).gpu()
        y = tf.constant(y).gpu()
        co.forward({inputs['in']: X})
        logits = outputs['out'].val
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
        loss_metric(loss)
        return loss
Example #5
    def forward(self, input_name_to_val):
        input_to_val = {
            self.inputs[name]: val for (name, val) in input_name_to_val.items()
        }
        co.forward(input_to_val, self._module_seq)
        output_name_to_val = {
            name: ox.val for (name, ox) in self.outputs.items()
        }
        return output_name_to_val
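The forward wrapper in Example #5 is pure dict plumbing: it maps names to DeepArchitect input objects, lets co.forward propagate values through the module graph, and reads each output's .val. A self-contained sketch with stand-in objects (only the shape of the wrapper comes from the example; the Obj class and fake_forward are stand-ins for DeepArchitect's objects and co.forward):

    class Obj:
        # stand-in for a DeepArchitect Input/Output; real outputs expose .val
        def __init__(self):
            self.val = None

    inputs = {'in': Obj()}    # name -> input object
    outputs = {'out': Obj()}  # name -> output object

    def fake_forward(input_to_val):
        # stand-in for co.forward: just doubles the input value
        outputs['out'].val = input_to_val[inputs['in']] * 2

    def forward(input_name_to_val):
        input_to_val = {inputs[name]: val
                        for name, val in input_name_to_val.items()}
        fake_forward(input_to_val)
        return {name: ox.val for name, ox in outputs.items()}

    print(forward({'in': 21}))  # {'out': 42}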
Example #6
    def evaluate(self, inputs, outputs):
        tf.reset_default_graph()

        X_pl = tf.placeholder("float", [None] + self.in_dim)
        y_pl = tf.placeholder("float", [None, self.num_classes])
        lr_pl = tf.placeholder("float")
        co.forward({inputs['in']: X_pl})
        logits = outputs['out'].val
        train_feed, eval_feed = htf.get_feed_dicts(outputs)

        # define loss and optimizer
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                    labels=y_pl))
        optimizer = tf.train.AdamOptimizer(learning_rate=lr_pl)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            optimizer = optimizer.minimize(loss)

        # for computing the accuracy of the model
        correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y_pl, 1))
        num_correct = tf.reduce_sum(tf.cast(correct_prediction, "float"))

        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            sess.run(init)

            num_batches = int(self.train_dataset.get_num_examples() /
                              self.batch_size)
            for epoch in range(self.num_training_epochs):
                avg_loss = 0.
                for _ in range(num_batches):
                    X_batch, y_batch = self.train_dataset.next_batch(
                        self.batch_size)
                    train_feed.update({
                        X_pl: X_batch,
                        y_pl: y_batch,
                        lr_pl: self.learning_rate
                    })

                    _, c = sess.run([optimizer, loss], feed_dict=train_feed)
                    avg_loss += c / num_batches

                if self.log_output_to_terminal and epoch % self.display_step == 0:
                    print("epoch:", '%d' % (epoch + 1), "loss:",
                          "{:.9f}".format(avg_loss))

            val_acc = self.compute_accuracy(sess, X_pl, y_pl, num_correct,
                                            self.val_dataset, eval_feed)
            print("validation accuracy: %0.4f" % val_acc)
            results = {
                'validation_accuracy': val_acc,
                'num_parameters': htf.get_num_trainable_parameters()
            }

        return results
Example #7
    def compute_accuracy(self, inputs, outputs):
        correct = 0
        for (label, img) in self.val_dataset:
            dy.renew_cg()
            x = dy.inputVector(img)
            co.forward({inputs['in']: x})
            logits = outputs['out'].val
            pred = np.argmax(logits.npvalue())
            if label == pred:
                correct += 1
        return float(correct) / len(self.val_dataset)
Example #8
    def forward(self, input_name_to_val):
        """Forward computation of the module that is represented through the
        graph of DeepArchitect modules.
        """

        input_to_val = {
            ix: input_name_to_val[name]
            for (name, ix) in self.inputs.items()
        }
        co.forward(input_to_val, self._module_seq)
        output_name_to_val = {
            name: ox.val
            for (name, ox) in self.outputs.items()
        }
        return output_name_to_val
Example #9
    def evaluate(self, inputs, outputs):
        keras.backend.clear_session()

        X = Input(self.X_train[0].shape)
        co.forward({inputs['in']: X})
        logits = outputs['out'].val
        probs = Activation('softmax')(logits)

        model = Model(inputs=[inputs['in'].val], outputs=[probs])
        model.compile(optimizer=Adam(lr=self.learning_rate),
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])
        model.summary()
        history = model.fit(self.X_train,
                            self.y_train,
                            batch_size=self.batch_size,
                            epochs=self.num_training_epochs,
                            validation_data=(self.X_val, self.y_val))
        results = {'validation_accuracy': history.history['val_accuracy'][-1]}
        return results
Example #10
    def forward(self, input_name_to_val):
        """Forward computation of the module that is represented through the
        graph of DeepArchitect modules.
        """
        if self._module_seq is None:
            self._module_seq = co.determine_module_eval_seq(
                self.inputs.values())

        input_to_val = {
            ix: input_name_to_val[name]
            for name, ix in self.inputs.items()
        }
        co.forward(input_to_val, self._module_seq)
        output_name_to_val = {
            name: ox.val
            for name, ox in self.outputs.items()
        }

        if not self._is_compiled:
            modules = get_pytorch_modules(self.outputs)
            for i, m in enumerate(modules):
                self.add_module(str(i), m)
            self._is_compiled = True
        return output_name_to_val
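The add_module loop at the end of Example #10 is what makes the parameters of the modules discovered in the graph visible to the enclosing torch.nn.Module, and therefore to any optimizer built from its .parameters(). A minimal PyTorch sketch of that registration effect, independent of DeepArchitect:

    import torch.nn as nn

    class Wrapper(nn.Module):
        def __init__(self, discovered):
            super().__init__()
            # registering children with add_module is what exposes their
            # parameters through .parameters() and to optimizers
            for i, m in enumerate(discovered):
                self.add_module(str(i), m)

    w = Wrapper([nn.Linear(4, 2), nn.ReLU()])
    print(sum(p.numel() for p in w.parameters()))  # 10 = 4*2 weights + 2 biases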
Example #11
    def eval(self, inputs, outputs):
        tf.reset_default_graph()

        X_pl = tf.placeholder("float", [None] + self.in_dim)
        y_pl = tf.placeholder("float", [None, self.num_classes])
        lr_pl = tf.placeholder("float")
        co.forward({inputs['in']: X_pl})
        logits = outputs['out'].val
        train_feed, eval_feed = htf.get_feed_dicts(outputs)
        saver = tf.train.Saver()

        # define loss and optimizer
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_pl))
        # chooses the optimizer. (this can be put in a function).
        if self.optimizer_type == 'adam':
            optimizer = tf.train.AdamOptimizer(learning_rate=lr_pl)
        elif self.optimizer_type == 'sgd':
            optimizer = tf.train.GradientDescentOptimizer(learning_rate=lr_pl)
        elif self.optimizer_type == 'sgd_mom':
            optimizer = tf.train.MomentumOptimizer(learning_rate=lr_pl,
                                                   momentum=0.99)
        else:
            raise ValueError("Unknown optimizer.")
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            optimizer = optimizer.minimize(loss)

        # for computing the accuracy of the model
        correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y_pl, 1))
        num_correct = tf.reduce_sum(tf.cast(correct_prediction, "float"))

        init = tf.global_variables_initializer()
        # Setting the session to allow growth, so it doesn't allocate all GPU memory.
        gpu_ops = tf.GPUOptions(allow_growth=True)
        config = tf.ConfigProto(gpu_options=gpu_ops)
        seqs = ut.SequenceTracker(abort_if_different_lengths=True)
        with tf.Session(config=config) as sess:
            sess.run(init)

            learning_rate_init = self.learning_rate_init
            learning_rate_mult = self.learning_rate_mult
            learning_rate_min = self.learning_rate_min
            stop_patience = self.stop_patience
            learning_rate_patience = self.learning_rate_patience
            save_patience = self.save_patience

            best_val_acc = -np.inf
            best_val_acc_saved = -np.inf
            stop_counter = stop_patience
            rate_counter = learning_rate_patience
            save_counter = save_patience
            timer_manager = ut.TimerManager()
            timer_manager.create_timer('eval')

            # getting the gpu_id based on the environment.
            if gpu_utils.is_environment_variable_defined(
                    'CUDA_VISIBLE_DEVICES'):
                s = gpu_utils.get_environment_variable('CUDA_VISIBLE_DEVICES')
                s_lst = s.split(',')
                if len(s_lst) == 1 and len(s_lst[0]) > 0:
                    gpu_id = int(s_lst[0])
                else:
                    gpu_id = None
            else:
                gpus = gpu_utils.get_gpu_information()
                if len(gpus) == 1:
                    gpu_id = 0
                else:
                    gpu_id = None

            lr = learning_rate_init
            num_batches = int(self.train_dataset.get_num_examples() /
                              self.batch_size)
            for epoch in range(self.max_num_training_epochs):
                avg_loss = 0.
                for _ in range(num_batches):
                    X_batch, y_batch = self.train_dataset.next_batch(
                        self.batch_size)
                    train_feed.update({X_pl: X_batch, y_pl: y_batch, lr_pl: lr})

                    _, c = sess.run([optimizer, loss], feed_dict=train_feed)
                    avg_loss += c / num_batches

                    # if spent more time than budget, exit.
                    if (timer_manager.get_time_since_event(
                            'eval', 'start', 'minutes') >
                            self.max_eval_time_in_minutes):
                        break

                # early stopping
                val_acc = self._compute_accuracy(sess, X_pl, y_pl, num_correct,
                                                 self.val_dataset, eval_feed)

                # Display logs per epoch step
                if self.log_output_to_terminal and epoch % self.display_step == 0:
                    print("time:", "%7.1f" %
                          timer_manager.get_time_since_event('eval', 'start'),
                          "epoch:", '%04d' % (epoch + 1), "loss:",
                          "{:.9f}".format(avg_loss), "validation_accuracy:",
                          "%.5f" % val_acc, "learning_rate:", '%.3e' % lr)

                d = {
                    'validation_accuracy': val_acc,
                    'training_loss': avg_loss,
                    'epoch_number': epoch + 1,
                    'learning_rate': lr,
                    'time_in_minutes': timer_manager.get_time_since_event(
                        'eval', 'start', units='minutes'),
                }
                # adding information about gpu utilization if available.
                if gpu_id is not None:
                    gpus = gpu_utils.get_gpu_information()
                    d.update({
                        'gpu_utilization_in_percent':
                        gpus[gpu_id]['gpu_utilization_in_percent'],
                        'gpu_memory_utilization_in_gigabytes':
                        gpus[gpu_id]['gpu_memory_utilization_in_gigabytes']
                    })
                seqs.append(d)

                # update the patience counters.
                if best_val_acc < val_acc:
                    best_val_acc = val_acc
                    # reinitialize all the counters.
                    stop_counter = stop_patience
                    rate_counter = learning_rate_patience
                    save_counter = save_patience
                else:
                    stop_counter -= 1
                    rate_counter -= 1
                    if stop_counter == 0:
                        break

                    if rate_counter == 0:
                        lr = max(lr * learning_rate_mult, learning_rate_min)
                        rate_counter = learning_rate_patience

                    if best_val_acc_saved < val_acc:
                        save_counter -= 1
                        if save_counter == 0:
                            save_path = saver.save(sess, self.model_path)
                            print("Model saved in file: %s" % save_path)

                            save_counter = save_patience
                            best_val_acc_saved = val_acc

                # if spent more time than budget, exit.
                if (timer_manager.get_time_since_event('eval', 'start',
                                                       'minutes') >
                        self.max_eval_time_in_minutes):
                    break

            # if the model saved has better performance than the current model,
            # load it.
            if best_val_acc_saved > val_acc:
                saver.restore(sess, self.model_path)
                print("Model restored from file: %s" % save_path)

            print("Optimization Finished!")

            timer_manager.tick_timer('eval')
            val_acc = self._compute_accuracy(sess, X_pl, y_pl, num_correct,
                                             self.val_dataset, eval_feed)
            t_infer = (
                timer_manager.get_time_since_last_tick('eval', 'miliseconds') /
                self.val_dataset.get_num_examples())

            print("Validation accuracy: %f" % val_acc)
            seqs_dict = seqs.get_dict()
            results = {
                'validation_accuracy': val_acc,
                'num_parameters': float(htf.get_num_trainable_parameters()),
                'inference_time_per_example_in_miliseconds': t_infer,
                'num_training_epochs': seqs_dict['epoch_number'],
                'sequences': seqs_dict
            }
            if 'gpu_utilization_in_percent' in seqs_dict:
                results['average_gpu_utilization_in_percent'] = np.mean(
                    seqs_dict['gpu_utilization_in_percent'])
                results[
                    'average_gpu_memory_utilization_in_gigabytes'] = np.mean(
                        seqs_dict['gpu_memory_utilization_in_gigabytes'])

            if self.test_dataset is not None:
                test_acc = self._compute_accuracy(sess, X_pl, y_pl, num_correct,
                                                  self.test_dataset, eval_feed)
                print("Test accuracy: %f" % test_acc)
                results['test_accuracy'] = test_acc

        results['training_time_in_hours'] = timer_manager.get_time_since_event(
            'eval', 'start', units='hours')
        return results
Example #12
        def model_fn(features, labels, mode, params):
            feature_columns = list(get_feature_columns().values())

            images = tf.feature_column.input_layer(
                features=features, feature_columns=feature_columns)

            images = tf.reshape(images,
                                shape=(-1, IMAGE_HEIGHT, IMAGE_WIDTH,
                                       IMAGE_DEPTH))
            set_recompile(outputs, True)
            gc.collect()
            htfe.set_is_training(outputs, mode == tf.estimator.ModeKeys.TRAIN)
            co.forward({inputs['in']: images})
            logits = outputs['out'].val

            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            predicted_classes = tf.argmax(logits, 1)
            if mode == tf.estimator.ModeKeys.PREDICT:
                predictions = {
                    'class_ids': predicted_classes[:, tf.newaxis],
                    'probabilities': tf.nn.softmax(logits),
                    'logits': logits,
                }
                return tf.estimator.EstimatorSpec(mode,
                                                  predictions=predictions)
            # define loss and optimizer
            train_vars = tf.trainable_variables()
            with tf.variable_scope('l2'):
                l2_loss = tf.add_n([
                    tf.nn.l2_loss(v) for v in train_vars if 'kernel' in v.name
                ]) * self.weight_decay
            unreg_loss = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                        labels=labels))
            loss = unreg_loss + l2_loss
            # Compute evaluation metrics.
            accuracy = tf.metrics.accuracy(labels=tf.argmax(labels, 1),
                                           predictions=predicted_classes,
                                           name='acc_op')
            metrics = {'accuracy': accuracy}
            if mode == tf.estimator.ModeKeys.EVAL:
                loss = tf.Print(
                    loss,
                    [accuracy, l2_loss, unreg_loss, loss,
                     tf.argmax(labels, 1), predicted_classes],
                    summarize=10)
                return tf.estimator.EstimatorSpec(mode,
                                                  loss=loss,
                                                  eval_metric_ops=metrics)

            # Create training op.
            assert mode == tf.estimator.ModeKeys.TRAIN
            step = tf.train.get_or_create_global_step()
            learning_rate = self.get_learning_rate(step)
            optimizer = tf.train.RMSPropOptimizer(learning_rate,
                                                  .9,
                                                  momentum=.9,
                                                  epsilon=1.0)
            loss = tf.Print(
                loss,
                [accuracy, l2_loss, unreg_loss, loss, learning_rate,
                 tf.argmax(labels, 1), predicted_classes],
                summarize=10)
            with tf.control_dependencies(update_ops):
                train_op = optimizer.minimize(
                    loss, global_step=tf.train.get_global_step())
            return tf.estimator.EstimatorSpec(mode,
                                              loss=loss,
                                              train_op=train_op)
Example #13
        def model_fn(features, labels, mode, params):
            setRecompile(outputs.values(), True)
            gc.collect()
            htfe.setTraining(outputs.values(),
                             mode == tf.estimator.ModeKeys.TRAIN)
            step = tf.train.get_or_create_global_step()
            if 'In' in inputs:
                co.forward({inputs['In']: features})
                logits = outputs['Out'].val
            else:
                co.forward({
                    inputs['In0']: features,
                    inputs['In1']: tf.math.divide(
                        tf.cast(step, tf.float32),
                        float(self.steps_per_epoch *
                              self.max_num_training_epochs))
                })
                logits = outputs['Out1'].val
                aux_logits = outputs['Out0'].val

            predicted_classes = tf.argmax(logits, 1, output_type=tf.int32)
            if mode == tf.estimator.ModeKeys.PREDICT:
                predictions = {
                    'class_ids': predicted_classes[:, tf.newaxis],
                    'probabilities': tf.nn.softmax(logits),
                    'logits': logits,
                }
                return tf.estimator.EstimatorSpec(mode, predictions=predictions)

            # define loss and optimizer
            train_vars = tf.trainable_variables()
            with tf.variable_scope('l2'):
                l2_loss = tf.add_n([
                    tf.nn.l2_loss(v) for v in train_vars if 'kernel' in v.name
                ]) * self.weight_decay
            onehot_labels = tf.one_hot(labels, 10)
            unreg_loss = tf.losses.softmax_cross_entropy(
                onehot_labels=onehot_labels,
                logits=logits,
                reduction=tf.losses.Reduction.MEAN)
            aux_loss = tf.losses.softmax_cross_entropy(
                onehot_labels=onehot_labels,
                logits=aux_logits,
                weights=.5,
                reduction=tf.losses.Reduction.MEAN) if 'Out1' in outputs else 0
            loss = unreg_loss + l2_loss + aux_loss
            if mode == tf.estimator.ModeKeys.EVAL:
                return tf.contrib.tpu.TPUEstimatorSpec(
                    mode,
                    loss=loss,
                    eval_metrics=(metric_fn, [labels, predicted_classes]))

            # Create training op.
            assert mode == tf.estimator.ModeKeys.TRAIN
            if self.num_parameters == -1:
                self.num_parameters = np.sum([
                    np.prod(v.get_shape().as_list())
                    for v in tf.trainable_variables()
                ])
            accuracy = metric_fn(labels, predicted_classes)['accuracy']
            tf.identity(accuracy[1], name='train_accuracy')
            learning_rate = self.get_learning_rate(step)
            metric_dict = {
                'batch_loss': loss,
                'learning_rate': learning_rate,
                'batch_accuracy': tf.reduce_mean(
                    tf.cast(tf.equal(predicted_classes, labels), tf.float32))
            }

            host_fn = None
            optimizer = self.get_optimizer(learning_rate)
            if self.use_tpu:
                host_fn = construct_host_fn(metric_dict,
                                            model_dir,
                                            prefix='training/',
                                            max_queue_size=self.steps_per_epoch)
                optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
            else:
                record_summaries(metric_dict, step)
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(update_ops):
                gvs = optimizer.compute_gradients(loss)
                gvs = [(tf.where(tf.is_nan(grad), tf.zeros_like(grad),
                                 grad), val) for grad, val in gvs]
                train_op = optimizer.apply_gradients(
                    gvs, global_step=tf.train.get_or_create_global_step())

            return tf.contrib.tpu.TPUEstimatorSpec(mode,
                                                   loss=loss,
                                                   train_op=train_op,
                                                   host_call=host_fn)
Example #14
    def eval(self, inputs, outputs, save_fn=None, state=None):
        if state is not None and 'model_dir' in state:
            model_dir = state['model_dir']
        else:
            model_dir = gcu.get_empty_bucket_folder(self.base_dir)
            if save_fn:
                save_fn({'model_dir': model_dir})
        logger.info('Using folder %s for evaluation', model_dir)
        latest = tf.train.latest_checkpoint(model_dir)
        init_epoch = 0
        train_dataset = input_fn('train',
                                 self.data_dir,
                                 batch_size=self.batch_size,
                                 train=True,
                                 num_outputs=len(outputs))
        val_dataset = input_fn('validation',
                               self.data_dir,
                               batch_size=self.batch_size,
                               train=False,
                               num_outputs=len(outputs))
        if latest:
            model = tf.keras.models.load_model(latest)
            init_epoch = int(latest.split('.')[-4])
        else:
            input_placeholder = tf.keras.Input(
                train_dataset.output_shapes[0][1:])
            x = input_placeholder
            step = tf.train.get_or_create_global_step()
            if 'in' in inputs:
                co.forward({inputs['in']: x})
                logits = outputs['out'].val
                logits = tf.keras.layers.Lambda(lambda x: x,
                                                name='final_logits')(logits)
                output_tensors = [logits]
                loss_weights = [1.0]
                losses = [
                    lambda y_true, y_pred:
                        tf.keras.losses.sparse_categorical_crossentropy(
                            y_true, y_pred, from_logits=True),
                ]
                accuracy_metric_name = 'sparse_categorical_accuracy'
            else:
                co.forward({
                    inputs['in0']: x,
                    inputs['in1']: float(self.steps_per_epoch *
                                         self.max_num_training_epochs)
                })
                logits = outputs['out1'].val
                aux_logits = outputs['out0'].val
                logits = tf.keras.layers.Lambda(lambda x: x,
                                                name='final_logits')(logits)
                aux_logits = tf.keras.layers.Lambda(
                    lambda x: x, name='aux_logits')(aux_logits)
                output_tensors = [logits, aux_logits]
                loss_weights = [1.0, .5]
                losses = [
                    lambda y_true, y_pred:
                        tf.keras.losses.sparse_categorical_crossentropy(
                            y_true, y_pred, from_logits=True),
                    lambda y_true, y_pred:
                        tf.keras.losses.sparse_categorical_crossentropy(
                            y_true, y_pred, from_logits=True),
                ]
                accuracy_metric_name = 'final_logits_sparse_categorical_accuracy'
            metrics = {'final_logits': ['sparse_categorical_accuracy']}
            model = tf.keras.Model(inputs=input_placeholder,
                                   outputs=output_tensors)
            if self.use_tpu:
                my_project_name = subprocess.check_output(
                    ['gcloud', 'config', 'get-value', 'project'])
                my_zone = subprocess.check_output(
                    ['gcloud', 'config', 'get-value', 'compute/zone'])
                cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
                    self.tpu_name.split(','),
                    zone=my_zone,
                    project=my_project_name)
                strategy = tf.contrib.tpu.TPUDistributionStrategy(
                    cluster_resolver)
                model = tf.contrib.tpu.keras_to_tpu_model(model, strategy)

            learning_rate = self.get_learning_rate(step)
            optimizer = get_optimizer(self.optimizer_type, learning_rate)
            tf.summary.scalar('learning_rate', learning_rate)
            tensorboard_callback = tf.keras.callbacks.TensorBoard(
                log_dir=model_dir)
            checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
                filepath=os.path.join(
                    model_dir, 'weights.{epoch:02d}.{val_loss:.2f}.hdf5'),
                period=(self.max_num_training_epochs // 3) + 1)
            model.compile(optimizer=optimizer,
                          loss=losses,
                          loss_weights=loss_weights,
                          metrics=metrics)
        num_parameters = int(
            np.sum([
                np.prod(v.get_shape().as_list())
                for v in set(model.trainable_weights)
            ]))
        logger.info('Number of parameters for %s: %d', model_dir,
                    num_parameters)
        timer_manager = ut.TimerManager()
        timer_manager.create_timer('eval')

        training_history = model.fit(
            (lambda: train_dataset) if self.use_tpu else train_dataset,
            epochs=self.max_num_training_epochs,
            steps_per_epoch=self.steps_per_epoch,
            validation_data=(lambda: val_dataset)
            if self.use_tpu else val_dataset,
            validation_steps=self.steps_per_val_epoch,
            initial_epoch=init_epoch,
            callbacks=[tensorboard_callback, checkpoint_callback],
        )
        test_dataset = input_fn('eval',
                                self.data_dir,
                                batch_size=self.batch_size,
                                train=False,
                                num_outputs=len(outputs))
        test_results = model.evaluate(
            (lambda: test_dataset) if self.use_tpu else test_dataset,
            steps=5)  #self.steps_per_test_epoch)
        test_results = {
            'test_' + metric_name: test_results[ix]
            for (ix, metric_name) in enumerate(model.metrics_names)
        }

        results = {
            'validation_accuracy':
                training_history.history['val_' + accuracy_metric_name][-1],
            'validation_loss': training_history.history['val_loss'][-1],
            'num_parameters': num_parameters,
            'num_training_epochs': training_history.epoch,
        }
        results.update(test_results)
        results.update(training_history.history)
        results['test_accuracy'] = test_results['test_' + accuracy_metric_name]

        results['training_time_in_hours'] = timer_manager.get_time_since_event(
            'eval', 'start', units='hours')
        return results