Ejemplo n.º 1
0
def main(argv):
    """Demonstrate parsing a single resnet_v2_block layer from config."""
    # Input description: one 32x32x3 feature keyed 'val' plus a scalar label.
    input_config = {
        "features": [
            {"shape": [32, 32, 3], "key": "val", "dtype": tf.float32},
        ],
        "labels": {"shape": [1], "dtype": tf.float32},
    }

    # Layer description for a single resnet v2 block.
    resnet_config = {
        "type": "resnet_v2_block",
        "stride": 1,
        "base_depth": 3,
        "num_units": 1,
    }

    feature_tensors = {'val': tf.ones([10, 32, 32, 3])}
    label_tensor = tf.ones([1])

    parsed, label_tensor = ctfm.parse_inputs(feature_tensors, label_tensor,
                                             input_config)

    input_shape = parsed['val'].get_shape().as_list()
    resnet = ctfm.parse_layer(input_shape, resnet_config)

    # parse_layer yields (layer, variables, function, output_shape);
    # index 2 is the forward function, index 1 the variables.
    print(resnet[2](feature_tensors['val']))
    print(resnet[1])
Ejemplo n.º 2
0
def main(argv):
    """Demonstrate parsing a 'spatial_integration' layer and running it."""
    layer_config = {'type': 'spatial_integration'}

    # One 32x32x2 feature keyed 'patch' plus a scalar label description.
    input_config = {
        "features": [
            {"shape": [32, 32, 2], "key": "patch", "dtype": tf.float32},
        ],
        "labels": {"shape": [1], "dtype": tf.float32},
    }

    feature_tensors = {'patch': tf.ones([10, 32, 32, 2])}
    label_tensor = tf.ones([10])

    parsed, label_tensor = ctfm.parse_inputs(feature_tensors, label_tensor,
                                             input_config)

    patch_shape = parsed['patch'].get_shape()
    layer, variables, function, output_shape = ctfm.parse_layer(
        patch_shape, layer_config)

    # Apply the layer's forward function to the raw feature tensor.
    print(function(feature_tensors['patch']))
Ejemplo n.º 3
0
def main(argv):
    """Parse a standalone 'sampler' layer and a full two-layer component.

    First builds an 18-dim sampler layer directly with parse_layer, then a
    component named 'example' (dense(10) -> sampler(10)) with
    parse_component, printing the output of each forward pass.
    """
    cfg = {'type': 'sampler', 'dims': 18, 'name': 'z'}

    config = {
        "model": {
            "inputs": {
                "features": [{
                    "shape": [1],
                    "key": "val",
                    "dtype": tf.float32
                }],
                "labels": {
                    "shape": [1],
                    "dtype": tf.float32
                }
            },
            "components": [{
                "name":
                "example",
                "input":
                "val",
                "layers": [{
                    "type": 'dense',
                    'units': 10
                }, {
                    'type': 'sampler',
                    'dims': 10,
                    'name': 'z'
                }],
                "output":
                'sample'
            }]
        }
    }

    model = config['model']

    features = {'val': tf.ones([10, 1])}
    labels = tf.ones([1])

    inputs, labels = ctfm.parse_inputs(features, labels, model['inputs'])

    # Standalone layer: parse_layer returns (layer, variables, function,
    # output_shape); index 2 is the forward function.
    layer, variables, function, output_shape = ctfm.parse_layer(
        inputs['val'].get_shape(), cfg)
    print(function(features['val']))

    # Component: parse_component also returns a tuple whose index 2 is the
    # component's forward pass.
    example = ctfm.parse_component(inputs, model['components'][0], inputs)

    print(example[2](features['val']))
Ejemplo n.º 4
0
def main(argv):
    """Demonstrate building two slice layers over an 18-dim feature."""
    first_slice_cfg = {'type': 'slice', 'begin': [0], 'size': [6]}
    second_slice_cfg = {'type': 'slice', 'begin': [5], 'size': [12]}

    input_config = {
        "features": [
            {"shape": [18], "key": "val", "dtype": tf.float32},
        ],
        "labels": {"shape": [1], "dtype": tf.float32},
    }

    feature_tensors = {'val': tf.ones([10, 18])}
    label_tensor = tf.ones([1])

    parsed, label_tensor = ctfm.parse_inputs(feature_tensors, label_tensor,
                                             input_config)

    # Both slices are parsed against the same parsed-input shape.
    val_shape = parsed['val'].get_shape()
    first_slice = _parse_slice(val_shape, first_slice_cfg)
    second_slice = _parse_slice(val_shape, second_slice_cfg)

    # Index 2 of a parsed layer tuple holds the forward function.
    print(first_slice[2](feature_tensors['val']))
    print(second_slice[2](feature_tensors['val']))
Ejemplo n.º 5
0
def main(argv):
    """Load a model config from JSON and run its encoder component."""
    config_filename = os.path.join(git_root, 'tests', 'model', 'config',
                                   'example_config.json')
    cfg = ctfm.parse_json(config_filename)

    feature_tensors = {'val': tf.ones([10, 1])}
    label_tensor = tf.ones([1])

    parsed, label_tensor = ctfm.parse_inputs(feature_tensors, label_tensor,
                                             cfg['inputs'])

    outputs = {}

    # parse_component returns (layers, variables, forward_fn); the forward
    # pass also stores the component's named output ('logits') in `outputs`.
    encoder_layers, encoder_vars, encode = ctfm.parse_component(
        parsed, cfg['encoder'], outputs)
    print(encode(feature_tensors['val']))
    print(outputs['logits'])
Ejemplo n.º 6
0
def main(argv):
    """Train the simple_fcnn example model from its JSON configuration.

    Expects `my_model` and `train_func` to be defined at module level.
    Checkpoints and summaries are written to the export_dir argument.
    """
    parser = argparse.ArgumentParser(description='Run training for specified model with fixed dataset creation.')
    parser.add_argument('export_dir',type=str,help='Path to store the model.')

    args = parser.parse_args()

    config_filename = os.path.join(git_root, 'examples','models','simple_fcnn','model.json')
    
    # Only the 'model' section of the JSON file is used here.
    cfg_model = ctfm.parse_json(config_filename)['model']

    model_dir = args.export_dir

    # Forwarded to my_model via the estimator's params argument.
    params_dict = {
        'config': cfg_model,
        'model_dir': model_dir,
    }          

    classifier = tf.estimator.Estimator(
      model_fn=my_model,
      model_dir=model_dir,
      params=params_dict,
      config=tf.estimator.RunConfig(model_dir=model_dir, save_summary_steps=100, log_step_count_steps=100)
    )

    # NOTE(review): Estimator.train returns the estimator itself, so the
    # rebind of `classifier` is effectively a no-op.
    classifier = classifier.train(input_fn=train_func, steps=10000000)
Ejemplo n.º 7
0
def main(argv):
    """Train the autoencoder example model from its JSON configuration.

    Expects `my_model` and `train_func` to be defined at module level.
    """
    # NOTE(review): the argparse description mentions inference, but this
    # function only trains the model.
    parser = argparse.ArgumentParser(
        description='Compute latent code for image patch by model inference.')
    parser.add_argument('export_dir',
                        type=str,
                        help='Path to saved model to use for inference.')

    args = parser.parse_args()
    model_dir = args.export_dir

    config_filename = os.path.join(git_root, 'examples', 'models',
                                   'autoencoder', 'autoencoder.json')
    # Only the 'model' section of the JSON file is used here.
    cfg = ctfm.parse_json(config_filename)['model']

    # Forwarded to my_model via the estimator's params argument.
    params_dict = {
        'config': cfg,
        'model_dir': model_dir,
    }

    classifier = tf.estimator.Estimator(model_fn=my_model,
                                        model_dir=model_dir,
                                        params=params_dict,
                                        config=tf.estimator.RunConfig(
                                            model_dir=model_dir,
                                            save_summary_steps=100,
                                            log_step_count_steps=100))

    classifier = classifier.train(input_fn=train_func, steps=10000)
Ejemplo n.º 8
0
def main(argv):
    """Define an encoder (dense -> relu -> dense) inline and run it."""
    config = {
        "inputs": {
            "features": [
                {"shape": [1], "key": "val", "dtype": tf.float32},
            ],
            "labels": {"shape": [1], "dtype": tf.float32},
        },
        "encoder": {
            "input": "val",
            "layers": [
                {"type": "dense", "units": 10},
                {"type": "activation", "function": tf.nn.relu},
                {"type": "dense", "units": 1},
            ],
            "output": "logits",
        },
    }

    feature_tensors = {'val': tf.ones([10, 1])}
    label_tensor = tf.ones([1])

    parsed, label_tensor = ctfm.parse_inputs(feature_tensors, label_tensor,
                                             config['inputs'])

    outputs = {}

    # The forward function writes the component's named output ('logits')
    # into `outputs` as a side effect.
    encoder_layers, encoder_vars, encode = ctfm.parse_component(
        parsed, config['encoder'], outputs)
    print(encode(feature_tensors['val']))
    print(outputs['logits'])
Ejemplo n.º 9
0
def my_model(features, labels, mode, params, config):
    """Estimator model_fn: builds the first configured component and trains
    it with Adagrad on an absolute-difference loss.

    Args:
        features: dict of raw input tensors, parsed against cfg['inputs'].
        labels: label tensor.
        mode: a tf.estimator.ModeKeys value (TRAIN or EVAL).
        params: dict holding 'config', the parsed model configuration.
        config: estimator RunConfig (unused).

    Returns:
        tf.estimator.EstimatorSpec for the given mode.
    """
    cfg = params['config']

    inputs, labels = ctfm.parse_inputs(features, labels, cfg['inputs'])
    outputs = {}

    # The component's forward pass stores its named output ('logits')
    # into `outputs`.
    layers, variables, forward_pass = ctfm.parse_component(inputs, cfg['components'][0], outputs)

    optimizer = tf.train.AdagradOptimizer(learning_rate=0.001)
    loss = tf.losses.absolute_difference(labels, outputs['logits'])

    # Only this component's variables are trained.
    train_op = optimizer.minimize(loss, var_list=variables, global_step=tf.train.get_global_step())

    if mode == tf.estimator.ModeKeys.EVAL:
        # Fix: eval_metric_ops values must be (value, update_op) pairs;
        # passing the raw loss tensor makes EstimatorSpec raise. Wrap the
        # loss in a streaming mean metric instead.
        return tf.estimator.EstimatorSpec(
            mode, loss=loss, eval_metric_ops={'loss': tf.metrics.mean(loss)})

    assert mode == tf.estimator.ModeKeys.TRAIN
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
Ejemplo n.º 10
0
def my_model(features, labels, mode, params, config):
    """Estimator model_fn: builds every configured component and trains the
    whole graph on an absolute-difference loss between 'val' and 'logits'.

    Args:
        features: dict of raw input tensors, parsed against cfg['inputs'].
        labels: label tensor.
        mode: a tf.estimator.ModeKeys value (TRAIN or EVAL).
        params: dict holding 'config', the parsed model configuration.
        config: estimator RunConfig (unused).

    Returns:
        tf.estimator.EstimatorSpec for the given mode.
    """
    cfg = params['config']

    tensors, labels = ctfm.parse_inputs(features, labels, cfg['inputs'])
    components = {}

    # Each component reads from and writes its named outputs back into
    # `tensors`, so later components can consume earlier outputs.
    for comp in cfg['components']:
        components[comp['name']] = ctfm.parse_component(tensors, comp, tensors)

    optimizer = tf.train.AdagradOptimizer(learning_rate=0.01)
    loss = tf.losses.absolute_difference(tensors['val'], tensors['logits'])

    train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())

    if mode == tf.estimator.ModeKeys.EVAL:
        # Fix: eval_metric_ops values must be (value, update_op) pairs;
        # passing the raw loss tensor makes EstimatorSpec raise. Wrap the
        # loss in a streaming mean metric instead.
        return tf.estimator.EstimatorSpec(
            mode, loss=loss, eval_metric_ops={'loss': tf.metrics.mean(loss)})

    assert mode == tf.estimator.ModeKeys.TRAIN
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
Ejemplo n.º 11
0
def main(argv):
    """Train a model using dataset and model configs kept in separate files.

    Expects `my_model` to be defined at module level; the training input
    function is constructed from the dataset configuration.
    """
    # NOTE(review): the argparse description mentions inference, but this
    # function only trains the model.
    parser = argparse.ArgumentParser(
        description='Compute latent code for image patch by model inference.')
    parser.add_argument('export_dir',
                        type=str,
                        help='Path to saved model to use for inference.')

    args = parser.parse_args()

    # Load config files, separated in this example.
    dataset_config_file = os.path.join(git_root, 'examples', 'dataset',
                                       'dataset.json')
    model_config_file = os.path.join(git_root, 'examples', 'dataset',
                                     'model.json')

    cfg_datasets = ctfm.parse_json(dataset_config_file)['datasets']
    cfg_model = ctfm.parse_json(model_config_file)['model']

    # Training-dataset section; supplies the step count used below.
    cfg_train_ds = cutil.safe_get('training', cfg_datasets)

    model_dir = args.export_dir

    # Forwarded to my_model via the estimator's params argument.
    params_dict = {
        'config': cfg_model,
        'model_dir': model_dir,
    }

    classifier = tf.estimator.Estimator(model_fn=my_model,
                                        model_dir=model_dir,
                                        params=params_dict,
                                        config=tf.estimator.RunConfig(
                                            model_dir=model_dir,
                                            save_summary_steps=100,
                                            log_step_count_steps=100))

    classifier = classifier.train(
        input_fn=ctfd.construct_train_fn(cfg_datasets),
        steps=cfg_train_ds['steps'])
Ejemplo n.º 12
0
def main(argv):
    """Run the warping component on random texture and deformation inputs."""
    with tf.Session(graph=tf.get_default_graph()).as_default() as sess:
        texture = tf.convert_to_tensor(np.random.rand(1, 32, 32, 3),
                                       dtype=tf.float32,
                                       name='texture')
        deformation = tf.convert_to_tensor(np.random.rand(1, 32, 32, 2),
                                           dtype=tf.float32,
                                           name='deformation')
        tensors = {'texture': texture, 'deformation': deformation}

        # parse_component returns a tuple whose index 2 is the forward pass.
        warper = ctfm.parse_component(tensors, warping_layer_conf, tensors)
        warped = warper[2](texture)

        print(sess.run(warped))
Ejemplo n.º 13
0
def main(argv):
    """Parse a reshape layer mapping a 100-vector onto a 10x10 grid."""
    reshape_config = {'type': 'reshape', 'shape': [10, 10]}

    input_config = {
        "features": [
            {"shape": [100], "key": "val", "dtype": tf.float32},
        ],
        "labels": {"shape": [1], "dtype": tf.float32},
    }

    feature_tensors = {'val': tf.ones([10, 100])}
    label_tensor = tf.ones([1])

    parsed, label_tensor = ctfm.parse_inputs(feature_tensors, label_tensor,
                                             input_config)

    layer, variables, function, output_shape = ctfm.parse_layer(
        parsed['val'].get_shape(), reshape_config)

    # Apply the layer's forward function to the raw feature tensor.
    print(function(feature_tensors['val']))
Ejemplo n.º 14
0
def main(argv):
    """Rotate a loaded image through the rotation component and plot it."""
    with tf.Session(graph=tf.get_default_graph()).as_default() as sess:
        filename = os.path.join(git_root, 'data', 'images',
                                'encoder_input.png')
        # Load a 32x32, 3-channel image and add a leading batch dimension.
        image = tf.expand_dims(
            ctfi.load(filename, width=32, height=32, channels=3),
            0,
            name='image_tensor')
        angle = tf.convert_to_tensor(np.random.rand(1, 1),
                                     dtype=tf.float32,
                                     name='angle_tensor')
        tensors = {'image_tensor': image, 'angle': angle}

        rotation_layer = ctfm.parse_component(tensors, rotation_layer_conf,
                                              tensors)
        # Index 2 of the parsed component is its forward function.
        rotated = rotation_layer[2](angle)

        plt.imshow(sess.run(rotated)[0])
        plt.show()
Ejemplo n.º 15
0
def main(argv):
    """Estimate mean and variance of one dataset feature and save them.

    Reads a TFRecord dataset, decodes it using the feature description from
    the config file, estimates moments over the given axes from a fixed
    number of samples, and writes mean and variance to two .npy files.

    NOTE(review): mean.numpy()/variance.numpy() require eager execution —
    presumably enabled elsewhere at import time; confirm before reuse.
    """
    parser = argparse.ArgumentParser(description='Estimate moments for one feature of the dataset.')

    parser.add_argument('filename', type=str, help='Input dataset filename.')
    parser.add_argument('config',type=str, help='Input config file holding features description.')
    parser.add_argument('num_samples', type=int, help='Number of samples to use for estimation.')    
    parser.add_argument('feature', type=str, help='Key of feature for which to estimate the moments.')
    parser.add_argument('axes', type=lambda s: [int(item) for item in s.split(',')], help="Comma separated list of axis to use.")
    parser.add_argument('output', type=str, nargs=2, help='Path where to store the estimated parameters, mean and variance.')
    # parser.add_argument("--shuffle", help="If to shuffle the dataset.", action="store_true")

    args = parser.parse_args()
    
    dataset = tf.data.TFRecordDataset(args.filename, num_parallel_reads=8)

    # Decode records according to the 'datasets'/'features' config section.
    decode_op = ctfd.construct_decode_op(ctfm.parse_json(args.config).get('datasets').get('features'))
    dataset = dataset.map(decode_op)
    mean, variance = ctfd.estimate_mean_and_variance(dataset, args.num_samples, args.axes, args.feature)

    print(mean.numpy())
    print(variance.numpy())

    # output[0] receives the mean, output[1] the variance.
    np.save(args.output[0], mean)
    np.save(args.output[1], variance)
Ejemplo n.º 16
0
def main(argv):
    """Train a model with per-channel normalization of the 'patch' feature.

    Loads a combined dataset+model config, builds a training input function
    that normalizes each patch channel with precomputed mean/stddev, trains
    for the configured number of epochs, and publishes the model directory.
    """
    parser = argparse.ArgumentParser(description='TODO')
    parser.add_argument('config',
                        type=str,
                        help='Path to configuration file to use.')
    parser.add_argument(
        'mean',
        type=str,
        help='Path to npy file holding mean for normalization.')
    parser.add_argument(
        'variance',
        type=str,
        help='Path to npy file holding variance for normalization.')
    parser.add_argument('model_dir',
                        type=str,
                        help='Path to saved model to use for inference.')
    args = parser.parse_args()

    mean = np.load(args.mean)
    variance = np.load(args.variance)
    # Per-channel standard deviation derived from the stored variance.
    stddev = [np.math.sqrt(x) for x in variance]

    def _normalize_op(features):
        # Normalize each channel of 'patch'; assumes 3 channels on the
        # last axis (range(3), concat on axis 2).
        channels = [
            tf.expand_dims((features['patch'][:, :, channel] - mean[channel]) /
                           stddev[channel], -1) for channel in range(3)
        ]
        features['patch'] = tf.concat(channels, 2)
        return features

    cutil.make_directory(args.model_dir)
    cutil.publish(args.model_dir)

    config_path = args.config
    config = ctfm.parse_json(config_path)

    config_datasets = config.get('datasets')
    config_model = config.get('model')

    train_fn = ctfd.construct_train_fn(config_datasets,
                                       operations=[_normalize_op])

    # Steps per epoch = training set size / batch size.
    steps = int(
        config_datasets.get('training').get('size') /
        config_datasets.get('batch'))

    # Forwarded to my_model via the estimator's params argument; mean and
    # stddev are needed there to denormalize images for summaries.
    params_dict = {
        'config': config_model,
        'model_dir': args.model_dir,
        'mean': mean,
        'stddev': stddev
    }

    classifier = tf.estimator.Estimator(model_fn=my_model,
                                        model_dir=args.model_dir,
                                        params=params_dict,
                                        config=tf.estimator.RunConfig(
                                            model_dir=args.model_dir,
                                            save_summary_steps=1000,
                                            log_step_count_steps=1000))

    # Keep a copy of the config file next to the checkpoints.
    if not os.path.exists(
            os.path.join(args.model_dir, os.path.basename(config_path))):
        shutil.copy2(config_path, args.model_dir)

    for epoch in range(config_datasets.get('training').get('epochs')):
        classifier = classifier.train(input_fn=train_fn, steps=steps)

    # NOTE(review): export_dir is created and published, but no
    # export_saved_model call follows — the export step appears missing or
    # intentionally omitted; confirm.
    export_dir = os.path.join(args.model_dir, 'saved_model')
    cutil.make_directory(export_dir)
    cutil.publish(export_dir)

    cutil.publish(args.model_dir)
    cutil.publish(export_dir)
Ejemplo n.º 17
0
def main(argv):
    """Train a model with patch normalization, then export a SavedModel.

    Loads a combined dataset+model config, builds a training input function
    that normalizes each patch channel with precomputed mean/stddev, trains
    for the configured number of epochs, and exports a serving model.
    """
    parser = argparse.ArgumentParser(description='TODO')
    parser.add_argument('config',
                        type=str,
                        help='Path to configuration file to use.')
    parser.add_argument(
        'mean',
        type=str,
        help='Path to npy file holding mean for normalization.')
    parser.add_argument(
        'variance',
        type=str,
        help='Path to npy file holding variance for normalization.')
    parser.add_argument('model_dir',
                        type=str,
                        help='Path to saved model to use for inference.')
    args = parser.parse_args()

    mean = np.load(args.mean)
    variance = np.load(args.variance)
    # Per-channel standard deviation derived from the stored variance.
    stddev = [np.math.sqrt(x) for x in variance]

    def _normalize_op(features):
        # Normalize each channel of 'patch'; assumes 3 channels on the
        # last axis (range(3), concat on axis 2).
        channels = [
            tf.expand_dims((features['patch'][:, :, channel] - mean[channel]) /
                           stddev[channel], -1) for channel in range(3)
        ]
        features['patch'] = tf.concat(channels, 2)
        return features

    # NOTE(review): defined but never added to `operations` below —
    # subsampling appears intentionally disabled; confirm.
    def _subsampling_op(features):
        features['patch'] = ctfi.subsample(features['patch'], 2)
        return features

    cutil.make_directory(args.model_dir)
    cutil.publish(args.model_dir)

    config_path = args.config
    config = ctfm.parse_json(config_path)

    config_datasets = config.get('datasets')
    config_model = config.get('model')

    train_fn = ctfd.construct_train_fn(config_datasets,
                                       operations=[_normalize_op])
    #def train_fn():
    #    dataset = tf.data.Dataset.from_tensor_slices(np.random.rand(256,32,32,3))
    #    dataset = dataset.map(lambda x : ({"patch": x}, 0)).batch(256).repeat()
    #    return dataset

    # Steps per epoch = training set size / batch size.
    steps = int(
        config_datasets.get('training').get('size') /
        config_datasets.get('batch'))

    params_dict = {'config': config_model, 'model_dir': args.model_dir}

    classifier = tf.estimator.Estimator(model_fn=my_model,
                                        model_dir=args.model_dir,
                                        params=params_dict,
                                        config=tf.estimator.RunConfig(
                                            model_dir=args.model_dir,
                                            save_summary_steps=1000,
                                            log_step_count_steps=1000))

    # Keep a copy of the config file next to the checkpoints.
    if not os.path.exists(
            os.path.join(args.model_dir, os.path.basename(config_path))):
        shutil.copy2(config_path, args.model_dir)

    for epoch in range(config_datasets.get('training').get('epochs')):
        classifier = classifier.train(input_fn=train_fn, steps=steps)

    export_dir = os.path.join(args.model_dir, 'saved_model')
    cutil.make_directory(export_dir)
    cutil.publish(export_dir)

    # TODO: Write command to create serving input receiver fn from config.
    serving_input_receiver_fn = ctfd.construct_serving_fn(
        config_model['inputs'])

    classifier.export_saved_model(export_dir, serving_input_receiver_fn)
    cutil.publish(args.model_dir)
    cutil.publish(export_dir)
Ejemplo n.º 18
0
def my_model(features, labels, mode, params, config):
    """Estimator model_fn for the deformable-autoencoder example.

    Builds all configured components, combines an absolute-difference
    reconstruction loss with a deformation-smoothness penalty, and emits
    scalar/image summaries of patches, reconstructions and deformations.

    params must provide 'config' plus 'mean' and 'stddev' (per-channel,
    length 3) used to denormalize images for the summaries.
    """
    cfg = params['config']
    cfg_inputs = cfg.get('inputs')
    # NOTE(review): cfg_labels is never used in this function.
    cfg_labels = cfg_inputs.get('labels')

    # Get adaptive learning rate
    learning_rate = tf.train.polynomial_decay(
        learning_rate=1e-3,
        end_learning_rate=1e-4,
        global_step=tf.train.get_global_step(),
        decay_steps=2e7)

    tensors, labels = ctfm.parse_inputs(features, labels, cfg_inputs)

    # --------------------------------------------------------
    # Components
    # --------------------------------------------------------

    # Each component writes its named outputs back into `tensors`, so
    # later components (and the losses below) can consume them.
    components = {}
    for comp in cfg['components']:
        components[comp['name']] = ctfm.parse_component(tensors, comp, tensors)

    # --------------------------------------------------------
    # Losses
    # --------------------------------------------------------
    # Per-image L1 reconstruction error, summed over H/W/C then averaged
    # over the batch.
    reconstr_loss = tf.reduce_mean(
        tf.reduce_sum(tf.losses.absolute_difference(
            tensors['patch'],
            tensors['logits'],
            reduction=tf.losses.Reduction.NONE),
                      axis=[1, 2, 3]))
    smoothness_loss = tf.reduce_mean(
        ctfm.deformation_smoothness_loss(tensors['deformation']))

    # Smoothness term weighted down by a fixed factor of 0.01.
    loss = reconstr_loss + 0.01 * smoothness_loss

    # --------------------------------------------------------
    # Training
    # --------------------------------------------------------
    # NOTE(review): train_op is built even in EVAL mode; unused there.
    optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
    train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())

    # --------------------------------------------------------
    # Summaries
    # --------------------------------------------------------
    def denormalize(image):
        # Invert the per-channel normalization using params['mean'] and
        # params['stddev']; assumes 3 channels on the last axis.
        channels = [
            tf.expand_dims(
                image[:, :, :, channel] * params['stddev'][channel] +
                params['mean'][channel], -1) for channel in range(3)
        ]
        return tf.concat(channels, 3)

    tf.summary.scalar('reconstr_loss', reconstr_loss)
    tf.summary.scalar('smoothness_loss', smoothness_loss)

    # Image summaries of patch and reconstruction
    tf.summary.image('images', tensors['patch'], 3)
    tf.summary.image('images_denormalized', denormalize(tensors['patch']), 3)
    tf.summary.image('reconstructions', tensors['logits'], 3)
    tf.summary.image('reconstructions_denormalized',
                     denormalize(tensors['logits']), 3)
    deformations_x, deformations_y = tf.split(tensors['deformation'], 2, 3)
    tf.summary.image('deformations_x', deformations_x, 3)
    tf.summary.image('deformations_y', deformations_y, 3)
    tf.summary.image('texture', denormalize(tensors['texture']), 3)
    tf.summary.image('rotated_texture',
                     denormalize(tensors['rotated_texture']), 3)
    tf.summary.image('texture_affine', denormalize(tensors['texture_affine']),
                     3)

    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(mode, loss=loss)

    assert mode == tf.estimator.ModeKeys.TRAIN
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
Ejemplo n.º 19
0
def my_model(features, labels, mode, params, config):
    """Estimator model_fn combining a VAE with classifier and discriminator.

    Builds all configured components, then optimizes a combined loss
    (reconstruction + beta * KL + alpha * classifier - delta *
    discriminator) alongside separate per-head training ops, and logs
    accuracy/confusion metrics plus embeddings.

    params must provide 'config' (with 'parameters' holding
    'latent_space_size', 'beta', 'alpha', 'delta') and 'model_dir'.
    """
    cfg = params['config']
    cfg_inputs = cfg.get('inputs')
    cfg_labels = cfg_inputs.get('labels')
    cfg_embeddings = cfg.get('embeddings')
    cfg_embeddings.update({'model_dir': params['model_dir']})

    # Get adaptive learning rate
    learning_rate = tf.train.polynomial_decay(
        learning_rate=1e-3,
        end_learning_rate=1e-4,
        global_step=tf.train.get_global_step(),
        decay_steps=2e7)

    tensors, labels = ctfm.parse_inputs(features, labels, cfg_inputs)

    # --------------------------------------------------------
    # Components
    # --------------------------------------------------------

    # Each component writes its named outputs back into `tensors`
    # ('code', 'logits', 'distribution', prediction heads, ...).
    components = {}
    for comp in cfg['components']:
        components[comp['name']] = ctfm.parse_component(tensors, comp, tensors)

    #encoder = ctfm.parse_component(tensors, components['encoder'], tensors)
    #sampler = ctfm.parse_component(tensors, components['sampler'], tensors)
    #classifier = ctfm.parse_component(tensors, components['classifier'], tensors)
    #discriminator = ctfm.parse_component(tensors, components['discriminator'], tensors)
    #decoder = ctfm.parse_component(tensors, components['decoder'], tensors)

    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {'code': tensors['code'], 'logits': tensors['logits']}
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)

    # --------------------------------------------------------
    # Losses
    # --------------------------------------------------------

    #latent_loss = ctfm.latent_loss(tensors['mean'], tensors['log_sigma_sq'])

    # Standard-normal prior over the latent space, batch-sized to match
    # the encoder's code tensor.
    mvn_uniform = tf.contrib.distributions.MultivariateNormalDiag(
        loc=tf.zeros([
            tf.shape(tensors['code'])[0],
            cfg['parameters'].get('latent_space_size')
        ]),
        scale_diag=tf.ones([
            tf.shape(tensors['code'])[0],
            cfg['parameters'].get('latent_space_size')
        ]))

    # KL divergence between the encoder's posterior and the prior.
    latent_loss = tf.reduce_mean(
        tensors['distribution'].kl_divergence(mvn_uniform))

    #latent_loss = tf.reduce_mean(ctfm.multivariate_latent_loss(tensors['mean'], tensors['covariance']))
    #reconstr_loss = tf.losses.mean_squared_error(tensors['patch'], tensors['logits'])

    #reconstr_squared_diff = tf.math.squared_difference(tensors['patch'], tensors['logits'])
    #batch_reconstruction_loss = tf.reduce_sum(reconstr_squared_diff,axis=[1,2,3])
    #reconstr_loss = tf.reduce_mean(batch_reconstruction_loss)

    # Per-image L1 reconstruction error, summed over H/W/C then averaged
    # over the batch.
    reconstr_loss = tf.reduce_mean(
        tf.reduce_sum(tf.losses.absolute_difference(
            tensors['patch'],
            tensors['logits'],
            reduction=tf.losses.Reduction.NONE),
                      axis=[1, 2, 3]))

    discriminator_loss = tf.losses.sparse_softmax_cross_entropy(
        labels=labels, logits=tensors['predictions_discriminator'])
    classifier_loss = tf.losses.sparse_softmax_cross_entropy(
        labels=labels, logits=tensors['predictions_classifier'])

    # Combine reconstruction loss, latent loss, prediction loss and negative discriminator loss
    loss = reconstr_loss + cfg['parameters'].get('beta') * latent_loss + cfg[
        'parameters'].get('alpha') * classifier_loss - cfg['parameters'].get(
            'delta') * discriminator_loss

    # --------------------------------------------------------
    # Training
    # --------------------------------------------------------

    optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)

    # Combined loss trains all variables; the per-head ops below train only
    # their own component's variables (parse_component index 1).
    train_op_encoder = optimizer.minimize(
        loss, global_step=tf.train.get_global_step())
    train_op_discriminator = optimizer.minimize(
        discriminator_loss, var_list=[components['discriminator'][1]])
    train_op_classifier = optimizer.minimize(
        classifier_loss, var_list=[components['classifier'][1]])

    #decoder_variables = components['decoder_stain'][1] + components['decoder_structure'][1] + components['merger'][1]
    decoder_variables = components['decoder'][1]
    train_op_decoder = optimizer.minimize(reconstr_loss,
                                          var_list=decoder_variables)

    # All four optimization steps run together each training step.
    train_op = tf.group([
        train_op_encoder, train_op_discriminator, train_op_classifier,
        train_op_decoder
    ])

    # --------------------------------------------------------
    # Summaries
    # --------------------------------------------------------

    # Predictions from classifier and discriminator
    predicted_classes_classifier = tf.argmax(tensors['predictions_classifier'],
                                             1)
    predicted_classes_discriminator = tf.argmax(
        tensors['predictions_discriminator'], 1)

    classifier_accuracy = tf.metrics.accuracy(
        labels=labels,
        predictions=predicted_classes_classifier,
        name='acc_op_classifier')
    discriminator_accuracy = tf.metrics.accuracy(
        labels=labels,
        predictions=predicted_classes_discriminator,
        name='acc_op_discriminator')

    classifier_confusion = ctfb.confusion_metric(labels,
                                                 predicted_classes_classifier,
                                                 cfg_labels.get('num_classes'),
                                                 name='classifier')
    discriminator_confusion = ctfb.confusion_metric(
        labels,
        predicted_classes_discriminator,
        cfg_labels.get('num_classes'),
        name='discriminator')

    metrics = {
        'classifier_confusion': classifier_confusion,
        'classifier_accuracy': classifier_accuracy,
        'discriminator_confusion': discriminator_confusion,
        'discriminator_accuracy': discriminator_accuracy
    }

    # Index [1] of a tf.metrics tuple is the update op / value tensor
    # suitable for summaries.
    tf.summary.scalar('classifier_accuracy', classifier_accuracy[1])
    ctfb.plot_confusion_matrix(classifier_confusion[1],
                               cfg_labels.get('names'),
                               tensor_name='confusion_matrix_classifier',
                               normalize=True)

    tf.summary.scalar('discriminator_accuracy', discriminator_accuracy[1])
    ctfb.plot_confusion_matrix(discriminator_confusion[1],
                               cfg_labels.get('names'),
                               tensor_name='confusion_matrix_discriminator',
                               normalize=True)

    # Losses scalar summaries
    tf.summary.scalar('reconstr_loss', reconstr_loss)
    tf.summary.scalar('latent_loss', latent_loss)
    tf.summary.scalar('classifier_loss', classifier_loss)
    tf.summary.scalar('discriminator_loss', discriminator_loss)

    # Image summaries of patch and reconstruction
    tf.summary.image('images', tensors['patch'], 1)
    tf.summary.image('reconstructions', tensors['logits'], 1)

    # Saves latent codes with labels for the TensorBoard projector.
    embedding_hook = ctfb.EmbeddingSaverHook(
        tf.get_default_graph(), cfg_embeddings, tensors['code'].name,
        tensors['code'], tensors['patch'].name, labels.name,
        cfg_labels.get('names'))

    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(mode,
                                          loss=loss,
                                          eval_metric_ops=metrics)

    assert mode == tf.estimator.ModeKeys.TRAIN
    return tf.estimator.EstimatorSpec(mode,
                                      loss=loss,
                                      train_op=train_op,
                                      training_hooks=[embedding_hook])