Example #1
def main(argv):
    cfg0 = {'type': 'slice', 'begin': [0], 'size': [6]}

    cfg1 = {'type': 'slice', 'begin': [5], 'size': [12]}

    cfg_inputs = {
        "features": [{
            "shape": [18],
            "key": "val",
            "dtype": tf.float32
        }],
        "labels": {
            "shape": [1],
            "dtype": tf.float32
        }
    }

    features = {'val': tf.ones([10, 18])}
    labels = tf.ones([1])

    inputs, labels = ctfm.parse_inputs(features, labels, cfg_inputs)

    slice0 = _parse_slice(inputs['val'].get_shape(), cfg0)
    slice1 = _parse_slice(inputs['val'].get_shape(), cfg1)

    print(slice0[2](features['val']))
    print(slice1[2](features['val']))
Example #2
def main(argv):
    cfg = {
        'type': 'spatial_integration',
    }

    cfg_inputs = {
        "features": [{
            "shape": [32, 32, 2],
            "key": "patch",
            "dtype": tf.float32
        }],
        "labels": {
            "shape": [1],
            "dtype": tf.float32
        }
    }

    features = {'patch': tf.ones([10, 32, 32, 2])}
    labels = tf.ones([10])

    inputs, labels = ctfm.parse_inputs(features, labels, cfg_inputs)

    layer, variables, function, output_shape = ctfm.parse_layer(
        inputs['patch'].get_shape(), cfg)
    print(function(features['patch']))
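
parse_layer returns a (layer, variables, function, output_shape) tuple. As a continuation of Example #2, the sketch below composes two parsed layers; the 'dense' layer config is taken from the later examples, while passing output_shape straight back into parse_layer is an illustrative assumption about ctfm, not something these examples confirm.

    # Hypothetical continuation of Example #2: chain two parsed layers by
    # composing the returned functions. Whether output_shape can be fed
    # directly into the next parse_layer call is an assumption about ctfm.
    cfg_pool = {'type': 'spatial_integration'}
    cfg_dense = {'type': 'dense', 'units': 4}

    _, _, integrate, pooled_shape = ctfm.parse_layer(
        inputs['patch'].get_shape(), cfg_pool)
    _, _, project, _ = ctfm.parse_layer(pooled_shape, cfg_dense)

    print(project(integrate(features['patch'])))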
Example #3
def main(argv):
    cfg_inputs = {
        "features":[
            {
                "shape": [32,32,3],
                "key": "val",
                "dtype": tf.float32
            }
        ],
        "labels": {
            "shape": [1],
            "dtype": tf.float32
        }            
    }

    cfg_resnet = {
        "type":"resnet_v2_block",
        "stride":1,
        "base_depth":3,
        "num_units": 1
    }

    features = {'val': tf.ones([10, 32, 32, 3])}
    labels = tf.ones([1])

    inputs, labels = ctfm.parse_inputs(features, labels, cfg_inputs)

    resnet = ctfm.parse_layer(inputs['val'].get_shape().as_list(), cfg_resnet)
    print(resnet[2](features['val']))
    print(resnet[1])
Example #4
def main(argv):
    cfg = {'type': 'sampler', 'dims': 18, 'name': 'z'}

    config = {
        "model": {
            "inputs": {
                "features": [{
                    "shape": [1],
                    "key": "val",
                    "dtype": tf.float32
                }],
                "labels": {
                    "shape": [1],
                    "dtype": tf.float32
                }
            },
            "components": [{
                "name":
                "example",
                "input":
                "val",
                "layers": [{
                    "type": 'dense',
                    'units': 10
                }, {
                    'type': 'sampler',
                    'dims': 10,
                    'name': 'z'
                }],
                "output":
                'sample'
            }]
        }
    }

    model = config['model']

    features = {'val': tf.ones([10, 1])}
    labels = tf.ones([1])

    inputs, labels = ctfm.parse_inputs(features, labels, model['inputs'])

    layer, variables, function, output_shape = ctfm.parse_layer(
        inputs['val'].get_shape(), cfg)
    print(function(features['val']))

    example = ctfm.parse_component(inputs, model['components'][0], inputs)

    print(example[2](features['val']))
Example #5
def main(argv):
    config_filename = os.path.join(git_root, 'tests', 'model', 'config',
                                   'example_config.json')
    cfg = ctfm.parse_json(config_filename)

    features = {'val': tf.ones([10, 1])}
    labels = tf.ones([1])

    inputs, labels = ctfm.parse_inputs(features, labels, cfg['inputs'])

    outputs = {}

    encoder_layers, encoder_vars, encode = ctfm.parse_component(
        inputs, cfg['encoder'], outputs)
    print(encode(features['val']))
    print(outputs['logits'])
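
example_config.json itself is not shown in this excerpt. Judging by how cfg is used here (and by the inline configuration in Example #6 below), the parsed result should look roughly like the dict sketched here; the concrete contents of the file are an assumption for illustration only.

    # Hypothetical shape of the parsed config. The real example_config.json may
    # differ, but it has to provide the 'inputs' section and an 'encoder'
    # component whose output is registered as 'logits', since this example
    # reads both.
    cfg = {
        'inputs': {
            'features': [{'shape': [1], 'key': 'val', 'dtype': tf.float32}],
            'labels': {'shape': [1], 'dtype': tf.float32}
        },
        'encoder': {
            'input': 'val',
            'layers': [
                {'type': 'dense', 'units': 10},
                {'type': 'activation', 'function': tf.nn.relu},
                {'type': 'dense', 'units': 1}
            ],
            'output': 'logits'
        }
    }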
Example #6
def main(argv):
    cfg = {
        "inputs":{
            "features":[
                {
                    "shape": [1],
                    "key": "val",
                    "dtype": tf.float32
                }
            ],
            "labels": {
                "shape": [1],
                "dtype": tf.float32
            }
        },
        "encoder": {
            "input": "val",
            "layers": [
                {
                    "type":"dense",
                    "units": 10
                },
                {
                    "type":"activation",
                    "function": tf.nn.relu
                },
                {
                    "type":"dense",
                    "units": 1
                }
            ],
            "output":"logits"
        }
    }


    features = {'val': tf.ones([10, 1])}
    labels = tf.ones([1])

    inputs, labels = ctfm.parse_inputs(features, labels, cfg['inputs'])

    outputs = {}

    encoder_layers, encoder_vars, encode = ctfm.parse_component(
        inputs, cfg['encoder'], outputs)
    print(encode(features['val']))
    print(outputs['logits'])
Example #7
def my_model(features, labels, mode, params, config):
    cfg = params['config']

    inputs, labels = ctfm.parse_inputs(features, labels, cfg['inputs'])
    outputs = {}

    layers, variables, forward_pass = ctfm.parse_component(inputs, cfg['components'][0], outputs)

    optimizer = tf.train.AdagradOptimizer(learning_rate=0.001)
    loss = tf.losses.absolute_difference(labels, outputs['logits'])

    train_op = optimizer.minimize(loss,
                                  var_list=variables,
                                  global_step=tf.train.get_global_step())

    if mode == tf.estimator.ModeKeys.EVAL:
        # eval_metric_ops expects (value, update_op) pairs and the key 'loss'
        # is reserved by the Estimator, so report the mean loss separately.
        return tf.estimator.EstimatorSpec(
            mode,
            loss=loss,
            eval_metric_ops={'mean_loss': tf.metrics.mean(loss)})

    assert mode == tf.estimator.ModeKeys.TRAIN   
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
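
Example #7 (like Example #8 below) defines a function with the tf.estimator model_fn signature. A minimal sketch of wiring such a model_fn into an Estimator (TF 1.x API) follows; the config dict and the synthetic input_fn are illustrative assumptions rather than part of the library.

import numpy as np
import tensorflow as tf


def input_fn():
    # Synthetic regression data purely for illustration.
    x = np.random.rand(128, 1).astype(np.float32)
    y = (2.0 * x).astype(np.float32)
    dataset = tf.data.Dataset.from_tensor_slices(({'val': x}, y))
    return dataset.shuffle(128).batch(16).repeat()


# Hypothetical config matching what my_model reads: an 'inputs' section plus
# one component whose output tensor is registered as 'logits'.
cfg = {
    'inputs': {
        'features': [{'shape': [1], 'key': 'val', 'dtype': tf.float32}],
        'labels': {'shape': [1], 'dtype': tf.float32}
    },
    'components': [{
        'name': 'example',
        'input': 'val',
        'layers': [{'type': 'dense', 'units': 10},
                   {'type': 'dense', 'units': 1}],
        'output': 'logits'
    }]
}

estimator = tf.estimator.Estimator(model_fn=my_model,
                                   params={'config': cfg})
estimator.train(input_fn=input_fn, steps=100)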
Example #8
def my_model(features, labels, mode, params, config):
    cfg = params['config']

    tensors, labels = ctfm.parse_inputs(features, labels, cfg['inputs'])
    components = {}

    for comp in cfg['components']:
        components[comp['name']] = ctfm.parse_component(tensors, comp, tensors)

    optimizer = tf.train.AdagradOptimizer(learning_rate=0.01)
    loss = tf.losses.absolute_difference(tensors['val'], tensors['logits'])

    train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())

    if mode == tf.estimator.ModeKeys.EVAL:
        # eval_metric_ops expects (value, update_op) pairs and the key 'loss'
        # is reserved by the Estimator, so report the mean loss separately.
        return tf.estimator.EstimatorSpec(
            mode,
            loss=loss,
            eval_metric_ops={'mean_loss': tf.metrics.mean(loss)})

    assert mode == tf.estimator.ModeKeys.TRAIN
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
Example #9
def main(argv):
    cfg = {'type': 'reshape', 'shape': [10, 10]}

    cfg_inputs = {
        "features": [{
            "shape": [100],
            "key": "val",
            "dtype": tf.float32
        }],
        "labels": {
            "shape": [1],
            "dtype": tf.float32
        }
    }

    features = {'val': tf.ones([10, 100])}
    labels = tf.ones([1])

    inputs, labels = ctfm.parse_inputs(features, labels, cfg_inputs)

    layer, variables, function, output_shape = ctfm.parse_layer(
        inputs['val'].get_shape(), cfg)
    print(function(features['val']))
Example #10
def my_model(features, labels, mode, params, config):
    cfg = params['config']
    cfg_inputs = cfg.get('inputs')
    cfg_labels = cfg_inputs.get('labels')

    # Get adaptive learning rate
    learning_rate = tf.train.polynomial_decay(
        learning_rate=1e-3,
        end_learning_rate=1e-4,
        global_step=tf.train.get_global_step(),
        decay_steps=2e7)

    tensors, labels = ctfm.parse_inputs(features, labels, cfg_inputs)

    # --------------------------------------------------------
    # Components
    # --------------------------------------------------------

    components = {}
    for comp in cfg['components']:
        components[comp['name']] = ctfm.parse_component(tensors, comp, tensors)

    # --------------------------------------------------------
    # Losses
    # --------------------------------------------------------
    reconstr_loss = tf.reduce_mean(
        tf.reduce_sum(tf.losses.absolute_difference(
            tensors['patch'],
            tensors['logits'],
            reduction=tf.losses.Reduction.NONE),
                      axis=[1, 2, 3]))
    smoothness_loss = tf.reduce_mean(
        ctfm.deformation_smoothness_loss(tensors['deformation']))

    loss = reconstr_loss + 0.01 * smoothness_loss

    # --------------------------------------------------------
    # Training
    # --------------------------------------------------------
    optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
    train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())

    # --------------------------------------------------------
    # Summaries
    # --------------------------------------------------------
    def denormalize(image):
        channels = [
            tf.expand_dims(
                image[:, :, :, channel] * params['stddev'][channel] +
                params['mean'][channel], -1) for channel in range(3)
        ]
        return tf.concat(channels, 3)

    tf.summary.scalar('reconstr_loss', reconstr_loss)
    tf.summary.scalar('smoothness_loss', smoothness_loss)

    # Image summaries of patch and reconstruction
    tf.summary.image('images', tensors['patch'], 3)
    tf.summary.image('images_denormalized', denormalize(tensors['patch']), 3)
    tf.summary.image('reconstructions', tensors['logits'], 3)
    tf.summary.image('reconstructions_denormalized',
                     denormalize(tensors['logits']), 3)
    deformations_x, deformations_y = tf.split(tensors['deformation'], 2, 3)
    tf.summary.image('deformations_x', deformations_x, 3)
    tf.summary.image('deformations_y', deformations_y, 3)
    tf.summary.image('texture', denormalize(tensors['texture']), 3)
    tf.summary.image('rotated_texture',
                     denormalize(tensors['rotated_texture']), 3)
    tf.summary.image('texture_affine', denormalize(tensors['texture_affine']),
                     3)

    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(mode, loss=loss)

    assert mode == tf.estimator.ModeKeys.TRAIN
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
Example #11
def my_model(features, labels, mode, params, config):
    cfg = params['config']
    cfg_inputs = cfg.get('inputs')
    cfg_labels = cfg_inputs.get('labels')
    cfg_embeddings = cfg.get('embeddings')
    cfg_embeddings.update({'model_dir': params['model_dir']})

    # Get adaptive learning rate
    learning_rate = tf.train.polynomial_decay(
        learning_rate=1e-3,
        end_learning_rate=1e-4,
        global_step=tf.train.get_global_step(),
        decay_steps=2e7)

    tensors, labels = ctfm.parse_inputs(features, labels, cfg_inputs)

    # --------------------------------------------------------
    # Components
    # --------------------------------------------------------

    components = {}
    for comp in cfg['components']:
        components[comp['name']] = ctfm.parse_component(tensors, comp, tensors)

    #encoder = ctfm.parse_component(tensors, components['encoder'], tensors)
    #sampler = ctfm.parse_component(tensors, components['sampler'], tensors)
    #classifier = ctfm.parse_component(tensors, components['classifier'], tensors)
    #discriminator = ctfm.parse_component(tensors, components['discriminator'], tensors)
    #decoder = ctfm.parse_component(tensors, components['decoder'], tensors)

    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {'code': tensors['code'], 'logits': tensors['logits']}
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)

    # --------------------------------------------------------
    # Losses
    # --------------------------------------------------------

    #latent_loss = ctfm.latent_loss(tensors['mean'], tensors['log_sigma_sq'])

    mvn_uniform = tf.contrib.distributions.MultivariateNormalDiag(
        loc=tf.zeros([
            tf.shape(tensors['code'])[0],
            cfg['parameters'].get('latent_space_size')
        ]),
        scale_diag=tf.ones([
            tf.shape(tensors['code'])[0],
            cfg['parameters'].get('latent_space_size')
        ]))

    latent_loss = tf.reduce_mean(
        tensors['distribution'].kl_divergence(mvn_uniform))

    #latent_loss = tf.reduce_mean(ctfm.multivariate_latent_loss(tensors['mean'], tensors['covariance']))
    #reconstr_loss = tf.losses.mean_squared_error(tensors['patch'], tensors['logits'])

    #reconstr_squared_diff = tf.math.squared_difference(tensors['patch'], tensors['logits'])
    #batch_reconstruction_loss = tf.reduce_sum(reconstr_squared_diff,axis=[1,2,3])
    #reconstr_loss = tf.reduce_mean(batch_reconstruction_loss)

    reconstr_loss = tf.reduce_mean(
        tf.reduce_sum(tf.losses.absolute_difference(
            tensors['patch'],
            tensors['logits'],
            reduction=tf.losses.Reduction.NONE),
                      axis=[1, 2, 3]))

    discriminator_loss = tf.losses.sparse_softmax_cross_entropy(
        labels=labels, logits=tensors['predictions_discriminator'])
    classifier_loss = tf.losses.sparse_softmax_cross_entropy(
        labels=labels, logits=tensors['predictions_classifier'])

    # Combine reconstruction loss, latent loss, prediction loss and negative discriminator loss
    loss = (reconstr_loss +
            cfg['parameters'].get('beta') * latent_loss +
            cfg['parameters'].get('alpha') * classifier_loss -
            cfg['parameters'].get('delta') * discriminator_loss)

    # --------------------------------------------------------
    # Training
    # --------------------------------------------------------

    optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)

    train_op_encoder = optimizer.minimize(
        loss, global_step=tf.train.get_global_step())
    train_op_discriminator = optimizer.minimize(
        discriminator_loss, var_list=[components['discriminator'][1]])
    train_op_classifier = optimizer.minimize(
        classifier_loss, var_list=[components['classifier'][1]])

    #decoder_variables = components['decoder_stain'][1] + components['decoder_structure'][1] + components['merger'][1]
    decoder_variables = components['decoder'][1]
    train_op_decoder = optimizer.minimize(reconstr_loss,
                                          var_list=decoder_variables)

    train_op = tf.group([
        train_op_encoder, train_op_discriminator, train_op_classifier,
        train_op_decoder
    ])

    # --------------------------------------------------------
    # Summaries
    # --------------------------------------------------------

    # Predictions from classifier and discriminator
    predicted_classes_classifier = tf.argmax(tensors['predictions_classifier'],
                                             1)
    predicted_classes_discriminator = tf.argmax(
        tensors['predictions_discriminator'], 1)

    classifier_accuracy = tf.metrics.accuracy(
        labels=labels,
        predictions=predicted_classes_classifier,
        name='acc_op_classifier')
    discriminator_accuracy = tf.metrics.accuracy(
        labels=labels,
        predictions=predicted_classes_discriminator,
        name='acc_op_discriminator')

    classifier_confusion = ctfb.confusion_metric(labels,
                                                 predicted_classes_classifier,
                                                 cfg_labels.get('num_classes'),
                                                 name='classifier')
    discriminator_confusion = ctfb.confusion_metric(
        labels,
        predicted_classes_discriminator,
        cfg_labels.get('num_classes'),
        name='discriminator')

    metrics = {
        'classifier_confusion': classifier_confusion,
        'classifier_accuracy': classifier_accuracy,
        'discriminator_confusion': discriminator_confusion,
        'discriminator_accuracy': discriminator_accuracy
    }

    tf.summary.scalar('classifier_accuracy', classifier_accuracy[1])
    ctfb.plot_confusion_matrix(classifier_confusion[1],
                               cfg_labels.get('names'),
                               tensor_name='confusion_matrix_classifier',
                               normalize=True)

    tf.summary.scalar('discriminator_accuracy', discriminator_accuracy[1])
    ctfb.plot_confusion_matrix(discriminator_confusion[1],
                               cfg_labels.get('names'),
                               tensor_name='confusion_matrix_discriminator',
                               normalize=True)

    # Losses scalar summaries
    tf.summary.scalar('reconstr_loss', reconstr_loss)
    tf.summary.scalar('latent_loss', latent_loss)
    tf.summary.scalar('classifier_loss', classifier_loss)
    tf.summary.scalar('discriminator_loss', discriminator_loss)

    # Image summaries of patch and reconstruction
    tf.summary.image('images', tensors['patch'], 1)
    tf.summary.image('reconstructions', tensors['logits'], 1)

    embedding_hook = ctfb.EmbeddingSaverHook(
        tf.get_default_graph(), cfg_embeddings, tensors['code'].name,
        tensors['code'], tensors['patch'].name, labels.name,
        cfg_labels.get('names'))

    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(mode,
                                          loss=loss,
                                          eval_metric_ops=metrics)

    assert mode == tf.estimator.ModeKeys.TRAIN
    return tf.estimator.EstimatorSpec(mode,
                                      loss=loss,
                                      train_op=train_op,
                                      training_hooks=[embedding_hook])