Example #1
def _parse_dense_layer(config: dict) -> tf.layers.Dense:
    """
    Function to build dense layer with specific config.

    Parameters
    ----------
    config: dict holding 'units' key.
    
    Optional Keys: 'activation', 'kernel_initializer', 'bias_initializer', 'name', 'trainable'

    Returns
    -------
    layer: tf.layers.Dense with specified configuration.
    """
    activation = cutil.safe_get('activation', config)
    kernel_initializer = config.get('kernel_initializer',
                                    tf.initializers.lecun_uniform())
    bias_initializer = config.get('bias_initializer', tf.ones_initializer())
    name = cutil.safe_get('name', config)
    trainable = cutil.safe_get('trainable', config)

    layer = tf.layers.Dense(config['units'],
                            activation=activation,
                            kernel_initializer=kernel_initializer,
                            bias_initializer=bias_initializer,
                            name=name,
                            trainable=trainable)
    return layer
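
A minimal usage sketch for the parser above, assuming cutil.safe_get returns None for missing keys so tf.layers.Dense falls back to the defaults chosen in _parse_dense_layer:

# Hypothetical layer configuration; only 'units' is required.
dense_config = {
    'units': 128,
    'activation': tf.nn.relu,
    'name': 'fc0',
}
dense = _parse_dense_layer(dense_config)
# outputs = dense(inputs)  # apply like any other tf.layers.Dense instance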
Example #2
def main(argv):
    parser = argparse.ArgumentParser(description='Split and shuffle a tfrecords dataset.')

    parser.add_argument('input', type=str, help='Input tfrecords file.')
    parser.add_argument('factor',
                        type=float,
                        help='Fraction of samples to keep in larger file.')
    parser.add_argument('output',
                        type=str,
                        nargs=2,
                        help='Paths where to store the two output files.')
    # parser.add_argument("--shuffle", help="If to shuffle the dataset.", action="store_true")

    args = parser.parse_args()

    dataset = tf.data.TFRecordDataset(args.input, num_parallel_reads=8)

    samples = list()

    for elem in tfe.Iterator(dataset):
        samples.append(elem)

    first, second = cutil.split_shuffle_list(samples, args.factor)

    ctfd.write([x.numpy() for x in first], None, args.output[0])
    cutil.publish(args.output[0])
    ctfd.write([x.numpy() for x in second], None, args.output[1])
    cutil.publish(args.output[1])
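
The core helper used above is cutil.split_shuffle_list; a hedged standalone sketch, assuming the factor is the fraction of (shuffled) samples kept in the first returned list, as the argparse help suggests:

# Hypothetical standalone use of the split helper.
samples = list(range(10))
first, second = cutil.split_shuffle_list(samples, 0.8)
# first is expected to hold roughly 8 shuffled elements, second the rest.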
Example #3
def construct_train_fn(config, operations=None):
    """
    Function to construct the training function based on the config.

    Parameters
    ----------
    config: dict holding model configuration.

    operations: optional list of callables applied to each decoded sample before batching.

    Returns
    -------
    train_fn: callable which is passed to estimator.train function.
    This function prepares the dataset and returns it in a format which is suitable for the estimator API.
    """

    # Work on a copy so the default argument list is never mutated across calls.
    operations = list(operations) if operations is not None else []

    cfg_train_ds = cutil.safe_get('training', config)

    # Create decode operation
    decode_op = construct_decode_op(config['features'])

    # Create unzip operation
    unzip_op = construct_unzip_op()

    operations.insert(0, decode_op)
    if 'operations' in cfg_train_ds:
        for op in cfg_train_ds['operations']:
            operations.append(cutil.get_function(op['module'], op['name']))

    operations.append(unzip_op)
    preprocess = cutil.concatenate_functions(operations)

    def train_fn():
        """
        Function which is passed to .train(...) call of an estimator object.

        Returns
        -------
        dataset: tf.data.Dataset object with elements ({'f0': v0, ... 'fx': vx}, label).
        """
        #Load the dataset
        dataset = tf.data.TFRecordDataset(cfg_train_ds['filename'])

        # Apply possible preprocessing, batch and prefetch the dataset.
        dataset = dataset.map(preprocess, num_parallel_calls=os.cpu_count())

        sample = tf.data.experimental.get_single_element(dataset.take(1))
        element_size = get_deep_size(sample)

        # Estimate how many elements fit into half of system memory.
        # Note: this buffer_size is currently unused; shuffling uses config['shuffle_size'].
        buffer_size = tf.constant(
            int((virtual_memory().total / 2) / element_size), tf.int64)

        # Shuffle the dataset
        dataset = dataset.shuffle(config['shuffle_size'])

        dataset = dataset.batch(config['batch'])
        dataset = dataset.prefetch(buffer_size=1)
        return dataset.repeat()

    return train_fn
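
The full dataset schema is defined elsewhere in the project; a hedged sketch covering only the keys this function actually reads might look like:

# Hypothetical dataset configuration; keys mirror the lookups above.
config = {
    'features': {},  # feature spec consumed by construct_decode_op
    'shuffle_size': 10000,
    'batch': 256,
    'training': {
        'filename': 'training_ds.tfrecords',
        # 'operations': [{'module': 'ops.py', 'name': 'my_op'}],  # optional extra ops
    },
}
# train_fn = construct_train_fn(config)
# estimator.train(input_fn=train_fn, steps=1000)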
Example #4
def _parse_activation(config: dict):
    """
    Parse activation function and return callable with specified name.

    Parameters
    ----------
    config: dict with key 'function'.
    Optional Keys: 'name'

    Returns
    -------
    lambda x: function(x, name=name)
    """
    name = cutil.safe_get('name', config)
    function = cutil.safe_get('function', config)
    return lambda x: function(x, name=name)
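
A short example of the expected config; 'function' must be a callable that accepts a name keyword, e.g. a tf.nn op:

relu_config = {'function': tf.nn.relu, 'name': 'enc_relu'}
activation = _parse_activation(relu_config)
# y = activation(x)  # equivalent to tf.nn.relu(x, name='enc_relu')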
Example #5
def _parse_batchnorm_layer(config: dict) -> tf.layers.BatchNormalization:
    """
    Function to create batch normalization layer on specified axis.

    Parameters
    ----------
    config: dict with key 'axis'.

    Optional Keys: 'name'

    Returns
    -------
    layer: tf.layers.BatchNormalization(axis=axis,name=name)
    """
    axis = cutil.safe_get('axis', config)
    name = cutil.safe_get('name', config)
    return tf.layers.BatchNormalization(axis=axis, name=name)
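
For completeness, a hypothetical config for this parser; axis=-1 normalizes over the channel dimension of NHWC tensors:

bn = _parse_batchnorm_layer({'axis': -1, 'name': 'bn0'})
# normalized = bn(features, training=True)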
Example #6
def _parse_avgunpool_layer(config: dict):
    """
    Function to create an avg unpooling layer with the given factor.
    This is a custom implementation.

    Parameters
    ----------
    config: dict holding key 'factor'.
    
    Optional Keys: 'name'

    Returns
    -------
    lambda x: avg_unpool2d(x, factor, name=name) callable which performs the desired operation.
    """
    name = cutil.safe_get('name', config)
    factor = cutil.safe_get('factor', config)
    return lambda x: avg_unpool2d(x, factor, name=name)
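
Usage sketch, assuming avg_unpool2d is the custom upsampling op mentioned in the docstring:

unpool = _parse_avgunpool_layer({'factor': 2, 'name': 'unpool0'})
# upsampled = unpool(features)  # calls avg_unpool2d(features, 2, name='unpool0')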
Example #7
def main(argv):
    filename = 'dummy_file.py'
    path = os.path.join(git_root, 'tests', 'utility', filename)
    function_name = 'dummy_function'
    args = 'blub'
    print(path)

    function = cutil.get_function(path, function_name)
    function(args)
Example #8
def main(argv):
    parser = argparse.ArgumentParser(description='Create tfrecords dataset holding filenames matching a pattern')

    parser.add_argument('input_directory', type=str, help='Path where pattern is evaluated')
    parser.add_argument('pattern', type=str, help='Pattern to be evaluated')
    parser.add_argument('output_filename', type=str, help='Path where to store the dataset')

    args = parser.parse_args()
    
    # Collect all files matching the specified pattern
    filenames = cutil.collect_files(args.input_directory, args.pattern)

    # Encoding function
    def func_encode(sample):
        feature = { 'filename': ctf.string_feature(sample) }
        return tf.train.Example(features=tf.train.Features(feature=feature))

    ctfd.write(filenames, func_encode, args.output_filename)
    cutil.publish(args.output_filename)
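
For reference, a hedged sketch of how such filename records could be decoded again, assuming ctf.string_feature produces a scalar bytes feature:

# Hypothetical decoder for the records written above.
def _decode_filename(serialized):
    features = tf.parse_single_example(
        serialized, {'filename': tf.FixedLenFeature([], tf.string)})
    return features['filename']

# dataset = tf.data.TFRecordDataset(args.output_filename).map(_decode_filename)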
Example #9
def _parse_maxpool_layer(config: dict) -> tf.layers.MaxPooling2D:
    """
    Function to build MaxPooling2D layer with specific config.

    Parameters
    ----------
    config: dict holding 'pool_size' and 'strides' key.
    
    Optional Keys: 'name'

    Returns
    -------
    layer: tf.layers.MaxPooling2D with specified configuration.
    """
    # Retrieve attributes from config
    pool_size = cutil.safe_get('pool_size', config)
    strides = cutil.safe_get('strides', config)
    name = cutil.safe_get('name', config)

    return tf.layers.MaxPooling2D(pool_size, strides, name=name)
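
A minimal config for the max-pooling parser; pool_size and strides take the usual tf.layers values (ints or pairs):

pool = _parse_maxpool_layer({'pool_size': [2, 2], 'strides': [2, 2], 'name': 'pool0'})
# downsampled = pool(features)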
Example #10
def main(argv):

    x_train = np.linspace(-10, 10, num=100000)
    y_train = [np.math.sin(x) for x in x_train]

    data = zip(x_train, y_train)

    # Encoding function
    def func_encode(sample):
        x, y = sample
        features = {
            'val': ctf.float_feature([x]),
            'label': ctf.float_feature([y])
        }
        return tf.train.Example(features=tf.train.Features(feature=features))

    filename = os.path.join(git_root, 'examples', 'training', 'dataset',
                            'training_ds.tfrecords')
    ctfd.write(data, func_encode, filename)
    cutil.publish(filename)
Example #11
def main(argv):
    parser = argparse.ArgumentParser(
        description='Compute latent code for image patch by model inference.')

    parser.add_argument('export_dir',
                        type=str,
                        help='Path to saved model to use for inference.')
    parser.add_argument('filename',
                        type=str,
                        help='Image file or numpy array to run inference on.')
    parser.add_argument('--output',
                        type=str,
                        help='Where to store the output.')

    args = parser.parse_args()

    predict_fn = predictor.from_saved_model(args.export_dir)

    # Extract patch size and latent space size from the model identifier
    patch_size = ctfsm.determine_patch_size(args.export_dir)
    latent_space_size = ctfsm.determine_latent_space_size(args.export_dir)

    image = None

    # Check if it is image or numpy array data
    if ctfi.is_image(args.filename):
        image = ctfi.load(args.filename).numpy()
    elif cutil.is_numpy_format(args.filename):
        image = np.load(args.filename)
    else:
        sys.exit(3)

    # Resize image to match size required by the model
    image = np.resize(image, [patch_size, patch_size, 3])

    batch = np.expand_dims(image, 0)
    # Make predictions
    pred = predict_fn({
        'fixed': batch,
        'moving': np.random.rand(1, patch_size, patch_size, 3),
        'embedding': np.random.rand(1, 1, 1, latent_space_size)
    })
    latent_code = pred['latent_code_fixed']
    print(latent_code)

    if args.output:
        with open(args.output, 'w') as f:
            json.dump(
                {
                    'filename': args.filename,
                    'model': args.export_dir,
                    'latent_code': latent_code.tolist()
                }, f)
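
A hypothetical invocation of this script (the script and file names below are placeholders, not taken from the example):

# python infer_latent_code.py /path/to/saved_model patch.png --output latent_code.json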
Example #12
def _parse_maxunpool_layer(config: dict):
    """
    Function to create max_unpool2d layer.
    This is a custom implementation.

    Parameters
    ----------
    config: dict.

    Optional Keys: 'name'

    Returns
    -------
    lambda x: max_unpool2d(x, name=name)
    """
    name = cutil.safe_get('name', config)
    return lambda x: max_unpool2d(x, name=name)
Example #13
def _parse_conv_layer(config: dict):
    """
    Function to build convolutional 2d layer with specific config.
    Pass 'transpose': True in config to create deconvolution layer.

    Parameters
    ----------
    config: dict holding 'filters', 'strides' and 'kernel_size' keys.

    Optional Keys: 'activation','kernel_initializer','name','bias_initializer', 'trainable', 'transpose'

    Returns
    -------
    layer: tf.layers.Conv2D or tf.layers.Conv2DTranspose with specified configuration.
    """
    filters = config['filters']
    strides = cutil.safe_get('strides', config)
    kernel_size = cutil.safe_get('kernel_size', config)
    name = cutil.safe_get('name', config)
    activation = cutil.safe_get('activation', config)
    kernel_initializer = config.get('kernel_initializer',
                                    tf.initializers.lecun_uniform())
    bias_initializer = config.get('bias_initializer', tf.ones_initializer())
    trainable = cutil.safe_get('trainable', config)
    transpose = cutil.safe_get('transpose', config)
    padding = config.get('padding', 'same')

    if transpose:
        layer = tf.layers.Conv2DTranspose(
            filters,
            kernel_size,
            strides,
            padding=padding,
            name=name,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            activation=activation,
            trainable=trainable)
    else:
        layer = tf.layers.Conv2D(filters,
                                 kernel_size,
                                 strides,
                                 padding=padding,
                                 name=name,
                                 kernel_initializer=kernel_initializer,
                                 bias_initializer=bias_initializer,
                                 activation=activation,
                                 trainable=trainable)
    return layer
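
A hedged example for both branches of the parser; the same config with 'transpose': True yields a Conv2DTranspose instead of a Conv2D:

# Hypothetical convolution configuration.
conv_config = {
    'filters': 32,
    'kernel_size': [3, 3],
    'strides': [1, 1],
    'activation': tf.nn.relu,
    'name': 'conv0',
}
conv = _parse_conv_layer(conv_config)
deconv = _parse_conv_layer(dict(conv_config, transpose=True, name='deconv0'))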
Example #14
def main(argv):
    parser = argparse.ArgumentParser(
        description='Compute latent code for image patch by model inference.')
    parser.add_argument('export_dir',
                        type=str,
                        help='Path to saved model to use for inference.')

    args = parser.parse_args()

    # Load config files, separated in this example.
    dataset_config_file = os.path.join(git_root, 'examples', 'dataset',
                                       'dataset.json')
    model_config_file = os.path.join(git_root, 'examples', 'dataset',
                                     'model.json')

    cfg_datasets = ctfm.parse_json(dataset_config_file)['datasets']
    cfg_model = ctfm.parse_json(model_config_file)['model']

    cfg_train_ds = cutil.safe_get('training', cfg_datasets)

    model_dir = args.export_dir

    params_dict = {
        'config': cfg_model,
        'model_dir': model_dir,
    }

    classifier = tf.estimator.Estimator(model_fn=my_model,
                                        model_dir=model_dir,
                                        params=params_dict,
                                        config=tf.estimator.RunConfig(
                                            model_dir=model_dir,
                                            save_summary_steps=100,
                                            log_step_count_steps=100))

    classifier = classifier.train(
        input_fn=ctfd.construct_train_fn(cfg_datasets),
        steps=cfg_train_ds['steps'])
Example #15
def main(argv):
    parser = argparse.ArgumentParser(
        description=
        'Create tfrecords dataset holding patches of images specified by filename in input dataset.'
    )

    parser.add_argument('input_dataset',
                        type=str,
                        help='Path to dataset holding image filenames')
    parser.add_argument('output_dataset',
                        type=str,
                        help='Path where to store the output dataset')
    parser.add_argument(
        'patch_size',
        type=int,
        help='Patch size which to use in the preprocessed dataset')
    parser.add_argument('num_samples', type=int, help='Size of output dataset')
    parser.add_argument(
        'labels',
        type=lambda s: [item for item in s.split(',')],
        help="Comma separated list of labels to find in filenames.")
    parser.add_argument('--image_size',
                        type=int,
                        dest='image_size',
                        help='Image size for files pointed to by filename')
    parser.add_argument(
        '--no_filter',
        dest='no_filter',
        action='store_true',
        default=False,
        help='Whether to apply total image variation filtering.')
    parser.add_argument(
        '--threshold',
        type=float,
        dest='threshold',
        help='Threshold for filtering the samples according to variation.')
    parser.add_argument('--subsampling_factor',
                        type=int,
                        dest='subsampling_factor',
                        default=1,
                        help='Subsampling factor to use to downsample images.')
    args = parser.parse_args()

    labels_table = tf.contrib.lookup.index_table_from_tensor(
        mapping=args.labels)

    filename_dataset = tf.data.TFRecordDataset(
        args.input_dataset,
        num_parallel_reads=8).map(_decode_example_filename).shuffle(100000)

    functions = [
        tf.Variable(label, name='const_' + label).value
        for label in args.labels
    ]
    false_fn = tf.Variable('None', name='none_label').value

    def _extract_label(filename):
        #base_size = tf.size(tf.string_split([filename],""))
        #predicates = [tf.equal(base_size, tf.size(tf.string_split([tf.regex_replace(filename, "/"+ label + "/", "")])))  for label in args.labels]

        match = [
            tf.math.reduce_any(
                tf.strings.regex_full_match(
                    tf.string_split([filename], '/').values, label))
            for label in args.labels
        ]
        pred_fn_pairs = list(zip(match, functions))
        return tf.case(pred_fn_pairs, default=false_fn, exclusive=True)

    # Load images and extract the label from the filename
    if args.image_size is not None:
        images_dataset = filename_dataset.map(
            lambda feature: {
                'image':
                ctfi.load(feature['filename'],
                          channels=3,
                          width=args.image_size,
                          height=args.image_size),
                'label':
                labels_table.lookup(_extract_label(feature['filename']))
            })
    else:
        images_dataset = filename_dataset.map(
            lambda feature: {
                'image': ctfi.load(feature['filename'], channels=3),
                'label': labels_table.lookup(
                    _extract_label(feature['filename']))
            })

    if args.subsampling_factor > 1:
        images_dataset = images_dataset.map(
            lambda feature: {
                'image': ctfi.subsample(feature['image'], args.
                                        subsampling_factor),
                'label': feature['label']
            })

    def _filter_func_label(features):
        label = features['label']
        result = label > -1
        return result

    images_dataset = images_dataset.filter(_filter_func_label).shuffle(100)

    # Extract image patches

    #for sample in tfe.Iterator(images_dataset):
    #    print(sample['label'])

    def _split_patches(features):
        patches = ctfi.extract_patches(features['image'], args.patch_size)
        labels = tf.expand_dims(tf.reshape(features['label'], [1]), 0)
        labels = tf.tile(labels, tf.stack([tf.shape(patches)[0], 1]))
        return (patches, labels)

    patches_dataset = images_dataset.map(_split_patches).apply(
        tf.data.experimental.unbatch())

    patches_dataset = patches_dataset.map(lambda patch, label: {
        'patch': patch,
        'label': label
    })

    if args.threshold is not None:
        threshold = args.threshold
    else:
        threshold = 0.08

    num_filtered_patches = tf.Variable(0)
    filtered_patch_ratio = 10

    # Annotates each sample with a 'no_background' flag based on its per-pixel total image variation.
    # See: https://www.tensorflow.org/versions/r1.12/api_docs/python/tf/image/total_variation
    def add_background_info(sample):
        variation = tf.image.total_variation(sample['patch'])
        num_pixels = sample['patch'].get_shape().num_elements()
        var_per_pixel = (variation / num_pixels)
        no_background = var_per_pixel > threshold
        sample['no_background'] = no_background
        return sample

        #def true_fn():
        #     sample.update({'no_background': True})
        #     return sample
        #def false_fn():
        #    def _true_fn_lvl2():
        #        sample.update({'label':tf.reshape(tf.convert_to_tensor(len(args.labels), dtype=tf.int64), [1]),'no_background': True})
        #        return sample
        #    def _false_fn_lvl2():
        #        sample.update({'no_background': False})
        #        return sample
        #    pred = tf.equal(num_filtered_patches.assign_add(1) % 10, 0)
        #    return tf.cond(pred,true_fn=_true_fn_lvl2,false_fn=_false_fn_lvl2)
        #return tf.cond(no_background,true_fn=true_fn, false_fn=false_fn)

    if args.no_filter:
        dataset = patches_dataset
    else:
        dataset = patches_dataset.map(add_background_info)
        filtered_elements_dataset = dataset.filter(
            lambda sample: tf.logical_not(sample['no_background']))

        def change_label(sample):
            return {
                'patch':
                sample['patch'],
                'label':
                tf.reshape(
                    tf.convert_to_tensor(len(args.labels), dtype=tf.int64),
                    [1])
            }

        filtered_elements_dataset = filtered_elements_dataset.map(change_label)
        filtered_dataset = dataset.filter(lambda sample: sample[
            'no_background']).map(lambda sample: {
                'patch': sample['patch'],
                'label': sample['label']
            })
        dataset = tf.data.experimental.sample_from_datasets(
            [filtered_dataset, filtered_elements_dataset],
            weights=[0.95, 0.05])

    dataset = dataset.map(lambda sample: (sample['patch'], sample['label']))
    dataset = dataset.take(args.num_samples).shuffle(100000)

    writer = tf.io.TFRecordWriter(args.output_dataset)

    # Make file readable for all users
    cutil.publish(args.output_dataset)

    def _encode_func(sample):
        patch_np = sample[0].numpy().flatten()
        label_np = sample[1].numpy()
        return ctfd.encode({
            'patch': ctf.float_feature(patch_np),
            'label': ctf.int64_feature(label_np)
        })

    # Iterate over whole dataset and write serialized examples to file.
    # See: https://www.tensorflow.org/versions/r1.12/api_docs/python/tf/contrib/eager/Iterator
    for sample in tfe.Iterator(dataset):
        example = _encode_func(sample)
        writer.write(example.SerializeToString())

    # Flush and close the writer.
    writer.flush()
    writer.close()
Example #16
def main(argv):
    parser = argparse.ArgumentParser(description='TODO')
    parser.add_argument('config',
                        type=str,
                        help='Path to configuration file to use.')
    parser.add_argument(
        'mean',
        type=str,
        help='Path to npy file holding mean for normalization.')
    parser.add_argument(
        'variance',
        type=str,
        help='Path to npy file holding variance for normalization.')
    parser.add_argument('model_dir',
                        type=str,
                        help='Path to saved model to use for inference.')
    args = parser.parse_args()

    mean = np.load(args.mean)
    variance = np.load(args.variance)
    stddev = [np.math.sqrt(x) for x in variance]

    def _normalize_op(features):
        channels = [
            tf.expand_dims((features['patch'][:, :, channel] - mean[channel]) /
                           stddev[channel], -1) for channel in range(3)
        ]
        features['patch'] = tf.concat(channels, 2)
        return features

    cutil.make_directory(args.model_dir)
    cutil.publish(args.model_dir)

    config_path = args.config
    config = ctfm.parse_json(config_path)

    config_datasets = config.get('datasets')
    config_model = config.get('model')

    train_fn = ctfd.construct_train_fn(config_datasets,
                                       operations=[_normalize_op])

    steps = int(
        config_datasets.get('training').get('size') /
        config_datasets.get('batch'))

    params_dict = {
        'config': config_model,
        'model_dir': args.model_dir,
        'mean': mean,
        'stddev': stddev
    }

    classifier = tf.estimator.Estimator(model_fn=my_model,
                                        model_dir=args.model_dir,
                                        params=params_dict,
                                        config=tf.estimator.RunConfig(
                                            model_dir=args.model_dir,
                                            save_summary_steps=1000,
                                            log_step_count_steps=1000))

    if not os.path.exists(
            os.path.join(args.model_dir, os.path.basename(config_path))):
        shutil.copy2(config_path, args.model_dir)

    for epoch in range(config_datasets.get('training').get('epochs')):
        classifier = classifier.train(input_fn=train_fn, steps=steps)

    export_dir = os.path.join(args.model_dir, 'saved_model')
    cutil.make_directory(export_dir)
    cutil.publish(export_dir)

    cutil.publish(args.model_dir)
    cutil.publish(export_dir)
Example #17
def main(argv):
    parser = argparse.ArgumentParser(description='Compute similarity heatmaps of windows around landmarks.')
    parser.add_argument('export_dir',type=str,help='Path to saved model.')
    parser.add_argument('mean', type=str, help='Path to npy file holding mean for normalization.')
    parser.add_argument('variance', type=str, help='Path to npy file holding variance for normalization.')
    parser.add_argument('source_filename', type=str,help='Image file from which to extract patch.')
    parser.add_argument('source_image_size', type=int, nargs=2, help='Size of the input image, HW.')
    parser.add_argument('source_landmarks', type=str,help='CSV file from which to extract the landmarks for source image.')
    parser.add_argument('target_filename', type=str,help='Image file for which to create the heatmap.')
    parser.add_argument('target_image_size', type=int, nargs=2, help='Size of the input image for which to create heatmap, HW.')
    parser.add_argument('target_landmarks', type=str,help='CSV file from which to extract the landmarks for target image.')
    parser.add_argument('patch_size', type=int, help='Size of image patch.')
    parser.add_argument('output', type=str)
    parser.add_argument('--method', dest='method', type=str, help='Method to use to measure similarity, one of KLD, SKLD, BD, HD, SQHD.')
    parser.add_argument('--stain_code_size', type=int, dest='stain_code_size', default=0,
        help='Optional: Size of the stain code to use, which is skipped for similarity estimation')
    parser.add_argument('--rotate', type=float, dest='angle', default=0,
        help='Optional: rotation angle to rotate target image')
    parser.add_argument('--subsampling_factor', type=int, dest='subsampling_factor', default=1, help='Factor to subsample source and target image.')
    parser.add_argument('--region_size', type=int, default=64)
    args = parser.parse_args()

    mean = np.load(args.mean)
    variance = np.load(args.variance)
    stddev = [np.math.sqrt(x) for x in variance]

    def denormalize(image):
        channels = [np.expand_dims(image[:,:,channel] * stddev[channel] + mean[channel],-1) for channel in range(3)]
        denormalized_image = ctfi.rescale(np.concatenate(channels, 2), 0.0, 1.0)
        return denormalized_image

    def normalize(image, name=None, num_channels=3):
        channels = [tf.expand_dims((image[:,:,:,channel] - mean[channel]) / stddev[channel],-1) for channel in range(num_channels)]
        return tf.concat(channels, num_channels)

    latest_checkpoint = tf.train.latest_checkpoint(args.export_dir)   
    saver = tf.train.import_meta_graph(latest_checkpoint + '.meta', import_scope='imported')

    config = tf.ConfigProto()
    config.allow_soft_placement=True
    #config.log_device_placement=True

    # Load image and extract patch from it and create distribution.
    source_image = tf.expand_dims(ctfi.subsample(ctfi.load(args.source_filename,height=args.source_image_size[0], width=args.source_image_size[1]),args.subsampling_factor),0)
    args.source_image_size = list(map(lambda x: int(x / args.subsampling_factor), args.source_image_size))

    #Load image for which to create the heatmap
    target_image = tf.expand_dims(ctfi.subsample(ctfi.load(args.target_filename,height=args.target_image_size[0], width=args.target_image_size[1]),args.subsampling_factor),0)
    args.target_image_size = list(map(lambda x: int(x / args.subsampling_factor), args.target_image_size))

    source_landmarks = get_landmarks(args.source_landmarks, args.subsampling_factor)
    target_landmarks = get_landmarks(args.target_landmarks, args.subsampling_factor)

    region_size = args.region_size
    region_center = [int(region_size / 2),int(region_size / 2)]
    num_patches = region_size**2

    possible_splits = cutil.get_divisors(num_patches)
    num_splits = possible_splits.pop(0)

    while num_patches / num_splits > 512 and len(possible_splits) > 0:
        num_splits = possible_splits.pop(0)

    split_size = int(num_patches / num_splits)

    offset = 64
    center_idx = np.prod(region_center)

    X, Y = np.meshgrid(range(offset, region_size + offset), range(offset, region_size + offset))
    coords = np.concatenate([np.expand_dims(Y.flatten(),axis=1),np.expand_dims(X.flatten(),axis=1)],axis=1)

    coords_placeholder = tf.placeholder(tf.float32, shape=[split_size, 2])

    source_landmark_placeholder = tf.placeholder(tf.float32, shape=[1, 2])
    target_landmark_placeholder = tf.placeholder(tf.float32, shape=[1, 2])

    source_image_region = tf.image.extract_glimpse(source_image,[region_size + 2*offset, region_size+ 2*offset], source_landmark_placeholder, normalized=False, centered=False)
    target_image_region = tf.image.extract_glimpse(target_image,[region_size + 2*offset, region_size+ 2*offset], target_landmark_placeholder, normalized=False, centered=False)

    source_patches_placeholder = tf.map_fn(lambda x: get_patch_at(x, source_image, args.patch_size), source_landmark_placeholder, parallel_iterations=8, back_prop=False)[0]
    target_patches_placeholder = tf.squeeze(tf.map_fn(lambda x: get_patch_at(x, target_image_region, args.patch_size), coords_placeholder, parallel_iterations=8, back_prop=False))


    with tf.Session(config=config).as_default() as sess:
        saver.restore(sess, latest_checkpoint)

        source_patches_cov, source_patches_mean = tf.contrib.graph_editor.graph_replace([sess.graph.get_tensor_by_name('imported/z_log_sigma_sq/BiasAdd:0'),sess.graph.get_tensor_by_name('imported/z_mean/BiasAdd:0')] ,{ sess.graph.get_tensor_by_name('imported/patch:0'): normalize(source_patches_placeholder) })
        source_patches_distribution = tf.contrib.distributions.MultivariateNormalDiag(source_patches_mean[:,args.stain_code_size:], tf.exp(source_patches_cov[:,args.stain_code_size:]))
        
        target_patches_cov, target_patches_mean = tf.contrib.graph_editor.graph_replace([sess.graph.get_tensor_by_name('imported/z_log_sigma_sq/BiasAdd:0'),sess.graph.get_tensor_by_name('imported/z_mean/BiasAdd:0')] ,{ sess.graph.get_tensor_by_name('imported/patch:0'): normalize(target_patches_placeholder) })
        target_patches_distribution = tf.contrib.distributions.MultivariateNormalDiag(target_patches_mean[:,args.stain_code_size:], tf.exp(target_patches_cov[:,args.stain_code_size:]))

        similarities_skld = source_patches_distribution.kl_divergence(target_patches_distribution) + target_patches_distribution.kl_divergence(source_patches_distribution)
        similarities_bd = ctf.bhattacharyya_distance(source_patches_distribution, target_patches_distribution)
        similarities_sad = tf.reduce_sum(tf.abs(source_patches_placeholder - target_patches_placeholder), axis=[1,2,3])

        source_patches_grayscale = tf.image.rgb_to_grayscale(source_patches_placeholder)
        target_patches_grayscale = tf.image.rgb_to_grayscale(target_patches_placeholder)

        similarities_nmi = tf.map_fn(lambda x: nmi_tf(tf.squeeze(source_patches_grayscale), tf.squeeze(x), 20), target_patches_grayscale)

        with open(args.output + "_" + str(region_size) + ".csv",'wt') as outfile:
            fp = csv.DictWriter(outfile, ["method", "landmark", "min_idx", "min_idx_value", "rank", "landmark_value"])
            methods = ["SKLD", "BD", "SAD", "MI"]
            fp.writeheader()
            
            results = []

            for k in range(len(source_landmarks)):

                heatmap_fused = np.ndarray((region_size, region_size, len(methods)))
                feed_dict={source_landmark_placeholder: [source_landmarks[k,:]], target_landmark_placeholder: [target_landmarks[k,:]] }
                
                for i in range(num_splits):
                    start = i * split_size
                    end = start + split_size
                    batch_coords = coords[start:end,:]

                    feed_dict.update({coords_placeholder: batch_coords})

                    similarity_values = np.array(sess.run([similarities_skld,similarities_bd, similarities_sad, similarities_nmi],feed_dict=feed_dict)).transpose()
                    #heatmap.extend(similarity_values)
                    for idx, val in zip(batch_coords, similarity_values):
                        heatmap_fused[idx[0] - offset, idx[1] - offset] = val

                for c in range(len(methods)):
                    heatmap = heatmap_fused[:,:,c]
                    if c == 3:
                        min_idx = np.unravel_index(np.argmax(heatmap),heatmap.shape)
                        min_indices = np.array(np.unravel_index(list(reversed(np.argsort(heatmap.flatten()))),heatmap.shape)).transpose().tolist()
                    else:
                        min_idx = np.unravel_index(np.argmin(heatmap),heatmap.shape)
                        min_indices = np.array(np.unravel_index(np.argsort(heatmap.flatten()),heatmap.shape)).transpose().tolist()

                    landmark_value = heatmap[region_center[0], region_center[1]]
                    rank = min_indices.index(region_center)

                    fp.writerow({"method": methods[c],"landmark": k, "min_idx": min_idx, "min_idx_value": heatmap[min_idx[0], min_idx[1]],"rank": rank , "landmark_value": landmark_value})
                    #matplotlib.image.imsave(args.output + "_" + str(region_size)+ "_"+ methods[c] + "_" + str(k) + ".jpeg", heatmap, cmap='plasma')
                outfile.flush()

                print(min_idx, rank)
        
        
            fp.writerows(results)


        sess.close()
        
    return 0
Example #18
def parse_component(inputs: dict, config: dict, outputs: dict):
    """
    Function to parse a dict holding the description for a component.
    A component is defined by an input and a number of layers.

    This function is supposed to be called in the model function of a tf.Estimator and eases model creation.

    The input description is used to build the feature_column and input layer.
    The input is then extended with batch dimension.

    Parameters
    ----------
    inputs: dict mapping from string to input tensor.

    config: dict holding keys 'input' for the input specification and 'layers', the list of layers after the input.

    outputs: dict to which this component's outputs are appended.

    Returns
    -------
    layers: list(tf.layers.Layer), all layers added for this component.
            Layers not inheriting from tf.layers.Layer are passed as functions.

    variables: list(tf.Variable), list of all variables associated with the layers of this component.

    function: callable which performs a forward pass of features through the network.
    """

    layers = list()
    variables = list()
    funcs = list()

    # Get input shape for following layers
    shape = None
    if type(config['input']) != list:
        shape = inputs[config['input']].get_shape()
    else:
        shape = [inputs[key].get_shape() for key in config['input']]

    # Parse each layer specified in layers and append them to collections.
    for desc in config['layers']:
        layer, variable, function, shape = parse_layer(shape, desc)
        if layer is not None:
            layers.append(layer)
        if variable is not None:
            variables.append(variable)
        funcs.append(function)

    function = cutil.concatenate_functions(funcs)
    output_tensors = function(inputs[config['input']])

    if isinstance(config['output'], collections.Iterable) and isinstance(
            output_tensors, tuple):
        for key, value in zip(config['output'], output_tensors):
            if isinstance(value, tf.Tensor):
                outputs.update({key: tf.identity(value, name=key)})
            else:
                outputs.update({key: value})
    else:
        outputs.update({
            config['output']:
            tf.identity(output_tensors, name=config['output'])
        })
    return layers, variables, function
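
A heavily hedged usage sketch; the exact layer dicts are dispatched by parse_layer (not shown here), so everything below apart from the 'input', 'output' and 'layers' keys read above is an assumption:

# Hypothetical component description.
inputs = {'patch': patch_tensor}  # patch_tensor: an existing input tf.Tensor (assumed)
outputs = {}
component = {
    'input': 'patch',
    'output': 'code',
    'layers': [
        # layer dicts in the format expected by parse_layer, e.g. the
        # dense/conv configs shown in the parser examples above
    ],
}
layers, variables, forward = parse_component(inputs, component, outputs)
# outputs now maps 'code' to the tensor produced by the forward pass.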
Example #19
def main(argv):
    parser = argparse.ArgumentParser(description='TODO')
    parser.add_argument('config',
                        type=str,
                        help='Path to configuration file to use.')
    parser.add_argument(
        'mean',
        type=str,
        help='Path to npy file holding mean for normalization.')
    parser.add_argument(
        'variance',
        type=str,
        help='Path to npy file holding variance for normalization.')
    parser.add_argument('model_dir',
                        type=str,
                        help='Path to saved model to use for inference.')
    args = parser.parse_args()

    mean = np.load(args.mean)
    variance = np.load(args.variance)
    stddev = [np.math.sqrt(x) for x in variance]

    def _normalize_op(features):
        channels = [
            tf.expand_dims((features['patch'][:, :, channel] - mean[channel]) /
                           stddev[channel], -1) for channel in range(3)
        ]
        features['patch'] = tf.concat(channels, 2)
        return features

    def _subsampling_op(features):
        features['patch'] = ctfi.subsample(features['patch'], 2)
        return features

    cutil.make_directory(args.model_dir)
    cutil.publish(args.model_dir)

    config_path = args.config
    config = ctfm.parse_json(config_path)

    config_datasets = config.get('datasets')
    config_model = config.get('model')

    train_fn = ctfd.construct_train_fn(config_datasets,
                                       operations=[_normalize_op])
    #def train_fn():
    #    dataset = tf.data.Dataset.from_tensor_slices(np.random.rand(256,32,32,3))
    #    dataset = dataset.map(lambda x : ({"patch": x}, 0)).batch(256).repeat()
    #    return dataset

    steps = int(
        config_datasets.get('training').get('size') /
        config_datasets.get('batch'))

    params_dict = {'config': config_model, 'model_dir': args.model_dir}

    classifier = tf.estimator.Estimator(model_fn=my_model,
                                        model_dir=args.model_dir,
                                        params=params_dict,
                                        config=tf.estimator.RunConfig(
                                            model_dir=args.model_dir,
                                            save_summary_steps=1000,
                                            log_step_count_steps=1000))

    if not os.path.exists(
            os.path.join(args.model_dir, os.path.basename(config_path))):
        shutil.copy2(config_path, args.model_dir)

    for epoch in range(config_datasets.get('training').get('epochs')):
        classifier = classifier.train(input_fn=train_fn, steps=steps)

    export_dir = os.path.join(args.model_dir, 'saved_model')
    cutil.make_directory(export_dir)
    cutil.publish(export_dir)

    # TODO: Write command to create serving input receiver fn from config.
    serving_input_receiver_fn = ctfd.construct_serving_fn(
        config_model['inputs'])

    classifier.export_saved_model(export_dir, serving_input_receiver_fn)
    cutil.publish(args.model_dir)
    cutil.publish(export_dir)
Example #20
def main(argv):
    parser = argparse.ArgumentParser(
        description='Compute codes and reconstructions for image.')
    parser.add_argument('export_dir', type=str, help='Path to saved model.')
    parser.add_argument(
        'mean',
        type=str,
        help='Path to npy file holding mean for normalization.')
    parser.add_argument(
        'variance',
        type=str,
        help='Path to npy file holding variance for normalization.')
    parser.add_argument('source_filename',
                        type=str,
                        help='Image file from which to extract patch.')
    parser.add_argument('source_image_size',
                        type=int,
                        nargs=2,
                        help='Size of the input image, HW.')
    parser.add_argument('target_filename',
                        type=str,
                        help='Image file for which to create the heatmap.')
    parser.add_argument(
        'target_image_size',
        type=int,
        nargs=2,
        help='Size of the input image for which to create heatmap, HW.')
    parser.add_argument('patch_size', type=int, help='Size of image patch.')
    parser.add_argument(
        '--method',
        dest='method',
        type=str,
        help=
        'Method to use to measure similarity, one of KLD, SKLD, BD, HD, SQHD.')
    parser.add_argument(
        '--stain_code_size',
        type=int,
        dest='stain_code_size',
        default=0,
        help=
        'Optional: Size of the stain code to use, which is skipped for similarity estimation'
    )
    parser.add_argument('--rotate',
                        type=float,
                        dest='angle',
                        default=0,
                        help='Optional: rotation angle to rotate target image')
    parser.add_argument('--subsampling_factor',
                        type=int,
                        dest='subsampling_factor',
                        default=1,
                        help='Factor to subsample source and target image.')
    args = parser.parse_args()

    mean = np.load(args.mean)
    variance = np.load(args.variance)
    stddev = [np.math.sqrt(x) for x in variance]

    def denormalize(image):
        channels = [
            np.expand_dims(
                image[:, :, channel] * stddev[channel] + mean[channel], -1)
            for channel in range(3)
        ]
        denormalized_image = ctfi.rescale(np.concatenate(channels, 2), 0.0,
                                          1.0)
        return denormalized_image

    def normalize(image, name=None, num_channels=3):
        channels = [
            tf.expand_dims(
                (image[:, :, :, channel] - mean[channel]) / stddev[channel],
                -1) for channel in range(num_channels)
        ]
        return tf.concat(channels, num_channels)

    latest_checkpoint = tf.train.latest_checkpoint(args.export_dir)
    saver = tf.train.import_meta_graph(latest_checkpoint + '.meta',
                                       import_scope='imported')

    config = tf.ConfigProto()
    run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_options.report_tensor_allocations_upon_oom = True
    #config.gpu_options.allow_growth = True

    # Load image and extract patch from it and create distribution.
    source_image = ctfi.subsample(
        ctfi.load(args.source_filename,
                  height=args.source_image_size[0],
                  width=args.source_image_size[1]), args.subsampling_factor)
    args.source_image_size = list(
        map(lambda x: int(x / args.subsampling_factor),
            args.source_image_size))

    #Load image for which to create the heatmap
    target_image = ctfi.subsample(
        ctfi.load(args.target_filename,
                  height=args.target_image_size[0],
                  width=args.target_image_size[1]), args.subsampling_factor)
    args.target_image_size = list(
        map(lambda x: int(x / args.subsampling_factor),
            args.target_image_size))

    heatmap_size = list(
        map(lambda v: max(v[0], v[1]),
            zip(args.source_image_size, args.target_image_size)))

    source_image = tf.expand_dims(
        tf.image.resize_image_with_crop_or_pad(source_image, heatmap_size[0],
                                               heatmap_size[1]), 0)
    target_image = tf.expand_dims(
        tf.image.resize_image_with_crop_or_pad(target_image, heatmap_size[0],
                                               heatmap_size[1]), 0)

    num_patches = np.prod(heatmap_size, axis=0)

    possible_splits = cutil.get_divisors(num_patches)
    num_splits = possible_splits.pop(0)

    while num_patches / num_splits > 500 and len(possible_splits) > 0:
        num_splits = possible_splits.pop(0)

    split_size = int(num_patches / num_splits)

    X, Y = np.meshgrid(range(heatmap_size[1]), range(heatmap_size[0]))

    coords = np.concatenate([
        np.expand_dims(Y.flatten(), axis=1),
        np.expand_dims(X.flatten(), axis=1)
    ],
                            axis=1)

    #source_patches_placeholder = tf.placeholder(tf.float32, shape=[num_patches / num_splits, args.patch_size, args.patch_size, 3])
    #target_patches_placeholder = tf.placeholder(tf.float32, shape=[num_patches / num_splits, args.patch_size, args.patch_size, 3])

    #all_source_patches = ctfi.extract_patches(source_image, args.patch_size, strides=[1,1,1,1], padding='SAME')
    #all_target_patches = ctfi.extract_patches(target_image, args.patch_size, strides=[1,1,1,1], padding='SAME')

    #source_patches = tf.split(all_source_patches, num_splits)
    #target_patches = tf.split(all_target_patches, num_splits)

    #patches = zip(source_patches, target_patches)

    coords_placeholder = tf.placeholder(tf.float32, shape=[split_size, 2])

    source_patches_placeholder = tf.squeeze(
        tf.map_fn(lambda x: get_patch_at(x, source_image, args.patch_size),
                  coords_placeholder,
                  parallel_iterations=8,
                  back_prop=False))
    target_patches_placeholder = tf.squeeze(
        tf.map_fn(lambda x: get_patch_at(x, target_image, args.patch_size),
                  coords_placeholder,
                  parallel_iterations=8,
                  back_prop=False))

    heatmap = np.ndarray(heatmap_size)

    with tf.Session(graph=tf.get_default_graph(),
                    config=config).as_default() as sess:
        source_patches_cov, source_patches_mean = tf.contrib.graph_editor.graph_replace(
            [
                sess.graph.get_tensor_by_name(
                    'imported/z_log_sigma_sq/BiasAdd:0'),
                sess.graph.get_tensor_by_name('imported/z_mean/BiasAdd:0')
            ], {
                sess.graph.get_tensor_by_name('imported/patch:0'):
                normalize(source_patches_placeholder)
            })
        source_patches_distribution = tf.contrib.distributions.MultivariateNormalDiag(
            source_patches_mean[:, args.stain_code_size:],
            tf.exp(source_patches_cov[:, args.stain_code_size:]))

        target_patches_cov, target_patches_mean = tf.contrib.graph_editor.graph_replace(
            [
                sess.graph.get_tensor_by_name(
                    'imported/z_log_sigma_sq/BiasAdd:0'),
                sess.graph.get_tensor_by_name('imported/z_mean/BiasAdd:0')
            ], {
                sess.graph.get_tensor_by_name('imported/patch:0'):
                normalize(target_patches_placeholder)
            })
        target_patches_distribution = tf.contrib.distributions.MultivariateNormalDiag(
            target_patches_mean[:, args.stain_code_size:],
            tf.exp(target_patches_cov[:, args.stain_code_size:]))

        similarity = source_patches_distribution.kl_divergence(
            target_patches_distribution
        ) + target_patches_distribution.kl_divergence(
            source_patches_distribution)
        #similarity = ctf.bhattacharyya_distance(source_patches_distribution, target_patches_distribution)

        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        saver.restore(sess, latest_checkpoint)

        for i in range(num_splits):
            start = i * split_size
            end = start + split_size
            batch_coords = coords[start:end, :]
            feed_dict = {coords_placeholder: batch_coords}
            similarity_values = sess.run(similarity,
                                         feed_dict=feed_dict,
                                         options=run_options)
            #heatmap.extend(similarity_values)
            for idx, val in zip(batch_coords, similarity_values):
                heatmap[idx[0], idx[1]] = val

        heatmap_sad = sess.run(
            tf.reduce_mean(tf.squared_difference(source_image, target_image),
                           axis=3))[0]

        #sim_heatmap = np.reshape(heatmap, heatmap_size, order='C')
        sim_heatmap = heatmap

        fig_images, ax_images = plt.subplots(1, 2)
        ax_images[0].imshow(sess.run(source_image)[0])
        ax_images[1].imshow(sess.run(target_image)[0])

        fig_similarities, ax_similarities = plt.subplots(1, 2)
        heatmap_skld_plot = ax_similarities[0].imshow(sim_heatmap,
                                                      cmap='plasma')
        heatmap_sad_plot = ax_similarities[1].imshow(heatmap_sad,
                                                     cmap='plasma')

        fig_similarities.colorbar(heatmap_skld_plot, ax=ax_similarities[0])
        fig_similarities.colorbar(heatmap_sad_plot, ax=ax_similarities[1])

        plt.show()

        sess.close()
    return 0
Example #21
def main(argv):
    incr_and_double = cutil.concatenate_functions([increment, double])
    print(incr_and_double(1))
Example #22
def main(argv):
    y = cutil.pipeline(1, [increment, double])
    print(y)