Exemplo n.º 1
0
def _as_activation(x):
    """Coerce *x* into a keras layer.

    ``None`` becomes an identity ``layers.Layer()``, an existing ``Layer``
    passes through unchanged, and anything else (e.g. a string or callable)
    is wrapped in ``layers.Activation``.
    """
    if isinstance(x, layers.Layer):
        return x
    if x is None:
        return layers.Layer()
    return layers.Activation(x)
Exemplo n.º 2
0
 def __init__(self, **kwargs):
     """Build the per-layer filter and bias weights for a CIN-style block.

     NOTE(review): ``field_size`` and ``cross_layer`` are read as free
     names here — they are neither parameters nor attributes of ``self``;
     confirm they are defined at module level in the enclosing file.
     """
     super(cin, self).__init__(**kwargs)
     # Per-cross-layer convolution filters and biases, indexed by layer.
     self.filters = []
     self.bias = []
     # field_nums[k] = number of feature maps entering cross layer k;
     # seeded with the initial field count.
     self.field_nums = []
     self.field_nums.append(field_size)
     for i, layer_size in enumerate(cross_layer):
         # NOTE(review): each weight is created on a throwaway
         # ``layers.Layer()`` instance rather than on ``self``, so these
         # variables may not be tracked as trainable weights of this
         # layer — verify against the Keras add_weight documentation.
         _filter = layers.Layer().add_weight(
             name='filter_%s' % i,
             shape=(1, self.field_nums[0] * self.field_nums[-1],
                    layer_size),
             dtype=tf.float32,
             initializer=tf.keras.initializers.GlorotNormal())
         _bias = layers.Layer().add_weight(
             name='bias_%s' % i,
             shape=(layer_size, ),
             dtype=tf.float32,
             initializer=tf.keras.initializers.zeros())
         self.filters.append(_filter)
         self.bias.append(_bias)
         self.field_nums.append(layer_size)
Exemplo n.º 3
0
    def create_model(self):
        """Create a Model object which can be used for training."""
        # 2D spatial size, variable depth (None), and one channel axis.
        in_shape = (*self.input_size[:2], None, 1)
        net_in = layers.Input(in_shape, name="model_in")
        base = models.Model(inputs=net_in,
                            outputs=self.define_architecture(net_in))

        if not self.quality_weighted_mode:
            return base

        # Quality weights are routed through an identity layer so they
        # appear as a named model output alongside the main output.
        weights_in = layers.Input(shape=(None, ), name="qw_in")
        weights_out = layers.Layer(name="qw_out")(weights_in)
        return models.Model(inputs=[base.input, weights_in],
                            outputs=[weights_out, base.output])
Exemplo n.º 4
0
    def create_model(self):
        """Create a Model object which can be used for training."""
        # Full spatial size plus a single channel axis.
        net_in = layers.Input((*self.input_size, 1),
                              name="model_in",
                              dtype=tf.float32)
        base = models.Model(inputs=net_in,
                            outputs=self.define_architecture(net_in))

        if not self.quality_weighted_mode:
            return base

        # One quality weight per final-axis entry when input is 3D+,
        # otherwise a single scalar weight.
        qw_shape = self.input_size[-1] if len(self.input_size) > 2 else 1
        weights_in = layers.Input(qw_shape, name="qw_in")
        weights_out = layers.Layer(name="qw_out")(weights_in)
        return models.Model(inputs=[base.input, weights_in],
                            outputs=[weights_out, base.output])
Exemplo n.º 5
0
    def __init__(self,
                 filters: int,
                 kernel_size: int,
                 name: str,
                 dilation_rate: int = 1,
                 kernel_initializer: str = 'he_normal',
                 padding: str = 'causal',
                 **kwargs):
        """Temporal convolution (a 1 x kernel_size Conv2D) with optional
        causal padding.

        Parameters
        ----------
        filters : int
            Number of output channels.
        kernel_size : int
            Temporal kernel width; the convolution kernel is (1, kernel_size).
        name : str
            Layer name, also forwarded to the parent constructor.
        dilation_rate : int
            Temporal dilation factor.
        kernel_initializer : str
            Keras initializer identifier for the convolution kernel.
        padding : str
            'causal' left-pads the time axis so each output only sees
            current and past steps; any other value applies no padding.
        """
        # NOTE(review): `name` is passed positionally to the parent; if the
        # parent is keras.layers.Layer, its first positional parameter is
        # `trainable`, not `name` — confirm the superclass signature.
        super().__init__(name, **kwargs)
        self.conv_layer = Conv2D(filters=filters,
                                 kernel_size=(1, kernel_size),
                                 dilation_rate=dilation_rate,
                                 padding='valid',
                                 name=name,
                                 kernel_initializer=kernel_initializer)
        if padding == 'causal':
            # Causal left padding needs (kernel_size - 1) * dilation_rate
            # zeros.  The original hard-coded 2 * dilation_rate, which is
            # only correct for kernel_size == 3; this generalizes it
            # (identical behavior at kernel_size == 3).
            self.padding_layer = layers.ZeroPadding2D(
                ((0, 0), ((kernel_size - 1) * dilation_rate, 0)))
        else:
            # Identity layer: no padding applied.
            self.padding_layer = layers.Layer()
Exemplo n.º 6
0
def svbrdf(num_classes):
    """Encoder/decoder network with residual shortcuts.

    Eight stride-2 separable-conv downsampling stages are followed by eight
    transpose-conv + upsampling stages; a final softmax Conv2D produces
    ``num_classes`` channels.  Input is a 256x256x3 image.
    """
    inputs = keras.Input(shape=(256, 256) + (3, ))
    x = layers.Layer()(inputs)
    skip = x  # running residual source

    down_filters = np.array([128, 256, 512, 512, 512, 512, 512, 512])
    up_filters = np.flip(np.copy(down_filters))

    # --- First half: downsampling ---
    for n in down_filters:
        x = layers.LeakyReLU()(x)
        x = layers.SeparableConv2D(n, 4, 2, padding="same")(x)
        x = layers.BatchNormalization()(x)
        # Project the residual to the new shape, then merge it back in.
        shortcut = layers.Conv2D(n, 4, 2, padding="same")(skip)
        x = layers.add([x, shortcut])
        skip = x

    # --- Second half: upsampling ---
    for n in up_filters:
        x = layers.LeakyReLU()(x)
        x = layers.Conv2DTranspose(n, 4, padding="same")(x)
        x = layers.BatchNormalization()(x)
        x = layers.UpSampling2D(2)(x)
        # Project the residual to the new shape, then merge it back in.
        shortcut = layers.UpSampling2D(2)(skip)
        shortcut = layers.Conv2D(n, 4, padding="same")(shortcut)
        x = layers.add([x, shortcut])
        skip = x

    outputs = layers.Conv2D(num_classes,
                            3,
                            activation="softmax",
                            padding="same")(x)
    return keras.Model(inputs, outputs)
Exemplo n.º 7
0
    Model.add(layers.Flatten())
    Model.add(layers.Dense(8, activation='relu'))
    #mixed_input
    Model.add(layers.Dense(OutputDataSpaceSize))
#shower model input: measured gamma energy for each event z bin
#shower model output: single vector
#vox model input: measured gamma energy x,y,z bins
#vox model output:


#one to one mapping model
def shower_mixed():
    """Return a Sequential model containing a single identity layer."""
    print("In shower model")
    model = models.Sequential()
    model.add(layers.Layer())
    return model

#voxnet model for mixed input
def vox_mixed():
    """Build the 3D conv stack (voxnet-style) for the mixed-input model."""
    net = models.Sequential()
    stack = (
        layers.Conv3D(32, (3, 3, 3), activation='relu', padding="SAME"),
        layers.BatchNormalization(),
        layers.MaxPooling3D((3, 3, 3)),
        layers.Conv3D(64, (3, 3, 3), activation='relu', padding="SAME"),
        layers.MaxPooling3D((3, 3, 3)),
        layers.Conv3D(128, (3, 3, 3), activation='relu', padding="SAME"),
        layers.Flatten(),
    )
    for step in stack:
        net.add(step)
    return net
Exemplo n.º 8
0
 def Layer(filters, **kwargs_inner):
     """Return a callable that applies a plain keras Layer, optional
     instance normalization, and the enclosing scope's activation."""
     base = layers.Layer(**kwargs_inner)

     def call(x):
         # The normalization wrapper is resolved lazily on each call,
         # matching the original lambda's evaluation order.
         return _single_layer_call(
             x, base, _instance_norm(instance_norm, filters), activation)

     return call
Exemplo n.º 9
0
""" Standardized layers implemented in keras.
"""

import tensorflow
from tensorflow.keras import layers, activations
from tensorflow.keras.initializers import RandomNormal

try:
    from tensorflow_addons.layers import InstanceNormalization
# Catch only the missing-dependency case; a bare `except:` would also hide
# unrelated errors (including KeyboardInterrupt).
except ImportError:
    import warnings

    # Bind the *class* as the fallback, not an instance: the original bound
    # `layers.Layer()`, which cannot be instantiated again where the code
    # later calls `InstanceNormalization(...)`.
    InstanceNormalization = layers.Layer
    warnings.warn(
        "DeepTrack not installed with tensorflow addons. Instance normalization will not work. Consider upgrading to tensorflow >= 2.0.",
        ImportWarning,
    )


def as_block(x):
    """Converts input to layer block.

    Strings are looked up in ``_string_to_block``; a non-Layer callable is
    returned unchanged.  Raises ``ValueError`` for unknown block names and
    ``TypeError`` for anything that is not a block-producing function.
    """
    if isinstance(x, str):
        if x in _string_to_block:
            return _string_to_block[x]
        else:
            raise ValueError(
                "Invalid blockname {0}, valid names are: ".format(x) +
                ", ".join(_string_to_block.keys()))
    if isinstance(x, layers.Layer) or not callable(x):
        raise TypeError(
            "Layer block should be a function that returns a keras Layer.")
    # Bug fix: the original fell through here and implicitly returned None
    # for every valid callable block.
    return x
Exemplo n.º 10
0
def get_frvsr(generator_model,
              flow_model,
              crop_size=32,
              learning_rate=0.0005,
              steps_per_execution=1):
    """
    Create FRVSR model.

    Inputs:
    - input (N x 10 x H x W x 3) - input frames
    - target (N x 10 x H * 2 x W * 2 x 3) - target frames

    Outputs:
    - gen_outputs (N x 10 x H * 2 x W * 2 x 3) - upscaled frames
    - target_warp (N x 10 x H * 2 x W * 2 x 3) - warped target frames

    Parameters
    ----------
    generator_model : keras.Model
        Generator
    flow_model : keras.Model
        Flow
    crop_size : int
        Image crop size
    learning_rate : float
        Learning rate
    steps_per_execution : int
        Steps per execution
    """
    inputs = keras.Input(shape=[10, crop_size, crop_size, 3], name="input")
    targets = keras.Input(shape=[10, crop_size * 2, crop_size * 2, 3],
                          name="target")
    # Fold the 9 consecutive frame pairs into the batch axis so the flow
    # model sees plain (H, W, 3) frames: frames[1:] vs frames[:-1].
    input_frames = tf.reshape(inputs[:, 1:, :, :, :],
                              [-1, crop_size, crop_size, 3])
    input_frames_pre = tf.reshape(inputs[:, :-1, :, :, :],
                                  [-1, crop_size, crop_size, 3])
    target_frames_pre = tf.reshape(targets[:, :-1, :, :, :],
                                   [-1, crop_size * 2, crop_size * 2, 3])
    flow_lr = flow_model([input_frames_pre, input_frames])
    # Upscale the low-res flow field to output resolution; displacement
    # values are doubled to match the 2x spatial scale.
    flow = UpscaleLayer()(flow_lr) * 2
    # Warp the previous high-res target frames with the predicted flow
    # (used as the second training target below).
    target_warp = DenseWarpLayer()([target_frames_pre, flow])
    # Restore the time axis: 9 warped frames / flows per sample.
    target_warp = tf.reshape(target_warp,
                             [-1, 9, crop_size * 2, crop_size * 2, 3])
    flow = tf.reshape(flow, [-1, 9, crop_size * 2, crop_size * 2, 2])
    # First frame has no previous output, so the generator gets zeros
    # (shaped like a high-res frame) as its warped-previous input.
    last_output = generator_model(
        [inputs[:, 0, :, :, :],
         tf.zeros_like(targets[:, 0, :, :, :])])
    gen_outputs = [last_output]
    # Recurrent rollout: warp the previous generator output by this step's
    # flow and feed it together with the next low-res frame.
    for frame_i in range(9):
        cur_flow = flow[:, frame_i, :, :, :]
        gen_pre_output_warp = DenseWarpLayer()([last_output, cur_flow])
        last_output = generator_model(
            [inputs[:, frame_i + 1, :, :, :], gen_pre_output_warp])
        gen_outputs.append(last_output)
    # Stack the 10 per-frame outputs back into a single time-major tensor.
    gen_outputs = tf.reshape(tf.stack(gen_outputs, axis=1),
                             [-1, 10, crop_size * 2, crop_size * 2, 3])
    # Identity layers exist only to give the two outputs stable names.
    target_warp = layers.Layer(name="target_warp")(target_warp)
    gen_outputs = layers.Layer(name="gen_outputs")(gen_outputs)
    model = keras.Model(inputs=[inputs, targets],
                        outputs=[gen_outputs, target_warp])
    model.compile(loss=["mse", "mse"],
                  optimizer=keras.optimizers.Adam(learning_rate=learning_rate),
                  steps_per_execution=steps_per_execution)
    return model