Example #1
    def __init__(self, selected_model):
        self.n_classes = 1
        self.LeNet = Sequential([
            Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160, 320, 3)),
            Cropping2D(cropping=((50, 20), (0, 0)), input_shape=(160, 320, 3)),
            Conv2D(filters=24,
                   kernel_size=(5, 5),
                   activation='relu',
                   strides=(1, 1),
                   padding='valid'),
            MaxPooling2D((2, 2)),
            Conv2D(filters=36, kernel_size=(5, 5), activation='relu'),
            MaxPooling2D((2, 2)),
            Conv2D(filters=48, kernel_size=(5, 5), activation='relu'),
            MaxPooling2D((2, 2)),
            Conv2D(filters=64, kernel_size=(1, 1), activation='relu'),
            MaxPooling2D((2, 2)),
            Conv2D(filters=64, kernel_size=(1, 1), activation='relu'),
            MaxPooling2D((2, 2)),
            Flatten(),
            Dense(120, activation='relu', kernel_regularizer='l2'),
            Dense(84, activation='relu'),
            Dense(self.n_classes, activation='sigmoid')
        ])

        self.nvidia = Sequential([
            Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160, 320, 3)),
            Cropping2D(cropping=((50, 20), (0, 0)), input_shape=(160, 320, 3)),
            LayerNormalization(epsilon=0.001),
            Conv2D(filters=24,
                   kernel_size=(5, 5),
                   activation='relu',
                   strides=(3, 3),
                   padding='valid'),
            Conv2D(filters=36, kernel_size=(5, 5), activation='relu'),
            Conv2D(filters=48, kernel_size=(5, 5), activation='relu'),
            Conv2D(filters=64, kernel_size=(3, 3), activation='relu'),
            Conv2D(filters=64, kernel_size=(3, 3), activation='relu'),
            # Extra Layer
            Conv2D(filters=64, kernel_size=(3, 3), activation='relu'),
            Flatten(),
            Dense(1164, activation='relu'),
            Dense(100, activation='relu'),
            Dense(50, activation='relu'),
            Dense(10, activation='relu'),
            Dense(self.n_classes)
        ])

        self.inception = self.prepare_inception()

        # Default to the Nvidia model when no model name is given; check for
        # None before calling .lower() to avoid an AttributeError
        if selected_model is None or selected_model.lower() == 'nvidia':
            self.current_model = self.nvidia
        elif selected_model.lower() == 'lenet':
            self.current_model = self.LeNet
        elif selected_model.lower() == 'inception':
            self.current_model = self.inception
        else:
            raise ValueError(
                'Please select a valid model: LeNet, Nvidia or Inception')
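A minimal usage sketch, assuming the class above is named ModelBank (the real class name is not shown in the snippet) and the usual tf.keras imports are in scope:

bank = ModelBank('nvidia')                  # hypothetical class name
bank.current_model.compile(optimizer='adam', loss='mse')
bank.current_model.summary()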
Example #2
    def yolo_conv(x_in):
        if isinstance(x_in, tuple):
            inputs = Input(x_in[0].shape[1:]), Input(x_in[1].shape[1:])
            x, x_skip = inputs

            # concat with skip connection
            x = Conv2D_BN_Relu(x, filters, 1)
            x = UpSampling2D(2)(x)
            if (x.shape[1] != x_skip.shape[1] and x.shape[2] != x_skip.shape[2]):
                x = Cropping2D(cropping=((1,0), (1,0)), input_shape = x.shape[1:])(x)
            elif (x.shape[1] != x_skip.shape[1] and x.shape[2] == x_skip.shape[2]):
                x = Cropping2D(cropping=((1,0), (0,0)), input_shape = x.shape[1:])(x)
            elif (x.shape[1] == x_skip.shape[1] and x.shape[2] != x_skip.shape[2]):
                x = Cropping2D(cropping=((0,0), (1,0)), input_shape = x.shape[1:])(x)
                
            x = Concatenate()([x, x_skip])
        else:
            x = inputs = Input(x_in.shape[1:])

        x = Conv2D_BN_Relu(x, filters, 1)
        x = Conv2D_BN_Relu(x, filters * 2, 3)
        x = Conv2D_BN_Relu(x, filters, 1)
        x = Conv2D_BN_Relu(x, filters * 2, 3)
        x = Conv2D_BN_Relu(x, filters, 1)
        return Model(inputs, x, name=name)(x_in)
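For context, `filters` and `name` are free variables of yolo_conv, so the function is normally returned from an enclosing factory that binds them; a structural sketch (following the common yolov3-tf2 layout, not shown in the original):

def YoloConv(filters, name=None):
    def yolo_conv(x_in):
        ...  # body exactly as above
    return yolo_conv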
Example #3
def _pad_or_crop_to_shape_2D(x, in_shape, tgt_shape):
    '''
    in_shape, tgt_shape are 2-element (height, width) sequences
    '''
    in_shape = np.asarray(in_shape)
    tgt_shape = np.asarray(tgt_shape)
    print('Padding/cropping input from {} to {}'.format(in_shape, tgt_shape))
    im_diff = in_shape - tgt_shape
    if im_diff[0] < 0:
        pad_amt = (int(np.ceil(abs(im_diff[0]) / 2.0)),
                   int(np.floor(abs(im_diff[0]) / 2.0)))
        x = ZeroPadding2D((pad_amt, (0, 0)))(x)
    if im_diff[1] < 0:
        pad_amt = (int(np.ceil(abs(im_diff[1]) / 2.0)),
                   int(np.floor(abs(im_diff[1]) / 2.0)))
        x = ZeroPadding2D(((0, 0), pad_amt))(x)

    if im_diff[0] > 0:
        crop_amt = (int(np.ceil(im_diff[0] / 2.0)),
                    int(np.floor(im_diff[0] / 2.0)))
        x = Cropping2D((crop_amt, (0, 0)))(x)
    if im_diff[1] > 0:
        crop_amt = (int(np.ceil(im_diff[1] / 2.0)),
                    int(np.floor(im_diff[1] / 2.0)))
        x = Cropping2D(((0, 0), crop_amt))(x)
    return x
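For instance (a hedged sketch, assuming the usual tensorflow.keras imports), resizing a 30x34 feature map to 32x32 pads one row on each edge of the height and crops one column from each edge of the width:

from tensorflow.keras.layers import Input
x = Input(shape=(30, 34, 8))
y = _pad_or_crop_to_shape_2D(x, (30, 34), (32, 32))
print(y.shape)  # (None, 32, 32, 8)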
Example #4
    def __init__(self, direction, size=1, **kwargs):
        self.size = size
        self.direction = direction
        super(Shift, self).__init__(**kwargs)

        if self.direction == "down":
            self.pad = ZeroPadding2D(padding=((self.size, 0), (0, 0)), data_format="channels_last")
            self.crop = Cropping2D(((0, self.size), (0, 0)))
        elif self.direction == "right":
            self.pad = ZeroPadding2D(padding=((0, 0), (self.size, 0)), data_format="channels_last")
            self.crop = Cropping2D(((0, 0), (0, self.size)))
        else:
            # only "down" and "right" shifts are supported
            raise ValueError(f"Unsupported shift direction: {self.direction}")
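The snippet stops at the constructor; a minimal call() consistent with the pad/crop pairs above (an assumption, the original forward pass is not shown) would be:

    def call(self, inputs):
        # pad `size` zeros on the leading edge, then crop the same amount from
        # the trailing edge: shifts the feature map without changing its shape
        return self.crop(self.pad(inputs))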
Example #5
def resize_layer(conv, deconv):
    if tf.keras.backend.image_data_format() == "channels_first":
        if deconv.get_shape().as_list()[2] > conv.get_shape().as_list()[2]:
            deconv = Cropping2D(cropping=((0, 1), (0, 0)))(deconv)
        if deconv.get_shape().as_list()[3] > conv.get_shape().as_list()[3]:
            deconv = Cropping2D(cropping=((0, 0), (0, 1)))(deconv)
    else:
        if deconv.get_shape().as_list()[1] > conv.get_shape().as_list()[1]:
            deconv = Cropping2D(cropping=((0, 1), (0, 0)))(deconv)
        if deconv.get_shape().as_list()[2] > conv.get_shape().as_list()[2]:
            deconv = Cropping2D(cropping=((0, 0), (0, 1)))(deconv)
    return deconv
Example #6
def crop(tensor, size):
    """
    Crops `tensor` to the spatial (height, width) of the reference tensor `size`
    """

    dx = int(tensor.shape[2] - size.shape[2])
    dy = int(tensor.shape[1] - size.shape[1])

    # one Cropping2D handles both axes: ((top, bottom), (left, right))
    return Cropping2D(cropping=((0, dy), (0, dx)))(tensor)
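For example (a hedged sketch, with Input from tensorflow.keras.layers), this trims only the bottom rows and rightmost columns:

big = Input(shape=(64, 48, 3))
small = Input(shape=(60, 40, 3))
print(crop(big, small).shape)  # (None, 60, 40, 3)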
Example #7
def FashionMnist_classifier_full_bn():
    """
    The architecture of the single-output model
    """
    input_shape = (28, 28, 1)
    input_img = Input(shape=input_shape, name="Input", dtype='float32')
    conv1 = Conv2D(2, (3, 3), padding='same', name="conv2d_1",
                   trainable=True)(input_img)
    conv1 = (BatchNormalization(name='batch_normalization'))(conv1)
    conv1 = (Activation('relu', name='activation'))(conv1)
    conv2 = Conv2D(4, (3, 3), padding='same', name="conv2d_2",
                   trainable=True)(conv1)
    conv2 = (BatchNormalization(name='batch_normalization_1'))(conv2)
    conv2 = (Activation('relu', name='activation_1'))(conv2)
    conv2bis = MaxPooling2D(pool_size=(2, 2), name="max_pooling2d_1")(conv2)
    conv3 = Conv2D(8, (3, 3), padding='same', name="conv2d_3",
                   trainable=True)(conv2bis)
    conv3 = (BatchNormalization(name='batch_normalization_2'))(conv3)
    conv3 = (Activation('relu', name='activation_2'))(conv3)
    conv3bis = MaxPooling2D(pool_size=(2, 2), name="max_pooling2d_2")(conv3)
    conv4 = Conv2D(16, (3, 3), padding='same', name="conv2d_4",
                   trainable=True)(conv3bis)
    conv4 = (BatchNormalization(name='batch_normalization_3'))(conv4)
    conv4 = (Activation('relu', name='activation_3'))(conv4)
    conv4bis = MaxPooling2D(pool_size=(2, 2), name="max_pooling2d_3")(conv4)
    conv5 = Conv2D(32, (3, 3), padding='same', name="conv2d_5",
                   trainable=True)(conv4bis)
    conv5 = (BatchNormalization(name='batch_normalization_4'))(conv5)
    conv5 = (Activation('relu', name='activation_4'))(conv5)
    conv5bis = UpSampling2D(size=(2, 2), name='up_sampling2d_1')(conv5)
    conv4tris = Cropping2D(cropping=((1, 0), (1, 0)))(conv4)
    conv6 = Concatenate(name='concatenate_1', axis=3)([conv5bis, conv4tris])
    conv7 = Conv2D(16, (3, 3), padding='same', name="conv2d_6",
                   trainable=True)(conv6)
    conv7 = (BatchNormalization(name='batch_normalization_5',
                                trainable=True))(conv7)
    conv7 = (Activation('relu', name='activation_5'))(conv7)
    conv7bis = UpSampling2D(size=(2, 2), name='up_sampling2d_2')(conv7)
    conv3tris = Cropping2D(cropping=((1, 1), (1, 1)))(conv3)
    conv8 = Concatenate(name='concatenate_2', axis=3)([conv7bis, conv3tris])
    conv9 = Conv2D(16, (3, 3), padding='same', name="conv2d_7",
                   trainable=True)(conv8)
    conv9 = (BatchNormalization(name='batch_normalization_6',
                                trainable=True))(conv9)
    conv9 = (Activation('relu', name='activation_6'))(conv9)
    res3 = GlobalAveragePooling2D()(conv9)
    res3 = Dense(10, name="fc3", trainable=True)(res3)
    res3 = Activation('softmax', name='fine')(res3)
    final_result = res3
    model = Model(inputs=input_img, outputs=final_result)
    return model
Example #8
def createModel_Unet(row, col, depth):

    # channels-last data format (TensorFlow default)

    inputShape = (row, col, depth)

    X_input = Input(inputShape)

    c1 = unet_convblock(X_input, 32, (3, 3))

    p1 = MaxPooling2D(pool_size=(2, 2))(c1)
    c2 = unet_convblock(p1, 64, (3, 3))

    p2 = MaxPooling2D(pool_size=(2, 2))(c2)
    c3 = unet_convblock(p2, 128, (3, 3))

    p3 = MaxPooling2D(pool_size=(2, 2))(c3)
    c4 = unet_convblock(p3, 256, (3, 3))

    p4 = MaxPooling2D(pool_size=(2, 2))(c4)
    c5 = unet_convblock(p4, 512, (3, 3))

    d6 = Conv2DTranspose(256, (3, 3), strides=(2, 2), padding='same')(c5)
    crop_c4 = Cropping2D(cropping=(get_crop_size(c4, d6)))(c4)
    d6 = Concatenate()([d6, crop_c4])
    c6 = unet_convblock(d6, 256, (3, 3))

    d7 = Conv2DTranspose(128, (3, 3), strides=(2, 2), padding='same')(c6)
    crop_c3 = Cropping2D(cropping=(get_crop_size(c3, d7)))(c3)
    d7 = Concatenate()([d7, crop_c3])
    c7 = unet_convblock(d7, 128, (3, 3))

    d8 = Conv2DTranspose(64, (3, 3), strides=(2, 2), padding='same')(c7)
    crop_c2 = Cropping2D(cropping=(get_crop_size(c2, d8)))(c2)
    d8 = Concatenate()([d8, crop_c2])
    c8 = unet_convblock(d8, 64, (3, 3))

    d9 = Conv2DTranspose(32, (3, 3), strides=(2, 2), padding='same')(c8)
    crop_c1 = Cropping2D(cropping=(get_crop_size(c1, d9)))(c1)
    d9 = Concatenate()([d9, crop_c1])
    c9 = unet_convblock(d9, 32, (3, 3))

    ch, cw = get_crop_size(c1, c9)
    c9 = ZeroPadding2D(padding=((ch[0], ch[1]), (cw[0], cw[1])))(c9)
    output = Conv2D(1, (1, 1), padding='same')(c9)
    output = Activation('sigmoid')(output)

    model = models.Model(inputs=X_input, outputs=output, name='Unet')

    return model
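get_crop_size is not shown in the snippet; a plausible helper inferred from both call sites (it must return ((top, bottom), (left, right)) amounts that shrink the first tensor's spatial size to the second's) might look like:

def get_crop_size(target, reference):
    # hypothetical implementation, not from the source
    dh = int(target.shape[1]) - int(reference.shape[1])
    dw = int(target.shape[2]) - int(reference.shape[2])
    return (dh // 2, dh - dh // 2), (dw // 2, dw - dw // 2)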
Example #9
def deep_cascade_unet(depth_str='ki',
                      H=218,
                      W=170,
                      Hpad=3,
                      Wpad=3,
                      kshape=(3, 3),
                      channels=22):

    inputs = Input(shape=(H, W, channels))
    mask = Input(shape=(H, W, channels))
    layers = [inputs]
    kspace_flag = True
    for ii in depth_str:

        if ii == 'i':
            # Add IFFT
            layers.append(Lambda(ifft_layer)(layers[-1]))
            kspace_flag = False
        # Add CNN block
        layers.append(ZeroPadding2D(padding=(Hpad, Wpad))(layers[-1]))
        layers.append(unet_block(layers[-1], kshape, channels))
        layers.append(Cropping2D(cropping=(Hpad, Wpad))(layers[-1]))

        # Add DC block
        layers.append(
            DC_block(layers[-1], mask, inputs, channels, kspace=kspace_flag))
        kspace_flag = True
    out = Lambda(ifft_layer)(layers[-1])
    model = Model(inputs=[inputs, mask], outputs=out)
    return model
Example #10
def _decodeBlock(x,
                 shortcut,
                 rows_odd,
                 cols_odd,
                 cweights,
                 bns,
                 activation=LeakyReLU(alpha=ALPHA)):
    #Add zero padding on bottom and right if odd dimension required at output,
    #giving an output of one greater than required
    x = ZeroPadding2D(padding=((0, rows_odd), (0, cols_odd)))(x)
    # x = UpSampling2D(size=(2,2), interpolation=UPSAMPLE_INTERP)(x)

    # up_size = np.array(x.shape)
    # up_size[1] *= 2
    # up_size[2] *= 2
    # x = bicubic_interp_2d(x,(up_size[1],up_size[2]))

    x = upsample_helper(x)

    #If padding was added, crop the output to match the target shape
    #print(rows_odd)
    #print(cols_odd)
    x = Cropping2D(cropping=((0, rows_odd), (0, cols_odd)))(x)

    x = Concatenate()([shortcut, x])

    x = res_Block(x, cweights, bns, activation=LeakyReLU(alpha=ALPHA))

    return x
Example #11
def Nvidia_model():
    input_shape = (160, 320, 3)
    model = Sequential()
    model.add(Lambda(lambda x: x/127.5 - 1., input_shape=input_shape))
    model.add(Cropping2D(cropping=((50, 20), (0, 0))))
    model.add(Conv2D(24, (5, 5), strides=(2, 2), activation='elu'))
    model.add(Dropout(0.5))
    model.add(Conv2D(36, (5, 5), strides=(2, 2), activation='elu'))
    model.add(Dropout(0.5))
    model.add(Conv2D(48, (3, 3), strides=(1, 1), activation='elu'))
    model.add(Dropout(0.5))
    model.add(Conv2D(64, (3, 3), strides=(1, 1), activation='elu'))
    model.add(Dropout(0.5))
    model.add(Conv2D(64, (3, 3), strides=(1, 1), activation='elu'))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(100, activation='elu'))
    model.add(Dropout(0.5))
    model.add(Dense(50, activation='elu'))
    model.add(Dropout(0.5))
    model.add(Dense(10, activation='elu'))
    model.add(Dropout(0.5))
    model.add(Dense(1))
    model.summary()
    return model
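A minimal training sketch (hedged; the optimizer, loss, and data are assumptions matching the usual behavioral-cloning setup, not shown above):

model = Nvidia_model()
model.compile(optimizer='adam', loss='mse')
# model.fit(X_train, y_train, validation_split=0.2, epochs=5)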
Example #12
    def _merger(self, net: Tensor, item: Tensor) -> Tensor:
        """Combine feature maps."""

        # crop feature maps; split an odd size difference across the two edges
        diff = int(item.shape[1]) - int(net.shape[1])
        crop_size = diff // 2
        item_cropped = Cropping2D(((crop_size, diff - crop_size),
                                   (crop_size, diff - crop_size)))(item)

        # adapt number of filters via 1x1 convolutional to allow merge
        current_filters = int(net.shape[-1])
        item_cropped = Conv2D(current_filters,
                              1,
                              activation=self._activation,
                              padding=self._padding)(item_cropped)

        # Combine feature maps by adding
        if self._merge_type == "add":
            return Add()([item_cropped, net])
        # Combine feature maps by subtracting
        if self._merge_type == "subtract":
            return Subtract()([item_cropped, net])
        # Combine feature maps by multiplication
        if self._merge_type == "multiply":
            return Multiply()([item_cropped, net])

        # Raise ValueError if merge type is unsupported
        raise ValueError(f"unsupported merge type: {self._merge_type}")
Example #13
def InceptionModel(x_train):
    # x_train is assumed to be a batch (N, 160, 320, 3); drop the batch axis
    X_input = Input(x_train.shape[1:])

    X = Cropping2D(cropping=((60, 25), (0, 0)))(X_input)

    # Resize the cropped input with a Keras Lambda layer
    # (`input_size` and `inception` are assumed module-level globals: the
    # resize target and a pre-built Inception base model)
    X = Lambda(lambda image: tf.image.resize(image, (input_size, input_size)))(
        X)

    # Feed the resized input into the Inception model
    inp = inception(X)

    model = GlobalAveragePooling2D()(inp)
    model = Dense(240)(model)
    model = Dense(64)(model)
    predictions = Dense(1, activation='relu')(model)

    # Creates the model, assuming your final layer is named "predictions"
    model = Model(inputs=X_input, outputs=predictions)
    # Compile the model
    model.compile(optimizer='Adam', loss='mse', metrics=['mse'])

    # Check the summary of this new model to confirm the architecture
    model.summary()
    return model
Example #14
def refunit(divider, ch):

    image_input = Input(shape=(int(img_y / divider), int(img_x / divider), ch))
    x = Conv2D(64, (7, 7), strides=(2, 2), padding='same',
               name='conv1')(image_input)
    x = BatchNormalization(axis=3, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3))(x)

    x = encoding_conv_block(x,
                            3, [64, 64, 256],
                            stage=2,
                            block='a',
                            strides=(1, 1))
    x = encoding_conv_block(x, 3, [128, 128, 512], stage=3, block='a')

    x = decoding_conv_block(x, 3, [512, 512, 128], stage=6, block='a')

    x = decoding_conv_block(x, 3, [256, 256, 64], stage=7, block='a')
    x = ZeroPadding2D(padding=(0, 1), data_format=None)(x)

    x = UpSampling2D(size=(3, 3))(x)
    x = Cropping2D(cropping=((2, 2), (1, 1)), data_format=None)(x)
    x = Conv2DTranspose(1, (3, 3), padding='same', name='c8o')(x)
    x = Activation('sigmoid')(x)
    modelo = Model(inputs=image_input, outputs=x)
    modelo.summary()
    return modelo
Example #15
def ar_model(t_skip, pred_steps, dim_c, dim_z):
    """Build CPC auto-regressive model (i.e. g_ar).

    Parameters
    ----------
    t_skip : int
        number of c_t steps to crop from beginning of output sequence
    pred_steps : int
        number of future steps to predict
    dim_c : int
        dimension of output context vectors (c_t from fig. 1 of CPC paper)
    dim_z : int
        dimension of input latent vectors (z_t from fig. 1 of CPC paper)

    Returns
    -------
    model : keras.Model
        Model that expects time sequence input of shape (N, t_steps, z_dim) and
        returns encoded sequence of shape (N, t_steps, pred_steps, z_dim).  N is
        the batch dimension.
    """
    model = keras.Sequential(name='ar')
    model.add(GRU(dim_c, return_sequences=True))
    model.add(Lambda(lambda x: K.expand_dims(x, axis=2)))
    model.add(Conv2DTranspose(filters=dim_z, kernel_size=(1, pred_steps)))
    model.add(Cropping2D(((t_skip, pred_steps + 1), (0, 0))))

    return model
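A shape walk-through as a hedged sketch: with t_skip=2, pred_steps=4, dim_c=64, dim_z=32 and a 16-step input, the GRU yields (N, 16, 64), expand_dims gives (N, 16, 1, 64), the transposed convolution widens the singleton axis to pred_steps, and the final crop trims the time axis:

import tensorflow as tf
ar = ar_model(t_skip=2, pred_steps=4, dim_c=64, dim_z=32)
z = tf.random.normal((8, 16, 32))  # (N, t_steps, dim_z)
print(ar(z).shape)                 # (8, 9, 4, 32)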
Example #16
def padded_unet(config):
    unet_start_neurons = config['unet_start_neurons']
    unet_dropout_ratio = config['unet_dropout_ratio']
    input_shape, num_output_channels = utils.get_input_output_shapes(config)
    inputs = [Input(input_shape, dtype='float32', name='input_array')]
    x = inputs[0]

    # Pad one row/column of zeros onto the bottom and right (pad all sides by
    # one, then crop the top-left) before the U-net poolings
    x = ZeroPadding2D()(x)
    x = Cropping2D(((1, 0), (1, 0)))(x)

    # Unet
    x = unet_core(x, unet_start_neurons, unet_dropout_ratio)

    # MLP layers
    mlp_layers = config['action_mlp_layers'] + [num_output_channels]
    for i, layer_size in enumerate(mlp_layers):
        if i < (len(mlp_layers) - 1):
            # Non final fully connected layers
            x = Dense(layer_size, activation='linear')(x)
            x = Activation('relu')(x)
        else:
            # Sigmoid activation on the final Q-value function output layer
            x = Dense(layer_size, activation='linear')(x)
            outputs = [Activation('sigmoid')(x)]

    return inputs, outputs
Example #17
def cond2d_arch_decoder(encoded,
                        decoded_dim_x,
                        decoded_dim_y,
                        n_conv,
                        x_shape,
                        activation = 'relu',
                        filters = 4,
                        filter_factor = 2,
                        last_filters = 1,
                        kernel_size = (5,5),
                        strides = (1,1),
                        upsample_size = (2,2),
                        activity_regularizer = None,
                        transpose = False):

    x = Dense(x_shape[1]*x_shape[2]*x_shape[3],
              activation=activation)(encoded)

    x = Reshape(x_shape[1:])(x)

    for _ in range(n_conv-1):
        x = decoder_cnn_2d(x,
                           filters = filters,
                           kernel_size = kernel_size,
                           strides = strides,
                           activation = activation,
                           upsample_size = upsample_size,
                           activity_regularizer = activity_regularizer,
                           transpose = transpose)
        filters = int(filters//filter_factor)
        
    decoded = decoder_cnn_2d(x,
                             filters = last_filters,
                             kernel_size = kernel_size,
                             strides = (1,1),
                             activation = activation,
                             upsample_size = 0,
                             activity_regularizer = activity_regularizer,
                             transpose = transpose)
    
    d_shape = decoded.get_shape().as_list()
    delta_x = d_shape[1] - decoded_dim_x
    delta_y = d_shape[2] - decoded_dim_y

    d_crop_x = int(delta_x/2)
    d_crop_y = int(delta_y/2)
    
    if delta_x%2==0:
        d_crop_x = (d_crop_x,d_crop_x)
    else:
        d_crop_x = (d_crop_x,d_crop_x+1)
        
    if delta_y%2==0:
        d_crop_y = (d_crop_y,d_crop_y)
    else:
        d_crop_y = (d_crop_y,d_crop_y+1)
    
    decoded = Cropping2D(cropping=(d_crop_x,d_crop_y))(decoded)
    
    return decoded
Example #18
def unet(H, W, Hpad=3, Wpad=3, kshape=(3, 3), channels=24):
    """
	U-net reconstruction model. It receives as input the channel-wise zero-filled reconstruction.
	Reference: Jin et al., "Deep Convolutional Neural Network for Inverse Problems in Imaging", IEEE Tran Img Proc, 2017.
`	"""

    inputs = Input(shape=(H, W, channels))
    input_padded = ZeroPadding2D(padding=(Hpad, Wpad))(
        inputs)  # Pad to compensate for the max-poolings

    conv1 = Conv2D(64, kshape, activation='relu', padding='same')(input_padded)
    conv1 = Conv2D(64, kshape, activation='relu', padding='same')(conv1)
    conv1 = Conv2D(64, kshape, activation='relu', padding='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Conv2D(128, kshape, activation='relu', padding='same')(pool1)
    conv2 = Conv2D(128, kshape, activation='relu', padding='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Conv2D(256, kshape, activation='relu', padding='same')(pool2)
    conv3 = Conv2D(256, kshape, activation='relu', padding='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Conv2D(512, kshape, activation='relu', padding='same')(pool3)
    conv4 = Conv2D(512, kshape, activation='relu', padding='same')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = Conv2D(1024, kshape, activation='relu', padding='same')(pool4)
    conv5 = Conv2D(1024, kshape, activation='relu', padding='same')(conv5)

    up1 = concatenate([UpSampling2D(size=(2, 2))(conv5), conv4], axis=-1)

    conv6 = Conv2D(512, kshape, activation='relu', padding='same')(up1)
    conv6 = Conv2D(512, kshape, activation='relu', padding='same')(conv6)

    up2 = concatenate([UpSampling2D(size=(2, 2))(conv6), conv3], axis=-1)

    conv7 = Conv2D(256, kshape, activation='relu', padding='same')(up2)
    conv7 = Conv2D(256, kshape, activation='relu', padding='same')(conv7)

    up3 = concatenate([UpSampling2D(size=(2, 2))(conv7), conv2], axis=-1)

    conv8 = Conv2D(128, kshape, activation='relu', padding='same')(up3)
    conv8 = Conv2D(128, kshape, activation='relu', padding='same')(conv8)

    up4 = concatenate([UpSampling2D(size=(2, 2))(conv8), conv1], axis=-1)

    conv9 = Conv2D(128, kshape, activation='relu', padding='same')(up4)
    conv9 = Conv2D(128, kshape, activation='relu', padding='same')(conv9)

    conv10 = Conv2D(channels, (1, 1), activation='linear')(conv9)

    res = Add()([conv10, input_padded])  # Residual

    out = Cropping2D(cropping=(Hpad, Wpad))(
        res)  # Crop to go back to desired image dimensions

    model = Model(inputs=inputs, outputs=out)

    return model
Example #19
def CAE(input_shape=(28, 28, 1), filters=[32, 64, 128, 10]):
    model = Sequential()
    if input_shape[0] % 8 == 0:
        pad3 = 'same'
    else:
        pad3 = 'valid'

    model.add(InputLayer(input_shape))
    model.add(Conv2D(filters[0], 5, strides=2, padding='same', activation='relu', name='conv1'))

    model.add(Conv2D(filters[1], 5, strides=2, padding='same', activation='relu', name='conv2'))

    model.add(Conv2D(filters[2], 3, strides=2, padding=pad3, activation='relu', name='conv3'))

    model.add(Flatten())
    model.add(Dense(units=filters[3], name='embedding'))
    model.add(Dense(units=filters[2]*int(input_shape[0]/8)*int(input_shape[0]/8), activation='relu'))

    model.add(Reshape((int(input_shape[0]/8), int(input_shape[0]/8), filters[2])))
    model.add(Conv2DTranspose(filters[1], 3, strides=2, padding=pad3, activation='relu', name='deconv3'))

    model.add(Conv2DTranspose(filters[0], 5, strides=2, padding='same', activation='relu', name='deconv2'))

    model.add(Conv2DTranspose(input_shape[2], 5, strides=2, padding='same', name='deconv1'))

    if model.layers[0].input_shape != model.layers[-1].output_shape:
        crop = abs(model.layers[0].input_shape[1] - model.layers[-1].output_shape[1])//2
        model.add(Cropping2D(crop))

    encoder = Model(inputs=model.input, outputs=model.get_layer('embedding').output)
    return model, encoder
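A hedged usage sketch: the autoencoder reconstructs its input, and `encoder` exposes the 10-dimensional embedding:

model, encoder = CAE(input_shape=(28, 28, 1), filters=[32, 64, 128, 10])
model.compile(optimizer='adam', loss='mse')
# model.fit(x, x, ...); features = encoder.predict(x)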
Example #20
def crop_noise(noise_tensor, size, block):
    """
    Crops the noise_tensor to the target size.
    """
    cut = (noise_tensor.shape[1] - size) // 2
    crop = Cropping2D(cut, name=f"G_Noise_Crop_block_{block}")(noise_tensor)
    return crop
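For example (a sketch, assuming Input from tensorflow.keras.layers), cropping a 16x16 noise map to 12x12 removes (16 - 12) // 2 = 2 pixels from every edge:

noise = Input(shape=(16, 16, 1))
print(crop_noise(noise, 12, block=0).shape)  # (None, 12, 12, 1)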
Example #21
    def __init__(self, depth):
        super().__init__()

        self.model_layers = []
        self.model_layers.append(
            ZeroPadding2D(padding=15, data_format='channels_last'))
        self.model_layers.append(
            Conv2D(filters=64,
                   kernel_size=(3, 3),
                   strides=(1, 1),
                   kernel_initializer='Orthogonal',
                   padding='same',
                   activation='relu'))
        for i in range(depth - 2):
            self.model_layers.append(
                Conv2D(filters=64,
                       kernel_size=(3, 3),
                       strides=(1, 1),
                       kernel_initializer='Orthogonal',
                       padding='same',
                       use_bias=False))
            self.model_layers.append(BatchNormalization())
            self.model_layers.append(Activation('relu'))

        self.model_layers.append(
            Conv2D(filters=3,
                   kernel_size=(3, 3),
                   strides=(1, 1),
                   kernel_initializer='Orthogonal',
                   padding='same',
                   use_bias=False))
        self.model_layers.append(
            Cropping2D(cropping=15, data_format='channels_last'))

        self.subtract = Subtract()
Example #22
def _upsampling_block(x,
                      skip,
                      nb_filters,
                      crop_px,
                      conv_padding,
                      conv_layer_type=Conv2D):
    skip = Cropping2D(crop_px)(skip)

    x = UpSampling2D(size=(2, 2))(x)
    x = conv_layer_type(nb_filters,
                        2,
                        activation='relu',
                        padding='same',
                        kernel_initializer='he_normal')(x)
    x = concatenate([skip, x], axis=3)
    x = conv_layer_type(nb_filters,
                        3,
                        activation='relu',
                        padding=conv_padding,
                        kernel_initializer='he_normal')(x)
    x = conv_layer_type(nb_filters,
                        3,
                        activation='relu',
                        padding=conv_padding,
                        kernel_initializer='he_normal')(x)
    return x
Example #23
def fcn_residual_32x_18l(num_classes, name_suffix=''):
    """
    February 15

    Adding residual branches

    :param num_classes:
    :param name_suffix:
    :return:
    """

    coef = 3
    width = 64

    input_layer = Input(shape=(None, None, 3))
    x = BatchNormalization()(input_layer)
    x = Conv2D(
        width,
        3,
        **conv_args,
    )(x)  # Makes width wide enough for addition inside skip module

    for i in range(7):
        y = Cropping2D(cropping=((2, 2), (2, 2)))(x)

        x = BatchNormalization()(x)
        x = Conv2D(
            width,
            3,
            **conv_args,
        )(x)

        x = BatchNormalization()(x)
        x = Conv2D(
            width,
            3,
            **conv_args,
        )(x)

        # if i % 2 == 0:
        #     x = AvgPool2D(pool_size=(2, 2), strides=(1, 1), padding='same')(x)

        # y = Conv2D(width, 1, **conv_args)(y)  # 1x1
        x = add([x, y])

    x = BatchNormalization()(x)
    x = Conv2D(16 * 1 << coef, 2, **conv_args)(x)  # fit-once

    x = BatchNormalization()(x)
    x = Conv2D(16 * 1 << coef, 1, **conv_args)(x)

    x = BatchNormalization()(x)
    x = Conv2D(num_classes, 1, kernel_initializer=he_norm)(x)  # no activation

    x = Softmax()(x)

    model = tf.keras.Model(inputs=input_layer,
                           outputs=x,
                           name='residual_32x_64w_18l_' + name_suffix)
    return model, 32
Example #24
File: lunet.py  Project: sage-group/hugin
def unet_rrn(
    name,
    input_shapes,
    output_shapes,
    kernel=3,
    stride=1,
    activation='elu',
    output_channels=2,
    kinit='RandomUniform',
    batch_norm=False,
    padding='same',
    axis=3,
    crop=0,
    mpadd=0,
):
    nr_classes = output_channels
    timeseries, input_1_height, input_1_width, input_1_channels = input_shapes[
        "input_1"]
    timeseries_mask_shape = input_shapes["input_2"]
    inputs = Input(
        (timeseries, input_1_height, input_1_width, input_1_channels))
    print("ZZZZZZZZZZZZZZZZZZZZZ", timeseries_mask_shape)
    mask = Input(timeseries_mask_shape)
    #masks = Input(timeseries_mask_shape)

    # Encoding
    conv1_output, conv1_output_last, state1_h, state1_c, pool1 = encode_block(
        32, inputs, kernel, stride, activation, kinit, padding, mask=mask)
    conv2_output, conv2_output_last, state2_h, state2_c, pool2 = encode_block(
        64, pool1, kernel, stride, activation, kinit, padding, mask=mask)
    conv3_output, conv3_output_last, state3_h, state3_c, pool3 = encode_block(
        128, pool2, kernel, stride, activation, kinit, padding, mask=mask)
    conv4_output, conv4_output_last, state4_h, state4_c, pool4 = encode_block(
        256, pool3, kernel, stride, activation, kinit, padding, mask=mask)

    # Middle
    conv5_output, conv5_output_last, state5_h, state5_c = encode_block(
        512, pool4, kernel, stride, activation, kinit, padding, max_pool=False)

    # Decoding
    conv6 = conv_t_block(256, conv5_output_last, conv4_output_last, kernel,
                         stride, activation, kinit, padding, axis)
    conv7 = conv_t_block(128, conv6, conv3_output_last, kernel, stride,
                         activation, kinit, padding, axis)
    conv8 = conv_t_block(64, conv7, conv2_output_last, kernel, stride,
                         activation, kinit, padding, axis)
    conv9 = conv_t_block(32, conv8, conv1_output_last, kernel, stride,
                         activation, kinit, padding, axis)

    # Output
    conv9 = BatchNormalization()(conv9) if batch_norm else conv9

    conv9 = Cropping2D((mpadd, mpadd))(conv9)

    conv10 = Convolution2D(nr_classes, (1, 1),
                           activation='softmax',
                           name="output_1")(conv9)
    model = Model(inputs=[inputs, mask], outputs=[conv10])

    return model
Example #25
def encoder_decoder_model(ds):
    inp = Input(shape=(None, None, 1))
    pad_x = np.ceil(
        ds[0][0]['width'].numpy()[0] / 8) * 8 - ds[0][0]['width'].numpy()[0]
    pad_y = np.ceil(
        ds[0][0]['height'].numpy()[0] / 8) * 8 - ds[0][0]['height'].numpy()[0]
    # ZeroPadding2D expects ((top, bottom), (left, right)): rows come from the
    # image height (pad_y), columns from the width (pad_x)
    x = ZeroPadding2D(((0, int(pad_y)), (0, int(pad_x))))(inp)
    x = Conv2D(16, kernel_size=(3, 3), padding='same', activation='sigmoid')(x)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Conv2D(8, kernel_size=(3, 3), padding='same', activation='sigmoid')(x)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Conv2D(8, kernel_size=(3, 3), padding='same', activation='sigmoid')(x)
    encoded = MaxPooling2D((2, 2), padding='same')(x)

    x = Conv2D(8, kernel_size=(3, 3), activation='sigmoid',
               padding='same')(encoded)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(8, kernel_size=(3, 3), activation='sigmoid', padding='same')(x)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(16, kernel_size=(3, 3), activation='sigmoid', padding='same')(x)
    x = UpSampling2D((2, 2))(x)
    x = Cropping2D(((0, int(pad_y)), (0, int(pad_x))))(x)
    decoded = Conv2D(1,
                     kernel_size=(3, 3),
                     activation='sigmoid',
                     padding='same')(x)
    return inp, encoded, decoded
Example #26
def get_lenet_model():
    """
    LeNet architecture
    :return: keras model
    """
    inputs = Input(shape=(160, 320, 3))  # input as received from simulation
    x = Lambda(lambda img: img / 255.0 - 0.5)(inputs)  # normalization
    x = Cropping2D(cropping=((70, 25), (0, 0)))(
        x)  # cropping out top and bottom of the image

    x = Conv2D(filters=6, kernel_size=(5, 5), activation="relu")(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    x = Conv2D(filters=16, kernel_size=(5, 5), activation="relu")(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    x = Flatten()(x)
    x = Dense(120, activation='relu')(x)
    x = Dropout(0.5)(x)

    x = Dense(84, activation='relu')(x)
    x = Dropout(0.5)(x)

    out = Dense(1, activation='linear')(x)

    model = Model(inputs=inputs, outputs=out)
    model.compile(loss='mse',
                  optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4))
    model.summary()

    # needs graphviz installed
    tf.keras.utils.plot_model(model, to_file='data/lenet.png')

    return model
Example #27
def vanilla_unet(in_shape):
    inputs = Input((in_shape, in_shape, 3))  # avoid shadowing the builtin `input`
    x = inputs

    # Downsampling path
    down_layers = []
    filters = 64
    for _ in range(4):
        x = conv2d_block(x, filters)
        down_layers.append(x)
        x = MaxPooling2D((2, 2), strides=2)(x)
        filters *= 2  # Number of filters doubled with each layer

    x = conv2d_block(x, filters)

    for conv in reversed(down_layers):
        filters //= 2
        x = Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding='same')(x)

        ch, cw = crop_shape(get_shape(conv), get_shape(x))
        conv = Cropping2D((ch, cw))(conv)

        x = concatenate([x, conv])
        x = conv2d_block(x, filters)

    output = Conv2D(1, (1, 1), activation='sigmoid')(x)
    return Model(inputs, output)
Example #28
def _adjust_block(p, ip, filters, weight_decay=5e-5, id=None):
    '''
    Adjusts the input `p` to match the shape of the `input`
    or situations where the output number of filters needs to
    be changed

    # Arguments:
        p: input tensor which needs to be modified
        ip: input tensor whose shape needs to be matched
        filters: number of output filters to be matched
        weight_decay: l2 regularization weight
        id: string id

    # Returns:
        an adjusted Keras tensor
    '''
    channel_dim = 1 if K.image_data_format() == 'channels_first' else -1
    img_dim = 2 if K.image_data_format() == 'channels_first' else -2

    with K.name_scope('adjust_block'):
        if p is None:
            p = ip

        elif p._keras_shape[img_dim] != ip._keras_shape[img_dim]:
            with K.name_scope('adjust_reduction_block_%s' % id):
                p = Activation('relu', name='adjust_relu_1_%s' % id)(p)

                p1 = AveragePooling2D((1, 1), strides=(2, 2), padding='valid',
                                      name='adjust_avg_pool_1_%s' % id)(p)
                p1 = Conv2D(filters // 2, (1, 1), padding='same', use_bias=False,
                            kernel_regularizer=l2(weight_decay),
                            name='adjust_conv_1_%s' % id,
                            kernel_initializer='he_normal')(p1)

                p2 = ZeroPadding2D(padding=((0, 1), (0, 1)))(p)
                p2 = Cropping2D(cropping=((1, 0), (1, 0)))(p2)
                p2 = AveragePooling2D((1, 1), strides=(2, 2), padding='valid',
                                      name='adjust_avg_pool_2_%s' % id)(p2)
                p2 = Conv2D(filters // 2, (1, 1), padding='same', use_bias=False,
                            kernel_regularizer=l2(weight_decay),
                            name='adjust_conv_2_%s' % id,
                            kernel_initializer='he_normal')(p2)

                p = concatenate([p1, p2], axis=channel_dim)
                p = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY,
                                       epsilon=_BN_EPSILON,
                                       name='adjust_bn_%s' % id)(p)

        elif p._keras_shape[channel_dim] != filters:
            with K.name_scope('adjust_projection_block_%s' % id):
                p = Activation('relu')(p)
                p = Conv2D(filters, (1, 1), strides=(1, 1), padding='same',
                           name='adjust_conv_projection_%s' % id, use_bias=False,
                           kernel_regularizer=l2(weight_decay),
                           kernel_initializer='he_normal')(p)
                p = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY,
                                       epsilon=_BN_EPSILON,
                                       name='adjust_bn_%s' % id)(p)
    return p
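The ZeroPadding2D/Cropping2D pair in the reduction branch above shifts the feature map one pixel up and to the left before the second stride-2 pooling, so the two pooling paths sample offset grids. A hedged numeric illustration:

import tensorflow as tf
from tensorflow.keras.layers import Cropping2D, ZeroPadding2D

a = tf.reshape(tf.range(16, dtype=tf.float32), (1, 4, 4, 1))
shifted = Cropping2D(((1, 0), (1, 0)))(ZeroPadding2D(((0, 1), (0, 1)))(a))
# shifted[0, i, j, 0] == a[0, i + 1, j + 1, 0], zero-filled on the last row/col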
Example #29
def _vertical_blindspot_network(x):
    """Blind-spot network; adapted from the noise2noise GitHub repo.
    Each row of the output only sees input pixels above that row.
    """
    skips = [x]

    n = x
    n = _vshifted_conv(n, 48, 'enc_conv0')
    n = _vshifted_conv(n, 48, 'enc_conv1')
    n = _vshifted_pool(n)
    skips.append(n)

    n = _vshifted_conv(n, 48, 'enc_conv2')
    n = _vshifted_pool(n)
    skips.append(n)

    n = _vshifted_conv(n, 48, 'enc_conv3')
    n = _vshifted_pool(n)
    skips.append(n)

    n = _vshifted_conv(n, 48, 'enc_conv4')
    n = _vshifted_pool(n)
    skips.append(n)

    n = _vshifted_conv(n, 48, 'enc_conv5')
    n = _vshifted_pool(n)
    n = _vshifted_conv(n, 48, 'enc_conv6')

    #-----------------------------------------------
    n = UpSampling2D(2)(n)
    n = Concatenate(axis=3)([n, skips.pop()])
    n = _vshifted_conv(n, 96, 'dec_conv5')
    n = _vshifted_conv(n, 96, 'dec_conv5b')

    n = UpSampling2D(2)(n)
    n = Concatenate(axis=3)([n, skips.pop()])
    n = _vshifted_conv(n, 96, 'dec_conv4')
    n = _vshifted_conv(n, 96, 'dec_conv4b')

    n = UpSampling2D(2)(n)
    n = Concatenate(axis=3)([n, skips.pop()])
    n = _vshifted_conv(n, 96, 'dec_conv3')
    n = _vshifted_conv(n, 96, 'dec_conv3b')

    n = UpSampling2D(2)(n)
    n = Concatenate(axis=3)([n, skips.pop()])
    n = _vshifted_conv(n, 96, 'dec_conv2')
    n = _vshifted_conv(n, 96, 'dec_conv2b')

    n = UpSampling2D(2)(n)
    n = Concatenate(axis=3)([n, skips.pop()])
    n = _vshifted_conv(n, 96, 'dec_conv1a')
    n = _vshifted_conv(n, 96, 'dec_conv1b')

    # final pad and crop for blind spot
    n = ZeroPadding2D([[1, 0], [0, 0]])(n)
    n = Cropping2D([[0, 1], [0, 0]])(n)

    return n
Example #30
    def __call__(self, x):
        h_kernel_size = self.kernel_size // 2 + 1
        if self.mask_type == 'A':
            h_kernel_size -= 1
        output = ZeroPadding2D(((0, 0), (h_kernel_size, 0)))(x)
        output = Convolution2D(self.filters, (1, h_kernel_size))(output)
        output = Cropping2D(((0, 0), (0, 1)))(output)
        return output
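A hedged sketch of the object this __call__ presumably belongs to (a PixelCNN-style horizontal masked convolution; the class definition is not shown, and the attribute names are inferred from the body):

class HorizontalMaskedConv:
    def __init__(self, filters, kernel_size, mask_type='B'):
        self.filters = filters          # output channels
        self.kernel_size = kernel_size  # full (square) kernel size
        self.mask_type = mask_type      # 'A' also masks the centre pixel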
     return output