def resnet_decoder_timedist(input_shape=(shape_r, shape_c, 3),
                            verbose=True,
                            print_shapes=True,
                            n_outs=1,
                            ups=8,
                            dil_rate=(1, 1)):
    """Build a ResNet encoder + time-distributed decoder model.

    The encoder output is replicated ``nb_timestep`` times along a new
    time axis before being passed to the time-distributed decoder block.
    Returns a compiled-graph `Model` with `n_outs` copies of the decoder
    output (useful when training against several losses on one map).
    """
    model_input = Input(shape=input_shape)

    ### ENCODER ###
    encoder = dcn_resnet(input_tensor=model_input)
    if print_shapes:
        print('resnet output shape:', encoder.output.shape)

    # Replicate encoder features along a fresh time axis (axis 1).
    def _tile_over_time(t):
        return K.repeat_elements(K.expand_dims(t, axis=1), nb_timestep, axis=1)

    def _tile_over_time_shape(s):
        return (s[0], nb_timestep) + s[1:]

    timed_feats = Lambda(_tile_over_time, _tile_over_time_shape)(encoder.output)

    ## DECODER ##
    decoded = decoder_block_timedist(timed_feats,
                                     dil_rate=dil_rate,
                                     print_shapes=print_shapes,
                                     dec_filt=256)

    # Building model
    model = Model(model_input, [decoded] * n_outs)
    if verbose:
        model.summary()
    return model
# Example #2
def sam_resnet_new(input_shape = (shape_r, shape_c, 3),
                   conv_filters=512, 
                   lstm_filters=512, 
                   att_filters=512,
                   verbose=True, 
                   print_shapes=True, 
                   n_outs=1, 
                   ups=8, 
                   nb_gaussian=nb_gaussian):
    '''SAM-ResNet ported from the original code.

    Pipeline: ResNet encoder -> attentive ConvLSTM -> two learned-prior /
    dilated-conv stages -> 1x1 conv heatmap -> bilinear upsampling.
    '''
    model_input = Input(shape=input_shape)

    # Encoder: dilated ResNet followed by a feature-mixing 3x3 conv.
    encoder = dcn_resnet(input_tensor=model_input)
    features = Conv2D(conv_filters, 3, padding='same',
                      activation='relu')(encoder.output)
    if print_shapes:
        print('Shape after first conv after dcn_resnet:', features.shape)

    # Attentive ConvLSTM over the time-replicated feature map.
    lstm_in = Lambda(repeat, repeat_shape)(features)
    lstm_out = AttentiveConvLSTM2D(filters=lstm_filters,
                                   attentive_filters=att_filters,
                                   kernel_size=(3, 3),
                                   attentive_kernel_size=(3, 3),
                                   padding='same',
                                   return_sequences=False)(lstm_in)

    # Learned Prior (1): concatenate Gaussian priors, then dilated conv.
    stage1_priors = LearningPrior(nb_gaussian=nb_gaussian)(lstm_out)
    stage1 = Concatenate(axis=-1)([lstm_out, stage1_priors])
    stage1 = Conv2D(conv_filters, 5, padding='same', activation='relu',
                    dilation_rate=(4, 4))(stage1)

    # Learned Prior (2): priors are computed from the LSTM output again.
    stage2_priors = LearningPrior(nb_gaussian=nb_gaussian)(lstm_out)
    stage2 = Concatenate(axis=-1)([stage1, stage2_priors])
    stage2 = Conv2D(conv_filters, 5, padding='same', activation='relu',
                    dilation_rate=(4, 4))(stage2)

    # 1x1 conv collapses the channels into a single heatmap.
    heatmap = Conv2D(1, kernel_size=1, padding='same',
                     activation='relu')(stage2)
    if print_shapes:
        print('Shape after 1x1 conv:', heatmap.shape)

    # Upsample the heatmap back to the input resolution.
    upsampled = UpSampling2D(size=(ups, ups),
                             interpolation='bilinear')(heatmap)
    if print_shapes:
        print('shape after upsampling', upsampled.shape)

    # Duplicate the single output for each requested loss head.
    model = Model(model_input, [upsampled] * n_outs)
    if verbose:
        model.summary()

    return model
# Example #3
def sam_resnet_nopriors(input_shape = (224, 224, 3), conv_filters=128, lstm_filters=512,
                        att_filters=512, verbose=True, print_shapes=True, n_outs=1, ups=8):
    '''Sam ResNet with no priors.

    Identical to the full SAM-ResNet except the learned Gaussian priors
    are omitted: the two dilated convolutions run directly on the
    attentive ConvLSTM output.
    '''
    model_input = Input(shape=input_shape)

    # Encoder plus a 3x3 feature-mixing convolution.
    encoder = dcn_resnet(input_tensor=model_input)
    features = Conv2D(conv_filters, 3, padding='same',
                      activation='relu')(encoder.output)
    if print_shapes:
        print('Shape after first conv after dcn_resnet:', features.shape)

    # Attentive ConvLSTM over time-replicated features.
    lstm_out = Lambda(repeat, repeat_shape)(features)
    lstm_out = AttentiveConvLSTM2D(filters=lstm_filters,
                                   attentive_filters=att_filters,
                                   kernel_size=(3, 3),
                                   attentive_kernel_size=(3, 3),
                                   padding='same',
                                   return_sequences=False)(lstm_out)

    # Dilated convolutions (priors would go here)
    dilated = Conv2D(conv_filters, 5, padding='same', activation='relu',
                     dilation_rate=(4, 4))(lstm_out)
    dilated = Conv2D(conv_filters, 5, padding='same', activation='relu',
                     dilation_rate=(4, 4))(dilated)

    # Collapse channels into a single-channel heatmap.
    heatmap = Conv2D(1, kernel_size=1, padding='same',
                     activation='relu')(dilated)
    if print_shapes:
        print('Shape after 1x1 conv:', heatmap.shape)

    # Upsample back to input resolution.
    upsampled = UpSampling2D(size=(ups, ups),
                             interpolation='bilinear')(heatmap)
    if print_shapes:
        print('shape after upsampling', upsampled.shape)

    # One output head per requested loss.
    model = Model(model_input, [upsampled] * n_outs)
    if verbose:
        model.summary()

    return model
def dcnn_3stream(input_shape=(224, 224, 3),
                 conv_filters=512,
                 n_streams=3,
                 verbose=True,
                 print_shapes=True):
    """ResNet encoder feeding ``n_streams`` identical output streams.

    Each stream is: 3x3 conv -> 1x1 conv (collapse to 1 channel) ->
    16x bilinear upsampling -> duplication along a new time axis of
    length 2 (via ``K.repeat_elements``).

    Args:
        input_shape: shape of the input image tensor.
        conv_filters: filters in each stream's 3x3 convolution.
        n_streams: number of parallel output streams built on the
            shared encoder output.
        verbose: if True, print the model summary.
        print_shapes: if True, print the inferred output-stream shape.

    Returns:
        A `Model` with one input and ``n_streams`` outputs.
    """
    def out_shape(s):
        # BUG FIX: the original printed the function object itself
        # (`print("out shape", out_shape)`); print the computed shape,
        # and only when the caller asked for shape logging.
        shape = (s[0], 2) + s[1:]
        if print_shapes:
            print("out shape", shape)
        return shape

    def _output_stream(x):
        x = Conv2D(filters=conv_filters,
                   kernel_size=3,
                   padding='same',
                   activation='relu')(x)

        # convolve with a kernel size of 1 to flatten the tensor (w x h x nfeatures) from
        # 40 x 30 x 512 to 40 x 30 x 1
        # use kernel size of 1 so you ONLY flatten instead of doing local computations
        # use one filter so that you only get one image channel
        x = Conv2D(filters=1, kernel_size=1, padding='same',
                   activation='relu')(x)

        # 16x bilinear upsampling back toward input resolution
        # (NOTE: original comment said "8x upsample" but size=(16, 16)).
        x = UpSampling2D(size=(16, 16), interpolation='bilinear')(x)
        # Duplicate the map along a new axis-1 of length 2.
        x = Lambda(
            lambda y: K.repeat_elements(K.expand_dims(y, axis=1), 2, axis=1),
            output_shape=out_shape)(x)
        return x

    inp = Input(shape=input_shape)
    dcn = dcn_resnet(input_tensor=inp)

    # All streams share the same encoder output.
    outs = [_output_stream(dcn.output) for _ in range(n_streams)]

    model = Model(inputs=inp, outputs=outs)

    if verbose:
        model.summary()
    return model
# Example #5
def resnet_decoder(input_shape = (shape_r, shape_c, 3),
                     verbose=True,
                     print_shapes=True,
                     n_outs=1,
                     ups=8,
                    dil_rate = (2,2)):
    """ResNet encoder followed by a plain (non-time-distributed) decoder.

    Returns a `Model` producing `n_outs` copies of the decoder output.
    """
    model_input = Input(shape=input_shape)

    ### ENCODER ###
    encoder = dcn_resnet(input_tensor=model_input)
    if print_shapes:
        print('resnet output shape:', encoder.output.shape)

    ## DECODER ##
    decoded = decoder_block(encoder.output,
                            dil_rate=dil_rate,
                            print_shapes=print_shapes,
                            dec_filt=512)

    # Building model
    model = Model(model_input, [decoded] * n_outs)
    if verbose:
        model.summary()
    return model
def sam_resnet_3d(input_shape=(224, 224, 3),
                  filt_3d=128,
                  conv_filters=128,
                  lstm_filters=128,
                  att_filters=128,
                  verbose=True,
                  print_shapes=True,
                  n_outs=1,
                  nb_timestep=3,
                  ups=upsampling_factor):
    """SAM-ResNet variant with a 3D-convolutional output flow.

    Pipeline: ResNet encoder -> attentive ConvLSTM (returning the full
    sequence) -> two dilated Conv3D + BN + ReLU stages -> 1x1x1 Conv3D
    heatmap -> per-timestep bilinear upsampling.

    Args:
        input_shape: input image shape.
        filt_3d: filters in the Conv3D stages.
        conv_filters: filters in the post-encoder 2D convolution.
        lstm_filters / att_filters: ConvLSTM main / attention filters.
        verbose: print model summary when True.
        print_shapes: print intermediate tensor shapes when True.
        n_outs: number of duplicated output heads.
        nb_timestep: length of the synthetic time axis.
        ups: spatial upsampling factor for the final maps.

    Returns:
        A `Model` with ``n_outs`` time-distributed saliency outputs.
    """
    inp = Input(shape=input_shape)

    # Input CNN
    dcn = dcn_resnet(input_tensor=inp)
    conv_feat = Conv2D(conv_filters, 3, padding='same',
                       activation='relu')(dcn.output)
    if print_shapes:
        print('Shape after first conv after dcn_resnet:', conv_feat.shape)

    # Attentive ConvLSTM over the time-replicated encoder features.
    # BUG FIX: the Lambda was applied to an undefined name `x`;
    # it must be applied to `conv_feat`.
    att_convlstm = Lambda(
        lambda t: K.repeat_elements(
            K.expand_dims(t, axis=1), nb_timestep, axis=1), lambda s:
        (s[0], nb_timestep) + s[1:])(conv_feat)
    att_convlstm = AttentiveConvLSTM2D(filters=lstm_filters,
                                       attentive_filters=att_filters,
                                       kernel_size=(3, 3),
                                       attentive_kernel_size=(3, 3),
                                       padding='same',
                                       return_sequences=True)(att_convlstm)

    if print_shapes: print('att_convlstm output shape', att_convlstm.shape)

    # Output flow: two dilated 3D conv + BN + ReLU stages.
    x = Conv3D(filt_3d, (3, 3, 3),
               strides=(1, 1, 1),
               padding='same',
               dilation_rate=(4, 4, 1),
               activation=None,
               kernel_initializer='he_normal')(att_convlstm)
    x = BatchNormalization(axis=4)(x)
    x = Activation('relu')(x)
    x = Conv3D(filt_3d, (3, 3, 1),
               strides=(1, 1, 1),
               padding='same',
               dilation_rate=(4, 4, 1),
               activation=None,
               kernel_initializer='he_normal')(x)
    x = BatchNormalization(axis=4)(x)
    x = Activation('relu')(x)

    # 1x1x1 conv collapses channels into a single saliency channel.
    x = Conv3D(1, (1, 1, 1),
               strides=(1, 1, 1),
               padding='same',
               activation='relu',
               kernel_initializer='he_normal')(x)

    # Per-timestep bilinear upsampling back toward input resolution.
    out_final = TimeDistributed(
        UpSampling2D(size=(ups, ups), interpolation='bilinear'))(x)

    # BUG FIX: the original referenced `outs_final` before assignment
    # (it had assigned `out_final`), raising NameError at build time.
    if print_shapes: print('outs_final shape:', out_final.shape)
    outs_final = [out_final] * n_outs

    m = Model(inp, outs_final)
    if verbose:
        m.summary()
    return m