Example 1
def sam_resnet(x):
    # Dilated Convolutional Network
    dcn = dcn_resnet(input_tensor=x[0])
    conv_feat = Convolution2D(512, 3, 3, border_mode='same', activation='relu')(dcn.output)

    # Attentive Convolutional LSTM
    att_convlstm = Lambda(repeat, repeat_shape)(conv_feat)
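    # repeat/repeat_shape tile conv_feat along a new time axis, so the AttentiveConvLSTM
    # below receives a sequence of repeated copies of the feature map.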
    att_convlstm = AttentiveConvLSTM(nb_filters_in=512, nb_filters_out=512, nb_filters_att=512,
                                     nb_cols=3, nb_rows=3)(att_convlstm)

    # Learned Prior (1)
    priors1 = LearningPrior(nb_gaussian=nb_gaussian, init=gaussian_priors_init)(x[1])
    concateneted = merge([att_convlstm, priors1], mode='concat', concat_axis=1)
    learned_priors1 = AtrousConvolution2D(512, 5, 5, border_mode='same', activation='relu',
                                          atrous_rate=(4, 4))(concateneted)

    # Learned Prior (2)
    priors2 = LearningPrior(nb_gaussian=nb_gaussian, init=gaussian_priors_init)(x[1])
    concateneted = merge([learned_priors1, priors2], mode='concat', concat_axis=1)
    learned_priors2 = AtrousConvolution2D(512, 5, 5, border_mode='same', activation='relu',
                                          atrous_rate=(4, 4))(concateneted)

    # Final Convolutional Layer
    outs = Convolution2D(1, 1, 1, border_mode='same', activation='relu')(learned_priors2)
    outs_up = Lambda(upsampling, upsampling_shape)(outs)

    return [outs_up, outs_up, outs_up]
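
A minimal usage sketch for this Keras 1.x variant. The input sizes, optimizer, and losses below are placeholders (assumptions), not values from the original project; the point is only that the two inputs map to x[0] and x[1], and that the three returned tensors need one loss entry each.

from keras.layers import Input
from keras.models import Model

img = Input((3, 240, 320))             # RGB image, channels_first (hypothetical size)
priors = Input((nb_gaussian, 30, 40))  # Gaussian prior maps consumed by LearningPrior (hypothetical size)
model = Model(input=[img, priors], output=sam_resnet([img, priors]))
model.compile(optimizer='rmsprop', loss=['mse', 'mse', 'mse'])  # placeholder losses, one per output
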
Example 2
def sam_resnet(data):

    # dcn = dcn_resnet(input_tensor=data, trainable=True)
    bn_axis = 3
    trainable = True
    # conv_1
    conv_1_out = ZeroPadding2D((3, 3), batch_size=1)(data)
    conv_1_out = Conv2D(64, (7, 7),
                        strides=(2, 2),
                        name='conv1',
                        trainable=trainable)(conv_1_out)
    conv_1_out = BatchNormalization(axis=bn_axis,
                                    name='bn_conv1',
                                    trainable=trainable)(conv_1_out)
    conv_1_out_b = Activation('relu')(conv_1_out)
    conv_1_out = MaxPooling2D((3, 3), strides=(2, 2),
                              padding='same')(conv_1_out_b)

    # conv_2
    conv_2_out = conv_block(conv_1_out,
                            3, [64, 64, 256],
                            stage=2,
                            block='a',
                            strides=(1, 1),
                            trainable=trainable)
    conv_2_out = identity_block(conv_2_out,
                                3, [64, 64, 256],
                                stage=2,
                                block='b',
                                trainable=trainable)
    conv_2_out = identity_block(conv_2_out,
                                3, [64, 64, 256],
                                stage=2,
                                block='c',
                                trainable=trainable)

    # conv_3
    conv_3_out = conv_block(conv_2_out,
                            3, [128, 128, 512],
                            stage=3,
                            block='a',
                            strides=(2, 2),
                            trainable=trainable)
    conv_3_out = identity_block(conv_3_out,
                                3, [128, 128, 512],
                                stage=3,
                                block='b',
                                trainable=trainable)
    conv_3_out = identity_block(conv_3_out,
                                3, [128, 128, 512],
                                stage=3,
                                block='c',
                                trainable=trainable)
    conv_3_out = identity_block(conv_3_out,
                                3, [128, 128, 512],
                                stage=3,
                                block='d',
                                trainable=trainable)

    # conv_4
    conv_4_out = conv_block(conv_3_out,
                            3, [256, 256, 1024],
                            stage=4,
                            block='a',
                            trainable=trainable)
    conv_4_out = identity_block(conv_4_out,
                                3, [256, 256, 1024],
                                stage=4,
                                block='b',
                                trainable=trainable)
    conv_4_out = identity_block(conv_4_out,
                                3, [256, 256, 1024],
                                stage=4,
                                block='c',
                                trainable=trainable)
    conv_4_out = identity_block(conv_4_out,
                                3, [256, 256, 1024],
                                stage=4,
                                block='d',
                                trainable=trainable)
    conv_4_out = identity_block(conv_4_out,
                                3, [256, 256, 1024],
                                stage=4,
                                block='e',
                                trainable=trainable)
    conv_4_out = identity_block(conv_4_out,
                                3, [256, 256, 1024],
                                stage=4,
                                block='f',
                                trainable=trainable)

    # conv_5
    conv_5_out = conv_block(conv_4_out,
                            3, [512, 512, 2048],
                            stage=5,
                            block='a',
                            strides=(1, 1),
                            trainable=trainable)  #
    conv_5_out = identity_block(conv_5_out,
                                3, [512, 512, 2048],
                                stage=5,
                                block='b',
                                trainable=trainable)
    conv_5_out = identity_block(conv_5_out,
                                3, [512, 512, 2048],
                                stage=5,
                                block='c',
                                trainable=trainable)
    # processing ResNet output
    resnet_outs = Conv2D(512, (3, 3),
                         padding='same',
                         activation='relu',
                         name='resnet_out',
                         trainable=trainable)(conv_5_out)
    resnet_outs = Flatten()(resnet_outs)
    resnet_outs = RepeatVector(nb_timestep)(resnet_outs)
    resnet_outs = Reshape((nb_timestep, 14, 14, 512))(resnet_outs)
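    # Note: the Flatten -> RepeatVector -> Reshape chain above tiles the 14x14x512 feature
    # map nb_timestep times along a new time axis, producing the sequence input expected by
    # the AttentiveConvLSTM below.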

    # Attentive Convolutional LSTM
    convLSTM_outs = AttentiveConvLSTM(nb_filters_in=512,
                                      nb_filters_out=512,
                                      nb_filters_att=512,
                                      nb_cols=3,
                                      nb_rows=3,
                                      name='attenconvsltm')(
                                          resnet_outs)  #, trainable=True
    convLSTM_outs = Lambda(Kreshape,
                           arguments={'shape': [-1, 14, 14, 512]},
                           output_shape=[14, 14, 512])(convLSTM_outs)
    # final land output
    land_outs = Conv2D(8, (1, 1),
                       padding='same',
                       activation='sigmoid',
                       name='land_con5',
                       trainable=True)(convLSTM_outs)
    # upsampling land output for 3rd block
    conv_3_out = Conv2D(64, (3, 3),
                        padding='same',
                        activation='relu',
                        name='conv_3_out',
                        trainable=True)(conv_3_out)
    up3_land_outs = UpSampling2D(size=(2, 2))(land_outs)
    up3_land_outs = Concatenate()([conv_3_out, up3_land_outs])
    up3_land_outs = Flatten()(up3_land_outs)
    up3_land_outs = RepeatVector(nb_timestep)(up3_land_outs)
    up3_land_outs = Reshape((nb_timestep, 28, 28, 72))(up3_land_outs)
    up3_land_outs = (ConvLSTM2D(filters=8,
                                kernel_size=(3, 3),
                                padding='same',
                                activation='sigmoid',
                                return_sequences=False,
                                stateful=False,
                                name='land_con3'))(up3_land_outs)
    # upsampling land output for 2nd block
    conv_2_out = Conv2D(64, (3, 3),
                        padding='same',
                        activation='relu',
                        name='conv_2_out',
                        trainable=True)(conv_2_out)
    up2_land_outs = UpSampling2D(size=(2, 2))(up3_land_outs)
    up2_land_outs = Concatenate()([conv_2_out, up2_land_outs])
    up2_land_outs = Flatten()(up2_land_outs)
    up2_land_outs = RepeatVector(nb_timestep)(up2_land_outs)
    up2_land_outs = Reshape((nb_timestep, 56, 56, 72))(up2_land_outs)
    up2_land_outs = (ConvLSTM2D(filters=8,
                                kernel_size=(3, 3),
                                padding='same',
                                activation='sigmoid',
                                return_sequences=False,
                                stateful=False,
                                name='land_con2'))(up2_land_outs)

    # upsampling land output for 1st block
    up1_land_outs = UpSampling2D(size=(2, 2))(up2_land_outs)
    up1_land_outs = Concatenate()([conv_1_out_b, up1_land_outs])
    up1_land_outs = Flatten()(up1_land_outs)
    up1_land_outs = RepeatVector(nb_timestep)(up1_land_outs)
    up1_land_outs = Reshape((nb_timestep, 112, 112, 72))(up1_land_outs)
    up1_land_outs = (ConvLSTM2D(filters=8,
                                kernel_size=(3, 3),
                                padding='same',
                                activation='sigmoid',
                                return_sequences=False,
                                stateful=False,
                                name='land_con1'))(up1_land_outs)

    # outs = Lambda(Kpool, arguments={'pool_size': (14, 14)}, output_shape=[1, 1, 512])(outs)
    # # outs = K.pool2d(outs, (7,7), pool_mode='avg')

    outs = AveragePooling2D((14, 14), name='avg_pool')(convLSTM_outs)
    outs = Flatten()(outs)
    #
    attri_outs = Dense(1000,
                       kernel_initializer='normal',
                       activation='sigmoid',
                       name='attri',
                       trainable=trainable)(outs)
    #
    cate_outs = Dense(cate_num,
                      kernel_initializer='normal',
                      activation='softmax',
                      name='cate',
                      trainable=trainable)(outs)
    #
    type_outs = Dense(type_num,
                      kernel_initializer='normal',
                      activation='softmax',
                      name='type',
                      trainable=trainable)(outs)

    # land_outs = Dense(196, kernel_initializer='normal', activation='sigmoid', name='land_all', trainable=True)(outs)
    # land_outs = Reshape((14, 14, 1))(land_outs)
    return [
        attri_outs, cate_outs, type_outs, land_outs, up3_land_outs,
        up2_land_outs, up1_land_outs
    ]
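
A usage sketch for this multi-task variant, assuming the Keras 2 functional API used above. The 224x224x3 channels_last input is inferred from the 14x14 and 112x112 grids in the Reshape calls; the optimizer and per-output losses are placeholders, not the project's actual training setup.

from keras.layers import Input
from keras.models import Model

data = Input(batch_shape=(1, 224, 224, 3))  # fixed batch of 1, matching ZeroPadding2D(batch_size=1)
outs = sam_resnet(data)                     # [attri, cate, type, land_con5, land_con3, land_con2, land_con1]
model = Model(inputs=data, outputs=outs)
model.compile(optimizer='adam',
              loss=['binary_crossentropy',        # attri (placeholder choice)
                    'categorical_crossentropy',   # cate
                    'categorical_crossentropy',   # type
                    'mse', 'mse', 'mse', 'mse'])  # the four 'land' outputs (placeholder choice)
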
Example 3
def sam_resnet(x):
    #x = [x, x_maps]
    # Dilated Convolutional Network
    print("Iniciando sam_resnet")
    print("Iniciando dcn_resnet...")
    dcn = dcn_resnet(input_tensor=x[0]) #Ready!!
    aux = K.permute_dimensions(dcn.output, (0, 3, 1, 2))   #Agregado para poner channels_first como en el codigo original.
    #conv_feat = Convolution2D(512, 3, 3, border_mode='same', activation='relu')(dcn.output)
    
    # New Version. Input shape = (None, 2048, 30, 40) output shape=(None, 512, 30, 40)
    #conv_feat = Conv2D(512, 
    #                   (3, 3), 
    #                   padding='same', 
    #                   activation='relu',
    #                   data_format="channels_first")(aux)     # GPU New Version

    conv_feat = Conv2D_NCHW(aux,512, 
                            (3, 3), 
                            padding='same', 
                            activation='relu')     # CPU New Version NCHW

    # Attentive Convolutional LSTM
    print("Iniciando att_convlstm...")
    att_convlstm = Lambda(repeat, repeat_shape)(conv_feat) #Output shape=(1, 4, 512, 30, 40)
    #x = att_convlstm
    #l = AttentiveConvLSTM(nb_filters_in=512, nb_filters_out=512, nb_filters_att=512,nb_cols=3, nb_rows=3)
    #l(x)
    att_convlstm = AttentiveConvLSTM(nb_filters_in=512,  #Output shape=(1, 512, 30, 40)
                                     nb_filters_out=512, 
                                     nb_filters_att=512,
                                     nb_cols=3, 
                                     nb_rows=3)(att_convlstm)
   
    # Learned Prior (1)

    #Input shape = (None, 16, 30, 40), output shape = (1, 16, 30, 40)
    print("Iniciando LearningPrior 1...")
    priors1 = LearningPrior(nb_gaussian=nb_gaussian, init=gaussian_priors_init)(x[1])

    #concateneted = merge([att_convlstm, priors1], mode='concat', concat_axis=1)
    #print(att_convlstm.shape)
    #print(priors1.shape)
    #attentive = att_convlstm * 1  # Working around a bug
    #prior = priors1 * 1           # Working around a bug
    #print(attentive.shape)
    #print(prior.shape)
    #concateneted = concatenate([attentive, prior], axis=1)          # New version without the BUG

    print("Concatenating...")
    concateneted = concatenate([att_convlstm, priors1], axis=1)    # Version with the BUG
 
    #learned_priors1 = AtrousConvolution2D(512, 5, 5, border_mode='same', activation='relu',
    #                                      atrous_rate=(4, 4))(concateneted)

    #learned_priors1 = Conv2D(512, (5, 5), dilation_rate=(4, 4), activation='relu',
    #           data_format="channels_first", padding='same')(concateneted)  #New version for GPU
        
    learned_priors1 = Conv2D_NCHW(concateneted, 512, 
                                  (5, 5), 
                                  dilation_rate=(4, 4), 
                                  activation='relu', 
                                  padding='same')  #New version for CPU 
    # Learned Prior (2) 
    print("Iniciando LearningPrior 2...")
    priors2 = LearningPrior(nb_gaussian=nb_gaussian, init=gaussian_priors_init)(x[1])
    
    #learned_priors1 = learned_priors1 * 1   # Working around a bug
    #priors2 = priors2 * 1                   # Working around a bug
    
    #concateneted = merge([learned_priors1, priors2], mode='concat', concat_axis=1)
    print("Concatenando...")
    concateneted = concatenate([learned_priors1, priors2], axis=1) 

    #learned_priors2 = AtrousConvolution2D(512, 5, 5, border_mode='same', activation='relu',
    #                                      atrous_rate=(4, 4))(concateneted)

    #learned_priors2 = Conv2D(512, (5, 5), dilation_rate=(4, 4), activation='relu',
    #           data_format="channels_first", padding='same')(concateneted)  #New version for GPU
     
    learned_priors2 = Conv2D_NCHW(concateneted, 512, 
                                  (5, 5), 
                                  dilation_rate=(4, 4), 
                                  activation='relu', 
                                  padding='same')  #New version for CPU 
       
    # Final Convolutional Layer
    print("Final Convolutional Layer")
    #outs = Convolution2D(1, 1, 1, border_mode='same', activation='relu')(learned_priors2)
    #outs = Conv2D(1, (1, 1), padding='same', data_format="channels_first", activation='relu')(learned_priors2) #New version for GPU output shape=(1, 1, 30, 40)
    outs = Conv2D_NCHW(learned_priors2, 1, 
                       (1, 1), 
                       padding='same', 
                       activation='relu') # New version for CPU, output shape=(1, 1, 30, 40)


    # VALIDATE THIS FUNCTION
    outs_up = Lambda(upsampling, upsampling_shape)(outs) # Input shape=(1, 1, 30, 40)
    #print(outs_up.shape) #(1, 1, 480, 640)
    
    print("Finalizado sam_resnet")
    return [outs_up, outs_up, outs_up]   #When passing a list as loss, it should have one entry per model outputs
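
Conv2D_NCHW is called in this example but not defined in the snippet. A plausible sketch of such a helper, assuming it only emulates a channels_first convolution on CPU by permuting to channels_last, applying a standard Conv2D, and permuting back (the project's actual helper may differ):

from keras.layers import Conv2D, Permute

def Conv2D_NCHW(x, filters, kernel_size, **conv_kwargs):
    # Hypothetical helper: NCHW -> NHWC, channels_last Conv2D, then back to NCHW.
    x = Permute((2, 3, 1))(x)                           # (N, C, H, W) -> (N, H, W, C)
    x = Conv2D(filters, kernel_size, **conv_kwargs)(x)  # padding/dilation/activation passed through
    return Permute((3, 1, 2))(x)                        # (N, H, W, C) -> (N, C, H, W)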