示例#1
0
    def forward(self, x):
        """Inception-style block: 1x1, split-3x3, double-3x3 and pooled
        branches, concatenated along the channel axis.

        Mirrors torch.cat(outputs, 1) from the original PyTorch module.
        """
        # Channel axis depends on the backend's data format.
        axis = 1 if K.image_data_format() == 'channels_first' else -1

        out_1x1 = self.branch1x1.forward(x)

        # 3x3 branch splits into two parallel heads that are concatenated.
        stem_3x3 = self.branch3x3_1.forward(x)
        out_3x3 = concatenate(
            [self.branch3x3_2a.forward(stem_3x3),
             self.branch3x3_2b.forward(stem_3x3)],
            axis=axis)

        # Double-3x3 branch: two stacked convs, then two parallel heads.
        stem_dbl = self.branch3x3dbl_1.forward(x)
        stem_dbl = self.branch3x3dbl_2.forward(stem_dbl)
        out_dbl = concatenate(
            [self.branch3x3dbl_3a.forward(stem_dbl),
             self.branch3x3dbl_3b.forward(stem_dbl)],
            axis=axis)

        # Equivalent of torch F.avg_pool2d(x, kernel_size=3, stride=1, padding=1).
        pooled = AveragePooling2D(pool_size=(3, 3), strides=(1, 1),
                                  padding='same')(x)
        pooled = self.branch_pool.forward(pooled)

        return concatenate([out_1x1, out_3x3, out_dbl, pooled], axis=axis)
示例#2
0
 def forward(self, x):
     """Fire-module forward pass: squeeze conv + activation, then the two
     expand branches concatenated channel-wise.

     Mirrors the PyTorch original noted in the old comments:
     x = self.squeeze_activation(self.squeeze(x));
     torch.cat([expand1x1_activation(expand1x1(x)),
                expand3x3_activation(expand3x3(x))], 1)

     The previous implementation appended unapplied layer objects to a
     list and called concatenate() with no inputs (a TypeError at
     runtime), returning layers instead of a tensor.
     """
     x = self.squeeze_activation(self.squeeze(x))
     # Default concatenate axis (-1) is the channel axis for the
     # channels-last layout used throughout this file.
     return concatenate([
         self.expand1x1_activation(self.expand1x1(x)),
         self.expand3x3_activation(self.expand3x3(x)),
     ])
示例#3
0
def branch_attention(cost_volume_3d, cost_volume_h, cost_volume_v,
                     cost_volume_45, cost_volume_135):
    """Compute a 4-channel attention map from the 3D cost volume and use it
    to weight the four directional cost volumes.

    Args:
        cost_volume_3d: aggregated cost volume the attention is derived from.
        cost_volume_h / cost_volume_v / cost_volume_45 / cost_volume_135:
            directional cost volumes to be re-weighted.

    Returns:
        Tuple of (weighted directional volumes concatenated along axis 4,
        the raw 4-channel sigmoid attention map ``cost1``).

    Changes vs. original: removed unused locals ``k`` and ``label`` and
    factored the four copy-pasted Lambda chains into one helper (the slice
    index is bound via a default argument so each closure keeps its own
    channel).
    """
    feature = 4 * 9  # channels per directional volume — TODO confirm against callers

    # Two convbn stages reduce the 3D volume to a 4-channel sigmoid map,
    # one channel per direction.
    cost1 = convbn(cost_volume_3d, 6, 3, 1, 1)
    cost1 = Activation('relu')(cost1)
    cost1 = convbn(cost1, 4, 3, 1, 1)
    cost1 = Activation('sigmoid')(cost1)

    def _direction_mask(channel):
        # Slice one attention channel, insert a length-9 axis at dim 1
        # (disparity), then repeat across the feature axis (dim 4) so the
        # mask broadcasts over a full directional volume.
        mask = Lambda(lambda y, c=channel: K.repeat_elements(
            K.expand_dims(y[:, :, :, c:c + 1], 1), 9, 1))(cost1)
        return Lambda(lambda y: K.repeat_elements(y, feature, 4))(mask)

    cost_h = _direction_mask(0)
    cost_v = _direction_mask(1)
    cost_45 = _direction_mask(2)
    cost_135 = _direction_mask(3)

    return concatenate([
        multiply([cost_h, cost_volume_h]),
        multiply([cost_v, cost_volume_v]),
        multiply([cost_45, cost_volume_45]),
        multiply([cost_135, cost_volume_135])
    ],
                       axis=4), cost1
示例#4
0
def define_epinet(sz_input,sz_input2,view_n,conv_depth,filt_num,learning_rate):
    """Build the 4-stream EPI-net and compile it with RMSprop + MAE.

    Args:
        sz_input, sz_input2: spatial input dimensions.
        view_n: sequence of views; len(view_n) is the channel count.
        conv_depth: depth of the merged conv stack.
        filt_num: per-stream filter count.
        learning_rate: RMSprop learning rate.

    Returns:
        Compiled Keras Model with four inputs and one output.
    """
    stack_shape = (sz_input, sz_input2, len(view_n))

    # 4-Input: one stack per EPI direction.
    inputs = [Input(shape=stack_shape, name='input_stack_' + tag)
              for tag in ('90d', '0d', '45d', 'M45d')]

    # 4-Stream layer: Conv - Relu - Conv - BN - Relu, one stream per input.
    streams = [
        layer1_multistream(sz_input, sz_input2, len(view_n),
                           int(filt_num))(inp)
        for inp in inputs
    ]

    # Merge the four streams channel-wise.
    mid_merged = concatenate(streams, name='mid_merged')

    # Merged layer: Conv - Relu - Conv - BN - Relu (spatial size shrinks by 6).
    mid_merged_ = layer2_merged(sz_input - 6, sz_input2 - 6,
                                int(4 * filt_num), int(4 * filt_num),
                                conv_depth)(mid_merged)

    # Last conv layer: Conv - Relu - Conv (total spatial shrink is 18).
    output = layer3_last(sz_input - 18, sz_input2 - 18,
                         int(4 * filt_num), int(4 * filt_num))(mid_merged_)

    model_512 = Model(inputs=inputs, outputs=[output])
    model_512.compile(optimizer=RMSprop(lr=learning_rate), loss='mae')
    model_512.summary()

    return model_512
示例#5
0
    def inference(self, mode='simple'):
        """Build and compile a FlowNet model.

        Args:
            mode: 'simple' stacks the two views and runs FlowNetSimple;
                'correlation' runs FlowNetCorr on the separate views.

        Returns:
            Compiled Keras Model, or None for an unrecognised mode
            (matching the original implicit fall-through).
        """
        view_shape = (self.model_in_height, self.model_in_width,
                      self.model_in_depth)
        left_image = Input(shape=view_shape, name='left_input')
        right_image = Input(shape=view_shape, name='right_image')

        if mode == 'simple':
            # Stack both views channel-wise and run the simple network.
            stacked = concatenate([left_image, right_image],
                                  axis=3,
                                  name='concate_view')
            prediction = self.FlowNetSimple(stacked)
        elif mode == 'correlation':
            prediction = self.FlowNetCorr(left_image, right_image)
        else:
            return None

        FlowNet = Model(inputs=[left_image, right_image],
                        outputs=[prediction])
        FlowNet.compile(optimizer=Adam(lr=self.learning_rate), loss='mae')
        FlowNet.summary()
        return FlowNet
示例#6
0
def get_model(filters_count, conv_depth, learning_rate, input_shape=(512, 512, 9)):
    """Build the 4-stream EPI model and compile it with RMSprop + MAE.

    Args:
        filters_count: per-stream filter count.
        conv_depth: depth of the merged conv stack.
        learning_rate: RMSprop learning rate.
        input_shape: per-stream input shape; default 512x512x9
            (nine grayscale views stacked channel-wise).

    Returns:
        Compiled Keras Model with four inputs and one output.
    """
    # One input stack per EPI direction.
    inputs = [Input(shape=input_shape, name='input_' + tag)
              for tag in ('90d', '0d', '45d', 'm45d')]

    # Part 1: one multistream feature extractor per direction.
    streams = [layersP1_multistream(input_shape, int(filters_count))(inp)
               for inp in inputs]

    # Merge streams channel-wise.
    merged = concatenate(streams, name='merged')

    merged_channels = int(filters_count) * 4

    # Part 2: conv-relu-bn-conv-relu over the merged features.
    merged = layersP2_merged(
        input_shape=(input_shape[0], input_shape[1], merged_channels),
        filters_count=merged_channels,
        conv_depth=conv_depth)(merged)

    # Part 3: output head.
    output = layersP3_output(
        input_shape=(input_shape[0], input_shape[1], merged_channels),
        filters_count=merged_channels)(merged)

    mymodel = Model(inputs=inputs, outputs=[output])
    mymodel.compile(optimizer=RMSprop(lr=learning_rate), loss='mae')
    mymodel.summary()

    return mymodel
def decoder_block(x, y, scope, size=None, upconv=True, ksize=(3, 3), upsize=(2, 2), upstirdes=(2, 2), act_fn='relu',
                  ep_collection='end_points', reuse=None, batch_norm=True, dropout=0.0):
    """U-Net style decoder block: upsample `x`, concatenate with the skip
    connection `y`, then apply two convolutions.

    Args:
        x: decoder feature map to be upsampled.
        y: encoder skip-connection feature map.
        scope: tf variable scope name for the block.
        size: output channel count; defaults to half of x's channels.
        upconv: if True, upsample x with a transposed conv; else use x as-is.
        ksize: kernel size of the two convolutions.
        upsize: kernel size of the transposed convolution.
        upstirdes: strides of the transposed convolution (misspelled name
            kept as-is — callers may pass it by keyword).
        act_fn: activation for the two convolutions.
        ep_collection: tf collection that intermediate tensors are added to.
        reuse: variable-scope reuse flag.
        batch_norm: if True, append BatchNormalization with training=True.
        dropout: dropout rate applied after the convs when > 0.

    Returns:
        The final convolved feature map.
    """
    if size is None:
        # Default: halve the channel count of the incoming decoder features.
        base_size = x.get_shape().as_list()[-1]
        size = int(base_size / 2)
    with tf.variable_scope(scope, scope, [x], reuse=reuse) as sc:
        # ThresholdedReLU with theta=0.0 zeroes non-positive activations.
        x = ThresholdedReLU(theta=0.0)(x)
        uped = Conv2DTranspose(size, upsize, strides=upstirdes, padding='same')(x) if upconv else x

        # NOTE(review): reconcile_feature_size presumably crops/pads so the
        # two maps can be concatenated — confirm against its definition.
        uped, y = reconcile_feature_size(uped, y)
        up = concatenate([uped, y], axis=3)
        tf.add_to_collection(ep_collection, up)

        conv = Conv2D(size, ksize, activation=act_fn, padding='same')(up)
        tf.add_to_collection(ep_collection, conv)

        conv = Conv2D(size, ksize, activation=act_fn, padding='same')(conv)
        tf.add_to_collection(ep_collection, conv)

        if batch_norm:
            # training=True: always uses batch statistics, even at inference.
            conv = BatchNormalization()(conv, training=True)
            tf.add_to_collection(ep_collection, conv)
        if dropout > 0.0:
            conv = Dropout(dropout)(conv)
            tf.add_to_collection(ep_collection, conv)
    return conv
示例#8
0
    def forward(self, x):
        """Inception-style block: 1x1, 7x7, double-7x7 and pooled branches,
        concatenated along the channel axis.

        Mirrors torch.cat(outputs, 1) from the original PyTorch module.
        """
        # Channel axis depends on the backend's data format.
        axis = 1 if K.image_data_format() == 'channels_first' else -1

        out_1x1 = self.branch1x1.forward(x)

        out_7x7 = self.branch7x7_1.forward(x)
        out_7x7 = self.branch7x7_2.forward(out_7x7)
        out_7x7 = self.branch7x7_3.forward(out_7x7)

        # Double-7x7 branch: five stacked sub-modules.
        out_dbl = x
        for stage in (self.branch7x7dbl_1, self.branch7x7dbl_2,
                      self.branch7x7dbl_3, self.branch7x7dbl_4,
                      self.branch7x7dbl_5):
            out_dbl = stage.forward(out_dbl)

        # Equivalent of torch F.avg_pool2d(x, kernel_size=3, stride=1, padding=1).
        pooled = AveragePooling2D(pool_size=(3, 3), strides=(1, 1),
                                  padding='same')(x)
        pooled = self.branch_pool.forward(pooled)

        return concatenate([out_1x1, out_7x7, out_dbl, pooled], axis=axis)
示例#9
0
def feature_extraction(sz_input, sz_input2):
    """Build the feature-extraction sub-network: two conv blocks, four
    residual-style stages, a 4-scale spatial pyramid pooling module, and a
    fusion head.

    Args:
        sz_input, sz_input2: spatial dimensions of the single-channel input.

    Returns:
        Keras Model mapping (sz_input, sz_input2, 1) to a 4-channel map.
    """
    net_in = Input(shape=(sz_input, sz_input2, 1))

    # Stem: two convbn + relu blocks.
    stem = convbn(net_in, 4, 3, 1, 1)
    stem = Activation('relu')(stem)
    stem = convbn(stem, 4, 3, 1, 1)
    stem = Activation('relu')(stem)

    layer1 = _make_layer(stem, 4, 2, 1, 1)    # (?, 32, 32, 4)
    layer2 = _make_layer(layer1, 8, 8, 1, 1)  # (?, 32, 32, 8)
    layer3 = _make_layer(layer2, 16, 2, 1, 1) # (?, 32, 32, 16)
    layer4 = _make_layer(layer3, 16, 2, 1, 2) # (?, 32, 32, 16)
    layer4_size = (layer4.get_shape().as_list()[1],
                   layer4.get_shape().as_list()[2])

    # Pyramid pooling at 2/4/8/16: pool, 1x1 convbn, relu, bilinear upsample.
    branches = []
    for pool in (2, 4, 8, 16):
        b = AveragePooling2D((pool, pool), (pool, pool),
                             'same',
                             data_format='channels_last')(layer4)
        b = convbn(b, 4, 1, 1, 1)
        b = Activation('relu')(b)
        branches.append(UpSampling2DBilinear(layer4_size)(b))

    # Original concat order: layer2, layer4, then branches from coarsest
    # (pool 16) to finest (pool 2).
    output_feature = concatenate([layer2, layer4] + branches[::-1])

    # Fusion head: 3x3 convbn + relu, then a bias-free 1x1 projection to 4 ch.
    lastconv = convbn(output_feature, 16, 3, 1, 1)
    lastconv = Activation('relu')(lastconv)
    lastconv = Conv2D(4,
                      1, (1, 1),
                      'same',
                      data_format='channels_last',
                      use_bias=False)(lastconv)
    print(lastconv.get_shape())
    return Model(inputs=[net_in], outputs=[lastconv])
示例#10
0
def Fire_modele(x, squeeze=16, expand=64):
    """SqueezeNet Fire module: 1x1 squeeze conv, then parallel 1x1 and 3x3
    expand convs whose outputs are concatenated channel-wise (axis 3).

    Args:
        x: input tensor (channels-last).
        squeeze: filter count of the squeeze conv.
        expand: filter count of each expand conv.

    Returns:
        Tensor with 2 * expand channels.
    """
    squeezed = Activation('relu')(
        Conv2D(squeeze, (1, 1), padding='valid')(x))

    left = Activation('relu')(
        Conv2D(expand, (1, 1), padding='valid')(squeezed))
    right = Activation('relu')(
        Conv2D(expand, (3, 3), padding='same')(squeezed))

    return concatenate([left, right], axis=3)
示例#11
0
    def inference(self):
        """Build the 4-stream EPI-net and compile it with RMSprop + MAE.

        Returns:
            Compiled Keras Model with four directional input stacks and
            one output.
        """
        stack_shape = (self.img_height, self.img_width, len(self.view_n))

        # 4-Input: one stack per EPI direction.
        inputs = [Input(shape=stack_shape, name='input_stack_' + tag)
                  for tag in ('90d', '0d', '45d', 'M45d')]

        # 4-Stream layer: Conv - Relu - Conv - BN - Relu per input.
        streams = [
            self.layer1_multistream(self.img_height, self.img_width,
                                    len(self.view_n),
                                    int(self.filt_num))(inp)
            for inp in inputs
        ]

        # Merge the four streams channel-wise.
        mid_merged = concatenate(streams, name='mid_merged')

        # Merged layer: Conv - Relu - Conv - BN - Relu (spatial -6).
        mid_merged_ = self.layer2_merged(self.img_height - 6,
                                         self.img_width - 6,
                                         int(4 * self.filt_num),
                                         int(4 * self.filt_num),
                                         self.conv_depth)(mid_merged)

        # Last conv layer: Conv - Relu - Conv (total spatial -18).
        output = self.layer3_last(self.img_height - 18, self.img_width - 18,
                                  int(4 * self.filt_num),
                                  int(4 * self.filt_num))(mid_merged_)

        epinet = Model(inputs=inputs, outputs=[output])
        epinet.compile(optimizer=RMSprop(lr=self.learning_rate), loss='mae')
        epinet.summary()

        return epinet
示例#12
0
 def forward(self, x):
     """DenseNet layer forward pass: two pre-activation BN-ReLU-Conv stages,
     optional dropout on the new features, then channel-wise concatenation.

     Mirrors torch.cat([x, new_features], 1) from the original PyTorch
     _DenseLayer.
     """
     # Channel axis depends on the backend's data format.
     axis = 1 if K.image_data_format() == 'channels_first' else -1
     # First BN - ReLU - Conv stage.
     x = self.conv(self.relu(self.BN1(x)))
     # Second BN - ReLU; conv2 then produces the new features.
     x = self.relu2(self.BN2(x))
     new_features = self.conv2(x)
     if self.drop_rate > 0:
         # Equivalent of F.dropout(new_features, p=self.drop_rate).
         new_features = Dropout(self.drop_rate)(new_features)
     # NOTE: concatenates the post-relu2 tensor (not the layer input)
     # with the new features, exactly as the original did.
     return concatenate([x, new_features], axis=axis)
示例#13
0
    def forward(self, x):
        """Fire-module forward pass: squeeze conv + activation, then the
        1x1 and 3x3 expand branches concatenated along the channel axis."""
        # Channel axis depends on the backend's data format.
        axis = 1 if K.image_data_format() == 'channels_first' else -1

        squeezed = self.squeeze_activation(self.squeeze(x))

        left = self.expand1x1_activation(self.expand1x1(squeezed))
        right = self.expand3x3_activation(self.expand3x3(squeezed))

        return concatenate([left, right], axis=axis)
示例#14
0
    def forward(self, x):
        """Reduction block: strided 3x3 branch, double-3x3 branch and a
        max-pooled shortcut, concatenated along the channel axis.

        Mirrors torch.cat(outputs, 1) from the original PyTorch module.
        """
        # Channel axis depends on the backend's data format.
        axis = 1 if K.image_data_format() == 'channels_first' else -1

        out_3x3 = self.branch3x3.forward(x)

        out_dbl = self.branch3x3dbl_1.forward(x)
        out_dbl = self.branch3x3dbl_2.forward(out_dbl)
        out_dbl = self.branch3x3dbl_3.forward(out_dbl)

        # Equivalent of torch F.max_pool2d(x, kernel_size=3, stride=2).
        pooled = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(x)

        return concatenate([out_3x3, out_dbl, pooled], axis=axis)
def define_cepinet(sz_input,sz_input2,view_n,conv_depth,filt_num,learning_rate,for_vis = False):
    """Build a 3-stream (90d / 0d / M45d) EPI-net variant; the 45d stream
    is disabled (commented out), so the merged width is 3 * filt_num.

    When `for_vis` is True, the module-level `feats` list is (re)set and is
    expected to be populated by layer1_multistream with per-stream feature
    extractors (6 per stream); their outputs are appended to the model
    outputs for visualisation.

    Returns:
        (compiled Keras Model, feat_names).
    """
    global feats
    if for_vis:
        feats = []
    else:
        feats = None
    ''' 4-Input : Conv - Relu - Conv - BN - Relu ''' 
    input_stack_90d = Input(shape=(sz_input,sz_input2,len(view_n)), name='input_stack_90d')
    input_stack_0d= Input(shape=(sz_input,sz_input2,len(view_n)), name='input_stack_0d')
#    input_stack_45d= Input(shape=(sz_input,sz_input2,len(view_n)), name='input_stack_45d')
    input_stack_M45d= Input(shape=(sz_input,sz_input2,len(view_n)), name='input_stack_M45d')
    # Only three directional streams are active, hence the merged channel
    # count below uses num_stacks instead of 4.
    num_stacks = 3
    with tf.variable_scope("4-Stream"):
        ''' 4-Stream layer : Conv - Relu - Conv - BN - Relu ''' 
        mid_90d = layer1_multistream(sz_input,sz_input2,len(view_n),int(filt_num),do_vis=True,name="90d")(input_stack_90d)
        mid_0d = layer1_multistream(sz_input,sz_input2,len(view_n),int(filt_num),do_vis=True,name="0d")(input_stack_0d)    
    #    mid_45d=layer1_multistream(sz_input,sz_input2,len(view_n),int(filt_num))(input_stack_45d)    
        mid_M45d = layer1_multistream(sz_input,sz_input2,len(view_n),int(filt_num),do_vis=True,name="M45d")(input_stack_M45d)   

    with tf.variable_scope("Merge"):
        ''' Merge layers ''' 
        mid_merged = concatenate([mid_90d,mid_0d,mid_M45d],  name='mid_merged')
        
        ''' Merged layer : Conv - Relu - Conv - BN - Relu '''
        mid_merged_=layer2_merged(sz_input-6,sz_input2-6,int(num_stacks*filt_num),int(num_stacks*filt_num),conv_depth)(mid_merged)

    with tf.variable_scope("Last"):
        ''' Last Conv layer : Conv - Relu - Conv '''
        output=layer3_last(sz_input-18,sz_input2-18,int(num_stacks*filt_num),int(num_stacks*filt_num))(mid_merged_)

    if for_vis:
        # Apply each collected feature extractor to its stream's input;
        # feats is assumed to hold 6 extractors per stream, in stream order.
        feat_outs90d = [feat(input_stack_90d) for feat in feats[0:6]]
        feat_outs0d =  [feat(input_stack_0d) for feat in feats[6:12]]
        feat_outsM45d = [feat(input_stack_M45d) for feat in feats[12:18]]
        outputs = feat_outs90d + feat_outs0d + feat_outsM45d + [output]

    else: 
        outputs = [output]
    model_512 = Model(inputs = [input_stack_90d,input_stack_0d,
#                               input_stack_45d,
                               input_stack_M45d], outputs = outputs)
    opt = RMSprop(lr=learning_rate)
    model_512.compile(optimizer=opt, loss='mae')
    model_512.summary() 
    
    # NOTE(review): `feat_names` is not defined in this function —
    # presumably a module-level global populated by layer1_multistream;
    # verify, otherwise this line raises NameError.
    return model_512, feat_names
示例#16
0
def fire_module(x, fire_id, squeeze=16, expand=64):
    """SqueezeNet Fire module with named layers: 1x1 squeeze conv, then
    parallel 1x1 and 3x3 expand convs concatenated channel-wise.

    Args:
        x: input tensor.
        fire_id: integer used to build the 'fire<N>/' layer-name prefix.
        squeeze: filter count of the squeeze conv.
        expand: filter count of each expand conv.

    Returns:
        Tensor with 2 * expand channels.
    """
    s_id = 'fire' + str(fire_id) + '/'

    # Channel axis depends on the backend's data format.
    channel_axis = 1 if K.image_data_format() == 'channels_first' else 3

    x = Convolution2D(squeeze, (1, 1), padding='valid', name=s_id + sq1x1)(x)
    x = Activation('relu', name=s_id + relu + sq1x1)(x)

    left = Convolution2D(expand, (1, 1), padding='valid',
                         name=s_id + exp1x1)(x)
    left = Activation('relu', name=s_id + relu + exp1x1)(left)

    right = Convolution2D(expand, (3, 3), padding='same',
                          name=s_id + exp3x3)(x)
    right = Activation('relu', name=s_id + relu + exp3x3)(right)

    return concatenate([left, right], axis=channel_axis, name=s_id + 'concat')
示例#17
0
# Inspect the shared vision model applied to two different image inputs.
print(vision_model(image_input))
print(vision_model(image_input2))

print(vision_model.inputs)
print(vision_model.outputs)

# Encode the image by reusing the whole vision model as a layer.
encoded_image = vision_model(image_input)

# Next, let's define a language model to encode the question into a vector.
# Each question will be at most 100 word long,
# and we will index words as integers from 1 to 9999.
question_input = Input(shape=(100, ), dtype='int32')

temp = Embedding(input_dim=10000, output_dim=256, input_length=100)

embedded_question = temp(question_input)

# Encode the embedded question into a single 256-dim vector.
encoded_question = LSTM(256)(embedded_question)

# Let's concatenate the question vector and the image vector:
merged = concatenate([encoded_question, encoded_image])

# And let's train a logistic regression over 1000 words on top:
output = Dense(1000, activation='softmax')(merged)

# This is our final model:
vqa_model = Model(inputs=[image_input, question_input], outputs=output)

# Keras Models and layer instances are both Layer subclasses.
print(isinstance(vqa_model, Layer))
print(isinstance(Dense(1000, activation='softmax'), Layer))
#isinstance(Vehicle(), Vehicle)
示例#18
0
    def FlowNetCorr(self, input_left, input_right):
        """FlowNet-Correlation forward graph.

        Runs two siamese conv towers on the left/right views, builds a
        correlation volume from shifted dot products of the conv3 features,
        then decodes it with a deconvolution pyramid into a flow prediction.

        Args:
            input_left, input_right: the two input image tensors
                (channels-last).

        Returns:
            The final 1-channel prediction tensor.
        """
        # Siamese encoder: three strided convs per view (stride 2 each,
        # so conv3 features are at 1/8 resolution).
        left_Conv1 = Conv2D(64, (7, 7), (2, 2),
                            padding='same',
                            activation='relu',
                            name='left_Conv1')(input_left)
        left_Conv2 = Conv2D(128, (5, 5), (2, 2),
                            padding='same',
                            activation='relu',
                            name='left_Conv2')(left_Conv1)
        left_Conv3 = Conv2D(256, (5, 5), (2, 2),
                            padding='same',
                            activation='relu',
                            name='left_Conv3')(left_Conv2)

        right_Conv1 = Conv2D(64, (7, 7), (2, 2),
                             padding='same',
                             activation='relu',
                             name='right_Conv1')(input_right)
        right_Conv2 = Conv2D(128, (5, 5), (2, 2),
                             padding='same',
                             activation='relu',
                             name='right_Conv2')(right_Conv1)
        right_Conv3 = Conv2D(256, (5, 5), (2, 2),
                             padding='same',
                             activation='relu',
                             name='right_Conv3')(right_Conv2)

        # Correlation layer: for each (i, j) displacement, shift the right
        # features, take the channel-wise dot product with the left
        # features, and stack all results into a 21x21 = 441-channel volume.
        max_disp = 10
        layer_list = []
        dotLayer = Lambda(lambda x: tf.reduce_sum(
            tf.multiply(x[0], x[1]), axis=-1, keepdims=True),
                          name='dotLayer')
        for i in range(-2 * max_disp, 2 * max_disp + 2, 2):
            for j in range(-2 * max_disp, 2 * max_disp + 2, 2):
                # Slice the in-bounds region of the shifted map, then pad
                # it back to full size on the opposite side.
                slice_height = int(self.model_in_height / 8) - abs(j)
                slice_width = int(self.model_in_width / 8) - abs(i)
                start_y = abs(j) if j < 0 else 0
                start_x = abs(i) if i < 0 else 0
                top_pad = j if (j > 0) else 0
                bottom_pad = start_y
                left_pad = i if (i > 0) else 0
                right_pad = start_x

                # NOTE(review): these lambdas capture the loop variables by
                # reference; this is fine while the layer is called
                # immediately here, but re-tracing/deserialising the model
                # would see only the final loop values — confirm if the
                # model is ever saved and reloaded.
                gather_layer = Lambda(lambda x: tf.pad(
                    tf.slice(x,
                             begin=[0, start_y, start_x, 0],
                             size=[-1, slice_height, slice_width, -1]),
                    paddings=[[0, 0], [top_pad, bottom_pad],
                              [left_pad, right_pad], [0, 0]]),
                                      name='gather_{}_{}'.format(
                                          i, j))(right_Conv3)
                current_layer = dotLayer([left_Conv3, gather_layer])
                layer_list.append(current_layer)
        Corr_441 = Lambda(lambda x: tf.concat(x, 3),
                          name='Corr_441')(layer_list)
        # Redirect path: compress left conv3 features and append them to
        # the correlation volume.
        Conv_redir = Conv2D(32, (1, 1), (1, 1),
                            padding='same',
                            activation='relu',
                            name='Conv_redir')(left_Conv3)
        Corr = concatenate([Corr_441, Conv_redir], axis=3, name='Corr')

        # Contracting part after the correlation.
        Conv3_1 = Conv2D(256, (3, 3),
                         padding='same',
                         activation='relu',
                         name='Conv3_1')(Corr)
        Conv4 = Conv2D(512, (3, 3), (2, 2),
                       padding='same',
                       activation='relu',
                       name='Conv4')(Conv3_1)
        Conv4_1 = Conv2D(512, (3, 3),
                         padding='same',
                         activation='relu',
                         name='Conv4_1')(Conv4)
        Conv5 = Conv2D(512, (3, 3), (2, 2),
                       padding='same',
                       activation='relu',
                       name='Conv5')(Conv4_1)
        Conv5_1 = Conv2D(512, (3, 3),
                         padding='same',
                         activation='relu',
                         name='Conv5_1')(Conv5)
        Conv6 = Conv2D(1024, (3, 3), (2, 2),
                       padding='same',
                       activation='relu',
                       name='Conv6')(Conv5_1)
        ''' deconvolution layers : deconv5(flow5) - deconv4(flow4) - deconv3(flow3) - deconv2 - prediction '''
        deconv5 = Conv2DTranspose(512, (3, 3), (2, 2),
                                  padding='same',
                                  activation='relu',
                                  name='deconv5')(Conv6)
        concat1 = concatenate([deconv5, Conv5_1], axis=3, name='concat1')

        flow5 = Conv2D(2, (3, 3), padding='same', name='flow5')(concat1)
        flow5_up = Conv2DTranspose(2, (3, 3), (2, 2),
                                   padding='same',
                                   name='flow5_up')(flow5)

        deconv4 = Conv2DTranspose(256, (3, 3), (2, 2),
                                  padding='same',
                                  activation='relu',
                                  name='deconv4')(concat1)
        concat2 = concatenate([deconv4, Conv4_1, flow5_up],
                              axis=3,
                              name='concat2')

        # NOTE(review): flow4/flow3 below emit 1 channel while flow5 emits
        # 2 and their upsamplers emit 2 — possibly intentional for a
        # scalar-flow variant, but worth confirming against the FlowNet
        # reference which uses 2-channel flow at every scale.
        flow4 = Conv2D(1, (3, 3), padding='same', name='flow4')(concat2)
        flow4_up = Conv2DTranspose(2, (3, 3), (2, 2),
                                   padding='same',
                                   name='flow4_up')(flow4)

        deconv3 = Conv2DTranspose(128, (5, 5), (2, 2),
                                  padding='same',
                                  activation='relu',
                                  name='deconv3')(concat2)
        concat3 = concatenate([deconv3, Conv3_1, flow4_up],
                              axis=3,
                              name='concat3')

        flow3 = Conv2D(1, (3, 3), padding='same', name='flow3')(concat3)
        flow3_up = Conv2DTranspose(2, (3, 3), (2, 2),
                                   padding='same',
                                   name='flow3_up')(flow3)

        deconv2 = Conv2DTranspose(64, (5, 5), (2, 2),
                                  padding='same',
                                  activation='relu',
                                  name='deconv2')(concat3)
        # Skip connection comes from the LEFT tower's conv2 features.
        concat4 = concatenate([deconv2, left_Conv2, flow3_up],
                              axis=3,
                              name='concat4')
        prediction = Conv2D(1, (3, 3), padding='same',
                            name='prediction')(concat4)

        return prediction
示例#19
0
    def FlowNetSimple(self, input):
        """FlowNet-Simple forward graph.

        Encodes the channel-stacked image pair with nine convolutions, then
        decodes with a deconvolution pyramid (intermediate flow estimates
        are upsampled and fed back in at each scale).

        Args:
            input: channel-wise concatenation of the two views.

        Returns:
            The final 1-channel prediction tensor.
        """
        ''' convolution layers : Conv1 - Conv2 - Conv3 - Conv3_1 - Conv4 - Conv4_1 - Conv5 - Conv5_1 - Conv6 '''
        Conv1 = Conv2D(64, (7, 7), (2, 2),
                       padding='same',
                       activation='relu',
                       name='Conv1')(input)
        Conv2 = Conv2D(128, (5, 5), (2, 2),
                       padding='same',
                       activation='relu',
                       name='Conv2')(Conv1)
        Conv3 = Conv2D(256, (5, 5), (2, 2),
                       padding='same',
                       activation='relu',
                       name='Conv3')(Conv2)
        Conv3_1 = Conv2D(256, (3, 3),
                         padding='same',
                         activation='relu',
                         name='Conv3_1')(Conv3)
        Conv4 = Conv2D(512, (3, 3), (2, 2),
                       padding='same',
                       activation='relu',
                       name='Conv4')(Conv3_1)
        Conv4_1 = Conv2D(512, (3, 3),
                         padding='same',
                         activation='relu',
                         name='Conv4_1')(Conv4)
        Conv5 = Conv2D(512, (3, 3), (2, 2),
                       padding='same',
                       activation='relu',
                       name='Conv5')(Conv4_1)
        Conv5_1 = Conv2D(512, (3, 3),
                         padding='same',
                         activation='relu',
                         name='Conv5_1')(Conv5)
        Conv6 = Conv2D(1024, (3, 3), (2, 2),
                       padding='same',
                       activation='relu',
                       name='Conv6')(Conv5_1)
        ''' deconvolution layers : deconv5(flow5) - deconv4(flow4) - deconv3(flow3) - deconv2 - prediction '''
        deconv5 = Conv2DTranspose(512, (3, 3), (2, 2),
                                  padding='same',
                                  activation='relu',
                                  name='deconv5')(Conv6)
        concat1 = concatenate([deconv5, Conv5_1], axis=3, name='concat1')

        flow5 = Conv2D(2, (3, 3), padding='same', name='flow5')(concat1)
        flow5_up = Conv2DTranspose(2, (3, 3), (2, 2),
                                   padding='same',
                                   name='flow5_up')(flow5)

        deconv4 = Conv2DTranspose(256, (3, 3), (2, 2),
                                  padding='same',
                                  activation='relu',
                                  name='deconv4')(concat1)
        concat2 = concatenate([deconv4, Conv4_1, flow5_up],
                              axis=3,
                              name='concat2')

        # NOTE(review): flow4/flow3 emit 1 channel while flow5 emits 2 —
        # possibly intentional for a scalar-flow variant; the reference
        # FlowNet uses 2-channel flow at every scale. Confirm.
        flow4 = Conv2D(1, (3, 3), padding='same', name='flow4')(concat2)
        flow4_up = Conv2DTranspose(2, (3, 3), (2, 2),
                                   padding='same',
                                   name='flow4_up')(flow4)

        deconv3 = Conv2DTranspose(128, (5, 5), (2, 2),
                                  padding='same',
                                  activation='relu',
                                  name='deconv3')(concat2)
        concat3 = concatenate([deconv3, Conv3_1, flow4_up],
                              axis=3,
                              name='concat3')

        flow3 = Conv2D(1, (3, 3), padding='same', name='flow3')(concat3)
        flow3_up = Conv2DTranspose(2, (3, 3), (2, 2),
                                   padding='same',
                                   name='flow3_up')(flow3)

        deconv2 = Conv2DTranspose(64, (5, 5), (2, 2),
                                  padding='same',
                                  activation='relu',
                                  name='deconv2')(concat3)
        concat4 = concatenate([deconv2, Conv2, flow3_up],
                              axis=3,
                              name='concat4')
        prediction = Conv2D(1, (3, 3), padding='same',
                            name='prediction')(concat4)

        return prediction
示例#20
0
def define_epinet(sz_input, sz_input2, view_n, conv_depth, filt_num,
                  learning_rate):
    """Build and compile the 4-stream EPINET depth-estimation model.

    Each angular direction (90d, 0d, 45d, M45d) gets its own input stack
    and its own first-stage multistream block; the four streams are
    concatenated, passed through the merged conv stack and the last conv
    stage.  Per-stream visualisation features (from the module-level
    `feats` list) are exposed as extra model outputs ahead of the final
    prediction.  Compiled with RMSprop and MAE loss.

    Returns the compiled model together with the module-level
    `feat_names` list describing the visualisation outputs.
    """
    global feats
    directions = ('90d', '0d', '45d', 'M45d')

    # One input stack per angular direction.
    inputs = [
        Input(shape=(sz_input, sz_input2, len(view_n)),
              name='input_stack_{}'.format(tag)) for tag in directions
    ]

    # 4-Stream layer: Conv - Relu - Conv - BN - Relu, one per direction.
    streams = [
        layer1_multistream(sz_input,
                           sz_input2,
                           len(view_n),
                           int(filt_num),
                           do_vis=True,
                           name=tag)(inp)
        for tag, inp in zip(directions, inputs)
    ]

    # Merge the four directional streams along the channel axis.
    mid_merged = concatenate(streams, name='mid_merged')

    # Merged layer: Conv - Relu - Conv - BN - Relu.
    mid_merged_ = layer2_merged(sz_input - 6, sz_input2 - 6, int(4 * filt_num),
                                int(4 * filt_num), conv_depth)(mid_merged)

    # Last conv layer: Conv - Relu - Conv.
    output = layer3_last(sz_input - 18, sz_input2 - 18, int(4 * filt_num),
                         int(4 * filt_num))(mid_merged_)

    # Visualisation outputs: feats[k] pairs with inputs[k].  The slice
    # (rather than direct indexing) keeps the original behaviour of
    # silently yielding nothing when `feats` is shorter than four.
    outputs = []
    for k, inp in enumerate(inputs):
        outputs.extend(feat(inp) for feat in feats[k:k + 1])
    outputs.append(output)

    model_512 = Model(inputs=inputs, outputs=outputs)
    model_512.compile(optimizer=RMSprop(lr=learning_rate), loss='mae')
    model_512.summary()

    return model_512, feat_names
    def build(self):
        """Assemble the bidirectional residual-GRU encoder/decoder graph.

        Runs one shared GRU over the source (`self.input_s`) and target
        (`self.input_t`) step sequences in both temporal directions,
        with a residual Add between the previous hidden state and the
        next step input.  The per-step hidden states of each direction
        are concatenated along the time axis, flattened, passed through
        ReLU, and decoded by two shared Dense layers into
        `self.output_s` / `self.output_t`.  Finally wires up variables,
        loss, gradients, and the optimizer op.

        Bug fixes vs. the previous revision:
        - the forward target chain stored its state under 's-r-{i}-o'
          (clobbering the source chain and raising KeyError for
          historical_len >= 3);
        - the backward target chain read from the forward chain
          ('t-r-{i}-o') and stored under 's-l-{i}-o'.
        All four chains are now independent.  The unused
        BatchNormalization layer and dead commented-out code were
        removed.
        """
        gru = GRU(units=self.input_shape[-1],
                  return_sequences=False,
                  name='gru')
        decoder_1 = Dense(units=128, activation='sigmoid', name='decoder-1')
        decoder_2 = Dense(units=self.output_dim,
                          activation='sigmoid',
                          name='decoder-2')
        self.layers.append(gru)
        self.layers.append(decoder_1)
        self.layers.append(decoder_2)

        def _chain(step_inputs):
            # Run the shared GRU along `step_inputs`; after the first
            # step, the previous hidden state is added (residual) to the
            # current input before entering the GRU.  Returns the
            # per-step states reshaped to (1, -1) for time-axis concat.
            prev = None
            states = []
            for x in step_inputs:
                h = gru(x if prev is None else Add()([prev, x]))
                prev = h
                states.append(Reshape((1, -1))(h))
            return states

        n = self.historical_len
        forward_s = [self.input_s[i] for i in range(n)]
        backward_s = [self.input_s[n - 1 - i] for i in range(n)]
        forward_t = [self.input_t[i] for i in range(n)]
        backward_t = [self.input_t[n - 1 - i] for i in range(n)]

        # Forward/backward chains for each side, concatenated over time.
        s_h_r = concatenate(_chain(forward_s), axis=1)
        s_h_l = concatenate(_chain(backward_s), axis=1)
        t_h_r = concatenate(_chain(forward_t), axis=1)
        t_h_l = concatenate(_chain(backward_t), axis=1)

        # Join both directions, flatten, and apply ReLU before decoding.
        s_c = concatenate([s_h_r, s_h_l], axis=1)
        t_c = concatenate([t_h_r, t_h_l], axis=1)
        s_c = Activation('relu')(Reshape((-1, ))(s_c))
        t_c = Activation('relu')(Reshape((-1, ))(t_c))

        # Shared decoder stack for both sides.
        self.output_s = decoder_2(decoder_1(s_c))
        self.output_t = decoder_2(decoder_1(t_c))

        # TF1-style variable bookkeeping and training ops.
        self.variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        self.vars = {var.name: var for var in self.variables}

        self._loss()
        self._gradients()

        self.opt_op = self.optimizer.minimize(self.loss)