def TD_model_prior_masks(input_tensors=None, f1_train=True, stateful=False):
    f1 = Feature_dcross_res_matt_res_ds_masks()
    f1.trainable = f1_train

    if input_tensors is None:
        xgaus_shape = (shape_r_gaus, shape_c_gaus, nb_gaussian)
        ximgs_ops_shape = (None, shape_r, shape_c, 3 + 2 * opt_num)
        input_tensors = [Input(shape=xgaus_shape) for i in range(0, num_frames)]
        input_tensors.append(Input(shape=ximgs_ops_shape))
    Ximgs_ops = input_tensors[-1]
    Xgaus = input_tensors[:-1]

    # Per-frame feature extraction, then split into features, auxiliary outputs, and masks
    features_out = TimeDistributed(f1)(Ximgs_ops)
    frame_features, aux_out1, aux_out2, aux_out3, mask3, mask4, mask5 \
        = Lambda(Slice_outputs_mask, output_shape=Slice_outs_shape_mask)(features_out)

    # Temporal aggregation over frames
    outs = ConvGRU2D(filters=256, kernel_size=(3, 3),
                     padding='same', return_sequences=True, stateful=stateful,
                     name='ConvGRU2D')(frame_features)
    outs = TimeDistributed(BatchNormalization(name='ConvGRU2D_BN'))(outs)  # previously 256

    # Learned Gaussian center-bias priors, one set per frame
    prior_layer1 = LearningPrior(nb_gaussian=nb_gaussian, init=gaussian_priors_init)
    priors1 = [Lambda(Expand_gaus)(prior_layer1(x)) for x in Xgaus]
    priors1_merged = Concatenate(axis=-4)(priors1)

    # Fuse features with priors and predict a per-frame saliency map
    sal_concat1 = Concatenate(axis=-1)([outs, priors1_merged])
    outs = TimeDistributed(Conv2D(1, (1, 1), padding='same', activation='sigmoid'))(sal_concat1)

    # Upsample predictions and auxiliary outputs back to input resolution
    outs = TimeDistributed(BilinearUpSampling2D((8, 8)))(outs)
    aux_out1 = TimeDistributed(BilinearUpSampling2D((8, 8)))(aux_out1)
    aux_out2 = TimeDistributed(BilinearUpSampling2D((8, 8)))(aux_out2)
    aux_out3 = TimeDistributed(BilinearUpSampling2D((8, 8)))(aux_out3)  # for visualization

    model = Model(inputs=input_tensors,
                  outputs=[outs, aux_out1, aux_out2, aux_out3, mask3, mask4, mask5],
                  name='TD_model_prior')
    return model
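# A minimal usage sketch for TD_model_prior_masks. The optimizer, loss, and
# loss weights below are illustrative assumptions, not the original training
# configuration; the global shape/frame constants are expected to be provided
# by the surrounding module.
from keras.optimizers import Adam

td_model = TD_model_prior_masks(f1_train=True, stateful=False)
td_model.compile(optimizer=Adam(lr=1e-4),
                 loss='binary_crossentropy',                        # placeholder loss (assumption)
                 loss_weights=[1.0, 0.5, 0.5, 0.5, 0.0, 0.0, 0.0])  # down-weight aux/mask outputs
td_model.summary()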
def __call__(self, x):
    tensors = []
    for in_tensor in x:
        shape = K.int_shape(in_tensor)
        if shape[1] == self.output_dim:
            # Same spatial size: plain stride-1 convolution
            tensor = Conv2D(self.cur_channels, strides=1, **self.conv_param)(in_tensor)
            tensors.append(tensor)
        elif shape[1] > self.output_dim:
            # Larger input: downsample with a strided convolution
            tensor = Conv2D(self.cur_channels, strides=2, **self.conv_param)(in_tensor)
            tensors.append(tensor)
        else:
            # Smaller input: bilinearly upsample to the target size first
            in_tensor = BilinearUpSampling2D(
                target_size=(self.output_dim, self.output_dim))(in_tensor)
            tensor = Conv2D(self.cur_channels, strides=1, **self.conv_param)(in_tensor)
            tensors.append(tensor)

    # Fuse the resampled branches, then normalize and activate
    tensor = Add()(tensors) if len(tensors) > 1 else tensors[0]
    tensor = BatchNormalization()(tensor)
    tensor = Activation('relu')(tensor)
    return tensor
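# __call__ above reads self.output_dim, self.cur_channels, and self.conv_param,
# so it presumably belongs to a small multi-scale fusion block. The sketch below
# is an assumed shape for that class (name and defaults are hypothetical), shown
# only to make the expected attributes explicit.
class ResizeAndFuseBlock:  # hypothetical name
    def __init__(self, output_dim, cur_channels):
        self.output_dim = output_dim          # target spatial size (height == width)
        self.cur_channels = cur_channels      # number of output channels per branch
        self.conv_param = dict(kernel_size=(3, 3), padding='same',
                               kernel_initializer='he_normal')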
def f(x, y):
    # 1x1 convolution producing class scores for this block
    score = Conv2D(filters=classes, kernel_size=(1, 1), activation='linear',
                   padding='valid', kernel_initializer='he_normal',
                   kernel_regularizer=l2(weight_decay),
                   name='score_{}'.format(block_name))(x)
    if y is not None:
        def scaling(xx, ss=1):
            return xx * ss
        # Scale the new score map before adding it to the coarser prediction y
        scaled = Lambda(scaling, arguments={'ss': scale},
                        name='scale_{}'.format(block_name))(score)
        score = add([y, scaled])
    # Bilinearly upsample the (fused) scores to the target shape
    upscore = BilinearUpSampling2D(
        target_shape=target_shape, name='upscore_{}'.format(block_name))(score)
    return upscore
def FullyConvolutionalNetwork(input_shape=None, weight_decay=0.,
                              batch_momentum=0.9, batch_shape=None, classes=21):
    if batch_shape:
        img_input = Input(batch_shape=batch_shape)
        image_size = batch_shape[1:3]
    else:
        img_input = Input(shape=input_shape)
        image_size = input_shape[0:2]

    # Block 1
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1',
               kernel_regularizer=l2(weight_decay))(img_input)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2',
               kernel_regularizer=l2(weight_decay))(x)
    o1 = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

    # Block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1',
               kernel_regularizer=l2(weight_decay))(o1)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2',
               kernel_regularizer=l2(weight_decay))(x)
    o2 = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

    # Block 3
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1',
               kernel_regularizer=l2(weight_decay))(o2)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2',
               kernel_regularizer=l2(weight_decay))(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3',
               kernel_regularizer=l2(weight_decay))(x)
    o3 = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)

    # Block 4
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1',
               kernel_regularizer=l2(weight_decay))(o3)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2',
               kernel_regularizer=l2(weight_decay))(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3',
               kernel_regularizer=l2(weight_decay))(x)
    o4 = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)

    # Block 5
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1',
               kernel_regularizer=l2(weight_decay))(o4)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2',
               kernel_regularizer=l2(weight_decay))(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3',
               kernel_regularizer=l2(weight_decay))(x)

    # Convolutional layers transferred from fully-connected layers
    x = Conv2D(1024, (3, 3), activation='relu', padding='same', name='fc1',
               kernel_regularizer=l2(weight_decay))(x)
    x = Dropout(0.2)(x)
    o5 = Conv2D(1024, (1, 1), activation='relu', padding='same', name='fc2',
                kernel_regularizer=l2(weight_decay))(x)

    # Classifying layers: score each scale, upsample to the input size, then fuse
    out_1 = Conv2D(16, (1, 1), kernel_initializer='he_normal', activation='linear',
                   padding='valid', strides=(1, 1),
                   kernel_regularizer=l2(weight_decay))(o1)
    out_1 = BilinearUpSampling2D(target_size=tuple(image_size))(out_1)
    out_2 = Conv2D(32, (1, 1), kernel_initializer='he_normal', activation='linear',
                   padding='valid', strides=(1, 1),
                   kernel_regularizer=l2(weight_decay))(o2)
    out_2 = BilinearUpSampling2D(target_size=tuple(image_size))(out_2)
    out_3 = Conv2D(64, (1, 1), kernel_initializer='he_normal', activation='linear',
                   padding='valid', strides=(1, 1),
                   kernel_regularizer=l2(weight_decay))(o3)
    out_3 = BilinearUpSampling2D(target_size=tuple(image_size))(out_3)
    out_4 = Conv2D(128, (1, 1), kernel_initializer='he_normal', activation='linear',
                   padding='valid', strides=(1, 1),
                   kernel_regularizer=l2(weight_decay))(o4)
    out_4 = BilinearUpSampling2D(target_size=tuple(image_size))(out_4)
    out_5 = Conv2D(256, (1, 1), kernel_initializer='he_normal', activation='linear',
                   padding='valid', strides=(1, 1),
                   kernel_regularizer=l2(weight_decay))(o5)
    out_5 = BilinearUpSampling2D(target_size=tuple(image_size))(out_5)

    out = Concatenate()([out_1, out_2, out_3, out_4, out_5])
    out = Conv2D(classes, (1, 1), kernel_initializer='he_normal', activation='sigmoid',
                 padding='valid', strides=(1, 1),
                 kernel_regularizer=l2(weight_decay))(out)

    model = Model(img_input, out)
    weights_path = os.path.expanduser(os.path.join('./model/model_sigm.hdf5'))
    model.load_weights(weights_path, by_name=True)
    return model
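# A hedged inference sketch for FullyConvolutionalNetwork. The input resolution
# and dummy batch are assumptions; note that the constructor unconditionally
# loads './model/model_sigm.hdf5', so that checkpoint must exist on disk.
import numpy as np

fcn = FullyConvolutionalNetwork(input_shape=(224, 224, 3), classes=1)
dummy_batch = np.random.rand(2, 224, 224, 3).astype('float32')  # stand-in for preprocessed images
saliency = fcn.predict(dummy_batch)  # shape (2, 224, 224, 1), sigmoid scores in [0, 1]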
def get_model(self):
    inputs = Input(self.input_shape)
    weight_decay = 1e-4
    batch_momentum = 0.9
    bn_axis = 3

    # Stem: 7x7 convolution, batch norm, ReLU, and max pooling
    x = Convolution2D(64, 7, 7, subsample=(2, 2), border_mode='same',
                      name='conv1', W_regularizer=l2(weight_decay))(inputs)
    x = BatchNormalization(axis=bn_axis, name='bn_conv1', momentum=batch_momentum)(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    # Stage 2
    x = conv_block(3, [64, 64, 256], stage=2, block='a')(x)
    x = identity_block(3, [64, 64, 256], stage=2, block='b')(x)
    x = identity_block(3, [64, 64, 256], stage=2, block='c')(x)

    # Stage 3
    x = conv_block(3, [128, 128, 512], stage=3, block='a', strides=(2, 2))(x)
    x = identity_block(3, [128, 128, 512], stage=3, block='b')(x)
    x = identity_block(3, [128, 128, 512], stage=3, block='c')(x)
    x = identity_block(3, [128, 128, 512], stage=3, block='d')(x)

    # Stage 4
    x = conv_block(3, [256, 256, 1024], stage=4, block='a', strides=(2, 2))(x)
    x = identity_block(3, [256, 256, 1024], stage=4, block='b')(x)
    x = identity_block(3, [256, 256, 1024], stage=4, block='c')(x)
    x = identity_block(3, [256, 256, 1024], stage=4, block='d')(x)
    x = identity_block(3, [256, 256, 1024], stage=4, block='e')(x)
    x = identity_block(3, [256, 256, 1024], stage=4, block='f')(x)

    # Stage 5: atrous (dilated) blocks keep the spatial resolution fixed
    x = atrous_conv_block(3, [512, 512, 2048], stage=5, block='a', atrous_rate=(2, 2))(x)
    x = atrous_identity_block(3, [512, 512, 2048], stage=5, block='b', atrous_rate=(2, 2))(x)
    x = atrous_identity_block(3, [512, 512, 2048], stage=5, block='c', atrous_rate=(2, 2))(x)

    # 1x1 classifier followed by bilinear upsampling to the input resolution
    x = Convolution2D(1, 1, 1, activation='sigmoid', border_mode='same',
                      subsample=(1, 1), W_regularizer=l2(weight_decay))(x)
    x = BilinearUpSampling2D(target_size=tuple(self.input_shape[0:2]))(x)

    model = Model(inputs, x)
    model.summary()
    return model
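# Sanity check of the output stride implied by the code above: the stem
# convolution and max pooling each halve the resolution, as do stages 3 and 4,
# while the atrous stage 5 keeps it fixed, so the classifier runs at 1/16 of the
# input before BilinearUpSampling2D restores the original size. The input shape
# here is an assumption.
input_hw = (512, 512)                           # assumed input resolution
output_stride = 2 * 2 * 2 * 2                   # conv1, max-pool, stage 3, stage 4
feature_hw = tuple(s // output_stride for s in input_hw)
print(feature_hw)                               # (32, 32) -> upsampled back to (512, 512)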
def get_model(self):
    inputs = Input(self.input_shape)
    weight_decay = 1e-4

    # Block 1
    x = Convolution2D(64, 3, 3, activation='relu', border_mode='same',
                      name='block1_conv1', W_regularizer=l2(weight_decay))(inputs)
    x = Convolution2D(64, 3, 3, activation='relu', border_mode='same',
                      name='block1_conv2', W_regularizer=l2(weight_decay))(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

    # Block 2
    x = Convolution2D(128, 3, 3, activation='relu', border_mode='same',
                      name='block2_conv1', W_regularizer=l2(weight_decay))(x)
    x = Convolution2D(128, 3, 3, activation='relu', border_mode='same',
                      name='block2_conv2', W_regularizer=l2(weight_decay))(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

    # Block 3
    x = Convolution2D(256, 3, 3, activation='relu', border_mode='same',
                      name='block3_conv1', W_regularizer=l2(weight_decay))(x)
    x = Convolution2D(256, 3, 3, activation='relu', border_mode='same',
                      name='block3_conv2', W_regularizer=l2(weight_decay))(x)
    x = Convolution2D(256, 3, 3, activation='relu', border_mode='same',
                      name='block3_conv3', W_regularizer=l2(weight_decay))(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)

    # Block 4
    x = Convolution2D(512, 3, 3, activation='relu', border_mode='same',
                      name='block4_conv1', W_regularizer=l2(weight_decay))(x)
    x = Convolution2D(512, 3, 3, activation='relu', border_mode='same',
                      name='block4_conv2', W_regularizer=l2(weight_decay))(x)
    x = Convolution2D(512, 3, 3, activation='relu', border_mode='same',
                      name='block4_conv3', W_regularizer=l2(weight_decay))(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)

    # Block 5
    x = Convolution2D(512, 3, 3, activation='relu', border_mode='same',
                      name='block5_conv1', W_regularizer=l2(weight_decay))(x)
    x = Convolution2D(512, 3, 3, activation='relu', border_mode='same',
                      name='block5_conv2', W_regularizer=l2(weight_decay))(x)
    x = Convolution2D(512, 3, 3, activation='relu', border_mode='same',
                      name='block5_conv3', W_regularizer=l2(weight_decay))(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)

    # Convolutional layers transferred from fully-connected layers
    x = Convolution2D(4096, 7, 7, activation='relu', border_mode='same',
                      name='fc1', W_regularizer=l2(weight_decay))(x)
    x = Dropout(0.5)(x)
    x = Convolution2D(4096, 1, 1, activation='relu', border_mode='same',
                      name='fc2', W_regularizer=l2(weight_decay))(x)
    x = Dropout(0.5)(x)

    # Classifying layer followed by 32x bilinear upsampling to the input resolution
    x = Convolution2D(1, 1, 1, init='he_normal', activation='sigmoid',
                      border_mode='valid', subsample=(1, 1),
                      W_regularizer=l2(weight_decay))(x)
    x = BilinearUpSampling2D(size=(32, 32))(x)

    model = Model(inputs, x)
    # weights_path = os.path.expanduser(
    #     os.path.join('~', '.keras/models/skin_fcn_vgg16_weights_tf_dim_ordering_tf_kernels.h5'))
    # model.load_weights(weights_path, by_name=True)
    model.summary()
    return model
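# get_model above only reads self.input_shape, so a minimal hypothetical wrapper
# is enough to exercise it; the class name and the input shape are assumptions,
# not part of the original code.
class VGG16FCN:  # hypothetical container class
    def __init__(self, input_shape):
        self.input_shape = input_shape

    get_model = get_model  # bind the function defined above as a method

vgg_fcn = VGG16FCN((224, 224, 3)).get_model()  # 224/32 = 7x7 feature map, upsampled 32x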