def create_two_branch_model_very_late_fusion(existing='', is_twohundred=False, is_halffeatures=True, channels=4):
    if len(existing) == 0:
        print('Loading base model (DenseNet)..')

        def crop(dimension, start, end):
            # Crops (or slices) a Tensor on a given dimension from start to end.
            # Example: to crop tensor x[:, :, 5:10], call crop(2, 5, 10),
            # since you want to crop on the second dimension.
            def func(x):
                if dimension == 0:
                    return x[start:end]
                if dimension == 1:
                    return x[:, start:end]
                if dimension == 2:
                    return x[:, :, start:end]
                if dimension == 3:
                    return x[:, :, :, start:end]
                if dimension == 4:
                    return x[:, :, :, :, start:end]
            return Lambda(func)

        input_rgbd = Input(shape=(None, None, channels), name='input_rgbd')
        input_rgb = crop(3, 0, 3)(input_rgbd)
        input_sparse = crop(3, 3, channels)(input_rgbd)
        #base_model_sz_input = Conv2D(3, (3,3), padding='same')(input_sparse)

        # Encoder Layers
        if is_twohundred:
            base_model = applications.DenseNet201(input_shape=(None, None, 3), include_top=False, weights='imagenet', input_tensor=input_rgb)
            #base_model_sz = applications.DenseNet201(input_shape=(None, None, 3), include_top=False, weights='imagenet', input_tensor=concatenate([input_sparse, input_sparse, input_sparse], axis=-1))
            base_model_sz = applications.DenseNet201(input_shape=(None, None, channels - 3), include_top=False, weights=None, input_tensor=input_sparse)
        else:
            base_model = applications.DenseNet169(input_shape=(None, None, 3), include_top=False, weights='imagenet', input_tensor=input_rgb)
            #base_model_sz = applications.DenseNet169(input_shape=(None, None, 3), include_top=False, weights='imagenet', input_tensor=concatenate([input_sparse, input_sparse, input_sparse], axis=-1))
            base_model_sz = applications.DenseNet169(input_shape=(None, None, channels - 3), include_top=False, weights=None, input_tensor=input_sparse)

        pretrained = applications.DenseNet169(input_shape=(None, None, 3), include_top=False, weights='imagenet')
        #base_model_sz = applications.DenseNet121(input_shape=(None, None, channels-3), include_top=False, weights=None, input_tensor=input_sparse)
        #pretrained = applications.DenseNet121(input_shape=(None, None, 3), include_top=False, weights='imagenet')
        for layer in pretrained.layers:
            print(layer.name)
            if layer.get_weights() != []:  # Skip input, pooling and other weightless layers
                target_layer = base_model_sz.get_layer(name=layer.name)
                if layer.name != 'conv1/conv':  # Copy ImageNet weights into every layer except the first conv, whose channel count does not match
                    target_layer.set_weights(layer.get_weights())

        print('Base model loaded.')

        # Layer freezing?
        for layer in base_model.layers:
            layer.trainable = True
        for layer in base_model_sz.layers:
            layer.trainable = True
            layer.name = layer.name + '_sz'

        # Starting point for decoder
        #encoder_output = concatenate([base_model.output, base_model_sz.output], axis=-1)
        #base_model_output_shape = encoder_output.shape
        base_model_output_shape = base_model.layers[-1].output.shape
        base_model_output_shape_sz = base_model_sz.layers[-1].output.shape

        # Starting number of decoder filters
        decode_filters = int(int(base_model_output_shape[-1]) / 2)
        decode_filters_sz = int(int(base_model_output_shape_sz[-1]) / 2)

        # Define upsampling layer
        def upproject(tensor, filters, name, concat_with):
            up_i = BilinearUpSampling2D((2, 2), name=name + '_upsampling2d')(tensor)
            up_i = Concatenate(name=name + '_concat')([up_i, base_model.get_layer(concat_with).output])  # Skip connection
            up_i = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same', name=name + '_convA')(up_i)
            up_i = LeakyReLU(alpha=0.2)(up_i)
            up_i = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same', name=name + '_convB')(up_i)
            up_i = LeakyReLU(alpha=0.2)(up_i)
            return up_i

        # Decoder Layers (RGB branch)
        decoder = Conv2D(filters=decode_filters, kernel_size=1, padding='same', input_shape=base_model_output_shape, name='conv2')(base_model.output)
        decoder = upproject(decoder, int(decode_filters / 2), 'up1', concat_with='pool3_pool')
        decoder = upproject(decoder, int(decode_filters / 4), 'up2', concat_with='pool2_pool')
        decoder = upproject(decoder, int(decode_filters / 8), 'up3', concat_with='pool1')
        decoder = upproject(decoder, int(decode_filters / 16), 'up4', concat_with='conv1/relu')
        if False:
            decoder = upproject(decoder, int(decode_filters / 32), 'up5', concat_with='input_1')

        def upproject_sz(tensor, filters, name, concat_with):
            up_i = BilinearUpSampling2D((2, 2), name=name + '_upsampling2d')(tensor)
            up_i = Concatenate(name=name + '_concat')([up_i, base_model_sz.get_layer(concat_with + '_sz').output])  # Skip connection
            up_i = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same', name=name + '_convA')(up_i)
            up_i = LeakyReLU(alpha=0.2)(up_i)
            up_i = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same', name=name + '_convB')(up_i)
            up_i = LeakyReLU(alpha=0.2)(up_i)
            return up_i

        # Decoder Layers (sparse branch), starting from the sparse encoder's own output
        decoder_sz = Conv2D(filters=decode_filters_sz, kernel_size=1, padding='same', input_shape=base_model_output_shape_sz, name='conv2_sz')(base_model_sz.output)
        decoder_sz = upproject_sz(decoder_sz, int(decode_filters_sz / 2), 'up1_sz', concat_with='pool3_pool')
        decoder_sz = upproject_sz(decoder_sz, int(decode_filters_sz / 4), 'up2_sz', concat_with='pool2_pool')
        decoder_sz = upproject_sz(decoder_sz, int(decode_filters_sz / 8), 'up3_sz', concat_with='pool1')
        decoder_sz = upproject_sz(decoder_sz, int(decode_filters_sz / 16), 'up4_sz', concat_with='conv1/relu')
        if False:
            decoder_sz = upproject_sz(decoder_sz, int(decode_filters_sz / 32), 'up5_sz', concat_with='input_1')

        # Extract depths (final layer) from the concatenated branch outputs
        conv3 = Conv2D(filters=1, kernel_size=3, strides=1, padding='same', name='conv3')(concatenate([decoder, decoder_sz], axis=-1))

        # Create the model
        model = Model(inputs=input_rgbd, outputs=conv3)
    else:
        # Load model from file
        if not existing.endswith('.h5'):
            sys.exit('Please provide a correct model file when using [existing] argument.')
        custom_objects = {'BilinearUpSampling2D': BilinearUpSampling2D, 'depth_loss_function': depth_loss_function}
        model = load_model(existing, custom_objects=custom_objects, compile=False)
        print('\nExisting model loaded.\n')

    print('Model created.')
    return model
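
# A minimal usage sketch for the two-branch model above, assuming the custom
# BilinearUpSampling2D layer and depth_loss_function referenced elsewhere in
# this file are importable; the RGBD batch is hypothetical.
import numpy as np

def demo_two_branch():
    model = create_two_branch_model_very_late_fusion(channels=4)
    model.compile(optimizer='adam', loss=depth_loss_function)
    # One 480x640 frame: RGB in channels 0-2, sparse depth in channel 3
    rgbd = np.zeros((1, 480, 640, 4), dtype=np.float32)
    # Encoder output is 15x20; four 2x upsamplings give half the input
    # resolution, so the prediction has shape (1, 240, 320, 1)
    depth_pred = model.predict(rgbd)
    print(depth_pred.shape)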
def mySpatialModel(model_name, spatial_size, nb_classes, channels, weights_path=None):
    input_tensor = Input(shape=(channels, spatial_size, spatial_size))
    input_shape = (channels, spatial_size, spatial_size)
    base_model = None
    predictions = None
    data_dim = 1024

    if model_name == 'ResNet50':
        input_tensor = Input(shape=(spatial_size, spatial_size, channels))
        input_shape = (spatial_size, spatial_size, channels)
        base_model = kerasApp.ResNet50(include_top=False, input_tensor=input_tensor, input_shape=input_shape,
                                       weights=weights_path, classes=nb_classes, pooling=None)
        x = base_model.output
        # Add our own fully-connected classification head, method 1
        #x = Flatten()(x)
        #predictions = Dense(nb_classes, activation='softmax')(x)
        # Method 2
        x = GlobalAveragePooling2D()(x)
        x = Dense(1024, activation='relu')(x)
        predictions = Dense(nb_classes, activation='softmax')(x)
        model = Model(inputs=base_model.input, outputs=predictions)
    elif model_name == 'VGG16':
        input_tensor = Input(shape=(spatial_size, spatial_size, channels))
        input_shape = (spatial_size, spatial_size, channels)
        base_model = kerasApp.VGG16(include_top=False, input_tensor=input_tensor, input_shape=input_shape,
                                    weights=weights_path, classes=nb_classes, pooling=None)
        x = base_model.output
        x = GlobalAveragePooling2D()(x)  # add a global spatial average pooling layer
        x = Dense(1024, activation='relu')(x)  # let's add a fully-connected layer
        predictions = Dense(nb_classes, activation='softmax')(x)  # and a logistic layer
        model = Model(inputs=base_model.input, outputs=predictions)
    elif model_name == 'VGG19':
        base_model = kerasApp.VGG19(include_top=False, input_tensor=input_tensor, input_shape=input_shape,
                                    weights=weights_path, classes=2, pooling=None)
        x = base_model.output
        # Add our own fully-connected classification head
        x = GlobalAveragePooling2D()(x)
        x = Dense(1024, activation='relu')(x)
        predictions = Dense(nb_classes, activation='softmax')(x)
        model = Model(inputs=base_model.input, outputs=predictions)
    elif model_name == 'InceptionV3':
        input_tensor = Input(shape=(spatial_size, spatial_size, channels))
        input_shape = (spatial_size, spatial_size, channels)
        base_model = kerasApp.InceptionV3(weights=weights_path, include_top=False, pooling=None,
                                          input_shape=input_shape, classes=nb_classes)
        x = base_model.output
        # Add our own fully-connected classification head
        x = GlobalAveragePooling2D()(x)
        x = Dense(1024, activation='relu')(x)
        predictions = Dense(nb_classes, activation='softmax')(x)
        model = Model(inputs=base_model.input, outputs=predictions)
    elif model_name == 'InceptionResNetV2':
        input_tensor = Input(shape=(spatial_size, spatial_size, channels))
        input_shape = (spatial_size, spatial_size, channels)
        base_model = kerasApp.InceptionResNetV2(weights=weights_path, include_top=False, pooling=None,
                                                input_shape=input_shape, classes=nb_classes)
        x = base_model.output
        # Add our own fully-connected classification head
        x = GlobalAveragePooling2D()(x)
        data_dim = 1536
        predictions = Dense(nb_classes, activation='softmax')(x)
        model = Model(inputs=base_model.input, outputs=predictions)
    elif model_name == 'Xception':
        input_shape_xception = (spatial_size, spatial_size, channels)
        base_model = kerasApp.Xception(weights=weights_path, include_top=False, pooling="avg",
                                       input_shape=input_shape_xception, classes=nb_classes)
        x = base_model.output
        predictions = Dense(nb_classes, activation='softmax')(x)
        model = Model(inputs=base_model.input, outputs=predictions)
    elif model_name == 'DenseNet121':
        base_model = kerasApp.DenseNet121(weights=weights_path, include_top=False, pooling=None,
                                          input_shape=input_shape, classes=nb_classes)
        x = base_model.output
        # Add our own fully-connected classification head
        x = GlobalAveragePooling2D()(x)
        predictions = Dense(nb_classes, activation='softmax')(x)
        model = Model(inputs=base_model.input, outputs=predictions)
    elif model_name == 'DenseNet169':
        base_model = kerasApp.DenseNet169(weights=weights_path, include_top=False, pooling=None,
                                          input_shape=input_shape, classes=nb_classes)
        x = base_model.output
        # Add our own fully-connected classification head
        x = GlobalAveragePooling2D()(x)
        predictions = Dense(nb_classes, activation='softmax')(x)
        model = Model(inputs=base_model.input, outputs=predictions)
    elif model_name == 'DenseNet201':
        base_model = kerasApp.DenseNet201(weights=weights_path, include_top=False, pooling=None,
                                          input_shape=input_shape, classes=nb_classes)
        x = base_model.output
        # Add our own fully-connected classification head
        x = GlobalAveragePooling2D()(x)
        predictions = Dense(nb_classes, activation='softmax')(x)
        model = Model(inputs=base_model.input, outputs=predictions)
    elif model_name == 'MobileNet':
        base_model = kerasApp.MobileNet(weights=weights_path, include_top=False, pooling=None,
                                        input_shape=input_shape, classes=nb_classes)
        x = base_model.output
        # Add our own fully-connected classification head
        x = GlobalAveragePooling2D()(x)
        x = Dense(1024, activation='relu')(x)
        x = Dense(1024, activation='relu')(x)
        x = Dense(512, activation='relu')(x)
        data_dim = 512
        predictions = Dense(nb_classes, activation='softmax')(x)
        model = Model(inputs=base_model.input, outputs=predictions)
    else:
        print("this model--[" + model_name + "]-- doesn't exist!")

    # Note: all base_model layers are left trainable here (not frozen), so the
    # backbone is fine-tuned rather than used as a fixed bottleneck-feature extractor.
    for layer in base_model.layers:
        layer.trainable = True

    # Build the model
    model = Model(inputs=base_model.input, outputs=predictions)
    print('-------------Current base_model [' + model_name + "]-------------------\n")
    print('Number of base_model layers: ' + str(len(base_model.layers)))
    print('Number of model layers: ' + str(len(model.layers)))
    featureLayer = model.layers[len(model.layers) - 2]
    print(featureLayer.output_shape)
    print("data_dim:" + str(featureLayer.output_shape[1]))
    print("---------------------------------------------\n")
    #sgd = SGD(lr=lr, decay=decay, momentum=momentum, nesterov=True)
    # Plot the model
    #if plot_model:
    #    plot_model(model, to_file=model_name+'.png', show_shapes=True)
    return model
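
# A short usage sketch for mySpatialModel, assuming `kerasApp` is the
# `keras.applications` module as used above. The spatial size and class
# count are illustrative.
def demo_spatial_model():
    # 224 is a size every listed backbone accepts; weights_path=None builds
    # the backbone from scratch without downloading pretrained weights.
    model = mySpatialModel('ResNet50', spatial_size=224, nb_classes=5, channels=3)
    model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])
    model.summary()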
def create_model(existing='', is_twohundred=False, is_halffeatures=True):
    if len(existing) == 0:
        print('Loading base model (DenseNet)..')

        # Encoder Layers
        if is_twohundred:
            base_model = applications.DenseNet201(input_shape=(None, None, 3), include_top=False)
        else:
            base_model = applications.DenseNet169(input_shape=(None, None, 3), include_top=False)

        print('Base model loaded.')

        img_shape = Input(shape=(2,), name='sh', dtype='int32')
        sh = K.mean(img_shape, axis=0)

        # Starting point for decoder
        base_model_output_shape = base_model.layers[-1].output.shape

        # Layer freezing?
        for layer in base_model.layers:
            layer.trainable = True

        # Starting number of decoder filters
        if is_halffeatures:
            decode_filters = int(int(base_model_output_shape[-1]) / 2)
        else:
            decode_filters = int(base_model_output_shape[-1])

        # Define upsampling layer
        def upproject(tensor, filters, name, concat_with, shape_tensor, size):
            up_i = BilinearUpSampling2D(input_sh=shape_tensor, size=size, name=name + '_upsampling2d')([shape_tensor, tensor])
            up_i = Concatenate(name=name + '_concat')([up_i, base_model.get_layer(concat_with).output])  # Skip connection
            up_i = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same', name=name + '_convA')(up_i)
            up_i = LeakyReLU(alpha=0.2)(up_i)
            up_i = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same', name=name + '_convB')(up_i)
            up_i = LeakyReLU(alpha=0.2)(up_i)
            return up_i

        # Decoder Layers
        decoder = Conv2D(filters=decode_filters, kernel_size=1, padding='same', input_shape=base_model_output_shape, name='conv2')(base_model.output)
        decoder = upproject(decoder, int(decode_filters / 2), 'up1', concat_with='pool3_pool', shape_tensor=sh, size=(2, 2))
        decoder = upproject(decoder, int(decode_filters / 4), 'up2', concat_with='pool2_pool', shape_tensor=sh, size=(4, 4))
        decoder = upproject(decoder, int(decode_filters / 8), 'up3', concat_with='pool1', shape_tensor=sh, size=(8, 8))
        decoder = upproject(decoder, int(decode_filters / 16), 'up4', concat_with='conv1/relu', shape_tensor=sh, size=(16, 16))
        if False:
            decoder = upproject(decoder, int(decode_filters / 32), 'up5', concat_with='input_1')

        # Extract depths (final layer)
        conv3 = Conv2D(filters=1, kernel_size=3, strides=1, padding='same', name='conv3')(decoder)

        # Create the model
        model = Model(inputs=[base_model.input, img_shape], outputs=conv3)
    else:
        # Load model from file
        if not existing.endswith('.h5'):
            sys.exit('Please provide a correct model file when using [existing] argument.')
        custom_objects = {'BilinearUpSampling2D': BilinearUpSampling2D, 'depth_loss_function': depth_loss_function}
        model = load_model(existing, custom_objects=custom_objects)
        print('\nExisting model loaded.\n')

    print('Model created.')
    return model
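
# Because the model above takes a second 'sh' input carrying the target output
# size, both tensors must be fed at call time. A minimal sketch, assuming the
# custom BilinearUpSampling2D accepts that shape tensor as implemented
# elsewhere in this project; the input array is hypothetical.
import numpy as np

def demo_dynamic_shape_model():
    model = create_model()
    rgb = np.zeros((1, 480, 640, 3), dtype=np.float32)
    # Each sample carries the same (height, width) pair; the model averages
    # them over the batch into a single shape tensor.
    sh = np.array([[480, 640]], dtype=np.int32)
    depth = model.predict([rgb, sh])
    print(depth.shape)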
def create_model(existing='', is_twohundred=False, is_halffeatures=True):
    if len(existing) == 0:
        print('Loading base model (DenseNet)..')

        # Apply pre-trained DenseNet-169 as the Encoder Layers
        base_model = applications.DenseNet169(input_shape=(None, None, 3), include_top=False)
        print('Base model loaded.')

        # Starting point for decoder
        base_model_output_shape = base_model.layers[-1].output.shape

        # Keep every encoder layer trainable
        for layer in base_model.layers:
            layer.trainable = True

        # Starting number of decoder filters
        if is_halffeatures:
            decode_filters = int(int(base_model_output_shape[-1]) / 2)
        else:
            decode_filters = int(base_model_output_shape[-1])

        # Define upsampling layer
        def upproject(tensor, filters, name, concat_with):
            up_i = BilinearUpSampling2D((2, 2), name=name + '_upsampling2d')(tensor)
            up_i = Concatenate(name=name + '_concat')([up_i, base_model.get_layer(concat_with).output])  # Skip connection
            up_i = SeparableConv2D(filters=filters, kernel_size=3, strides=1, padding='same', name=name + '_convA')(up_i)  # Separable convolution
            up_i = BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
                                      beta_initializer="zeros", gamma_initializer="ones")(up_i)  # Batch normalization to stabilize training
            up_i = LeakyReLU(alpha=0.2)(up_i)  # Leaky version of a Rectified Linear Unit
            up_i = SeparableConv2D(filters=filters, kernel_size=3, strides=1, padding='same', name=name + '_convB')(up_i)  # Separable convolution
            up_i = LeakyReLU(alpha=0.2)(up_i)
            return up_i

        # Decoder Layers
        decoder = Conv2D(filters=decode_filters, kernel_size=1, padding='same', input_shape=base_model_output_shape, name='conv2')(base_model.output)
        decoder = upproject(decoder, int(decode_filters / 2), 'up1', concat_with='pool3_pool')
        decoder = upproject(decoder, int(decode_filters / 4), 'up2', concat_with='pool2_pool')
        decoder = upproject(decoder, int(decode_filters / 8), 'up3', concat_with='pool1')
        decoder = upproject(decoder, int(decode_filters / 16), 'up4', concat_with='conv1/relu')
        if False:
            decoder = upproject(decoder, int(decode_filters / 32), 'up5', concat_with='input_1')

        # Extract depths in the final layer
        conv3 = Conv2D(filters=1, kernel_size=3, strides=1, padding='same', name='conv3')(decoder)

        # Create the model
        model = Model(inputs=base_model.input, outputs=conv3)
    else:
        # Load existing model from filesystem
        if not existing.endswith('.h5'):
            sys.exit('Please provide a correct model file when using [existing] argument.')
        custom_objects = {'BilinearUpSampling2D': BilinearUpSampling2D, 'depth_loss_function': depth_loss_function}
        model = load_model(existing, custom_objects=custom_objects)
        print('\nExisting model loaded.\n')

    print('Model created.')
    return model
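
# The decoder above swaps Conv2D for SeparableConv2D. A depthwise-separable
# 3x3 convolution factors the dense kernel into a per-channel spatial filter
# plus a 1x1 pointwise mix, cutting parameters roughly by a factor of
# kernel_size^2 for wide layers. A small standalone check (channel counts
# chosen to match a DenseNet-169 decoder stage):
def compare_separable_params(c_in=832, c_out=416):
    from tensorflow.keras import layers, Input, Model
    inp = Input(shape=(32, 32, c_in))
    dense = Model(inp, layers.Conv2D(c_out, 3, padding='same')(inp))
    sep = Model(inp, layers.SeparableConv2D(c_out, 3, padding='same')(inp))
    # Conv2D: 3*3*c_in*c_out + c_out; SeparableConv2D: 3*3*c_in + c_in*c_out + c_out
    print(dense.count_params(), sep.count_params())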
                                                    input_shape=(256, 256, 3))
    elif args.base_model == 'inception_resnetv2':
        base_model = applications.InceptionResNetV2(weights='imagenet', include_top=False, input_shape=(256, 256, 3))
    elif args.base_model == 'xception':
        base_model = applications.Xception(weights='imagenet', include_top=False, input_shape=(256, 256, 3))
    elif args.base_model == 'densenet121':
        base_model = applications.DenseNet121(weights='imagenet', include_top=False, input_shape=(256, 256, 3))
    elif args.base_model == 'densenet169':
        base_model = applications.DenseNet169(weights='imagenet', include_top=False, input_shape=(256, 256, 3))
    elif args.base_model == 'densenet201':
        base_model = applications.DenseNet201(weights='imagenet', include_top=False, input_shape=(256, 256, 3))
    elif args.base_model == 'nasnetmobile':
        base_model = applications.NASNetMobile(weights='imagenet', include_top=False, input_shape=(256, 256, 3))
    elif args.base_model == 'nasnetlarge':
        base_model = applications.NASNetLarge(weights='imagenet', include_top=False, input_shape=(256, 256, 3))
    else:
        raise ValueError(
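
# The elif chain above dispatches on an argparse flag. A hedged sketch of the
# kind of parser that could feed it (the --base_model flag name is taken from
# the code; the defaults and choices are illustrative):
import argparse

def build_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--base_model', default='densenet169',
                        choices=['inception_resnetv2', 'xception', 'densenet121',
                                 'densenet169', 'densenet201', 'nasnetmobile', 'nasnetlarge'])
    return parser.parse_args()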
def create_model(is_twohundred=False, is_halffeatures=True):
    print('Loading base model (DenseNet)..')

    # Encoder Layers
    if is_twohundred:
        base_model = applications.DenseNet201(input_shape=(480, 640, 3), include_top=False)
    else:
        base_model = applications.DenseNet169(input_shape=(480, 640, 3), include_top=False)

    print('Base model loaded.')

    # Starting point for decoder
    base_model_output_shape = base_model.layers[-1].output.shape

    # Layer freezing?
    for layer in base_model.layers:
        layer.trainable = True

    # Starting number of decoder filters
    if is_halffeatures:
        decode_filters = int(int(base_model_output_shape[-1]) / 2)
    else:
        decode_filters = int(base_model_output_shape[-1])

    def expend_as(tensor, rep):
        return layers.Lambda(lambda x, repnum: K.repeat_elements(x, repnum, axis=3), arguments={'repnum': rep})(tensor)

    # Define upsampling layer
    def upproject(tensor, filters, name, concat_with):
        up_i = BilinearUpSampling2D((2, 2), name=name + '_upsampling2d')(tensor)
        up_i = Concatenate(name=name + '_concat')([up_i, concat_with])  # Skip connection
        up_i = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same', name=name + '_convA')(up_i)
        up_i = LeakyReLU(alpha=0.2)(up_i)
        up_i = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same', name=name + '_convB')(up_i)
        up_i = LeakyReLU(alpha=0.2)(up_i)
        return up_i

    def global_non_local(X, cc):
        h, w, c = list(X.shape)[1], list(X.shape)[2], list(X.shape)[3]
        c = cc  # Project down to cc channels inside the block
        theta = Conv2D(c, kernel_size=(1, 1), padding='same')(X)
        theta_rsh = Reshape((h * w, c))(theta)
        phi = Conv2D(c, kernel_size=(1, 1), padding='same')(X)
        phi_rsh = Reshape((c, h * w))(phi)
        g = Conv2D(c, kernel_size=(1, 1), padding='same')(X)
        g_rsh = Reshape((h * w, c))(g)
        theta_phi = tf.matmul(theta_rsh, phi_rsh)
        theta_phi = tf.keras.layers.Softmax()(theta_phi)
        theta_phi_g = tf.matmul(theta_phi, g_rsh)
        theta_phi_g = Reshape((h, w, c))(theta_phi_g)
        theta_phi_g = Conv2D(c * 2, kernel_size=(1, 1), padding='same')(theta_phi_g)
        out = Add()([theta_phi_g, X])
        return out

    def gating_signal(input, out_size, batch_norm=False):
        """
        Resize the down-layer feature map to the same channel dimension as the
        up-layer feature map, using a 1x1 convolution.
        :param input: down-dimension feature map
        :param out_size: output channel number
        :return: the gating feature map, matching the up layer's dimension
        """
        x = keras.layers.Conv2D(out_size, (1, 1), padding='same')(input)
        if batch_norm:
            x = keras.layers.BatchNormalization()(x)
        x = keras.layers.Activation('relu')(x)
        return x

    def attention_block(x, gating, inter_shape):
        shape_x = K.int_shape(x)
        shape_g = K.int_shape(gating)
        theta_x = layers.Conv2D(inter_shape, (2, 2), strides=(2, 2), padding='same')(x)  # 16
        shape_theta_x = K.int_shape(theta_x)
        phi_g = layers.Conv2D(inter_shape, (1, 1), padding='same')(gating)
        upsample_g = layers.Conv2DTranspose(inter_shape, (3, 3),
                                            strides=(shape_theta_x[1] // shape_g[1], shape_theta_x[2] // shape_g[2]),
                                            padding='same')(phi_g)  # 16
        concat_xg = layers.add([upsample_g, theta_x])
        act_xg = layers.Activation('relu')(concat_xg)
        psi = layers.Conv2D(1, (1, 1), padding='same')(act_xg)
        sigmoid_xg = layers.Activation('sigmoid')(psi)
        shape_sigmoid = K.int_shape(sigmoid_xg)
        upsample_psi = layers.UpSampling2D(size=(shape_x[1] // shape_sigmoid[1], shape_x[2] // shape_sigmoid[2]))(sigmoid_xg)  # 32
        upsample_psi = expend_as(upsample_psi, shape_x[3])
        y = layers.multiply([upsample_psi, x])
        result = layers.Conv2D(shape_x[3], (1, 1), padding='same')(y)
        result_bn = layers.BatchNormalization()(result)
        return result_bn

    print('decode_filters=', decode_filters)
    non_local = global_non_local(base_model.output, decode_filters)

    # Decoder Layers
    decoder_1 = Conv2D(filters=decode_filters, kernel_size=1, padding='same', input_shape=base_model_output_shape, name='conv2')(non_local)
    gating_2 = gating_signal(decoder_1, int(decode_filters / 2), False)
    att2 = attention_block(base_model.get_layer('pool3_pool').output, gating_2, int(decode_filters / 2))
    decoder_2 = upproject(decoder_1, int(decode_filters / 2), 'up1', concat_with=att2)
    gating_3 = gating_signal(decoder_2, int(decode_filters / 4), False)
    att3 = attention_block(base_model.get_layer('pool2_pool').output, gating_3, int(decode_filters / 4))
    decoder_3 = upproject(decoder_2, int(decode_filters / 4), 'up2', concat_with=att3)
    # conv3 = Conv2D(filters=1, kernel_size=3, strides=1, padding='same', name='conv3_')(decoder)
    gating_4 = gating_signal(decoder_3, int(decode_filters / 8), False)
    att4 = attention_block(base_model.get_layer('pool1').output, gating_4, int(decode_filters / 8))
    decoder_4 = upproject(decoder_3, int(decode_filters / 8), 'up3', concat_with=att4)
    # conv2 = Conv2D(filters=1, kernel_size=3, strides=1, padding='same', name='conv2_')(decoder)
    gating_5 = gating_signal(decoder_4, int(decode_filters / 16), False)
    att5 = attention_block(base_model.get_layer('conv1/relu').output, gating_5, int(decode_filters / 16))
    decoder_5 = upproject(decoder_4, int(decode_filters / 16), 'up4', concat_with=att5)
    # conv1 = Conv2D(filters=1, kernel_size=3, strides=1, padding='same', name='conv1_')(decoder)
    gating_6 = gating_signal(decoder_5, int(decode_filters / 32), False)
    att6 = attention_block(base_model.get_layer('input_1').output, gating_6, int(decode_filters / 32))
    decoder_6 = upproject(decoder_5, int(decode_filters / 32), 'up5', concat_with=att6)

    conv0 = Conv2D(filters=1, kernel_size=3, strides=1, padding='same', name='conv0_')(decoder_6)
    predictions_raw_0 = conv0 * 1000
    # predictions_raw_1 = conv1 * 1000
    # predictions_raw_2 = conv2 * 1000
    # predictions_raw_3 = conv3 * 1000
    predictions_0 = tf.clip_by_value(predictions_raw_0, 1.0, 65535.0)
    # predictions_1 = tf.clip_by_value(predictions_raw_1, 1.0, 65535.0)
    # predictions_2 = tf.clip_by_value(predictions_raw_2, 1.0, 65535.0)
    # predictions_3 = tf.clip_by_value(predictions_raw_3, 1.0, 65535.0)
    final_outputs_0 = tf.cast(tf.clip_by_value(predictions_raw_0, 0, 65535.0), tf.int16)
    eval_outputs__0 = tf.clip_by_value(predictions_raw_0, 0.1, 65535.0)
    # final_outputs_1 = tf.cast(tf.clip_by_value(predictions_raw_1, 0, 65535.0), tf.int16)
    # final_outputs_2 = tf.cast(tf.clip_by_value(predictions_raw_2, 0, 65535.0), tf.int16)
    # final_outputs_3 = tf.cast(tf.clip_by_value(predictions_raw_3, 0, 65535.0), tf.int16)

    # Create the model
    model = Model(inputs=base_model.input, outputs=[predictions_0, final_outputs_0, eval_outputs__0])
    model.summary()
    print('Model created.')
    return model
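
# The three outputs above are the same depth map under different clippings: a
# float map clipped to [1, 65535] for the loss, an int16 map suitable for
# saving as a 16-bit image, and a float map clipped at 0.1 for evaluation.
# If the network predicts meters, the *1000 scaling stores millimeters; a
# hedged sketch of recovering meters from the saved output (argument name
# hypothetical):
import numpy as np

def depth_int16_to_meters(final_outputs_int16):
    # Reinterpret the int16 values as unsigned millimeters (values above
    # 32767 wrap in int16), then scale back to meters.
    mm = final_outputs_int16.astype(np.uint16).astype(np.float32)
    return mm / 1000.0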
def mySpatialModelChannelTest(model_name, spatial_size, nb_classes, channels, channel_first=True,
                              weights_path=None, lr=0.005, decay=1e-6, momentum=0.9, plot_model=True):
    input_tensor = Input(shape=(channels, spatial_size, spatial_size))
    input_shape = (channels, spatial_size, spatial_size)
    base_model = None
    predictions = None
    data_dim = 1024

    base_model = kerasApp.ResNet50(include_top=False, input_tensor=input_tensor, input_shape=input_shape,
                                   weights=None, classes=nb_classes, pooling=None)
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(1024, activation='relu')(x)
    predictions = Dense(nb_classes, activation='softmax')(x)
    # Build the model
    model = Model(inputs=base_model.input, outputs=predictions)
    print_shape(model, model_name)

    base_model = kerasApp.VGG16(include_top=False, input_tensor=input_tensor, input_shape=input_shape,
                                weights=None, classes=nb_classes, pooling=None)
    x = base_model.output
    # Add our own fully-connected classification head
    x = GlobalAveragePooling2D()(x)  # add a global spatial average pooling layer
    x = Dense(1024, activation='relu')(x)  # let's add a fully-connected layer
    predictions = Dense(nb_classes, activation='softmax')(x)
    # Build the model
    model = Model(inputs=base_model.input, outputs=predictions)
    print_shape(model, model_name)

    base_model = kerasApp.VGG19(include_top=False, input_tensor=input_tensor, input_shape=input_shape,
                                weights=None, classes=2, pooling='avg')
    print_shape(base_model, model_name)

    base_model = kerasApp.InceptionV3(weights=None, include_top=False, pooling=None, input_shape=input_shape, classes=nb_classes)
    print_shape(base_model, model_name)

    base_model = kerasApp.InceptionResNetV2(weights=None, include_top=False, pooling=None, input_shape=input_shape, classes=nb_classes)
    x = base_model.output
    # Add our own fully-connected classification head
    x = GlobalAveragePooling2D()(x)
    predictions = Dense(nb_classes, activation='softmax')(x)
    # Build the model
    model = Model(inputs=base_model.input, outputs=predictions)
    print_shape(model, model_name)

    # Xception requires channels-last input
    input_tensor_Xception = Input(shape=(spatial_size, spatial_size, channels))
    input_shape__Xception = (spatial_size, spatial_size, channels)
    base_model = kerasApp.Xception(weights=None, include_top=False, pooling=None, input_shape=input_shape__Xception, classes=nb_classes)
    print_shape(base_model, model_name)

    base_model = kerasApp.DenseNet121(weights=None, include_top=False, pooling=None, input_shape=input_shape, classes=nb_classes)
    print_shape(base_model, model_name)

    base_model = kerasApp.DenseNet169(weights=None, include_top=False, pooling=None, input_shape=input_shape, classes=nb_classes)
    print_shape(base_model, model_name)

    base_model = kerasApp.DenseNet201(weights=None, include_top=False, pooling=None, input_shape=input_shape, classes=nb_classes)
    print_shape(base_model, model_name)

    input_shape = (channels, spatial_size, spatial_size)
    base_model = kerasApp.MobileNet(weights=None, include_top=False, pooling=None, input_shape=input_shape, classes=nb_classes)
def create_model(args, is_twohundred=False, is_halffeatures=True):
    print('Loading base model (DenseNet)..')

    # Encoder Layers
    if is_twohundred:
        base_model = applications.DenseNet201(input_shape=(480, 640, 3), include_top=False)
    else:
        base_model = applications.DenseNet169(input_shape=(480, 640, 3), include_top=False)

    print('Base model loaded.')

    # Starting point for decoder
    base_model_output_shape = base_model.layers[-1].output.shape

    # Layer freezing?
    for layer in base_model.layers:
        layer.trainable = True

    # Starting number of decoder filters
    if is_halffeatures:
        decode_filters = int(int(base_model_output_shape[-1]) / 2)
    else:
        decode_filters = int(base_model_output_shape[-1])

    # Define upsampling layer
    def upproject(tensor, filters, name, concat_with):
        up_i = BilinearUpSampling2D((2, 2), name=name + '_upsampling2d')(tensor)
        up_i = Concatenate(name=name + '_concat')([up_i, base_model.get_layer(concat_with).output])  # Skip connection
        up_i = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same', name=name + '_convA')(up_i)
        up_i = LeakyReLU(alpha=0.2)(up_i)
        up_i = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same', name=name + '_convB')(up_i)
        up_i = LeakyReLU(alpha=0.2)(up_i)
        return up_i

    # Decoder Layers
    decoder = Conv2D(filters=decode_filters, kernel_size=1, padding='same', input_shape=base_model_output_shape, name='conv2')(base_model.output)
    decoder = upproject(decoder, int(decode_filters / 2), 'up1', concat_with='pool3_pool')
    decoder = upproject(decoder, int(decode_filters / 4), 'up2', concat_with='pool2_pool')
    # conv3 = Conv2D(filters=1, kernel_size=3, strides=1, padding='same', name='conv3_')(decoder)
    decoder = upproject(decoder, int(decode_filters / 8), 'up3', concat_with='pool1')
    # conv2 = Conv2D(filters=1, kernel_size=3, strides=1, padding='same', name='conv2_')(decoder)
    decoder = upproject(decoder, int(decode_filters / 16), 'up4', concat_with='conv1/relu')
    # conv1 = Conv2D(filters=1, kernel_size=3, strides=1, padding='same', name='conv1_')(decoder)
    decoder = upproject(decoder, int(decode_filters / 32), 'up5', concat_with='input_1')

    conv0 = Conv2D(filters=1, kernel_size=3, strides=1, padding='same', name='conv0_')(decoder)
    predictions_raw_0 = conv0 * 1000
    # predictions_raw_1 = conv1 * 1000
    # predictions_raw_2 = conv2 * 1000
    # predictions_raw_3 = conv3 * 1000
    predictions_0 = tf.clip_by_value(predictions_raw_0, 1.0, 65535.0)
    # predictions_1 = tf.clip_by_value(predictions_raw_1, 1.0, 65535.0)
    # predictions_2 = tf.clip_by_value(predictions_raw_2, 1.0, 65535.0)
    # predictions_3 = tf.clip_by_value(predictions_raw_3, 1.0, 65535.0)
    final_outputs_0 = tf.cast(tf.clip_by_value(predictions_raw_0, 0, 65535.0), tf.int16)
    eval_outputs__0 = tf.clip_by_value(predictions_raw_0, 0.1, 65535.0)
    # final_outputs_1 = tf.cast(tf.clip_by_value(predictions_raw_1, 0, 65535.0), tf.int16)
    # final_outputs_2 = tf.cast(tf.clip_by_value(predictions_raw_2, 0, 65535.0), tf.int16)
    # final_outputs_3 = tf.cast(tf.clip_by_value(predictions_raw_3, 0, 65535.0), tf.int16)

    # Create the model
    model = Model(inputs=base_model.input, outputs=[predictions_0, final_outputs_0, eval_outputs__0])
    model.summary()
    print('Model created.')
    return model
def create_model(existing=''):
    if len(existing) == 0:
        base_model = applications.DenseNet169(input_shape=(None, None, 3), include_top=False)
        print('Base Model Loaded')

        base_model_opt_shape = base_model.layers[-1].output.shape
        decode_filters = int(base_model_opt_shape[-1])

        # Make the base model trainable
        for layer in base_model.layers:
            layer.trainable = True

        def upproject(tensor, filters, name, concat_with):
            up_i = BilinearUpSampling2D((2, 2), name=name + '_upsampling2d')(tensor)
            # Skip connection
            up_i = Concatenate(name=name + '_concat')([up_i, base_model.get_layer(concat_with).output])
            up_i = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same', name=name + '_convA')(up_i)
            up_i = LeakyReLU(alpha=0.2)(up_i)
            up_i = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same', name=name + '_convB')(up_i)
            up_i = LeakyReLU(alpha=0.2)(up_i)
            return up_i

        decoder = Conv2D(filters=decode_filters, kernel_size=1, padding='same', input_shape=base_model_opt_shape, name='conv2')(base_model.output)
        decoder = upproject(decoder, int(decode_filters / 2), 'up1', concat_with='pool3_pool')
        decoder = upproject(decoder, int(decode_filters / 4), 'up2', concat_with='pool2_pool')
        decoder = upproject(decoder, int(decode_filters / 8), 'up3', concat_with='pool1')
        decoder = upproject(decoder, int(decode_filters / 16), 'up4', concat_with='conv1/relu')

        conv3 = Conv2D(filters=1, kernel_size=3, strides=1, padding='same', name='conv3')(decoder)
        model = Model(inputs=base_model.input, outputs=conv3)
    else:
        custom_objects = {'BilinearUpSampling2D': BilinearUpSampling2D, 'depth_loss_function': depth_loss_function}
        model = load_model(existing, custom_objects=custom_objects)

    return model
def create_model():
    print('\n\nCreating Model...')

    '''
    Load DenseNet169 as the encoder with ImageNet weights. The input shape is
    left as (None, None, 3) so the network accepts arbitrary sizes, e.g. 640x480x3.
    '''
    base_model = applications.DenseNet169(weights='imagenet', input_shape=(None, None, 3), include_top=False)
    #base_model.summary()

    '''
    model.layers[-1] returns the last layer of the model.
    This is the initial layer for the decoder part.
    '''
    base_model_output_shape = base_model.layers[-1].output.shape

    # Take the last integer. That's the number of filters.
    decode_filters = int(base_model_output_shape[-1])

    for layer in base_model.layers:
        layer.trainable = True

    '''
    BilinearUpSampling2D layers.
    TODO: Implement the layer class in another file.
    '''
    def upsample2d(tensor, filters, name, concat_with):
        upsampled_layer = BilinearUpSampling2D((2, 2), name=name + '_upsampling2d')(tensor)
        # Concatenated skip connection. There are two kinds of skip connection:
        # summation and concatenation.
        upsampled_layer = Concatenate(name=name + '_concat')([upsampled_layer, base_model.get_layer(concat_with).output])
        upsampled_layer = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same', name=name + '_conv2A')(upsampled_layer)
        upsampled_layer = LeakyReLU(alpha=0.2)(upsampled_layer)
        upsampled_layer = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same', name=name + '_conv2B')(upsampled_layer)
        upsampled_layer = LeakyReLU(alpha=0.2)(upsampled_layer)
        return upsampled_layer

    decoder = Conv2D(filters=decode_filters, kernel_size=1, padding='same', input_shape=base_model_output_shape, name='conv2')(base_model.output)
    decoder = upsample2d(decoder, int(decode_filters / 2), 'up1', concat_with='pool3_pool')
    decoder = upsample2d(decoder, int(decode_filters / 4), 'up2', concat_with='pool2_pool')
    decoder = upsample2d(decoder, int(decode_filters / 8), 'up3', concat_with='pool1')
    decoder = upsample2d(decoder, int(decode_filters / 16), 'up4', concat_with='conv1/relu')

    # Disabled: a fifth upsampling stage back to full input resolution
    if False:
        decoder = upsample2d(decoder, int(decode_filters / 32), 'up5', concat_with='input_1')

    # Grab depths from these concatenated layers
    conv3 = Conv2D(filters=1, kernel_size=3, strides=1, padding='same', name='conv3')(decoder)

    # Wire inputs and outputs
    model = Model(inputs=base_model.input, outputs=conv3)

    print('\n\nModel Created')
    return model
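
# The docstring above leaves BilinearUpSampling2D as a TODO. A minimal sketch
# of such a layer, assuming the simple fixed-factor form used by the (2, 2)
# calls in this file (not the shape-tensor variant used in the dynamic-shape
# create_model):
import tensorflow as tf
from tensorflow.keras.layers import Layer

class BilinearUpSampling2D(Layer):
    """Upsample feature maps by a fixed factor with bilinear interpolation."""

    def __init__(self, size=(2, 2), **kwargs):
        super(BilinearUpSampling2D, self).__init__(**kwargs)
        self.size = size

    def call(self, inputs):
        # Scale the dynamic spatial dimensions by the fixed factor
        shape = tf.shape(inputs)
        new_hw = tf.stack([shape[1] * self.size[0], shape[2] * self.size[1]])
        return tf.image.resize(inputs, new_hw, method='bilinear')

    def get_config(self):
        # Make the layer serializable so load_model(custom_objects=...) works
        config = super(BilinearUpSampling2D, self).get_config()
        config.update({'size': self.size})
        return config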