def resblock(x_in, num_filters, test_initializer, reg_scale, res_scale=0.2):
    """Residual block: two weight-normalized 3x3 convs with BN and LeakyReLU.

    The skip path is projected with a 1x1 conv so channel counts match the
    residual branch, and the residual branch is damped by ``res_scale``
    before the add (EDSR-style residual scaling).

    Args:
        x_in: input tensor.
        num_filters: channel count for all convs in the block.
        test_initializer: kernel initializer passed to every conv.
        reg_scale: l2 regularization factor for conv kernels.
        res_scale: residual damping factor; default 0.2 matches the value
            previously hard-coded inside the block.

    Returns:
        Output tensor of the block.
    """
    x = Conv2DWeightNorm(num_filters,
                         kernel_size=3,
                         strides=1,
                         padding='same',
                         kernel_initializer=test_initializer,
                         kernel_regularizer=l2(reg_scale))(x_in)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.01)(x)
    x = Conv2DWeightNorm(num_filters,
                         kernel_size=3,
                         strides=1,
                         padding='same',
                         kernel_initializer=test_initializer,
                         kernel_regularizer=l2(reg_scale))(x)
    x = BatchNormalization()(x)
    # Damp the residual branch.  The original guard left `res` undefined
    # (NameError) for a negative res_scale; fall back to the unscaled branch
    # instead.
    if res_scale >= 0:
        res = Lambda(lambda t: t * res_scale)(x)
    else:
        res = x
    # 1x1 projection on the skip path so shapes match for the add.
    x_in = Conv2DWeightNorm(num_filters,
                            kernel_size=1,
                            strides=1,
                            padding='same',
                            kernel_initializer=test_initializer,
                            kernel_regularizer=l2(reg_scale))(x_in)
    return add([res, x_in])
def sr_prosr_rcan_upsample(input_shape, scale_ratio):
    """Same-resolution residual network (despite the name, no upsampling).

    NOTE(review): ``scale_ratio`` is accepted but unused — the original body
    overwrote it with 2 and never referenced it again; the dead assignment
    (and the unused ``num_filters_out``) have been removed.  The parameter
    stays in the signature for caller compatibility.  A second function with
    this same name appears later in the file and shadows this definition.

    Args:
        input_shape: shape tuple for the Keras Input layer.
        scale_ratio: unused (see note above).

    Returns:
        A compiled-ready keras Model mapping input to a residual-refined
        3-channel output of the same spatial size.
    """
    num_filters = 64
    reg_scale = 0

    inputs = Input(shape=input_shape)
    x = Conv2DWeightNorm(num_filters,
                         kernel_size=3,
                         strides=1,
                         padding='same',
                         kernel_initializer='he_normal',
                         kernel_regularizer=l2(reg_scale))(inputs)

    num_res_layer = 8

    def res_blocks(res_in, num_chans):
        # Plain residual block (no attention).
        return resnet_layer(inputs=res_in, num_filters=num_chans)

    def res_chan_attention_blocks(res_in, num_chans, reduction_ratio):
        # Residual block followed by channel attention (kept for
        # experimentation; currently unused — see commented call below).
        x = resnet_layer(inputs=res_in, num_filters=num_chans)
        x = attention_layer(x, 4)
        return x

    for _ in range(num_res_layer):
        x = res_blocks(x, num_filters)
        # x = res_chan_attention_blocks(x, num_filters, 4)

    x_out = Conv2DWeightNorm(3,
                             kernel_size=3,
                             strides=1,
                             padding='same',
                             kernel_initializer='he_normal',
                             kernel_regularizer=l2(reg_scale))(x)
    # Global residual connection straight from the RGB input.
    outputs = add([x_out, inputs])

    # Instantiate model.
    model = Model(inputs=inputs, outputs=outputs)
    return model
def sr_resnet_simp(input_shape, scale_ratio):
    """Encoder/decoder SR network: two stride-2 convs down, three x2 subpixel
    upsamples back with skip concatenations, ending in a 3-channel conv.

    NOTE(review): the architecture is hard-wired to a per-stage factor of 2
    (two stride-2 downsampling convs, three x2 subpixel stages), so the
    original code forced ``scale_ratio = 2`` regardless of the argument;
    that behavior is preserved.  Unused locals (``num_filters_out``, an
    immediately-clobbered ``num_filters = 64``) and debug ``print`` calls
    were removed.

    Args:
        input_shape: (H, W, C) shape tuple; H and W must be divisible by 4.
        scale_ratio: ignored — forced to 2 (see note above).

    Returns:
        keras Model producing a 2x-upscaled 3-channel image.
    """
    reg_scale = 0
    scale_ratio = 2  # architecture is fixed to x2 stages; argument ignored
    inputs = Input(shape=input_shape)
    # test_initializer = RandomUniform(minval=-0.005, maxval=0.005, seed=None)
    test_initializer = 'he_normal'
    num_filters = [256, 128, 128, 80]

    x1 = Conv2DWeightNorm(num_filters[0],
                          kernel_size=3,
                          strides=1,
                          padding='same',
                          kernel_initializer=test_initializer,
                          kernel_regularizer=l2(reg_scale))(inputs)
    x1 = PReLU(alpha_initializer='zero', shared_axes=[1, 2])(x1)

    x2 = Conv2DWeightNorm(num_filters[1],
                          kernel_size=3,
                          strides=2,
                          padding='same',
                          kernel_initializer=test_initializer,
                          kernel_regularizer=l2(reg_scale))(x1)
    x2 = PReLU(alpha_initializer='zero', shared_axes=[1, 2])(x2)

    x3 = Conv2DWeightNorm(num_filters[2],
                          kernel_size=3,
                          strides=2,
                          padding='same',
                          kernel_initializer=test_initializer,
                          kernel_regularizer=l2(reg_scale))(x2)
    x3 = PReLU(alpha_initializer='zero', shared_axes=[1, 2])(x3)

    # Upsample the deepest (H/4) features to H/2 and fuse with the x2 stage.
    x3_x2 = SubpixelConv2D([None, input_shape[0] // 4, input_shape[1] // 4],
                           scale=scale_ratio,
                           name='sub_1')(x3)
    x2_concat = concatenate([x2, x3_x2])

    # H/2 -> H, fuse with the full-resolution features.
    x2_x2 = SubpixelConv2D([None, input_shape[0] // 2, input_shape[1] // 2],
                           scale=scale_ratio,
                           name='sub_2')(x2_concat)
    x1_concat = concatenate([x1, x2_x2])

    # H -> 2H final upsample.
    x1_2x = SubpixelConv2D([None, input_shape[0], input_shape[1]],
                           scale=scale_ratio,
                           name='sub_3')(x1_concat)

    outputs = Conv2DWeightNorm(3,
                               kernel_size=3,
                               strides=1,
                               padding='same',
                               kernel_initializer=test_initializer,
                               kernel_regularizer=l2(reg_scale))(x1_2x)

    # Instantiate model.
    model = Model(inputs=inputs, outputs=outputs)
    return model
def sr_resnet84(input_shape, scale_ratio):
    """RCAN-style SR network: channel-attention residual blocks followed by a
    subpixel upsample, plus a subpixel-upsampled skip path from the input.

    The dead ``if 1: ... else: BicubicUpscale(8)`` construct around the skip
    path was removed — only the subpixel branch was ever reachable.

    Args:
        input_shape: (H, W, C) shape tuple for the Input layer.
        scale_ratio: subpixel upscaling factor for both output branches.

    Returns:
        keras Model producing an upscaled image.
    """
    num_filters = 64
    reg_scale = 0
    num_filters_out = max(64, 3 * scale_ratio ** 2)
    inputs = Input(shape=input_shape)
    # test_initializer = RandomUniform(minval=-0.005, maxval=0.005, seed=None)
    test_initializer = 'he_normal'

    # Wide feature extraction then 1x1 bottleneck down to the trunk width.
    x = Conv2DWeightNorm(256,
                         kernel_size=3,
                         strides=1,
                         padding='same',
                         kernel_initializer=test_initializer,
                         kernel_regularizer=l2(reg_scale))(inputs)
    x = Conv2DWeightNorm(num_filters,
                         kernel_size=1,
                         strides=1,
                         padding='same',
                         kernel_initializer=test_initializer,
                         kernel_regularizer=l2(reg_scale))(x)

    num_res_layer = 8

    def res_blocks(res_in, num_chans):
        # Plain residual block (kept for experimentation; currently unused).
        return resnet_layer(inputs=res_in,
                            num_filters=num_chans,
                            kernel_initializer=test_initializer)

    def res_chan_attention_blocks(res_in, num_chans, reduction_ratio):
        # Residual block + channel attention.
        # NOTE(review): `reduction_ratio` is ignored and 4 is always used
        # (the second trunk passes 16 but still gets 4).  Preserved so any
        # saved weights remain compatible — confirm whether this was intended.
        x = resnet_layer(inputs=res_in,
                         num_filters=num_chans,
                         kernel_initializer=test_initializer)
        x = attention_layer(x, 4)
        return x

    for _ in range(num_res_layer):
        x = res_chan_attention_blocks(x, num_filters, 4)

    # Widen back to 256 channels for a second, shorter attention trunk.
    num_filters2 = 256
    x = Conv2DWeightNorm(num_filters2,
                         kernel_size=1,
                         strides=1,
                         padding='same',
                         kernel_initializer=test_initializer,
                         kernel_regularizer=l2(reg_scale))(x)
    for _ in range(4):
        x = res_chan_attention_blocks(x, num_filters2, 16)

    # Main branch: conv to the subpixel channel count, then pixel shuffle.
    pixelshuf_in = Conv2DWeightNorm(num_filters_out,
                                    kernel_size=3,
                                    strides=1,
                                    padding='same',
                                    kernel_initializer=test_initializer,
                                    kernel_regularizer=l2(reg_scale))(x)
    up_samp = SubpixelConv2D([None, input_shape[0], input_shape[1],
                              num_filters_out],
                             scale=scale_ratio,
                             name='sub_1')(pixelshuf_in)

    # Skip branch: subpixel-upsample features taken directly from the input.
    pixelshuf_skip_in = Conv2DWeightNorm(num_filters_out,
                                         kernel_size=3,
                                         strides=1,
                                         padding='same',
                                         kernel_initializer=test_initializer,
                                         kernel_regularizer=l2(reg_scale))(inputs)
    up_samp_skip = SubpixelConv2D([None, input_shape[0], input_shape[1],
                                   num_filters_out],
                                  scale=scale_ratio,
                                  name='sub_2')(pixelshuf_skip_in)

    # Damp the main branch before combining with the skip branch.
    res_scale = 0.2
    if res_scale >= 0:
        up_samp = Lambda(lambda t: t * res_scale)(up_samp)
    outputs = add([up_samp, up_samp_skip])

    # Instantiate model.
    model = Model(inputs=inputs, outputs=outputs)
    return model
def denoise_resnet(input_shape):
    """Denoising network: four residual blocks of varying width, a 3-channel
    conv with BN, and a scaled global residual back to the input.

    Reuses the module-level ``resblock`` helper instead of redefining an
    identical nested copy (the original duplicated it verbatim).

    Args:
        input_shape: (H, W, C) shape tuple for the Input layer.

    Returns:
        keras Model mapping a noisy image to a denoised 3-channel image.
    """
    inputs = Input(shape=input_shape)
    reg_scale = 0.001
    test_initializer = 'he_normal'
    x = inputs

    # Widths of the four stacked residual blocks.
    num_filters = [32, 64, 128, 64]
    for width in num_filters:
        x = resblock(x, width, test_initializer, reg_scale)

    x = Conv2DWeightNorm(3,
                         kernel_size=3,
                         strides=1,
                         padding='same',
                         kernel_initializer=test_initializer,
                         kernel_regularizer=l2(reg_scale))(x)
    x = BatchNormalization()(x)

    # Damp the learned correction before adding the global input skip.
    res_scale = 0.2
    if res_scale >= 0:
        res = Lambda(lambda t: t * res_scale)(x)
    outputs = add([res, inputs])

    # Instantiate model.
    model = Model(inputs=inputs, outputs=outputs)
    return model
def sr_prosr_rcan_upsample(input_shape, scale_ratio):
    """Progressive x8 SR: three cascaded x2 stages, each combined with a
    bicubic skip of the raw input at the cumulative scale.

    Bug fixed: the original ``up_samp = UpSampling2D(pixelshuf_in)``
    constructed the layer with the tensor as its ``size`` argument and never
    applied it, leaving ``up_samp`` a Layer object so the following ``add``
    would fail.  The layer is now instantiated with its default
    ``size=(2, 2)`` and applied to the tensor, matching the x2-per-stage
    design.  The dead ``scale_ratio = 2`` shadow assignment was removed.

    NOTE(review): this redefines (and shadows) the earlier function of the
    same name in this file.

    Args:
        input_shape: (H, W, C) shape tuple for the Input layer.
        scale_ratio: only consulted by the ICNR re-initialization loop for
            Subpixel layers (none are present with UpSampling2D).

    Returns:
        keras Model producing an 8x-upscaled 3-channel image.
    """
    num_filters = 64
    reg_scale = 0
    num_filters_out = 3  # each stage emits an RGB image

    inputs = Input(shape=input_shape)
    x_in = inputs
    for i in range(3):  # three x2 stages -> x8 overall
        x = Conv2DWeightNorm(num_filters,
                             kernel_size=3,
                             strides=1,
                             padding='same',
                             kernel_initializer='he_normal',
                             kernel_regularizer=l2(reg_scale))(x_in)

        num_res_layer = 8

        def res_blocks(res_in, num_chans):
            # Plain residual block (currently unused).
            return resnet_layer(inputs=res_in, num_filters=num_chans)

        def res_chan_attention_blocks(res_in, num_chans, reduction_ratio):
            # Residual block + channel attention.
            x = resnet_layer(inputs=res_in, num_filters=num_chans)
            x = attention_layer(x, 4)
            return x

        for _ in range(num_res_layer):
            x = res_chan_attention_blocks(x, num_filters, 4)

        pixelshuf_in = Conv2DWeightNorm(num_filters_out,
                                        kernel_size=3,
                                        strides=1,
                                        padding='same',
                                        kernel_initializer='he_normal',
                                        kernel_regularizer=l2(reg_scale))(x)
        # Default size=(2, 2) nearest-neighbor upsample of this stage.
        up_samp = UpSampling2D()(pixelshuf_in)
        # up_samp = Subpixel(3, 3, 2, padding='same')(pixelshuf_in)
        # Bicubic skip from the raw input at the cumulative scale (2, 4, 8).
        up_samp_skip = BicubicUpscale(2 ** (i + 1))(inputs)
        x_in = add([up_samp, up_samp_skip])

    outputs = x_in

    # Instantiate model.
    model = Model(inputs=inputs, outputs=outputs)

    # ICNR initialization for any Subpixel layers.  With UpSampling2D none
    # exist, but this is kept for the commented Subpixel alternative above.
    for layer in model.layers:
        if type(layer) == Subpixel:
            c, b = layer.get_weights()
            scale = 3 if scale_ratio == 3 else 2
            w = icnr_weights(scale=scale, shape=c.shape)
            layer.set_weights([w, b])
    return model