def build_embedding_graph(self):

    self.x = tf.placeholder(tf.float32, shape=[None, None, None, self.channels], name="X")
    self.y = tf.placeholder(tf.float32, shape=[None, None, None, self.channels], name="Y")

    # H-1 conv
    self.Wm1_conv = util.weight([self.cnn_size, self.cnn_size, self.channels, self.feature_num],
                                stddev=self.weight_dev, name="W-1_conv", initializer=self.initializer)
    self.Bm1_conv = util.bias([self.feature_num], name="B-1")
    Hm1_conv = util.conv2d_with_bias(self.x, self.Wm1_conv, self.cnn_stride, self.Bm1_conv,
                                     add_relu=True, name="H-1")

    # H0 conv
    self.W0_conv = util.weight([self.cnn_size, self.cnn_size, self.feature_num, self.feature_num],
                               stddev=self.weight_dev, name="W0_conv", initializer=self.initializer)
    self.B0_conv = util.bias([self.feature_num], name="B0")
    self.H_conv[0] = util.conv2d_with_bias(Hm1_conv, self.W0_conv, self.cnn_stride, self.B0_conv,
                                           add_relu=True, name="H0")

    if self.summary:
        # convert to tf.summary.image format [batch_num, height, width, channels]
        Wm1_transposed = tf.transpose(self.Wm1_conv, [3, 0, 1, 2])
        tf.summary.image("W-1/" + self.model_name, Wm1_transposed, max_outputs=self.log_weight_image_num)

        util.add_summaries("B-1", self.model_name, self.Bm1_conv, mean=True, max=True, min=True)
        util.add_summaries("W-1", self.model_name, self.Wm1_conv, mean=True, max=True, min=True)
        util.add_summaries("B0", self.model_name, self.B0_conv, mean=True, max=True, min=True)
        util.add_summaries("W0", self.model_name, self.W0_conv, mean=True, max=True, min=True)
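# A minimal sketch of the util helpers referenced above (an assumption, not the
# project's actual implementation): weight() is assumed to default to a
# truncated-normal initializer, and conv2d_with_bias() to apply a same-padded
# convolution, add a bias, and optionally a ReLU. TF 1.x API is assumed throughout.
import numpy as np
import tensorflow as tf


def weight(shape, stddev=0.01, name="weight", initializer="normal"):
    # truncated-normal initialization; other initializer strings would be handled elsewhere (assumption)
    return tf.Variable(tf.truncated_normal(shape, stddev=stddev), name=name)


def bias(shape, name="bias"):
    return tf.Variable(tf.zeros(shape), name=name)


def conv2d_with_bias(x, w, stride, b, add_relu=False, name="conv"):
    conv = tf.nn.conv2d(x, w, strides=[1, stride, stride, 1], padding="SAME", name=name)
    conv = tf.nn.bias_add(conv, b)
    return tf.nn.relu(conv) if add_relu else conv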
def build_reconstruction_graph(self):

    # HD+1 conv
    self.WD1_conv = util.weight([self.cnn_size, self.cnn_size, self.feature_num, self.feature_num],
                                stddev=self.weight_dev, name="WD1_conv", initializer=self.initializer)
    self.BD1_conv = util.bias([self.feature_num], name="BD1")

    # HD+2 conv (input has feature_num + 1 channels because x is concatenated in below)
    self.WD2_conv = util.weight([self.cnn_size, self.cnn_size, self.feature_num + 1, self.channels],
                                stddev=self.weight_dev, name="WD2_conv", initializer=self.initializer)
    self.BD2_conv = util.bias([1], name="BD2")

    self.Y1_conv = self.inference_depth * [None]
    self.Y2_conv = self.inference_depth * [None]
    # per-recursion layer weights, initialized uniformly
    self.W = tf.Variable(np.full(fill_value=1.0 / self.inference_depth, shape=[self.inference_depth], dtype=np.float32),
                         name="LayerWeights")
    W_sum = tf.reduce_sum(self.W)  # sum of all layer weights, used for normalization
    self.y_outputs = self.inference_depth * [None]

    for i in range(0, self.inference_depth):
        with tf.variable_scope("Y%d" % (i + 1)):
            # each recursion output H_d is passed through the shared HD+1 convolution
            self.Y1_conv[i] = util.conv2d_with_bias(self.H_conv[i + 1], self.WD1_conv, self.cnn_stride, self.BD1_conv,
                                                    add_relu=not self.residual, name="conv_1")
            # concatenate with the input x along the channel axis
            y_conv = tf.concat([self.Y1_conv[i], self.x], 3)
            # the concatenated tensor is passed through the shared HD+2 convolution
            self.Y2_conv[i] = util.conv2d_with_bias(y_conv, self.WD2_conv, self.cnn_stride, self.BD2_conv,
                                                    add_relu=not self.residual, name="conv_2")
            # weight each recursion's output by its normalized layer weight
            self.y_outputs[i] = self.Y2_conv[i] * self.W[i] / W_sum

    if self.summary:
        util.add_summaries("BD1", self.model_name, self.BD1_conv)
        util.add_summaries("WD1", self.model_name, self.WD1_conv, mean=True, max=True, min=True)
        util.add_summaries("WD2", self.model_name, self.WD2_conv, mean=True, max=True, min=True)
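# A hypothetical follow-up step (an assumption, not part of the original method above,
# and build_final_output is an invented name): the weighted per-recursion outputs in
# self.y_outputs are typically summed into a single prediction, with the input added
# back when residual learning is enabled.
def build_final_output(self):
    # sum of the recursion outputs that were already scaled by W[i] / W_sum
    self.y_ = tf.add_n(self.y_outputs, name="weighted_sum")
    if self.residual:
        # global skip connection from the interpolated LR input
        self.y_ = tf.add(self.y_, self.x, name="output")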
def build_embedding_graph(self):

    self.x = tf.placeholder(tf.float32, shape=[None, None, None, self.channels], name="X")  # low-resolution input image
    self.y = tf.placeholder(tf.float32, shape=[None, None, None, self.channels], name="Y")  # high-resolution target image

    # H-1 conv
    with tf.variable_scope("W-1_conv"):
        self.Wm1_conv = util.weight([self.cnn_size, self.cnn_size, self.channels, self.feature_num],
                                    stddev=self.weight_dev, name="conv_W", initializer=self.initializer)
        self.Bm1_conv = util.bias([self.feature_num], name="conv_B")
        Hm1_conv = util.conv2d_with_bias(self.x, self.Wm1_conv, self.cnn_stride, self.Bm1_conv,
                                         add_relu=True, name="H")

    # H0 conv
    with tf.variable_scope("W0_conv"):
        self.W0_conv = util.weight([self.cnn_size, self.cnn_size, self.feature_num, self.feature_num],
                                   stddev=self.weight_dev, name="conv_W", initializer=self.initializer)
        self.B0_conv = util.bias([self.feature_num], name="conv_B")
        self.H_conv[0] = util.conv2d_with_bias(Hm1_conv, self.W0_conv, self.cnn_stride, self.B0_conv,
                                               add_relu=True, name="H")

    if self.summary:
        # transpose Wm1_conv from [3, 3, 1, 96] to the tf.summary.image layout [96, 3, 3, 1],
        # i.e. [batch_num, height, width, channels]
        Wm1_transposed = tf.transpose(self.Wm1_conv, [3, 0, 1, 2])
        # add an image summary so the first-layer filters can be inspected in TensorBoard
        tf.summary.image("W-1/" + self.model_name, Wm1_transposed, max_outputs=self.log_weight_image_num)

        util.add_summaries("B-1", self.model_name, self.Bm1_conv, mean=True, max=True, min=True)
        util.add_summaries("W-1", self.model_name, self.Wm1_conv, mean=True, max=True, min=True)
        util.add_summaries("B0", self.model_name, self.B0_conv, mean=True, max=True, min=True)
        util.add_summaries("W0", self.model_name, self.W0_conv, mean=True, max=True, min=True)
def build_inference_graph(self):

    if self.inference_depth <= 0:
        return

    # shared recursive convolution: the same W_conv / B_conv are applied at every depth
    self.W_conv = util.weight([self.cnn_size, self.cnn_size, self.feature_num, self.feature_num],
                              stddev=self.weight_dev, name="W_conv", initializer="diagonal")
    self.B_conv = util.bias([self.feature_num], name="B")

    for i in range(0, self.inference_depth):
        self.H_conv[i + 1] = util.conv2d_with_bias(self.H_conv[i], self.W_conv, 1, self.B_conv, name="H%d" % (i + 1))

    if self.summary:
        util.add_summaries("W", self.model_name, self.W_conv, mean=True, max=True, min=True)
        util.add_summaries("B", self.model_name, self.B_conv, mean=True, max=True, min=True)
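# One plausible reading of the "diagonal" initializer string used above (an assumption,
# not the project's implementation; diagonal_weight is an invented helper name): an
# identity-like filter whose centre tap maps input feature channel k straight to output
# channel k, so each recursion starts out approximately as the identity mapping.
def diagonal_weight(cnn_size, feature_num, name="W_conv"):
    init = np.zeros([cnn_size, cnn_size, feature_num, feature_num], dtype=np.float32)
    centre = cnn_size // 2
    for k in range(feature_num):
        init[centre, centre, k, k] = 1.0  # identity at initialization
    return tf.Variable(init, name=name)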
def build_inference_graph(self):

    if self.inference_depth <= 0:
        return

    self.WL_conv = util.weight([self.cnn_size, self.cnn_size, self.feature_num, self.feature_num],
                               stddev=self.weight_dev, name="WL_conv", initializer="diagonal")
    self.BL_conv = util.bias([self.feature_num], name="BL")
    self.WS_conv = util.weight([self.cnn_size, self.cnn_size, 1, self.feature_num],
                               stddev=self.weight_dev, name="WS_conv", initializer="diagonal")
    self.BS_conv = util.bias([self.feature_num], name="BS")

    for i in range(0, self.inference_depth):
        self.H_conv[i + 1] = util.conv2d_with_bias(self.H_conv[i], self.WL_conv, 1, self.BL_conv,
                                                   add_relu=True, name="H%d" % (i + 1))
        self.HS_conv[i] = util.conv2d_with_bias(self.net_residual.outputs[:, i, :, :, :], self.WS_conv, 1, self.BS_conv,
                                                add_relu=True, name="HS%d" % (i + 1))
        self.H_conv[i + 1] = tf.add(self.H_conv[i + 1], self.HS_conv[i])
        # tf.summary.image("Feature_map%d/" % (i+1) + self.model_name, self.R_conv, max_outputs=4)

    self.W_conv = util.weight([self.cnn_size, self.cnn_size, self.feature_num, self.channels],
                              stddev=self.weight_dev, name="W_conv", initializer=self.initializer)
    self.B_conv = util.bias([self.channels], name="B")
    self.H = util.conv2d_with_bias(self.H_conv[self.inference_depth], self.W_conv, 1, self.B_conv,
                                   add_relu=True, name="H")

    self.y_ = self.H
    tf.summary.image("prediction/" + self.model_name, self.y_, max_outputs=1)

    if self.residual:
        self.y_ = tf.add(self.y_, self.net_image.outputs, name="output")

    if self.summary:
        util.add_summaries("W", self.model_name, self.W_conv, mean=True, max=True, min=True)
        util.add_summaries("B", self.model_name, self.B_conv, mean=True, max=True, min=True)
        util.add_summaries("BD1", self.model_name, self.BD1_conv)
        util.add_summaries("WD1", self.model_name, self.WD1_conv, mean=True, max=True, min=True)
        util.add_summaries("WD2", self.model_name, self.WD2_conv, mean=True, max=True, min=True)
def build_reconstruction_graph(self):

    # HD+1 conv
    self.WD1_conv = util.weight([self.cnn_size, self.cnn_size, self.feature_num, self.feature_num],
                                stddev=self.weight_dev, name="WD1_conv", initializer=self.initializer)
    self.BD1_conv = util.bias([self.feature_num], name="BD1")

    # HD+2 conv
    self.WD2_conv = util.weight([self.cnn_size, self.cnn_size, self.feature_num, self.channels],
                                stddev=self.weight_dev, name="WD2_conv", initializer=self.initializer)
    self.BD2_conv = util.bias([1], name="BD2")

    self.Y1_conv = (self.inference_depth + 1) * [None]
    self.Y2_conv = (self.inference_depth + 1) * [None]
    self.W = tf.Variable(
        np.full(fill_value=1.0 / (self.inference_depth + 1), shape=[self.inference_depth + 1], dtype=np.float32),
        name="layer_weight")
    W_sum = tf.reduce_sum(self.W)

    for i in range(0, self.inference_depth + 1):
        self.Y1_conv[i] = util.conv2d_with_bias(self.H_conv[i], self.WD1_conv, self.cnn_stride, self.BD1_conv,
                                                add_relu=not self.residual, name="Y%d_1" % i)
        self.Y2_conv[i] = util.conv2d_with_bias(self.Y1_conv[i], self.WD2_conv, self.cnn_stride, self.BD2_conv,
                                                add_relu=not self.residual, name="Y%d_2" % i)
        y_ = tf.multiply(self.W[i], self.Y2_conv[i], name="Y%d_mul" % i)
        y_ = tf.div(y_, W_sum, name="Y%d_div" % i)
        if i == 0:
            self.y_ = y_
        else:
            self.y_ = self.y_ + y_

    if self.residual:
        self.y_ = self.y_ + self.x

    if self.summary:
        util.add_summaries("BD1", self.model_name, self.BD1_conv)
        util.add_summaries("WD1", self.model_name, self.WD1_conv, mean=True, max=True, min=True)
        util.add_summaries("WD2", self.model_name, self.WD2_conv, mean=True, max=True, min=True)
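# A minimal sketch (assumption, not the project's implementation) of the
# util.add_summaries helper used throughout: it records scalar statistics and a
# histogram of a tensor so they can be inspected in TensorBoard.
def add_summaries(tag, model_name, var, mean=False, max=False, min=False):
    # the max/min parameter names mirror the call sites above, deliberately shadowing the builtins
    prefix = tag + "/" + model_name
    if mean:
        tf.summary.scalar(prefix + "/mean", tf.reduce_mean(var))
    if max:
        tf.summary.scalar(prefix + "/max", tf.reduce_max(var))
    if min:
        tf.summary.scalar(prefix + "/min", tf.reduce_min(var))
    tf.summary.histogram(prefix, var)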
def build_new_layer(self):

    self.x = tf.placeholder(tf.float32, shape=[None, None, None, self.channels], name="X1")
    self.y = tf.placeholder(tf.float32, shape=[None, None, None, self.channels], name="Y1")

    # H-1 conv
    with tf.variable_scope("W-3_conv"):
        self.Wm1_conv = util.weight([self.cnn_size, self.cnn_size, self.channels, self.feature_num],
                                    stddev=self.weight_dev, name="conv_W1", initializer=self.initializer)
        self.Bm1_conv = util.bias([self.feature_num], name="conv_B1")
        Hm1_conv = util.conv2d_with_bias(self.x, self.Wm1_conv, self.cnn_stride, self.Bm1_conv,
                                         add_relu=True, name="H1")

    # H0 conv
    with tf.variable_scope("W-2_conv"):
        self.W0_conv = util.weight([self.cnn_size, self.cnn_size, self.feature_num, self.feature_num],
                                   stddev=self.weight_dev, name="conv_W1", initializer=self.initializer)
        self.B0_conv = util.bias([self.feature_num], name="conv_B1")
        self.H_conv[0] = util.conv2d_with_bias(Hm1_conv, self.W0_conv, self.cnn_stride, self.B0_conv,
                                               add_relu=True, name="H1")

    if self.summary:
        # convert to tf.summary.image format [batch_num, height, width, channels]
        Wm1_transposed = tf.transpose(self.Wm1_conv, [3, 0, 1, 2])
        tf.summary.image("W-3/" + self.model_name, Wm1_transposed, max_outputs=self.log_weight_image_num)

        util.add_summaries("B-3", self.model_name, self.Bm1_conv, mean=True, max=True, min=True)
        util.add_summaries("W-3", self.model_name, self.Wm1_conv, mean=True, max=True, min=True)
        util.add_summaries("B2", self.model_name, self.B0_conv, mean=True, max=True, min=True)
        util.add_summaries("W2", self.model_name, self.W0_conv, mean=True, max=True, min=True)