def __init__(self, input_shape, kernel_size, kernel_initializer="glorot_uniform", using_bias=False, activation="relu", **kwargs):
    """
    input_shape: list, [None, shape] or [shape]
    kernel_size: int
    using_bias: bool
    activation: string, e.g. "relu", "sigmoid", "leaky_relu", "tanh"
    """
    super(Dense, self).__init__()
    self.initializer = Reconstruction.initializer(kernel_initializer)  # select the weight-initialization method
    self.input_shape_zp = Reconstruction.remake_shape(shape=input_shape, dims=2)
    self.w = tf.Variable(initial_value=self.initializer(shape=[self.input_shape_zp[-1], kernel_size], dtype=tf.float32),
                         trainable=True)
    self.using_bias = using_bias
    if using_bias:
        # the bias is kept as a row vector so it broadcasts correctly over the stacked batch,
        # i.e. it is added within every single sample of the batch
        self.b = tf.Variable(initial_value=tf.zeros(shape=(1, kernel_size), dtype=tf.float32),
                             trainable=True)
    self.activation = Reconstruction.activation(activation)
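# A minimal sketch of the forward pass this constructor sets up, assuming the layer exposes a
# `call` method (the method name and exact structure are assumptions, not taken from the repo):
# the 2-D input is multiplied by self.w, the row-vector bias broadcasts over the batch, and the
# selected activation is applied.
def call(self, inputs):
    x = tf.matmul(inputs, self.w)      # [batch, in_features] @ [in_features, kernel_size]
    if self.using_bias:
        x = x + self.b                 # row-vector bias broadcasts across the batch
    return self.activation(x)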
def build(self, *args, input_shape=None, **kwargs):
    out_shape, padding, padding_vect = Reconstruction.ConvCalculation(input_shape=input_shape[1:-1],
                                                                      filters=self.filters_zp,
                                                                      kernel_size=self.kernel_size_zp,
                                                                      strides=self.strides_zp,
                                                                      padding=self.padding_zp)
    self.padding_zp = padding
    self.padding_vect_zp = padding_vect
    super(Conv2D, self).build(*args, input_shape=input_shape, **kwargs)
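# A minimal sketch of how the values computed in build() are typically consumed in the forward
# pass, assuming the same "pad manually, then convolve" scheme as the 3-D layers below; the
# method name, the presence of self.w/self.b, and the padding layout are assumptions:
def call(self, inputs):
    x = tf.pad(inputs, self.padding_vect_zp)                   # explicit padding computed in build()
    x = tf.nn.conv2d(x, self.w, strides=self.strides_zp,
                     padding=self.padding_zp)                  # padding mode rewritten by build(), normally "VALID"
    if self.using_bias:
        x = x + self.b
    return x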
def __init__(self, input_shape, filters, kernel_size, strides=[1, 1, 1], padding="SAME", using_bias=False, sn=False):
    """
    All convolutions are unified as: compute the padding amounts, pad the input manually,
    then run a VALID convolution, so any padding mode can be plugged in. For spectral
    normalization (SN) the input shape of this layer therefore has to include padding_vect,
    i.e. it becomes the padded shape.
    """
    super(Conv3D, self).__init__()
    self.input_shape_zp = Reconstruction.remake_shape(input_shape, dims=5)  # remade into a 5-D tensor shape
    self.kernel_size_zp = Reconstruction.remake_kernel_size(kernel_size, dims=3)  # 3-D kernel, expanded to the 5-D form [1,x,x,x,1]
    self.strides_zp = Reconstruction.remake_strides(strides, dims=3)  # 3-D strides, expanded to the 5-D form [1,x,x,x,1]
    self.output_shape_zp, self.padding, self.padding_vect = Reconstruction.ConvCalculation(
        self.input_shape_zp[1:-1], filters, self.kernel_size_zp[1:-1],
        self.strides_zp[1:-1], padding)
    weight_shape = self.kernel_size_zp[1:-1] + [self.input_shape_zp[-1]] + [filters]  # [D,H,W,IC,OC]
    self.initializer = Reconstruction.initializer("random_normal", mean=0.0, stddev=0.02)
    self.w = tf.Variable(initial_value=self.initializer(shape=weight_shape),
                         trainable=True, dtype=tf.float32)
    self.using_bias = using_bias
    if using_bias:
        self.b = tf.Variable(tf.zeros(self.output_shape_zp, dtype=tf.float32), trainable=True)
    self.sn = sn
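# A minimal sketch of the forward pass this constructor prepares, following the docstring's
# "pad manually, then VALID convolution" scheme; the method name, the spectral-norm handling,
# and the exact padding layout are assumptions rather than the repo's verbatim code:
def call(self, inputs):
    x = tf.pad(inputs, self.padding_vect)                      # explicit padding from ConvCalculation
    x = tf.nn.conv3d(x, self.w, strides=self.strides_zp,
                     padding=self.padding)                     # self.padding is normally "VALID" after remapping
    if self.using_bias:
        x = x + self.b
    return x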
def __init__(self, input_shape, output_shape, filters, kernel_size, strides=[2, 2, 2], padding="SAME", using_bias=False, sn=False):
    # the output channel count is given by `filters`; only the per-channel D, H, W of the output need to be supplied
    super(UpSampaleConv3D, self).__init__()
    self.input_shape_zp = Reconstruction.remake_shape(input_shape, dims=5)  # remade into a 5-D tensor shape, missing dims (batch) filled with None
    self.output_shape_zp = Reconstruction.remake_shape(output_shape + [filters], dims=5)  # remade into a 5-D tensor shape
    self.kernel_size_zp = Reconstruction.remake_kernel_size(kernel_size, dims=3)  # 3-D kernel, expanded to the 5-D form [1,x,x,x,1]
    self.strides_zp = Reconstruction.remake_strides(strides, dims=3)  # 3-D strides, expanded to the 5-D form [1,x,x,x,1]
    self.padding = padding
    self.padding, self.padding_vect, self.cut_flag = Reconstruction.Trans2UpsampleCal(
        self.input_shape_zp[1:-1], self.output_shape_zp[1:-1], filters,
        self.kernel_size_zp[1:-1], self.strides_zp[1:-1], self.padding)
    self.up_size = self.strides_zp[1:-1]
    self.up_op = tf.keras.layers.UpSampling3D(size=self.up_size)  # upsampling acts only on the non-batch, non-channel dims, so the redundant dims are stripped
    self.strides_zp = Reconstruction.remake_strides([1, 1, 1], dims=3)  # the convolution after upsampling uses unit strides
    self.initializer = Reconstruction.initializer("random_normal", mean=0.0, stddev=0.02)
    weight_shape = self.kernel_size_zp[1:-1] + [self.input_shape_zp[-1]] + [filters]  # [D,H,W,IC,OC]
    self.w = tf.Variable(initial_value=self.initializer(shape=weight_shape),
                         trainable=True, dtype=tf.float32)
    self.using_bias = using_bias
    if using_bias:
        self.b = tf.Variable(tf.zeros(self.output_shape_zp[1:], dtype=tf.float32), trainable=True)
    self.sn = sn
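# A minimal sketch of the upsample-then-convolve forward pass this constructor sets up
# (assumed, not taken verbatim from the repo): upsampling by the original strides, explicit
# padding, then a unit-stride convolution. How `cut_flag` trims any surplus rows/columns is
# repo-specific and omitted here.
def call(self, inputs):
    x = self.up_op(inputs)                                     # UpSampling3D by the original strides
    x = tf.pad(x, self.padding_vect)                           # explicit padding from Trans2UpsampleCal
    x = tf.nn.conv3d(x, self.w, strides=self.strides_zp,       # strides were reset to [1,1,1,1,1]
                     padding=self.padding)
    if self.using_bias:
        x = x + self.b
    return x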
def __init__(self, input_shape, output_shape, filters, kernel_size, strides=[1, 1], padding="SAME", using_bias=False, sn=False):
    super(Conv2DTranspose, self).__init__()
    self.input_shape_zp = Reconstruction.remake_shape(input_shape, dims=4)  # remade into a 4-D tensor shape, missing dims (batch) filled with None
    self.output_shape_zp = Reconstruction.remake_shape(output_shape + [filters], dims=4)  # remade into a 4-D tensor shape
    self.kernel_size_zp = Reconstruction.remake_kernel_size(kernel_size, dims=2)  # 2-D kernel, expanded to the 4-D form [1,x,x,1]
    self.strides_zp = Reconstruction.remake_strides(strides, dims=2)  # 2-D strides, expanded to the 4-D form [1,x,x,1]
    self.padding = padding
    Reconstruction.ConvTransCheck(input_shape=self.input_shape_zp[1:-1],
                                  output_shape=self.output_shape_zp[1:-1],
                                  filters=filters,
                                  kernel_size=self.kernel_size_zp[1:-1],
                                  strides=self.strides_zp[1:-1],
                                  padding=self.padding)
    weight_shape = self.kernel_size_zp[1:-1] + [filters] + [self.input_shape_zp[-1]]  # [H,W,OC,IC]
    self.initializer = Reconstruction.initializer("random_normal", mean=0.0, stddev=0.02)
    self.w = tf.Variable(initial_value=self.initializer(shape=weight_shape),
                         trainable=True, dtype=tf.float32)
    self.using_bias = using_bias
    if using_bias:
        self.b = tf.Variable(tf.zeros(self.output_shape_zp[1:], dtype=tf.float32), trainable=True)
    self.sn = sn
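# A minimal sketch of the forward pass for this layer, assuming a direct call to
# tf.nn.conv2d_transpose (the runtime batch-size handling and the bias application are
# assumptions). The weight layout [H, W, OC, IC] built above matches conv2d_transpose's
# expected filter format.
def call(self, inputs):
    batch = tf.shape(inputs)[0]
    out_shape = tf.stack([batch] + self.output_shape_zp[1:])   # replace the None batch dim at runtime
    x = tf.nn.conv2d_transpose(inputs, self.w, output_shape=out_shape,
                               strides=self.strides_zp, padding=self.padding)
    if self.using_bias:
        x = x + self.b
    return x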