# TensorFlow 1.x (tf.layers / tf.variable_scope style APIs) is assumed throughout.
import tensorflow as tf


def resnet_block(X, F1, F2, F3, f=None, s=None, block='convolutional'):
    """
    Implements a ResNet block.
    F1, F2, F3 are the filter counts of the three convolutions; f is the kernel
    size of the middle convolution; s is the stride of the convolutional block.
    block selects 'convolutional' (projection shortcut) or 'identity' (plain shortcut).
    """
    X_shortcut = X
    if block == 'convolutional':
        # first component: 1x1 convolution with stride s
        X = tf.layers.conv2d(inputs=X, filters=F1, kernel_size=[1, 1], strides=[s, s])
        X = tf.layers.batch_normalization(inputs=X, axis=-1)
        X = tf.nn.relu(X)
        # second component: fxf convolution, 'same' padding
        X = tf.layers.conv2d(inputs=X, filters=F2, kernel_size=[f, f], padding='same')
        X = tf.layers.batch_normalization(inputs=X, axis=-1)
        X = tf.nn.relu(X)
        # third component: 1x1 convolution
        X = tf.layers.conv2d(inputs=X, filters=F3, kernel_size=[1, 1], strides=[1, 1], padding='valid')
        X = tf.layers.batch_normalization(inputs=X, axis=-1)
        # the shortcut path: project the input so its shape matches the main path
        X_shortcut = tf.layers.conv2d(inputs=X_shortcut, filters=F3, kernel_size=[1, 1], strides=[s, s], padding='valid')
        X_shortcut = tf.layers.batch_normalization(X_shortcut, axis=-1)
        X = tf.nn.relu(X + X_shortcut)
        return X
    elif block == 'identity':
        # 1st component: 1x1 convolution
        X = tf.layers.conv2d(inputs=X, filters=F1, kernel_size=[1, 1], strides=[1, 1], padding='valid')
        X = tf.layers.batch_normalization(X, axis=-1)
        X = tf.nn.relu(X)
        # 2nd component: fxf convolution, 'same' padding
        X = tf.layers.conv2d(inputs=X, filters=F2, kernel_size=[f, f], strides=[1, 1], padding='same',
                             kernel_initializer=tf.keras.initializers.glorot_uniform(seed=0))
        X = tf.layers.batch_normalization(X, axis=-1)
        X = tf.nn.relu(X)
        # 3rd component: 1x1 convolution (1x1, not fxf, so the residual add keeps its shape)
        X = tf.layers.conv2d(inputs=X, filters=F3, kernel_size=[1, 1], strides=[1, 1], padding='valid',
                             kernel_initializer=tf.keras.initializers.glorot_uniform(seed=0))
        X = tf.layers.batch_normalization(X, axis=-1)
        # the shortcut path: add the unmodified input
        X = tf.nn.relu(X + X_shortcut)
        return X
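# A minimal usage sketch for resnet_block, assuming TensorFlow 1.x graph mode;
# the placeholder name and the shapes below are illustrative, not from the original code.
if __name__ == '__main__':
    images = tf.placeholder(tf.float32, [None, 56, 56, 64], name='images')
    # convolutional block: halves the spatial size (s=2) and widens to F3=256 channels
    out = resnet_block(images, F1=64, F2=64, F3=256, f=3, s=2, block='convolutional')
    # identity block: keeps the shape [None, 28, 28, 256]
    out = resnet_block(out, F1=64, F2=64, F3=256, f=3, block='identity')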
def txenc(s, H, is_train):
    """
    Transmitter encoder.
    s: 2^k-dimensional one-hot message vector (k=2 for QPSK, k=4 for 16-QAM, etc.)
    H: real-valued channel matrix
    is_train: bool indicating whether the network is training
    """
    # disH (channel preprocessing) and batch_norm are helpers defined elsewhere in this codebase.
    vbit = 2
    shape_fc1 = [H.size + s.size, H.size + s.size]  # [16 + 4, 16 + 4] for 2 x 2 MIMO
    shape_fc2 = [H.size + s.size, 4]
    with tf.variable_scope("disH"):
        H_v = disH(H, 2)
    # Layer 1: concatenate H_v and s
    input_concat = tf.concat([H_v, s], 0)
    # Layer 2: FC/ReLU
    W_fc1 = tf.get_variable("W_fc1", shape_fc1, initializer=tf.random_normal_initializer())
    b_fc1 = tf.get_variable("b_fc1", [shape_fc1[1]], initializer=tf.constant_initializer(1.0))
    relu_fc1 = tf.nn.relu(tf.add(tf.matmul(input_concat, W_fc1), b_fc1))
    bn_fc1 = batch_norm(relu_fc1, shape_fc1[1], is_train)
    # Layer 3: FC/Linear
    W_fc2 = tf.get_variable("W_fc2", shape_fc2, initializer=tf.random_normal_initializer())
    b_fc2 = tf.get_variable("b_fc2", [shape_fc2[1]], initializer=tf.constant_initializer(1.0))
    linear = tf.add(tf.matmul(bn_fc1, W_fc2), b_fc2)
    # Layer 4: normalize the output symbols to zero mean and unit variance
    mean, var = tf.moments(linear, [0], name='moments')
    x = tf.nn.batch_normalization(linear, mean, var, tf.constant(0.0), tf.constant(1.0), 0.001)
    return x
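# A minimal sketch of the Layer-4 normalization above, assuming TensorFlow 1.x.
# tf.nn.batch_normalization computes (x - mean) / sqrt(var + eps) * scale + offset,
# so with offset 0 and scale 1 each output dimension ends up with roughly zero
# mean and unit variance across the batch (presumably the transmit-power constraint).
if __name__ == '__main__':
    linear = tf.constant([[1.0, 2.0], [3.0, 6.0]])
    mean, var = tf.moments(linear, [0])
    x = tf.nn.batch_normalization(linear, mean, var, tf.constant(0.0), tf.constant(1.0), 0.001)
    with tf.Session() as sess:
        print(sess.run(x))  # columns have ~zero mean, ~unit variance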
def call(self, x):
    # model_ops is an external helper module (Keras-style backend ops) assumed to be in scope.
    if not isinstance(x, list):
        input_shape = model_ops.int_shape(x)
    else:
        x = x[0]
        input_shape = model_ops.int_shape(x)
    self.build(input_shape)
    if self.mode == 0 or self.mode == 2:
        # feature-wise normalization: reduce over every axis except self.axis
        reduction_axes = list(range(len(input_shape)))
        del reduction_axes[self.axis]
        broadcast_shape = [1] * len(input_shape)
        broadcast_shape[self.axis] = input_shape[self.axis]
        x_normed, mean, std = model_ops.normalize_batch_in_training(
            x, self.gamma, self.beta, reduction_axes, epsilon=self.epsilon)
        if self.mode == 0:
            # update the running statistics used at inference time
            self.add_update([
                model_ops.moving_average_update(self.running_mean, mean, self.momentum),
                model_ops.moving_average_update(self.running_std, std, self.momentum)
            ], x)
            if sorted(reduction_axes) == list(range(model_ops.get_ndim(x)))[:-1]:
                x_normed_running = tf.nn.batch_normalization(
                    x,
                    self.running_mean,
                    self.running_std,
                    self.beta,
                    self.gamma,
                    variance_epsilon=self.epsilon)
            else:
                # need broadcasting
                broadcast_running_mean = tf.reshape(self.running_mean, broadcast_shape)
                broadcast_running_std = tf.reshape(self.running_std, broadcast_shape)
                broadcast_beta = tf.reshape(self.beta, broadcast_shape)
                broadcast_gamma = tf.reshape(self.gamma, broadcast_shape)
                x_normed_running = tf.nn.batch_normalization(
                    x,
                    broadcast_running_mean,
                    broadcast_running_std,
                    broadcast_beta,
                    broadcast_gamma,
                    variance_epsilon=self.epsilon)
            # pick the normalized form of x corresponding to the training phase
            x_normed = model_ops.in_train_phase(x_normed, x_normed_running)
    elif self.mode == 1:
        # sample-wise normalization
        m = model_ops.mean(x, axis=-1, keepdims=True)
        std = model_ops.sqrt(
            model_ops.var(x, axis=-1, keepdims=True) + self.epsilon)
        x_normed = (x - m) / (std + self.epsilon)
        x_normed = self.gamma * x_normed + self.beta
    return x_normed
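# A minimal sketch of the mode-1 (sample-wise) branch above, assuming TensorFlow 1.x;
# it omits the learned gamma/beta scale-and-shift applied by the layer.
if __name__ == '__main__':
    eps = 1e-3
    x = tf.constant([[1.0, 2.0, 3.0], [4.0, 8.0, 12.0]])
    m = tf.reduce_mean(x, axis=-1, keepdims=True)
    std = tf.sqrt(tf.reduce_mean(tf.square(x - m), axis=-1, keepdims=True) + eps)
    x_normed = (x - m) / (std + eps)  # each sample (row) is normalized independently
    with tf.Session() as sess:
        print(sess.run(x_normed))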
def Generator_p2(k):
    # The W_m2_g_* / b_m2_g_* weights are module-level variables defined elsewhere.
    # project the input code and batch-normalize
    h_g_proj1 = tf.nn.relu(tf.matmul(k, W_m2_g_proj1) + b_m2_g_proj1)
    h_g_proj1 = tf.layers.batch_normalization(h_g_proj1)
    # second projection feeds the deconvolutional stack
    h_g_proj2 = tf.nn.relu(tf.matmul(h_g_proj1, W_m2_g_proj2) + b_m2_g_proj2)
    h_g_re1 = tf.reshape(h_g_proj2, [-1, 7, 7, 128])
    # 7x7x128 -> 14x14x64 (batch size taken from the input k)
    output_shape_g2 = tf.stack([tf.shape(k)[0], 14, 14, 64])
    h_g_conv2 = tf.nn.relu(
        deconv2d(h_g_re1, W_m2_g_conv2, output_shape_g2) + b_m2_g_conv2)
    # 14x14x64 -> 28x28x32
    output_shape_g3 = tf.stack([tf.shape(k)[0], 28, 28, 32])
    h_g_conv3 = tf.nn.relu(
        deconv2d(h_g_conv2, W_m2_g_conv3, output_shape_g3) + b_m2_g_conv3)
    # 28x28x32 -> 28x28x1, sigmoid keeps pixel values in [0, 1]
    output_shape_g4 = tf.stack([tf.shape(k)[0], 28, 28, 1])
    h_g_conv4 = tf.nn.sigmoid(
        deconv2d(h_g_conv3, W_m2_g_conv4, output_shape_g4, stride=[1, 1, 1, 1]) + b_m2_g_conv4)
    # flatten the 28x28 image to a 784-dimensional vector
    h_g_re4 = tf.reshape(h_g_conv4, [-1, 784])
    return h_g_re4
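# deconv2d is not defined in this snippet; the sketch below is an assumption
# consistent with its call sites above (explicit output_shape, overridable stride
# defaulting to 2x upsampling), built on tf.nn.conv2d_transpose.
def deconv2d(x, W, output_shape, stride=(1, 2, 2, 1)):
    # W has shape [filter_height, filter_width, out_channels, in_channels]
    return tf.nn.conv2d_transpose(x, W, output_shape=output_shape,
                                  strides=stride, padding='SAME')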