# Assumed imports for these methods (module paths follow the tflib convention
# used elsewhere in the project; adjust to the actual package layout):
#   import tensorflow as tf
#   from tflib.ops import linear, conv1d_II, deconv1d_II
#   from tflib import act_funct

def DCGANGenerator(self, n_samples, noise=None, print_arch=False):
    kernel_width = self.width_kernel  # kernel size in the time dimension
    num_features = self.num_features
    # fix the stddev used to initialize all weights (DCGAN convention)
    conv1d_II.set_weights_stdev(0.02)
    deconv1d_II.set_weights_stdev(0.02)
    linear.set_weights_stdev(0.02)
    if noise is None:
        noise = tf.random_normal([n_samples, 128])
    if print_arch:
        print('GENERATOR. -------------------------------')
        print(str(noise.get_shape()) + ' latent variable')
    # project the 128-dim latent vector to num_features * num_bins units
    output = linear.Linear('Generator.Input', 128,
                           int(num_features * self.num_bins), noise)
    if print_arch:
        print(str(output.get_shape()) + ' linear projection')
    # reshape to (features, time) so the deconv stack can progressively
    # trade feature channels for temporal resolution
    output = tf.reshape(output, [
        -1, int(num_features * 2**self.num_layers),
        int(self.num_bins / 2**self.num_layers)
    ])
    output = act_funct.LeakyReLU(output)
    if print_arch:
        print(str(output.get_shape()) + ' layer 1')
    for ind_l in range(self.num_layers, 0, -1):
        if ind_l == 1:
            # last deconv layer maps onto the number of recorded neurons
            output = deconv1d_II.Deconv1D(
                'Generator.' + str(self.num_layers - ind_l + 1),
                int(num_features * 2**ind_l), int(self.num_neurons),
                int(kernel_width), output,
                num_bins=int(2**(self.num_layers - ind_l + 1) *
                             self.num_bins / 2**self.num_layers))
        else:
            # intermediate layers halve the features and double the bins
            output = deconv1d_II.Deconv1D(
                'Generator.' + str(self.num_layers - ind_l + 1),
                int(num_features * 2**ind_l),
                int(num_features * 2**(ind_l - 1)),
                int(kernel_width), output,
                num_bins=int(2**(self.num_layers - ind_l + 1) *
                             self.num_bins / 2**self.num_layers))
        output = act_funct.LeakyReLU(output)
        if print_arch:
            print(str(output.get_shape()) + ' layer ' +
                  str(self.num_layers - ind_l + 2))
    # squash every output bin to (0, 1)
    output = tf.sigmoid(output)
    conv1d_II.unset_weights_stdev()
    deconv1d_II.unset_weights_stdev()
    linear.unset_weights_stdev()
    output = tf.reshape(output, [-1, self.output_dim])
    return output
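# Hedged usage sketch (hyperparameter values and names are assumptions, not
# taken from the original code): with the attributes these methods rely on
# set, e.g. num_layers=4, num_features=4, num_bins=64, num_neurons=16,
# width_kernel=5, stride=2, output_dim=num_neurons*num_bins, sampling from
# the generator works as:
#
#   samples = self.DCGANGenerator(n_samples=64, print_arch=True)
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       fake = sess.run(samples)  # array of shape (64, output_dim)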
def DCGANDiscriminator_sampler(self, inputs):
    kernel_width = self.width_kernel  # kernel size in the time dimension
    num_features = self.num_features
    # neurons are treated as different channels
    output = tf.reshape(inputs, [-1, self.num_neurons, self.num_bins])
    # initialize weights
    conv1d_II.set_weights_stdev(0.02)
    deconv1d_II.set_weights_stdev(0.02)
    linear.set_weights_stdev(0.02)
    outputs_mat = []
    filters_mat = []
    # same convolutional stack as DCGANDiscriminator, but every layer's
    # filters and activations are kept for later inspection
    for ind_l in range(self.num_layers):
        if ind_l == 0:
            output, filters = conv1d_II.Conv1D(
                'Discriminator.' + str(ind_l + 1), self.num_neurons,
                num_features * 2**(ind_l + 1), kernel_width, output,
                stride=self.stride, save_filter=True)
        else:
            output, filters = conv1d_II.Conv1D(
                'Discriminator.' + str(ind_l + 1),
                num_features * 2**ind_l, num_features * 2**(ind_l + 1),
                kernel_width, output, stride=self.stride,
                save_filter=True)
        output = act_funct.LeakyReLU(output)
        outputs_mat.append(output)
        filters_mat.append(filters)
    output = tf.reshape(output, [-1, int(num_features * self.num_bins)])
    output = linear.Linear('Discriminator.Output',
                           int(num_features * self.num_bins), 1, output)
    # unset weights
    conv1d_II.unset_weights_stdev()
    deconv1d_II.unset_weights_stdev()
    linear.unset_weights_stdev()
    return tf.reshape(output, [-1]), filters_mat, outputs_mat
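# Hedged usage sketch (names such as `real_data` and `batch` are assumptions):
# the sampler variant exposes each layer's filters and activations, e.g. for
# visualizing what the first convolutional layer has learned:
#
#   score, filters_mat, outputs_mat = self.DCGANDiscriminator_sampler(real_data)
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       f1, a1 = sess.run([filters_mat[0], outputs_mat[0]],
#                         feed_dict={real_data: batch})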
def DCGANDiscriminator(self, inputs, print_arch=False):
    kernel_width = self.width_kernel  # kernel size in the time dimension
    num_features = self.num_features
    # neurons are treated as different channels
    output = tf.reshape(inputs, [-1, self.num_neurons, self.num_bins])
    conv1d_II.set_weights_stdev(0.02)
    deconv1d_II.set_weights_stdev(0.02)
    linear.set_weights_stdev(0.02)
    if print_arch:
        print('DISCRIMINATOR. -------------------------------')
        print(str(output.get_shape()) + ' input')
    # strided convolutions: each layer doubles the features while the
    # stride reduces the temporal dimension
    for ind_l in range(self.num_layers):
        if ind_l == 0:
            output = conv1d_II.Conv1D(
                'Discriminator.' + str(ind_l + 1), self.num_neurons,
                int(num_features * 2**(ind_l + 1)), int(kernel_width),
                output, stride=self.stride)
        else:
            output = conv1d_II.Conv1D(
                'Discriminator.' + str(ind_l + 1),
                int(num_features * 2**ind_l),
                int(num_features * 2**(ind_l + 1)), int(kernel_width),
                output, stride=self.stride)
        output = act_funct.LeakyReLU(output)
        if print_arch:
            print(str(output.get_shape()) + ' layer ' + str(ind_l + 1))
    output = tf.reshape(output, [-1, int(num_features * self.num_bins)])
    if print_arch:
        print(str(output.get_shape()) + ' fully connected layer')
    # a single linear unit produces the (unbounded) critic score
    output = linear.Linear('Discriminator.Output',
                           int(num_features * self.num_bins), 1, output)
    if print_arch:
        print(str(output.get_shape()) + ' output')
    conv1d_II.unset_weights_stdev()
    deconv1d_II.unset_weights_stdev()
    linear.unset_weights_stdev()
    return tf.reshape(output, [-1])
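# Hedged wiring sketch (a minimal WGAN-style pairing; the actual training loop
# is not part of this section): the discriminator scores real batches and
# generator samples, and the two scores define the usual critic/generator
# objectives. Parameter sharing between the two discriminator calls is assumed
# to be handled by the tflib-style parameter cache, which reuses variables by
# name:
#
#   real_data = tf.placeholder(tf.float32, [None, self.output_dim])
#   fake_data = self.DCGANGenerator(batch_size)
#   disc_real = self.DCGANDiscriminator(real_data)
#   disc_fake = self.DCGANDiscriminator(fake_data)
#   gen_cost = -tf.reduce_mean(disc_fake)
#   disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real)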