Example #1
    def FCDiscriminator_sampler(self, inputs, FC_DIM=512, n_layers=3):
        with tf.variable_scope('Discriminator') as scope:
            scope.reuse_variables()
            output = tf.reshape(inputs, [-1, self.num_neurons, self.num_bins])
            conv1d_II.set_weights_stdev(0.02)
            output, filters = conv1d_II.Conv1D('Input',
                                               self.num_neurons,
                                               self.num_features,
                                               self.kernel_width,
                                               output,
                                               stride=1,
                                               save_filter=True)
            output = tf.reshape(output,
                                [-1, self.num_features * self.num_bins])
            outputs_mat = [output]
            output = act_funct.LeakyReLULayer(
                '0', self.num_features * self.num_bins, FC_DIM, output)
            outputs_mat.append(output)
            for i in range(n_layers - 1):
                output = act_funct.LeakyReLULayer('{}'.format(i + 1), FC_DIM,
                                                  FC_DIM, output)
                outputs_mat.append(output)
            output = linear.Linear('Out', FC_DIM, 1, output)
            conv1d_II.unset_weights_stdev()

            return tf.reshape(output, [-1]), [filters], outputs_mat
Example #2
def ReLULayer(name, n_in, n_out, inputs):
    output = linear.Linear(name + '.Linear',
                           n_in,
                           n_out,
                           inputs,
                           initialization='he')
    return tf.nn.relu(output)
Example #3
def LeakyReLULayer(name, n_in, n_out, inputs):
    output = linear.Linear(name + '.Linear',
                           n_in,
                           n_out,
                           inputs,
                           initialization='he')
    return LeakyReLU(output)
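
Example #3 calls a LeakyReLU helper that is not itself shown in this listing. A minimal sketch of such an activation is given below; the slope value 0.2 is an assumption, matching the common tflib convention:

import tensorflow as tf

def LeakyReLU(x, alpha=0.2):
    # identity for positive inputs, a small linear slope for negative ones
    return tf.maximum(alpha * x, x)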
Example #4
def FCDiscriminator(inputs, FC_DIM=512, n_layers=3):
    output = LeakyReLULayer('Discriminator.Input', OUTPUT_DIM, FC_DIM, inputs)
    for i in range(n_layers):
        output = LeakyReLULayer('Discriminator.{}'.format(i), FC_DIM, FC_DIM,
                                output)
    output = linear.Linear('Discriminator.Out', FC_DIM, 1, output)

    return tf.reshape(output, [-1])
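
A minimal usage sketch for Example #4, assuming TensorFlow 1.x and a module-level constant OUTPUT_DIM for the flattened input size (the value below is assumed, for illustration only):

import tensorflow as tf

OUTPUT_DIM = 784  # assumed input dimensionality

real_data = tf.placeholder(tf.float32, shape=[None, OUTPUT_DIM])
disc_logits = FCDiscriminator(real_data)  # unnormalized critic scores, shape [batch_size]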
Example #5
    def FCDiscriminator(self, inputs, n_layers=3):
        output = act_funct.LeakyReLULayer('Discriminator.Input',
                                          self.output_dim, self.num_units,
                                          inputs)
        for i in range(n_layers):
            output = act_funct.LeakyReLULayer('Discriminator.{}'.format(i),
                                              self.num_units, self.num_units,
                                              output)
        output = linear.Linear('Discriminator.Out', self.num_units, 1, output)

        return tf.reshape(output, [-1])
Example #6
def Discriminator(inputs):
    output = tf.transpose(inputs, [0, 2, 1])
    output = cv.Conv1D('Discriminator.Input', len(charmap), DIM, 1, output)
    output = ResBlock('Discriminator.1', output)
    output = ResBlock('Discriminator.2', output)
    output = ResBlock('Discriminator.3', output)
    output = ResBlock('Discriminator.4', output)
    output = ResBlock('Discriminator.5', output)
    output = tf.reshape(output, [-1, SEQ_LEN * DIM])
    output = li.Linear('Discriminator.Output', SEQ_LEN * DIM, 1, output)
    return output
Example #7
def Discriminator(inputs, y, isSupervised=True):
    output = tf.transpose(inputs, [0, 2, 1])
    output = cv.Conv1D('Discriminator.Input', len(charmap), DIM, 1, output)
    output = ResBlock('Discriminator.1', output)
    output = ResBlock('Discriminator.2', output)
    output = ResBlock('Discriminator.3', output)
    output = ResBlock('Discriminator.4', output)
    output = ResBlock('Discriminator.5', output)
    output = tf.reshape(output, [-1, SEQ_LEN * DIM])
    output = tf.concat([output, y], 1)
    output = li.Linear('Discriminator.Output', SEQ_LEN * DIM + 10, 1, output)
    return output
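
The only difference from Example #6 is the tf.concat of the label vector y with the flattened convolutional features; the input size of the final linear layer grows accordingly from SEQ_LEN * DIM to SEQ_LEN * DIM + 10, which implies y is a 10-dimensional (e.g. one-hot) class label.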
Example #8
    def FCDiscriminator_sampler(self, inputs, n_layers=3):
        output = act_funct.LeakyReLULayer('Discriminator.Input',
                                          self.output_dim, self.num_units,
                                          inputs)
        outputs_mat = [output]
        for i in range(n_layers):
            output = act_funct.LeakyReLULayer('Discriminator.{}'.format(i),
                                              self.num_units, self.num_units,
                                              output)
            outputs_mat.append(output)
        output = linear.Linear('Discriminator.Out', self.num_units, 1, output)
        filters = []
        return tf.reshape(output, [-1]), filters, outputs_mat
Example #9
def FCGenerator(n_samples, noise=None, FC_DIM=512):
    if noise is None:
        noise = tf.random_normal([n_samples, 128])

    output = ReLULayer('Generator.1', 128, FC_DIM, noise)
    output = ReLULayer('Generator.2', FC_DIM, FC_DIM, output)
    output = ReLULayer('Generator.3', FC_DIM, FC_DIM, output)
    output = ReLULayer('Generator.4', FC_DIM, FC_DIM, output)
    output = linear.Linear('Generator.Out', FC_DIM, OUTPUT_DIM, output)

    output = tf.tanh(output)

    return output
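
A sketch of how Examples #4 and #9 could be wired into a WGAN-style objective. This assumes TensorFlow 1.x and that the tflib-style layers reuse parameters when called twice with the same name, as in the reference WGAN code; the training loop itself is not part of this listing:

import tensorflow as tf

BATCH_SIZE = 64
OUTPUT_DIM = 784  # assumed, as in the earlier sketch
real_data = tf.placeholder(tf.float32, shape=[None, OUTPUT_DIM])

fake_data = FCGenerator(BATCH_SIZE)      # [BATCH_SIZE, OUTPUT_DIM]
disc_real = FCDiscriminator(real_data)   # [BATCH_SIZE]
disc_fake = FCDiscriminator(fake_data)   # same weights, reused by name

# Wasserstein critic loss: widen the gap between real and fake scores
disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real)
gen_cost = -tf.reduce_mean(disc_fake)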
Example #10
    def FCGenerator(self, n_samples, noise=None, FC_DIM=512):
        if noise is None:
            noise = tf.random_normal([n_samples, 128])
        output = act_funct.ReLULayer('Generator.1', 128, FC_DIM, noise)
        output = act_funct.ReLULayer('Generator.2', FC_DIM, FC_DIM, output)
        output = act_funct.ReLULayer('Generator.3', FC_DIM, FC_DIM, output)
        output = act_funct.ReLULayer('Generator.4', FC_DIM, FC_DIM, output)
        output = linear.Linear('Generator.Out', FC_DIM, self.output_dim,
                               output)

        output = tf.nn.sigmoid(output)

        return output
Example #11
def Generator(n_samples):
    output = tf.random_normal(shape=[n_samples, 64], dtype=tf.float32)
    output = li.Linear('Generator.Input', 64, SEQ_LEN * DIM, output)
    output = tf.reshape(output, [-1, DIM, SEQ_LEN])
    output = ResBlock('Generator.1', output)
    output = ResBlock('Generator.2', output)
    output = ResBlock('Generator.3', output)
    output = ResBlock('Generator.4', output)
    output = ResBlock('Generator.5', output)
    output = cv.Conv1D('Generator.Output', DIM, len(charmap), 1, output)
    output = tf.transpose(output, [0, 2, 1])
    output = softmax(output)
    # print(output.shape)
    return output
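
Example #11 emits, for each of the SEQ_LEN positions, a softmax distribution over the len(charmap) characters (shape [batch, SEQ_LEN, len(charmap)] after the transpose): discrete text is represented as per-position probability vectors rather than hard one-hot symbols.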
Example #12
def Generator(z, y, isSupervised=True):
    output = tf.concat([z, y], 1)
    output = li.Linear('Generator.Input', 64 + 10, SEQ_LEN * DIM, output)
    output = tf.reshape(output, [-1, DIM, SEQ_LEN])
    output = ResBlock('Generator.1', output)
    output = ResBlock('Generator.2', output)
    output = ResBlock('Generator.3', output)
    output = ResBlock('Generator.4', output)
    output = ResBlock('Generator.5', output)
    output = cv.Conv1D('Generator.Output', DIM, len(charmap), 1, output)
    output = tf.transpose(output, [0, 2, 1])
    output = softmax(output)
    # print(output.shape)
    return output
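
The 64 + 10 input size of Generator.Input in Example #12 implies a 64-dimensional noise vector z and a 10-dimensional label y. A sketch of constructing these inputs, assuming one-hot class labels:

import tensorflow as tf

BATCH_SIZE = 64
z = tf.random_normal([BATCH_SIZE, 64])
labels = tf.placeholder(tf.int32, shape=[BATCH_SIZE])
y = tf.one_hot(labels, 10)   # [BATCH_SIZE, 10]
fake = Generator(z, y)       # [BATCH_SIZE, SEQ_LEN, len(charmap)]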
Example #13
    def FCDiscriminator(self, inputs, print_arch=True):
        output = act_funct.LeakyReLULayer('Discriminator.Input',
                                          self.output_dim, self.num_units,
                                          inputs)
        if print_arch:
            print('DISCRIMINATOR. -------------------------------')
            print(str(output.get_shape()) + ' input')
        for i in range(self.num_layers):
            output = act_funct.LeakyReLULayer('Discriminator.{}'.format(i),
                                              self.num_units, self.num_units,
                                              output)
            if print_arch:
                print(str(output.get_shape()) + ' layer ' + str(i))
        output = linear.Linear('Discriminator.Out', self.num_units, 1, output)
        if print_arch:
            print(str(output.get_shape()) + ' output')
        return tf.reshape(output, [-1])
Example #14
    def DCGANGenerator(self, n_samples, noise=None, print_arch=False):
        kernel_width = self.width_kernel  # in the time dimension
        num_features = self.num_features
        conv1d_II.set_weights_stdev(0.02)
        deconv1d_II.set_weights_stdev(0.02)
        linear.set_weights_stdev(0.02)

        if noise is None:
            noise = tf.random_normal([n_samples, 128])
        if print_arch:
            print('GENERATOR. -------------------------------')
            print(str(noise.get_shape()) + ' latent variable')
        output = linear.Linear('Generator.Input', 128,
                               int(num_features * self.num_bins), noise)
        if print_arch:
            print(str(output.get_shape()) + ' linear projection')
        output = tf.reshape(output, [
            -1,
            int(num_features * 2**self.num_layers),
            int(self.num_bins / 2**self.num_layers)
        ])
        output = act_funct.LeakyReLU(output)
        if print_arch:
            print(str(output.get_shape()) + ' layer 1')
        for ind_l in range(self.num_layers, 0, -1):
            if ind_l == 1:
                output = deconv1d_II.Deconv1D(
                    'Generator.' + str(self.num_layers - ind_l + 1),
                    int(num_features * 2**ind_l), int(self.num_neurons),
                    int(kernel_width), output,
                    num_bins=int(2**(self.num_layers - ind_l + 1) * self.num_bins / 2**self.num_layers))
            else:
                output = deconv1d_II.Deconv1D(
                    'Generator.' + str(self.num_layers - ind_l + 1),
                    int(num_features * 2**ind_l), int(num_features * 2**(ind_l - 1)),
                    int(kernel_width), output,
                    num_bins=int(2**(self.num_layers - ind_l + 1) * self.num_bins / 2**self.num_layers))
            output = act_funct.LeakyReLU(output)
            if print_arch:
                print(
                    str(output.get_shape()) + ' layer ' +
                    str(self.num_layers - ind_l + 2))

        output = tf.sigmoid(output)

        conv1d_II.unset_weights_stdev()
        deconv1d_II.unset_weights_stdev()
        linear.unset_weights_stdev()
        output = tf.reshape(output, [-1, self.output_dim])

        return output
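
The deconvolution stack in Examples #14-#15 follows a standard DCGAN doubling schedule: the linear projection is reshaped to [batch, num_features * 2**num_layers, num_bins / 2**num_layers], each intermediate Deconv1D halves the channel count while doubling the number of time bins, and the last one maps to num_neurons channels, ending at [batch, num_neurons, num_bins]. A small sketch of the shape arithmetic, with assumed hyperparameter values for illustration only:

num_layers, num_features, num_bins, num_neurons = 3, 64, 32, 16

channels, bins = num_features * 2**num_layers, num_bins // 2**num_layers
print(channels, bins)      # 512, 4 -> shape right after the reshape
for _ in range(num_layers - 1):
    channels, bins = channels // 2, bins * 2   # intermediate deconvs
channels, bins = num_neurons, bins * 2         # final deconv outputs num_neurons channels
print(channels, bins)      # 16, 32 == (num_neurons, num_bins)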
Example #15
    def DCGANGenerator(self, n_samples, noise=None):
        kernel_width = self.width_kernel  # in the time dimension
        num_features = self.num_features
        conv1d_II.set_weights_stdev(0.02)
        deconv1d_II.set_weights_stdev(0.02)
        linear.set_weights_stdev(0.02)

        if noise is None:
            noise = tf.random_normal([n_samples, 128])

        output = linear.Linear('Generator.Input', 128,
                               int(num_features * self.num_bins), noise)
        print('GENERATOR. -------------------------------')
        print((output.get_shape()))
        print('0. -------------------------------')
        output = tf.reshape(output, [
            -1, num_features * 2**self.num_layers,
            int(self.num_bins / 2**self.num_layers)
        ])
        output = act_funct.LeakyReLU(output)
        print((output.get_shape()))
        print('1. -------------------------------')
        for ind_l in range(self.num_layers, 0, -1):
            if ind_l == 1:
                output = deconv1d_II.Deconv1D(
                    'Generator.' + str(self.num_layers - ind_l + 1),
                    num_features * 2**ind_l, self.num_neurons,
                    kernel_width, output,
                    num_bins=int(2**(self.num_layers - ind_l + 1) * self.num_bins / 2**self.num_layers))
            else:
                output = deconv1d_II.Deconv1D(
                    'Generator.' + str(self.num_layers - ind_l + 1),
                    num_features * 2**ind_l, num_features * 2**(ind_l - 1),
                    kernel_width, output,
                    num_bins=int(2**(self.num_layers - ind_l + 1) * self.num_bins / 2**self.num_layers))
            output = act_funct.LeakyReLU(output)
            print((output.get_shape()))
            print(
                str(self.num_layers - ind_l + 1) +
                '. -------------------------------')

        output = tf.sigmoid(output)

        conv1d_II.unset_weights_stdev()
        deconv1d_II.unset_weights_stdev()
        linear.unset_weights_stdev()
        output = tf.reshape(output, [-1, self.output_dim])
        print((output.get_shape()))
        print('6. -------------------------------')

        return output
Example #16
    def DCGANDiscriminator_sampler(self, inputs):
        kernel_width = self.width_kernel  # in the time dimension
        num_features = self.num_features
        #neurons are treated as different channels
        output = tf.reshape(inputs, [-1, self.num_neurons, self.num_bins])
        #initialize weights
        conv1d_II.set_weights_stdev(0.02)
        deconv1d_II.set_weights_stdev(0.02)
        linear.set_weights_stdev(0.02)
        outputs_mat = []
        filters_mat = []

        for ind_l in range(self.num_layers):
            if ind_l == 0:
                output, filters = conv1d_II.Conv1D(
                    'Discriminator.' + str(ind_l + 1),
                    self.num_neurons,
                    num_features * 2**(ind_l + 1),
                    kernel_width,
                    output,
                    stride=self.stride,
                    save_filter=True)
            else:
                output, filters = conv1d_II.Conv1D(
                    'Discriminator.' + str(ind_l + 1),
                    num_features * 2**(ind_l),
                    num_features * 2**(ind_l + 1),
                    kernel_width,
                    output,
                    stride=self.stride,
                    save_filter=True)
            output = act_funct.LeakyReLU(output)
            outputs_mat.append(output)
            filters_mat.append(filters)

        output = tf.reshape(output, [-1, int(num_features * self.num_bins)])
        output = linear.Linear('Discriminator.Output',
                               int(num_features * self.num_bins), 1, output)
        #unset weights
        conv1d_II.unset_weights_stdev()
        deconv1d_II.unset_weights_stdev()
        linear.unset_weights_stdev()

        return tf.reshape(output, [-1]), filters_mat, outputs_mat
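
The *_sampler variants (Examples #1, #8, #16) mirror their training counterparts but additionally return the learned filters and the per-layer activations, which is useful for inspecting a trained critic. A fetch sketch, where model, inputs_ph, sess and batch are hypothetical names for illustration:

score_op, filter_ops, act_ops = model.DCGANDiscriminator_sampler(inputs_ph)
score, filters, acts = sess.run([score_op, filter_ops, act_ops],
                                feed_dict={inputs_ph: batch})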
Example #17
    def FCDiscriminator(self, inputs, FC_DIM=512, n_layers=3):
        output = tf.reshape(inputs, [-1, self.num_neurons, self.num_bins])
        conv1d_II.set_weights_stdev(0.02)
        output = conv1d_II.Conv1D('Discriminator.Input',
                                  self.num_neurons,
                                  self.num_features,
                                  self.kernel_width,
                                  output,
                                  stride=1)
        output = tf.reshape(output, [-1, self.num_features * self.num_bins])
        output = act_funct.LeakyReLULayer('Discriminator.0',
                                          self.num_features * self.num_bins,
                                          FC_DIM, output)
        for i in range(n_layers - 1):
            output = act_funct.LeakyReLULayer('Discriminator.{}'.format(i + 1),
                                              FC_DIM, FC_DIM, output)
        output = linear.Linear('Discriminator.Out', FC_DIM, 1, output)
        conv1d_II.unset_weights_stdev()
        return tf.reshape(output, [-1])
Example #18
    def DCGANDiscriminator(self, inputs, print_arch=False):
        kernel_width = self.width_kernel  # in the time dimension
        num_features = self.num_features
        #neurons are treated as different channels
        output = tf.reshape(inputs, [-1, self.num_neurons, self.num_bins])
        conv1d_II.set_weights_stdev(0.02)
        deconv1d_II.set_weights_stdev(0.02)
        linear.set_weights_stdev(0.02)
        if print_arch:
            print('DISCRIMINATOR. -------------------------------')
            print(str(output.get_shape()) + ' input')
        for ind_l in range(self.num_layers):
            if ind_l == 0:
                output = conv1d_II.Conv1D('Discriminator.' + str(ind_l + 1),
                                          self.num_neurons,
                                          int(num_features * 2**(ind_l + 1)),
                                          int(kernel_width),
                                          output,
                                          stride=self.stride)
            else:
                output = conv1d_II.Conv1D('Discriminator.' + str(ind_l + 1),
                                          int(num_features * 2**(ind_l)),
                                          int(num_features * 2**(ind_l + 1)),
                                          int(kernel_width),
                                          output,
                                          stride=self.stride)
            output = act_funct.LeakyReLU(output)
            if print_arch:
                print(str(output.get_shape()) + ' layer ' + str(ind_l + 1))

        output = tf.reshape(output, [-1, int(num_features * self.num_bins)])
        if print_arch:
            print(str(output.get_shape()) + ' fully connected layer')
        output = linear.Linear('Discriminator.Output',
                               int(num_features * self.num_bins), 1, output)
        if print_arch:
            print(str(output.get_shape()) + ' output')
        conv1d_II.unset_weights_stdev()
        deconv1d_II.unset_weights_stdev()
        linear.unset_weights_stdev()

        return tf.reshape(output, [-1])
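
The final reshape in Example #18 relies on the conv stack preserving the product of channels and time bins: each strided layer takes the channel count from num_features * 2**l to num_features * 2**(l + 1) while the bin count shrinks by the stride, so with self.stride == 2 the flattened size after num_layers layers is num_features * 2**num_layers * num_bins / 2**num_layers = num_features * num_bins. This assumes self.stride == 2; other strides would require a different reshape size.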
Example #19
    def FCGenerator(self, n_samples, noise=None, print_arch=True):
        if noise is None:
            noise = tf.random_normal([n_samples, 128])
        if print_arch:
            print('GENERATOR. -------------------------------')
            print(str(noise.get_shape()) + ' latent variable')
        output = act_funct.ReLULayer('Generator.Input', 128, self.num_units,
                                     noise)
        if print_arch:
            print(str(output.get_shape()) + ' linear projection')
        for i in range(self.num_layers):
            output = act_funct.LeakyReLULayer('Generator.{}'.format(i),
                                              self.num_units, self.num_units,
                                              output)
            if print_arch:
                print(str(output.get_shape()) + ' layer ' + str(i))
        output = linear.Linear('Generator.Out', self.num_units,
                               self.output_dim, output)
        if print_arch:
            print(str(output.get_shape()) + ' output')

        output = tf.nn.sigmoid(output)

        return output