Code Example #1
def discriminator_net(x, training, opts, name='Discriminator'):
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
        sz = opts.img_size // 16
        # input is (sz*16) x (sz*16) x nc
        # state size. (sz*8) x (sz*8) x ndf
        y = conv(x, opts.ndf, 4, 2, 'same', 'conv1')
        y = leaky_relu(y, 0.2)

        # state size. (sz*4) x (sz*4) x (ndf*2)
        y = conv(y, opts.ndf * 2, 4, 2, 'same', 'conv2')
        y = leaky_relu(y, 0.2)

        # state size. (sz*2) x (sz*2) x (ndf*4)
        y = conv(y, opts.ndf * 4, 4, 2, 'same', 'conv3')
        y = leaky_relu(y, 0.2)

        # state size. sz x sz x (ndf*8)
        y = conv(y, opts.ndf * 8, 4, 2, 'same', 'conv4')
        y = leaky_relu(y, 0.2)

        flatten = tf.reshape(y, (opts.batch_size, sz * sz * opts.ndf * 8))

        # discriminator output
        logits = conv(y, 1, sz, 1, 'valid', 'conv5')
        logits = tf.reshape(logits, (-1, 1))

    with tf.variable_scope('QNet', reuse=tf.AUTO_REUSE):
        # Q output: auxiliary head predicting the categorical + continuous latent codes (InfoGAN-style)
        dim = opts.num_categorical + opts.num_continuous
        q = tf.layers.dense(flatten, dim)

    return logits, q
Code Example #2
def discriminator_net(x, training, opts, name='Discriminator'):
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
        sz = opts.img_size // 16
        # input is (sz*16) x (sz*16) x nc
        # state size. (sz*8) x (sz*8) x ndf
        y = conv(x, opts.ndf, 4, 2, 'same', 'conv1')
        y = leaky_relu(y, 0.2)

        # state size. (sz*4) x (sz*4) x (ndf*2)
        y = conv(y, opts.ndf * 2, 4, 2, 'same', 'conv2')
        y = leaky_relu(y, 0.2)

        # state size. (sz*2) x (sz*2) x (ndf*4)
        y = conv(y, opts.ndf * 4, 4, 2, 'same', 'conv3')
        y = leaky_relu(y, 0.2)

        # state size. sz x sz x (ndf*8)
        y = conv(y, opts.ndf * 8, 4, 2, 'same', 'conv4')
        y = leaky_relu(y, 0.2)

        # output
        y = conv(y, 1, sz, 1, 'valid', 'conv5')
        logits = tf.reshape(y, (-1, 1))

        return logits
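
Code Examples #1 and #2 call project-local conv and leaky_relu helpers rather than TensorFlow ops directly. A minimal sketch of what they plausibly wrap, assuming conv is a thin wrapper over tf.layers.conv2d with the argument order used above (filters, kernel size, stride, padding, name); the actual helpers in the source project may differ:

import tensorflow as tf  # TF 1.x, matching the tf.variable_scope / tf.layers usage above

def conv(x, filters, kernel_size, stride, padding, name):
    # assumed thin wrapper matching the call sites in Examples #1 and #2
    return tf.layers.conv2d(x, filters, kernel_size, strides=stride,
                            padding=padding, name=name)

def leaky_relu(x, alpha=0.2):
    # leaky ReLU: identity for x >= 0, slope alpha for x < 0
    return tf.maximum(alpha * x, x)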
Code Example #3
def discriminator(x, y, batch_size, y_dim, reuse=False):
    with tf.variable_scope("discriminator", reuse=reuse):
        init = tf.contrib.layers.xavier_initializer(uniform=True)

        x_img = tf.reshape(x, [batch_size, 28, 28, 1])
        yb = tf.reshape(y, [batch_size, 1, 1, y_dim])
        x = conv_cond_concat(x_img, yb)

        h0 = tf.layers.conv2d(x,
                              1 + y_dim,
                              5,
                              strides=2,
                              activation=leaky_relu,
                              padding='same',
                              kernel_initializer=init)
        h0 = conv_cond_concat(h0, yb)

        h1 = tf.layers.conv2d(h0,
                              64 + y_dim,
                              5,
                              strides=2,
                              padding='same',
                              kernel_initializer=init)
        h1 = tf.layers.batch_normalization(h1, training=True)
        h1 = leaky_relu(h1)
        h1 = tf.reshape(h1, [batch_size, -1])
        h1 = tf.concat([h1, y], 1)

        h2 = tf.layers.dense(h1, 1024, kernel_initializer=init)
        h2 = tf.layers.batch_normalization(h2, training=True)
        h2 = leaky_relu(h2)
        h2 = tf.concat([h2, y], 1)

        logits = tf.layers.dense(h2, 1, kernel_initializer=init)
        return logits
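
Code Example #3 conditions the discriminator on labels through a conv_cond_concat helper. A minimal sketch, assuming the common DCGAN-tensorflow-style implementation that tiles the [batch, 1, 1, y_dim] label tensor over the spatial dimensions and appends it as extra channels:

def conv_cond_concat(x, y):
    # broadcast y over x's spatial dims, then concatenate on the channel axis
    x_shape = x.get_shape().as_list()
    y_dim = y.get_shape().as_list()[3]
    ones = tf.ones([tf.shape(x)[0], x_shape[1], x_shape[2], y_dim])
    return tf.concat([x, y * ones], axis=3)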
Code Example #4
File: modelSVR.py  Project: dkasuga/IM-NET-pytorch_tf
    def __call__(self, view, training=False):

        layer_0 = self.bn_0(self.conv_0(1 - view), training=training)
        layer_0 = leaky_relu(layer_0, 0.02)

        layer_1 = self.res_1(layer_0, training=training)
        layer_2 = self.res_2(layer_1, training=training)

        layer_3 = self.res_3(layer_2, training=training)
        layer_4 = self.res_4(layer_3, training=training)

        layer_5 = self.res_5(layer_4, training=training)
        layer_6 = self.res_6(layer_5, training=training)

        layer_7 = self.res_7(layer_6, training=training)
        layer_8 = self.res_8(layer_7, training=training)

        layer_9 = self.bn_9(self.conv_9(layer_8), training=training)
        layer_9 = leaky_relu(layer_9, 0.02)

        layer_10 = self.conv_10(layer_9)
        layer_10 = tf.reshape(layer_10, [-1, self.z_dim])
        layer_10 = tf.keras.activations.sigmoid(layer_10)

        return layer_10
Code Example #5
File: DCCA.py  Project: Badstu/CAKT
    def Fe(self, Y, keep_prob):
        hidden1 = tf.nn.dropout(
            utils.leaky_relu(tf.matmul(Y, self.We1) + self.be1), keep_prob)
        pred = tf.nn.dropout(
            utils.leaky_relu(tf.matmul(hidden1, self.We2) + self.be2),
            keep_prob)
        return pred
Code Example #6
    def __init__(self, emb):
        super().__init__()

        self.net = nn.Sequential(nn.Linear(emb, emb), leaky_relu(0.2),
                                 nn.Linear(emb, 512), leaky_relu(0.2),
                                 nn.Linear(512, 1024), leaky_relu(0.2),
                                 nn.Linear(1024, 2048), leaky_relu(0.2),
                                 nn.Linear(2048, 4096), nn.Sigmoid())
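
Here (and in the PyTorch snippets in Examples #13 and #29) leaky_relu is called with only a slope and the result is placed inside nn.Sequential, so in this codebase it is presumably a small factory around nn.LeakyReLU rather than the functional form. A minimal sketch under that assumption:

import torch.nn as nn

def leaky_relu(p=0.2):
    # factory: returns an nn.Module so it can sit inside nn.Sequential
    return nn.LeakyReLU(p)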
Code Example #7
File: model.py  Project: gepettolab/CartoonGAN
def multi_patch_discriminator(input_tensor,
                              patch_size,
                              name='discriminator',
                              reuse=False,
                              use_bn=True):
    # input size: 36x36
    with tf.variable_scope(name, reuse=reuse):
        patch_conv_layers = []
        for i in range(4):
            batch_size = tf.shape(input_tensor)[0]
            patch = tf.random_crop(input_tensor,
                                   [batch_size, patch_size, patch_size, 3])
            patch_conv = utils.conv_sn(patch,
                                       32,
                                       3,
                                       name='patch_conv' + str(i))
            if use_bn:
                norm_p = batch_norm(patch_conv, training=True)
            else:
                norm_p = tf.contrib.layers.layer_norm(patch_conv)
            relu_p = utils.leaky_relu(norm_p)
            patch_conv_layers.append(relu_p)

        patch_concat = tf.concat(patch_conv_layers, axis=-1)

        conv1 = utils.conv_sn(patch_concat, 128, 3, stride=2, name='conv1')
        if use_bn:
            norm1 = batch_norm(conv1, training=True)
        else:
            norm1 = tf.contrib.layers.layer_norm(conv1)
        relu1 = utils.leaky_relu(norm1)

        conv2 = utils.conv_sn(relu1, 256, 3, name='conv2')
        if use_bn:
            norm2 = batch_norm(conv2, training=True)
        else:
            norm2 = tf.contrib.layers.layer_norm(conv2)
        relu2 = utils.leaky_relu(norm2)

        conv3 = utils.conv_sn(relu2, 256, 3, stride=2, name='conv3')
        if use_bn:
            norm3 = batch_norm(conv3, training=True)
        else:
            norm3 = tf.contrib.layers.layer_norm(conv3)
        relu3 = utils.leaky_relu(norm3)

        conv4 = utils.conv_sn(relu3, 512, 3, name='conv4')
        if use_bn:
            norm4 = batch_norm(conv4, training=True)
        else:
            norm4 = tf.contrib.layers.layer_norm(conv4)
        relu4 = utils.leaky_relu(norm4)

        conv_out = utils.conv_sn(relu4, 1, 1, name='conv7')
        avg_pool = tf.reduce_mean(conv_out, axis=[1, 2])

        return avg_pool
Code Example #8
File: DCCA.py  Project: Badstu/CAKT
    def Fx(self, X, keep_prob):
        hidden1 = tf.nn.dropout(
            utils.leaky_relu(tf.matmul(X, self.Wx1) + self.bx1), keep_prob)
        hidden2 = tf.nn.dropout(
            utils.leaky_relu(tf.matmul(hidden1, self.Wx2) + self.bx2),
            keep_prob)
        hidden3 = tf.nn.dropout(
            utils.leaky_relu(tf.matmul(hidden2, self.Wx3) + self.bx3),
            keep_prob)
        return hidden3
Code Example #9
File: modelSVR.py  Project: dkasuga/IM-NET-pytorch_tf
    def __call__(self, input, training=False):
        if self.dim_in == self.dim_out:
            # identity shortcut: the input is added straight onto the block output
            output = self.bn_1(self.conv_1(input), training=training)
            output = leaky_relu(output, 0.02)
            output = self.bn_2(self.conv_2(output), training=training)
            output = output + input
            output = leaky_relu(output, 0.02)
        else:
            # channel counts differ: pass the input through a learned
            # shortcut (conv_s + bn_s) so shapes match for the residual add
            output = self.bn_1(self.conv_1(input), training=training)
            output = leaky_relu(output, 0.02)
            output = self.bn_2(self.conv_2(output), training=training)
            input_ = self.bn_s(self.conv_s(input), training=training)
            output = output + input_
            output = leaky_relu(output, 0.02)
        return output
Code Example #10
File: adan.py  Project: helang818/adan
    def discriminator(self, x, reuse=False):
        with tf.variable_scope('discriminator', reuse=reuse):
            w_init = tf.contrib.layers.xavier_initializer()

            dense1 = tf.layers.dense(x, 100, kernel_initializer=w_init)
            relu1 = leaky_relu(dense1, 0.2)
            #relu1 = tf.nn.tanh(dense1)

            dense2 = tf.layers.dense(relu1, 100, kernel_initializer=w_init)
            relu2 = leaky_relu(dense2, 0.2)
            #relu2 = tf.nn.tanh(dense2)

            validity = tf.layers.dense(relu2, 1, kernel_initializer=w_init)
            prob = tf.nn.sigmoid(validity)

            return validity, prob
Code Example #11
    def run_model(self, data, keep_prob):
        _, reshaped = self.run_cnn(data, keep_prob)
        #f1 = tf.nn.dropout(utils.leaky_relu(tf.matmul(reshaped, self.Wf1) + self.bf1), keep_prob)
        #f2 = tf.nn.dropout(utils.leaky_relu(tf.matmul(f1, self.Wf2) + self.bf2), keep_prob)
        fc = tf.nn.dropout(utils.leaky_relu(tf.matmul(reshaped, self.Wf3) + self.bf3), keep_prob)
        output = tf.matmul(fc, self.Wf4) + self.bf4
        return output
Code Example #12
File: model.py  Project: nlpaueb/aueb-bioasq6
    def GetQDScore(self, qwords, qreps, dwords, dreps, extra):
        nq = len(qreps)
        nd = len(dreps)
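        # one gate score per query term from [term vector, idf(term)], softmax-normalised below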
        qgl = [
            self.W_gate.expr() *
            dy.concatenate([qv, dy.constant(1, self.idf_val(qw))])
            for qv, qw in zip(qreps, qwords)
        ]
        qgates = dy.softmax(dy.concatenate(qgl))

        qscores = []
        for qtok in range(len(qreps)):
            qrep = qreps[qtok]
            att_scores = [dy.dot_product(qrep, drep) for drep in dreps]
            att_probs = dy.softmax(dy.concatenate(att_scores))
            doc_rep = dy.esum([v * p for p, v in zip(att_probs, dreps)])
            input_vec = dy.cmult(qrep, doc_rep)
            #input_dot = dy.sum_elems(input_vec)
            #input_len = dy.l2_norm(qrep - doc_rep)
            #input_vec = dy.concatenate([input_vec, input_dot, input_len])

            layer = utils.leaky_relu(self.b_term.expr() +
                                     self.W_term.expr() * input_vec)
            score = (self.b_term2.expr() + self.W_term2.expr() * layer)
            qscores.append(score)

        # Final scores and ultimate classifier.
        qterm_score = dy.dot_product(dy.concatenate(qscores), qgates)

        fin_score = (
            self.b_final.expr() +
            self.W_final.expr() * dy.concatenate([qterm_score, extra]))
        return fin_score
Code Example #13
    def __init__(self, emb, depth):
        super().__init__()

        layers = []
        for i in range(depth):
            layers.extend([nn.Linear(emb, emb), leaky_relu(0.2)])

        self.net = nn.Sequential(*layers)
Code Example #14
File: adan.py  Project: helang818/adan
    def classification(self, x, y_dim, reuse=False):
        with tf.variable_scope('classifier', reuse=reuse):
            w_init = tf.contrib.layers.xavier_initializer()

            dense1 = tf.layers.dense(x, 400, kernel_initializer=w_init)
            relu1 = leaky_relu(dense1, 0.2)
            #            relu1 = tf.nn.tanh(dense1)
            #            relu1 = tf.nn.dropout(relu1,0.5)

            dense2 = tf.layers.dense(relu1, 400, kernel_initializer=w_init)
            relu2 = leaky_relu(dense2, 0.2)
            #            relu2 = tf.nn.tanh(dense2)
            #            relu2 = tf.nn.dropout(relu2,0.5)

            y_label = tf.layers.dense(relu2, y_dim, kernel_initializer=w_init)
            y_ = tf.nn.softmax(y_label, name='emotion_layer')

            return y_label, y_
Code Example #15
    def run_model(self, data, keep_prob):
        _, reshaped = self.run_cnn(data, keep_prob)
        #f1 = tf.nn.dropout(utils.leaky_relu(tf.matmul(reshaped, self.Wf1) + self.bf1), keep_prob)
        #f2 = tf.nn.dropout(utils.leaky_relu(tf.matmul(f1, self.Wf2) + self.bf2), keep_prob)
        fc = tf.nn.dropout(
            utils.leaky_relu(tf.matmul(reshaped, self.Wf3) + self.bf3),
            keep_prob)
        output = tf.matmul(fc, self.Wf4) + self.bf4
        return output
Code Example #16
    def __call__(self, inputs, training=False):
        d_1 = self.in_1(self.conv_1(inputs), training=training)
        d_1 = leaky_relu(d_1, 0.02)

        d_2 = self.in_2(self.conv_2(d_1), training=training)
        d_2 = leaky_relu(d_2, 0.02)

        d_3 = self.in_3(self.conv_3(d_2), training=training)
        d_3 = leaky_relu(d_3, 0.02)

        d_4 = self.in_4(self.conv_4(d_3), training=training)
        d_4 = leaky_relu(d_4, 0.02)

        d_5 = self.conv_5(d_4)
        d_5 = tf.reshape(d_5, [-1, self.z_dim])

        d_5 = tf.keras.activations.sigmoid(d_5)

        return d_5
Code Example #17
File: model.py  Project: abyssparanoia/GAN-in-mnist
    def discriminate(self, img):
        #1st layer
        conv1 = tf.nn.conv2d(
            img, self.d_W1, strides=[1, 2, 2, 1], padding='SAME')
        y1 = leaky_relu(conv1)

        #2nd layer
        conv2 = tf.nn.conv2d(
            y1, self.d_W2, strides=[1, 2, 2, 1], padding="SAME") + self.d_b2
        y2 = leaky_relu(conv2)

        #3rd layer
        vec, _ = tensor_to_vector(y2)
        fc1 = tf.matmul(vec, self.d_W3) + self.d_b3
        y3 = leaky_relu(fc1)

        #4th layer
        fc2 = tf.matmul(y3, self.d_W4) + self.d_b4

        return fc2
Code Example #18
File: adan.py  Project: helang818/adan
    def generator(self, z, y, out_dim, reuse=False):
        with tf.variable_scope('generator', reuse=reuse):
            w_init = tf.contrib.layers.xavier_initializer()
            cat1 = tf.concat([z, y], 1)

            dense1 = tf.layers.dense(cat1, 100, kernel_initializer=w_init)
            relu1 = leaky_relu(dense1, 0.2)
            #            relu1 = tf.nn.tanh(dense1)
            #            relu1 = tf.nn.dropout(relu1,rate)

            dense2 = tf.layers.dense(relu1, 100, kernel_initializer=w_init)
            relu2 = leaky_relu(dense2, 0.2)
            #            relu2 = tf.nn.tanh(dense2)
            #            relu2 = tf.nn.dropout(relu2,rate)

            output_layer = tf.layers.dense(relu2,
                                           out_dim,
                                           kernel_initializer=w_init)
            output_layer = tf.nn.tanh(output_layer)

            return output_layer
Code Example #19
def discriminator(x, alpha=0.2, reuse=False):
    with tf.variable_scope('discriminator', reuse=reuse):
        # Input layer is 32x32x3
        x1 = tf.layers.conv2d(x, 64, 5, strides=2, padding='same')
        x1 = leaky_relu(x1, alpha=alpha)
        # 16x16x64
        
        x2 = tf.layers.conv2d(x1, 128, 5, strides=2, padding='same')
        x2 = tf.layers.batch_normalization(x2, training=True)
        x2 = leaky_relu(x2, alpha=alpha)
        # 8x8x128
        
        x3 = tf.layers.conv2d(x2, 256, 5, strides=2, padding='same')
        x3 = tf.layers.batch_normalization(x3, training=True)
        x3 = leaky_relu(x3, alpha=alpha)
        # 4x4x256

        # Flatten it
        x3_flat = tf.reshape(x3, (-1, 4*4*256))
        logits = tf.layers.dense(x3_flat, 1)
        return logits
Code Example #20
    def __call__(self, points, z, training=False):
        zs = tf.broadcast_to(tf.reshape(z, [-1, 1, self.z_dim]),
                             [z.shape[0], points.shape[1], self.z_dim])
        pointz = tf.concat([points, zs], axis=2)

        l1 = self.linear_1(pointz)
        l1 = leaky_relu(l1, 0.02)

        l2 = self.linear_2(l1)
        l2 = leaky_relu(l2, 0.02)

        l3 = self.linear_3(l2)
        l3 = leaky_relu(l3, 0.02)

        l4 = self.linear_4(l3)
        l4 = leaky_relu(l4, 0.02)

        l5 = self.linear_5(l4)
        l5 = leaky_relu(l5, 0.02)

        l6 = self.linear_6(l5)
        l6 = leaky_relu(l6, 0.02)

        l7 = self.linear_7(l6)

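        # soft clamp to [0, 1]: slope 0.01 outside the range keeps a small gradient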
        l7 = tf.math.maximum(tf.math.minimum(l7, l7 * 0.01 + 0.99), l7 * 0.01)

        return l7
Code Example #21
File: gan_model.py  Project: zfxu/GAN-tensorflow
    def discriminator_template(self, input):
        with tf.name_scope("discriminator_network"):
            h1 = tf.layers.conv2d(
                input,
                64, (4, 4), (2, 2),
                name='conv1',
                kernel_initializer=tf.truncated_normal_initializer(stddev=.02))
            ################################################################################
            h2 = tf.layers.conv2d(
                h1,
                128, (4, 4), (2, 2),
                name='conv2',
                kernel_initializer=tf.truncated_normal_initializer(stddev=.02))
            bn_h2 = leaky_relu(
                tf.layers.batch_normalization(h2,
                                              momentum=.9,
                                              epsilon=1e-5,
                                              training=self.is_training))
            flatten_h2 = tf.contrib.layers.flatten(bn_h2)
            ################################################################################
            h3 = tf.layers.dense(
                flatten_h2,
                1024,
                name='fc1',
                kernel_initializer=tf.random_normal_initializer(stddev=.02))
            bn_h3 = leaky_relu(
                tf.layers.batch_normalization(h3,
                                              momentum=.9,
                                              epsilon=1e-5,
                                              training=self.is_training))
            ################################################################################

            logits = tf.layers.dense(
                bn_h3,
                1,
                name='output',
                kernel_initializer=tf.random_normal_initializer(stddev=.02))
            out = tf.nn.sigmoid(logits)
            return out, logits
Code Example #22
def discriminator(x, reuse=False):
    with tf.variable_scope("discriminator", reuse=reuse):
        init = tf.contrib.layers.xavier_initializer(uniform=True)

        h0 = tf.layers.conv2d(x, 64, 5, strides=2, padding='same', kernel_initializer=init)
        h0 = leaky_relu(h0)

        h1 = tf.layers.conv2d(h0, 64*2, 5, strides=2, padding='same', kernel_initializer=init)
        h1 = tf.layers.batch_normalization(h1, training=True)
        h1 = leaky_relu(h1)

        h2 = tf.layers.conv2d(h1, 64*4, 5, strides=2, padding='same', kernel_initializer=init)
        h2 = tf.layers.batch_normalization(h2, training=True)
        h2 = leaky_relu(h2)
        
        h3 = tf.layers.conv2d(h2, 64*8, 5, strides=2, padding='same', kernel_initializer=init)
        h3 = tf.layers.batch_normalization(h3, training=True)
        h3 = leaky_relu(h3)
        
        shape = int(np.prod(h3.get_shape()[1:]))
        h3_flat = tf.reshape(h3, [-1, shape])
        h4 = tf.layers.dense(h3_flat, 1, kernel_initializer=init)
        return h4
Code Example #23
File: adan.py  Project: helang818/adan
    def decoder(self, h, fdim, reuse=False):
        with tf.variable_scope('decoder', reuse=reuse):
            w_init = tf.contrib.layers.xavier_initializer()

            dense1 = tf.layers.dense(h, 800, kernel_initializer=w_init)
            relu1 = leaky_relu(dense1, 0.2)
            relu1 = tf.layers.batch_normalization(relu1)
            #            relu1 = tf.nn.tanh(dense1)
            #            relu1 = tf.nn.dropout(relu1,0.5)

            dense2 = tf.layers.dense(relu1, 800, kernel_initializer=w_init)
            relu2 = leaky_relu(dense2, 0.2)
            relu2 = tf.layers.batch_normalization(relu2)
            #            relu2 = tf.nn.tanh(dense2)
            #            relu2 = tf.nn.dropout(relu2,0.5)

            output_layer = tf.layers.dense(relu2,
                                           fdim,
                                           kernel_initializer=w_init)
            output_layer = tf.nn.tanh(output_layer)

            return output_layer
Code Example #24
File: model.py  Project: gepettolab/CartoonGAN
def patch_discriminator(input_tensor,
                        patch_size,
                        name='discriminator',
                        reuse=False,
                        use_bn=True):
    # input size: 32x32
    with tf.variable_scope(name, reuse=reuse):

        batch_size = tf.shape(input_tensor)[0]
        patch = tf.random_crop(input_tensor,
                               [batch_size, patch_size, patch_size, 3])

        conv1 = utils.conv_sn(patch, 32, 3, name='conv1')
        if use_bn:
            norm1 = batch_norm(conv1, training=True)
        else:
            norm1 = tf.contrib.layers.layer_norm(conv1)
        relu1 = utils.leaky_relu(norm1)

        conv2 = utils.conv_sn(relu1, 32, 3, stride=2, name='conv2')
        if use_bn:
            norm2 = batch_norm(conv2, training=True)
        else:
            norm2 = tf.contrib.layers.layer_norm(conv2)
        relu2 = utils.leaky_relu(norm2)

        conv3 = utils.conv_sn(relu2, 64, 3, name='conv3')
        if use_bn:
            norm3 = batch_norm(conv3, training=True)
        else:
            norm3 = tf.contrib.layers.layer_norm(conv3)
        relu3 = utils.leaky_relu(norm3)

        conv4 = utils.conv_sn(relu3, 64, 3, stride=2, name='conv4')
        if use_bn:
            norm4 = batch_norm(conv4, training=True)
        else:
            norm4 = tf.contrib.layers.layer_norm(conv4)
        relu4 = utils.leaky_relu(norm4)

        conv5 = utils.conv_sn(relu4, 128, 3, name='conv5')
        if use_bn:
            norm5 = batch_norm(conv5, training=True)
        else:
            norm5 = tf.contrib.layers.layer_norm(conv5)
        relu5 = utils.leaky_relu(norm5)

        conv6 = utils.conv_sn(relu5, 128, 3, stride=2, name='conv6')
        if use_bn:
            norm6 = batch_norm(conv6, training=True)
        else:
            norm6 = tf.contrib.layers.layer_norm(conv6)
        relu6 = utils.leaky_relu(norm6)

        conv_out = utils.conv_sn(relu6, 1, 1, name='conv7')
        avg_pool = tf.reduce_mean(conv_out, axis=[1, 2])

        return avg_pool
Code Example #25
File: model.py  Project: nlpaueb/aueb-bioasq6
    def Conv(self, input_vecs):
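        # trigram convolution: each token vector is combined with its left/right neighbours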
        vecs_tri = []
        for tok in range(len(input_vecs)):
            ptok = (input_vecs[tok - 1] if tok > 0 else self.pad[0])
            ntok = (input_vecs[tok + 1]
                    if tok < len(input_vecs) - 1 else self.pad[1])
            ctok = input_vecs[tok]
            input_vec = dy.concatenate([ctok, ptok, ntok])
            cvec = utils.leaky_relu(self.W_conv.expr() * input_vec +
                                    self.b_conv.expr())
            vecs_tri.append(cvec)

        conv_vecs = [dy.esum([iv, tv]) for iv, tv in zip(input_vecs, vecs_tri)]
        return conv_vecs
Code Example #26
File: adan.py  Project: helang818/adan
    def encoder(self, x, out_dim):
        with tf.variable_scope('encoder'):
            w_init = tf.contrib.layers.xavier_initializer()

            dense1 = tf.layers.dense(x, 800, kernel_initializer=w_init)
            relu1 = leaky_relu(dense1, 0.2)
            relu1 = tf.layers.batch_normalization(relu1)
            #            relu1 = tf.nn.tanh(dense1)
            #            relu1 = tf.nn.dropout(relu1,0.5)

            dense2 = tf.layers.dense(relu1, 800, kernel_initializer=w_init)
            relu2 = leaky_relu(dense2, 0.2)
            relu2 = tf.layers.batch_normalization(relu2)
            #            relu2 = tf.nn.tanh(dense2)
            #            relu2 = tf.nn.dropout(relu2,0.5)

            output_layer = tf.layers.dense(relu2,
                                           out_dim,
                                           kernel_initializer=w_init,
                                           name='encode_layer')
            output_layer = tf.nn.tanh(output_layer)

            return output_layer
Code Example #27
File: layer.py  Project: kuntzer/pylae
	def activate(self, activation):
		
		if(self.visible_type == "SIGMOID"):
			m_output = utils.sigmoid(activation)
		elif(self.visible_type == "RELU"):
			m_output = utils.relu(activation)
		elif(self.visible_type == "LEAKY_RELU"):
			m_output = utils.leaky_relu(activation)
		elif(self.visible_type == "LINEAR"):
			m_output = activation
		else:
			raise NotImplementedError("Unrecognised visible type")
	
		return m_output
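
Unlike the TensorFlow and PyTorch examples, this project (pylae) operates on NumPy arrays, so its utils.leaky_relu is presumably a plain elementwise array op. A minimal sketch under that assumption; the slope pylae actually uses is not shown here:

import numpy as np

def leaky_relu(x, alpha=0.01):
    # elementwise: x where x > 0, alpha * x otherwise
    return np.where(x > 0, x, alpha * x)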
Code Example #28
def generator(z, output_dim=3, alpha=0.2, reuse=False, training=True):
    with tf.variable_scope('generator', reuse=reuse):
        x1 = tf.layers.dense(z, 4*4*512)
        x1 = tf.reshape(x1, (-1, 4, 4, 512))
        x1 = tf.layers.batch_normalization(x1, training=training)
        x1 = leaky_relu(x1, alpha=alpha)
        # 4x4x512 now
        
        x2 = tf.layers.conv2d_transpose(x1, 256, 5, strides=2, padding='same')
        x2 = tf.layers.batch_normalization(x2, training=training)
        x2 = leaky_relu(x2, alpha=alpha)
        # 8x8x256 now
        
        x3 = tf.layers.conv2d_transpose(x2, 128, 5, strides=2, padding='same')
        x3 = tf.layers.batch_normalization(x3, training=training)
        x3 = leaky_relu(x3, alpha=alpha)
        # 16x16x128 now
        
        # Output layer
        logits = tf.layers.conv2d_transpose(x3, output_dim, 5, strides=2, padding='same')
        # 32x32x3 now
        
        img = tf.tanh(logits)
        return img
Code Example #29
    def __init__(self,
                 latent_dim,
                 input_channels,
                 filters,
                 upsample=True,
                 upsample_rgb=True,
                 rgba=False):
        super().__init__()
        self.upsample = nn.Upsample(scale_factor=2,
                                    mode='bilinear',
                                    align_corners=False) if upsample else None

        self.to_style1 = nn.Linear(latent_dim, input_channels)
        self.to_noise1 = nn.Linear(1, filters)
        self.conv1 = Conv2DMod(input_channels, filters, 3)

        self.to_style2 = nn.Linear(latent_dim, filters)
        self.to_noise2 = nn.Linear(1, filters)
        self.conv2 = Conv2DMod(filters, filters, 3)

        self.activation = leaky_relu(0.2)
        self.to_rgb = RGBBlock(latent_dim, filters, upsample_rgb, rgba)
Code Example #30
  def GetPOSIT(self, qvecs, sims, w2v_sims, matches):
    qscores = []
    for qtok in range(len(qvecs)):
      # Basic matches, max-sim, average-kmax-sim, exact match
      svec = dy.concatenate(sims[qtok])
      sim = dy.kmax_pooling(dy.transpose(svec), 1)[0]
      sim5 = dy.mean_elems(dy.kmax_pooling(dy.transpose(svec), 5)[0])
      wvec = dy.concatenate(w2v_sims[qtok])
      wsim = dy.kmax_pooling(dy.transpose(wvec), 1)[0]
      wsim5 = dy.mean_elems(dy.kmax_pooling(dy.transpose(wvec), 5)[0])
      mvec = dy.concatenate(matches[qtok])
      msim = dy.kmax_pooling(dy.transpose(mvec), 1)[0]
      msim5 = dy.mean_elems(dy.kmax_pooling(dy.transpose(mvec), 5)[0])
      layer1 = (self.W_term1.expr() *
                dy.concatenate(
                    [sim, sim5,
                     wsim, wsim5,
                     msim, msim5
                    ]) +
                self.b_term1.expr())
      qscores.append(self.W_term.expr() * utils.leaky_relu(layer1))

    return qscores
Code Example #31
def leaky_relu(x, name="leaky_relu"):
    return utils.leaky_relu(x, alpha=0.2, name=name)
Code Example #32
    def Fd(self, input, keep_prob):
        hidden1 = tf.nn.dropout(utils.leaky_relu(tf.matmul(input, self.Wd1) + self.bd1), keep_prob)
        y_pred = tf.matmul(hidden1, self.Wd2) + self.bd2
        return y_pred