def discrim(X, w, w2, g2, b2, w3, g3, b3, wy, by):
    h = lrelu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2)))
    h2 = lrelu(batchnorm(dnn_conv(h, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2))
    h3 = lrelu(batchnorm(dnn_conv(h2, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3))
    h3 = T.flatten(h3, 2)
    y = -softplus(T.dot(h3, wy)+by)
    return y
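
The critic above returns -softplus(logit), i.e. a log-probability-style score rather than a sigmoid output. Below is a minimal sketch, not taken from the original repository, of how it could be compiled into a callable: it assumes 64x64 RGB inputs, 5x5 filters, and that discrim, lrelu, dnn_conv, batchnorm and softplus are already importable; the sharedX helper and all shapes are assumptions.

# Hypothetical wiring for discrim() above; shapes and the sharedX helper are assumptions.
import numpy as np
import theano
import theano.tensor as T

def sharedX(value):
    # wrap a numpy array as a floatX Theano shared variable
    return theano.shared(np.asarray(value, dtype=theano.config.floatX))

X = T.tensor4('X')                                       # assumed (batch, 3, 64, 64)
w  = sharedX(0.02 * np.random.randn(64, 3, 5, 5))        # layer-1 filters
w2 = sharedX(0.02 * np.random.randn(128, 64, 5, 5))      # layer-2 filters
g2, b2 = sharedX(np.ones(128)), sharedX(np.zeros(128))   # batchnorm gain / bias
w3 = sharedX(0.02 * np.random.randn(256, 128, 5, 5))     # layer-3 filters
g3, b3 = sharedX(np.ones(256)), sharedX(np.zeros(256))
wy = sharedX(0.02 * np.random.randn(256 * 8 * 8, 1))     # 64 -> 32 -> 16 -> 8 spatial size
by = sharedX(np.zeros(1))

score = discrim(X, w, w2, g2, b2, w3, g3, b3, wy, by)
f_discrim = theano.function([X], score)                  # per-sample -softplus(logit)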
Example #2
def discrim(X, w, w2, g2, b2, w3, g3, b3, w4, g4, b4, w5, g5, b5, w6, g6, b6,
            wy):
    h = lrelu(dnn_conv(X, w, subsample=(1, 1), border_mode=(1, 1)))
    h2 = lrelu(
        batchnorm(dnn_conv(h, w2, subsample=(2, 2), border_mode=(1, 1)),
                  g=g2,
                  b=b2))
    h3 = lrelu(
        batchnorm(dnn_conv(h2, w3, subsample=(1, 1), border_mode=(1, 1)),
                  g=g3,
                  b=b3))
    h4 = lrelu(
        batchnorm(dnn_conv(h3, w4, subsample=(2, 2), border_mode=(1, 1)),
                  g=g4,
                  b=b4))
    h5 = lrelu(
        batchnorm(dnn_conv(h4, w5, subsample=(1, 1), border_mode=(1, 1)),
                  g=g5,
                  b=b5))
    h6 = lrelu(
        batchnorm(dnn_conv(h5, w6, subsample=(2, 2), border_mode=(1, 1)),
                  g=g6,
                  b=b6))
    h6 = T.flatten(h6, 2)
    y = sigmoid(T.dot(h6, wy))
    return y
Example #3
def discrim( t, w1, w2, g2, b2, w3, g3, b3, w4, g4, b4, w5 ):
    h1 = lrelu( dnn_conv( t, w1, subsample=( 2, 2 ), border_mode = ( 2, 2 ) ) )
    h2 = lrelu( batchnorm( dnn_conv( h1, w2, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g2, b = b2 ) )
    h3 = lrelu( batchnorm( dnn_conv( h2, w3, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g3, b = b3 ) )
    h4 = lrelu( batchnorm( dnn_conv( h3, w4, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g4, b = b4 ) )
    yd = sigmoid( T.dot( T.flatten( h4, 2 ), w5 ) )
    return yd
Example #4
 def discrim(X, w, w2, w3, wy):
     h = lrelu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2)))
     h2 = lrelu(batchnorm(dnn_conv(h, w2, subsample=(2, 2), border_mode=(2, 2))))
     h2 = T.flatten(h2, 2)
     h3 = lrelu(batchnorm(T.dot(h2, w3)))
     y = sigmoid(T.dot(h3, wy))
     return y
Example #5
def discrim(X, w, b, w2, g2, b2, w3, g3, b3, w4, g4, b4, wy, wy1):
    h0 = dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2))
    if args.db1:
        h0 += b.dimshuffle('x', 0, 'x', 'x')
    h1 = lrelu(h0)
    h1 = dropout(h1, args.dropout)
    h1 = dnn_conv(h1, w2, subsample=(2, 2), border_mode=(2, 2))
    if args.dbn:
        h1 = batchnorm(h1, g=g2, b=b2)
    else:
        h1 += b2.dimshuffle('x', 0, 'x', 'x')
    h2 = lrelu(h1)
    h2 = dropout(h2, args.dropout)
    h2 = dnn_conv(h2, w3, subsample=(2, 2), border_mode=(2, 2))
    if args.dbn:
        h2 = batchnorm(h2, g=g3, b=b3)
    else:
        h2 += b3.dimshuffle('x', 0, 'x', 'x')
    h3 = lrelu(h2)
    h3 = dropout(h3, args.dropout)
    h3 = dnn_conv(h3, w4, subsample=(2, 2), border_mode=(2, 2))
    if args.dbn:
        h3 = batchnorm(h3, g=g4, b=b4)
    else:
        h3 += b4.dimshuffle('x', 0, 'x', 'x')
    h4 = lrelu(h3)
    h4 = dropout(h4, args.dropout)
    h4 = T.flatten(h4, 2)
    y = sigmoid(T.dot(h4, wy))
    y1 = sigmoid(T.dot(h4, wy1))
    return y, y1
Example #6
def encoder( s, w1, w2, g2, b2, w3, g3, b3, w4, g4, b4, w5, g5, b5 ):
    h1 = lrelu( dnn_conv( s, w1, subsample=( 2, 2 ), border_mode = ( 2, 2 ) ) )
    h2 = lrelu( batchnorm( dnn_conv( h1, w2, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g2, b = b2 ) )
    h3 = lrelu( batchnorm( dnn_conv( h2, w3, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g3, b = b3 ) )
    h4 = lrelu( batchnorm( dnn_conv( h3, w4, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g4, b = b4 ) )
    z = lrelu( batchnorm( dnn_conv( h4, w5, subsample = ( 1, 1 ), border_mode = ( 0, 0 ) ), g = g5, b = b5 ) )
    return T.flatten( z, 2 )
Example #7
def denseConvlayer(layer_inputs, bottleneck_scale, growth_rate, is_training):
    # Build the bottleneck operation
    net = layer_inputs
    net_temp = tf.identity(net)
    net = batchnorm(net, is_training)
    net = prelu_tf(net, name='Prelu_1')
    net = conv2(net,
                kernel=1,
                output_channel=bottleneck_scale * growth_rate,
                stride=1,
                use_bias=False,
                scope='conv1x1')
    net = batchnorm(net, is_training)
    net = prelu_tf(net, name='Prelu_2')
    net = conv2(net,
                kernel=3,
                output_channel=growth_rate,
                stride=1,
                use_bias=False,
                scope='conv3x3')

    # Concatenate the newly produced features with the block input along the channel axis
    net = tf.concat([net_temp, net], axis=3)

    return net
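
denseConvlayer above is one bottleneck-plus-3x3 layer of a DenseNet-style block; conv2, batchnorm and prelu_tf are helpers from the snippet's own repository. The wrapper below is only a sketch of how such a layer is typically stacked, assuming TensorFlow 1.x variable scopes and that those helpers are importable.

# Sketch only: stacking denseConvlayer into a dense block (TF 1.x assumed).
import tensorflow as tf

def dense_block(inputs, num_layers, bottleneck_scale, growth_rate, is_training):
    net = inputs
    for i in range(num_layers):
        # each call concatenates its output onto the running feature map (axis=3)
        with tf.variable_scope('dense_layer_%d' % i):
            net = denseConvlayer(net, bottleneck_scale, growth_rate, is_training)
    return net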
Example #8
def discrim(X, w, b, w2, g2, b2, w3, g3, b3, w4, g4, b4, wy, wy1):
    h0 = dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2))
    if args.db1:
        h0 += b.dimshuffle('x', 0, 'x', 'x')
    h1 = lrelu(h0)
    h1 = dropout(h1, args.dropout)
    h1 = dnn_conv(h1, w2, subsample=(2, 2), border_mode=(2, 2))
    if args.dbn:
        h1 = batchnorm(h1, g=g2, b=b2)
    else:
        h1 += b2.dimshuffle('x', 0, 'x', 'x')
    h2 = lrelu(h1)
    h2 = dropout(h2, args.dropout)
    h2 = dnn_conv(h2, w3, subsample=(2, 2), border_mode=(2, 2))
    if args.dbn:
        h2 = batchnorm(h2, g=g3, b=b3)
    else:
        h2 += b3.dimshuffle('x', 0, 'x', 'x')
    h3 = lrelu(h2)
    h3 = dropout(h3, args.dropout)
    h3 = dnn_conv(h3, w4, subsample=(2, 2), border_mode=(2, 2))
    if args.dbn:
        h3 = batchnorm(h3, g=g4, b=b4)
    else:
        h3 += b4.dimshuffle('x', 0, 'x', 'x')
    h4 = lrelu(h3)
    h4 = dropout(h4, args.dropout)
    h4 = T.flatten(h4, 2)
    y = sigmoid(T.dot(h4, wy))
    y1 = sigmoid(T.dot(h4, wy1))
    return y, y1
def gen(Z, w, g, b, w2, g2, b2, w3, g3, b3,wx):
    h0 = relu(batchnorm(T.dot(Z, w), g=g, b=b))
    h0 = h0.reshape((h0.shape[0], ngf*4, 2, 2))
    h1 = relu(batchnorm(deconv(h0, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2))
    h2 = relu(batchnorm(deconv(h1, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3))
    x = tanh(deconv(h2, wx, subsample=(2, 2), border_mode=(2, 2)))
    return x
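
The discrim()/gen() pair above follows the usual DCGAN recipe. A rough sketch of the standard objectives follows; the parameter lists gen_params and discrim_params, the module-level args object the discriminator reads, and the use of only the first discriminator head are all assumptions.

# Sketch of DCGAN-style costs for a gen()/discrim() pair; parameter lists are assumed
# to be Python lists of shared variables matching the signatures above.
import theano.tensor as T
bce = T.nnet.binary_crossentropy

X = T.tensor4('X')   # real images
Z = T.matrix('Z')    # noise

gX = gen(Z, *gen_params)
p_real, _ = discrim(X,  *discrim_params)    # only the first head is used here
p_gen,  _ = discrim(gX, *discrim_params)

d_cost = bce(p_real, T.ones(p_real.shape)).mean() \
       + bce(p_gen,  T.zeros(p_gen.shape)).mean()   # discriminator: real -> 1, fake -> 0
g_cost = bce(p_gen, T.ones(p_gen.shape)).mean()     # generator: fool the discriminator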
Example #10
 def apply(self, batch_size=None, rand_vals=None):
     """
     Apply this generator module. Pass _either_ batch_size or rand_vals.
     """
     assert not ((batch_size is None) and
                 (rand_vals is None)), "need either batch_size or rand_vals"
     if rand_vals is None:
         rand_shape = (batch_size, self.rand_dim)
         if self.rand_type == 'normal':
             rand_vals = self.rng.normal(size=rand_shape, avg=0.0, std=1.0, \
                                         dtype=theano.config.floatX)
         else:
             rand_vals = self.rng.uniform(size=rand_shape, low=-1.0, high=1.0, \
                                          dtype=theano.config.floatX)
     else:
         rand_shape = (rand_vals.shape[0], self.rand_dim)
     rand_vals = rand_vals.reshape(rand_shape)
     # transform random values into fc layer
     h1 = T.dot(rand_vals, self.w1)
     if self.apply_bn_1:
         h1 = batchnorm(h1, g=self.g1, b=self.b1)
     h1 = relu(h1)
     # transform from fc layer to output
     h2 = T.dot(h1, self.w2)
     if self.apply_bn_2:
         h2 = batchnorm(h2, g=self.g2, b=self.b2)
     if self.final_relu:
         h2 = relu(h2)
     return h2
Example #11
def gen_test_tanh(_z, _params, _pls, n_layers=3, n_f=128, init_sz=4):
    tan_z = tanh(_z)
    [gw0, gg0, gb0] = _params[0:3]
    hs = []
    u = _pls[0]
    s = _pls[n_layers + 1]
    h0 = relu(
        batchnorm(T.dot(T.clip(tan_z, -1.0, 1.0), gw0), u=u, s=s, g=gg0,
                  b=gb0))
    h1 = h0.reshape((h0.shape[0], n_f * 2**n_layers, init_sz, init_sz))
    hs.extend([h0, h1])
    for n in range(n_layers):
        [w, g, b] = _params[3 * (n + 1):3 * (n + 2)]
        hin = hs[-1]
        u = _pls[n + 1]
        s = _pls[n + n_layers + 2]
        hout = relu(
            batchnorm(deconv(hin, w, subsample=(2, 2), border_mode=(2, 2)),
                      u=u,
                      s=s,
                      g=g,
                      b=b))
        hs.append(hout)
    x = tanh(deconv(hs[-1], _params[-1], subsample=(2, 2), border_mode=(2, 2)))
    return x
def discrim(X, w, w2, g2, b2, w3, g3, b3, wy, by):
    h0 = dropout(relu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2))), p=0.5)
    h1 = dropout(relu(batchnorm(dnn_conv(h0, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2)), p=0.5)
    h2 = dropout(relu(batchnorm(dnn_conv(h1, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3)), p=0.5)
    h2 = T.flatten(h2, 2)
    y = -relu(T.dot(h2, wy)+by)
    return y
Example #13
 def apply(self, batch_size=None, rand_vals=None):
     """
     Apply this generator module. Pass _either_ batch_size or rand_vals.
     """
     assert not ((batch_size is None) and (rand_vals is None)), "need either batch_size or rand_vals"
     if rand_vals is None:
         rand_shape = (batch_size, self.rand_dim)
         if self.rand_type == 'normal':
             rand_vals = self.rng.normal(size=rand_shape, avg=0.0, std=1.0, \
                                         dtype=theano.config.floatX)
         else:
             rand_vals = self.rng.uniform(size=rand_shape, low=-1.0, high=1.0, \
                                          dtype=theano.config.floatX)
     else:
         rand_shape = (rand_vals.shape[0], self.rand_dim)
     rand_vals = rand_vals.reshape(rand_shape)
     # transform random values into fc layer
     h1 = T.dot(rand_vals, self.w1)
     if self.apply_bn_1:
         h1 = batchnorm(h1, g=self.g1, b=self.b1)
     h1 = relu(h1)
     # transform from fc layer to output
     h2 = T.dot(h1, self.w2)
     if self.apply_bn_2:
         h2 = batchnorm(h2, g=self.g2, b=self.b2)
     if self.final_relu:
         h2 = relu(h2)
     return h2
Example #14
 def apply(self, input):
     """
     Apply this discriminator module to the given input. This produces a
     collection of filter responses for feedforward and a spatial grid of
     discriminator outputs.
     """
     bm = int((self.filt_dim - 1) / 2) # use "same" mode convolutions
     ss = self.ds_stride               # stride for "learned downsampling"
     # apply first conv layer
     h1 = dnn_conv(input, self.w1, subsample=(1, 1), border_mode=(bm, bm))
     if self.apply_bn_1:
         h1 = batchnorm(h1, g=self.g1, b=self.b1)
     h1 = lrelu(h1)
     # apply second conv layer (may include downsampling)
     if self.use_pooling:
         h2 = dnn_conv(h1, self.w2, subsample=(1, 1), border_mode=(bm, bm))
         if self.apply_bn_2:
             h2 = batchnorm(h2, g=self.g2, b=self.b2)
         h2 = lrelu(h2)
         h2 = dnn_pool(h2, (ss,ss), stride=(ss, ss), mode='max', pad=(0, 0))
     else:
         h2 = dnn_conv(h1, self.w2, subsample=(ss, ss), border_mode=(bm, bm))
         if self.apply_bn_2:
             h2 = batchnorm(h2, g=self.g2, b=self.b2)
         h2 = lrelu(h2)
     
     # apply discriminator layer
     y = dnn_conv(h2, self.wd, subsample=(1, 1), border_mode=(bm, bm))
     y = sigmoid(T.flatten(y, 2)) # flatten to (batch_size, num_preds)
     return h2, y
Example #15
def gen(Z, w1, g1, b1, w2, g2, b2, w3, g3, b3, w4, g4, b4, wx):
    Gl1 = relu(batchnorm(T.dot(Z, w1), g=g1, b=b1))
    Gl1 = Gl1.reshape((Gl1.shape[0], Channel[-1], Convlayersize[-1],
                       Convlayersize[-1], Convlayersize[-1]))

    input_shape = (None, None, Convlayersize[-1], Convlayersize[-1],
                   Convlayersize[-1])
    filter_shape = (Channel[-1], Channel[-2], kernal[-1], kernal[-1],
                    kernal[-1])

    Gl2 = relu(
        batchnorm(conv(Gl1,
                       w2,
                       filter_shape=filter_shape,
                       input_shape=input_shape,
                       conv_mode='deconv'),
                  g=g2,
                  b=b2))

    input_shape = (None, None, Convlayersize[-2], Convlayersize[-2],
                   Convlayersize[-2])
    filter_shape = (Channel[-2], Channel[-3], kernal[-2], kernal[-2],
                    kernal[-2])

    Gl3 = relu(
        batchnorm(conv(Gl2,
                       w3,
                       filter_shape=filter_shape,
                       input_shape=input_shape,
                       conv_mode='deconv'),
                  g=g3,
                  b=b3))

    input_shape = (None, None, Convlayersize[-3], Convlayersize[-3],
                   Convlayersize[-3])
    filter_shape = (Channel[-3], Channel[-4], kernal[-3], kernal[-3],
                    kernal[-3])

    Gl4 = relu(
        batchnorm(conv(Gl3,
                       w4,
                       filter_shape=filter_shape,
                       input_shape=input_shape,
                       conv_mode='deconv'),
                  g=g4,
                  b=b4))

    input_shape = (None, None, Convlayersize[-4], Convlayersize[-4],
                   Convlayersize[-4])
    filter_shape = (Channel[-4], Channel[-5], kernal[-4], kernal[-4],
                    kernal[-4])

    GlX = sigmoid(
        conv(Gl4,
             wx,
             filter_shape=filter_shape,
             input_shape=input_shape,
             conv_mode='deconv'))
    return GlX
Example #16
def gen(Z, w, g, b, w2, g2, b2, w3, g3, b3, w4, g4, b4, wx):
    h = relu(batchnorm(T.dot(Z, w), g=g, b=b))
    h = h.reshape((h.shape[0], ngf*8, 4, 4))
    h2 = relu(batchnorm(deconv(h, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2))
    h3 = relu(batchnorm(deconv(h2, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3))
    h4 = relu(batchnorm(deconv(h3, w4, subsample=(2, 2), border_mode=(2, 2)), g=g4, b=b4))
    x = tanh(deconv(h4, wx, subsample=(2, 2), border_mode=(2, 2)))
    return x
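
With a generator like the one above (four stride-2 deconvolutions from a 4x4 map, so 64x64 outputs), sampling amounts to compiling the graph and feeding uniform noise. A minimal sketch, assuming ngf is defined, the noise is nz-dimensional, and gen_params is a list of shared variables matching the signature.

# Sketch: drawing samples from gen(); nz, nbatch and gen_params are assumptions.
import numpy as np
import theano
import theano.tensor as T

Z = T.matrix('Z')
f_gen = theano.function([Z], gen(Z, *gen_params))

nz, nbatch = 100, 16
z_sample = np.random.uniform(-1.0, 1.0, size=(nbatch, nz)).astype(theano.config.floatX)
imgs = f_gen(z_sample)   # (nbatch, nc, 64, 64), in [-1, 1] because of the final tanh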
Example #17
def domain_discrim( st, w1, w2, g2, b2, w3, g3, b3, w4, g4, b4, w5, g5, b5, w6 ):
    h1 = lrelu( dnn_conv( st, w1, subsample=( 2, 2 ), border_mode = ( 2, 2 ) ) )
    h2 = lrelu( batchnorm( dnn_conv( h1, w2, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g2, b = b2 ) )
    h3 = lrelu( batchnorm( dnn_conv( h2, w3, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g3, b = b3 ) )
    h4 = lrelu( batchnorm( dnn_conv( h3, w4, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g4, b = b4 ) )
    h5 = lrelu( batchnorm( dnn_conv( h4, w5, subsample = ( 1, 1 ), border_mode = ( 0, 0 ) ), g = g5, b = b5 ) )
    ydd = sigmoid( T.dot( T.flatten( h5, 2 ), w6 ) )
    return ydd
Example #18
 def feature_function(input_data, is_train=True):
     h0 = relu(batchnorm(X=dnn_conv(input_data, conv_w0, subsample=(2, 2), border_mode=(2, 2)), g=bn_w0, b=bn_b0))
     h1 = relu(batchnorm(X=dnn_conv(h0,         conv_w1, subsample=(2, 2), border_mode=(2, 2)), g=bn_w1, b=bn_b1))
     h2 = relu(batchnorm(X=dnn_conv(h1,         conv_w2, subsample=(2, 2), border_mode=(2, 2)), g=bn_w2, b=bn_b2))
     h3 = relu(batchnorm(X=dnn_conv(h2,         conv_w3, subsample=(2, 2), border_mode=(2, 2)), g=bn_w3, b=bn_b3))
     h3 = T.flatten(h3, 2)
     f  = tanh(T.dot(h3, linear_w4)+linear_b4)
     return f
Example #19
def gen(Z, w, w2, w3, gwx):
    h = relu(batchnorm(T.dot(Z, w)))
    h2 = relu(batchnorm(T.dot(h, w2)))
    h2 = h2.reshape((h2.shape[0], ngf * 2, 7, 7))
    h3 = relu(batchnorm(deconv(h2, w3, subsample=(2, 2), border_mode=(2, 2))))

    x = sigmoid(deconv(h3, gwx, subsample=(2, 2), border_mode=(2, 2)))
    return x
Example #20
def discrim(X, w, w2, g2, b2, w3, g3, b3, w4, g4, b4, wy):
    h = lrelu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2)))
    h2 = lrelu(batchnorm(dnn_conv(h, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2))
    h3 = lrelu(batchnorm(dnn_conv(h2, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3))
    h4 = lrelu(batchnorm(dnn_conv(h3, w4, subsample=(2, 2), border_mode=(2, 2)), g=g4, b=b4))
    h4 = T.flatten(h4, 2)
    y = sigmoid(T.dot(h4, wy))
    return y
Example #21
def gen(Z, w, g, b, w2, g2, b2, w3, g3, b3, w4, g4, b4, wx):
    h = relu(batchnorm(T.dot(Z, w), g=g, b=b))
    h = h.reshape((h.shape[0], ngf*8, 4, 4))
    h2 = relu(batchnorm(deconv(h, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2))
    h3 = relu(batchnorm(deconv(h2, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3))
    h4 = relu(batchnorm(deconv(h3, w4, subsample=(2, 2), border_mode=(2, 2)), g=g4, b=b4))
    x = tanh(deconv(h4, wx, subsample=(2, 2), border_mode=(2, 2)))
    return x
def discrim(X, w, w2, g2, b2, w3, g3, b3, wy, wa):
    h0 = dropout(relu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2))), p=0.5)
    h1 = dropout(relu(batchnorm(dnn_conv(h0, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2)), p=0.5)
    h2 = dropout(relu(batchnorm(dnn_conv(h1, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3)), p=0.5)
    h2 = T.flatten(h2, 2)
    y = square(T.dot(h2, wy))
    y = T.dot(T.log(1+y), T.exp(wa))
    return y
Example #23
def decoder( z, w1, g1, b1, w2, g2, b2, w3, g3, b3, w4, g4, b4, w5 ):
    h1 = relu( batchnorm( T.dot( z, w1 ), g = g1, b = b1 ) )
    h1 = h1.reshape( (h1.shape[ 0 ], nf * 8, 4, 4 ) ) 
    h2 = relu( batchnorm( deconv( h1, w2, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g2, b = b2 ) )
    h3 = relu( batchnorm( deconv( h2, w3, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g3, b = b3 ) )
    h4 = relu( batchnorm( deconv( h3, w4, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g4, b = b4 ) )
    t = tanh( deconv( h4, w5, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ) )
    return t
Example #24
def discrim(X, w, w2, g2, b2, w3, g3, b3, w4, g4, b4, wy):
    h = lrelu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2)))
    h2 = lrelu(batchnorm(dnn_conv(h, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2))
    h3 = lrelu(batchnorm(dnn_conv(h2, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3))
    h4 = lrelu(batchnorm(dnn_conv(h3, w4, subsample=(2, 2), border_mode=(2, 2)), g=g4, b=b4))
    h4 = T.flatten(h4, 2)
    y = sigmoid(T.dot(h4, wy))
    return y
Example #25
 def generator_function(hidden_data, is_train=True):
     h0_0   = relu(batchnorm(X=T.dot(hidden_data, linear_w0_0), g=bn_w0_0, b=bn_b0_0))
     h0_1   = relu(batchnorm(X=T.dot(h0_0,        linear_w0_1), g=bn_w0_1, b=bn_b0_1))
     h0     = h0_1.reshape((h0_1.shape[0], num_gen_filters0, init_image_size, init_image_size))
     h1     = relu(batchnorm(deconv(h0, conv_w1, subsample=(2, 2), border_mode=(2, 2)), g=bn_w1, b=bn_b1))
     h2     = relu(batchnorm(deconv(h1, conv_w2, subsample=(2, 2), border_mode=(2, 2)), g=bn_w2, b=bn_b2))
     h3     = relu(batchnorm(deconv(h2, conv_w3, subsample=(2, 2), border_mode=(2, 2)), g=bn_w3, b=bn_b3))
     output = tanh(deconv(h3, conv_w4, subsample=(2, 2), border_mode=(2, 2))+conv_b4.dimshuffle('x', 0, 'x', 'x'))
     return output
Example #26
def converter( Z, w5d, g5d, b5d, w4d, g4d, b4d, w3d, g3d, b3d, w2d, g2d, b2d, w1d ):
    h5d = relu( batchnorm( dnn_conv( Z, w5d, subsample = ( 1, 1 ), border_mode = ( 0, 0 ) ), g = g5d, b = b5d ) )
    h5d = h5d.reshape( ( h5d.shape[ 0 ], nf * 8, 4, 4 ) )
    h4d = relu( batchnorm( deconv( h5d, w4d, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g4d, b = b4d ) )
    h3d = relu( batchnorm( deconv( h4d, w3d, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g3d, b = b3d ) )
    h2d = relu( batchnorm( deconv( h3d, w2d, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g2d, b = b2d ) )
    h1d = tanh( deconv( h2d, w1d, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ) )
    y = h1d
    return y
Example #27
def discrim(X, w, w2, g2, b2, w3, g3, b3, w4, g4, b4, w5, g5, b5, w6, g6, b6, wy):
    h = lrelu(dnn_conv(X, w, subsample=(1, 1), border_mode=(1, 1)))
    h2 = lrelu(batchnorm(dnn_conv(h, w2, subsample=(2, 2), border_mode=(1, 1)), g=g2, b=b2))
    h3 = lrelu(batchnorm(dnn_conv(h2, w3, subsample=(1, 1), border_mode=(1, 1)), g=g3, b=b3))
    h4 = lrelu(batchnorm(dnn_conv(h3, w4, subsample=(2, 2), border_mode=(1, 1)), g=g4, b=b4))
    h5 = lrelu(batchnorm(dnn_conv(h4, w5, subsample=(1, 1), border_mode=(1, 1)), g=g5, b=b5))
    h6 = lrelu(batchnorm(dnn_conv(h5, w6, subsample=(2, 2), border_mode=(1, 1)), g=g6, b=b6))
    h6 = T.flatten(h6, 2)
    y = sigmoid(T.dot(h6, wy))
    return y
Example #28
def discrim(X):
    current_input = dropout(X, 0.3)
    ### encoder ###
    cv1 = relu(
        dnn_conv(current_input, aew1, subsample=(1, 1), border_mode=(1, 1)))
    cv2 = relu(
        batchnorm(dnn_conv(cv1, aew2, subsample=(4, 4), border_mode=(2, 2)),
                  g=aeg2,
                  b=aeb2))
    cv3 = relu(
        batchnorm(dnn_conv(cv2, aew3, subsample=(1, 1), border_mode=(1, 1)),
                  g=aeg3,
                  b=aeb3))
    cv4 = relu(
        batchnorm(dnn_conv(cv3, aew4, subsample=(4, 4), border_mode=(2, 2)),
                  g=aeg4,
                  b=aeb4))
    cv5 = relu(
        batchnorm(dnn_conv(cv4, aew5, subsample=(1, 1), border_mode=(1, 1)),
                  g=aeg5,
                  b=aeb5))
    cv6 = relu(
        batchnorm(dnn_conv(cv5, aew6, subsample=(4, 4), border_mode=(0, 0)),
                  g=aeg6,
                  b=aeb6))

    ### decoder ###
    dv6 = relu(
        batchnorm(deconv(cv6, aew6, subsample=(4, 4), border_mode=(0, 0)),
                  g=aeg6t,
                  b=aeb6t))
    dv5 = relu(
        batchnorm(deconv(dv6, aew5, subsample=(1, 1), border_mode=(1, 1)),
                  g=aeg5t,
                  b=aeb5t))
    dv4 = relu(
        batchnorm(deconv(dv5, aew4, subsample=(4, 4), border_mode=(2, 2)),
                  g=aeg4t,
                  b=aeb4t))
    dv3 = relu(
        batchnorm(deconv(dv4, aew3, subsample=(1, 1), border_mode=(1, 1)),
                  g=aeg3t,
                  b=aeb3t))
    dv2 = relu(
        batchnorm(deconv(dv3, aew2, subsample=(4, 4), border_mode=(2, 2)),
                  g=aeg2t,
                  b=aeb2t))
    dv1 = tanh(deconv(dv2, aew1, subsample=(1, 1), border_mode=(1, 1)))

    rX = dv1

    mse = T.sqrt(T.sum(T.abs_(T.flatten(X - rX, 2)), axis=1)) + T.sqrt(
        T.sum(T.flatten((X - rX)**2, 2), axis=1))  # L1 and L2 loss
    return T.flatten(cv6, 2), rX, mse
Example #29
 def generator_function(hidden_data, is_train=True):
     # layer 0 (linear)
     h0     = relu(batchnorm(X=T.dot(hidden_data, linear_w0), g=linear_bn_w0, b=linear_bn_b0))
     h0     = h0.reshape((h0.shape[0], num_gen_filters0, init_image_size, init_image_size))
     # layer 1 (deconv)
     h1     = relu(batchnorm(deconv(h0, conv_w1, subsample=(2, 2), border_mode=(2, 2)), g=conv_bn_w1, b=conv_bn_b1))
     # layer 2 (deconv)
     h2     = relu(batchnorm(deconv(h1, conv_w2, subsample=(2, 2), border_mode=(2, 2)), g=conv_bn_w2, b=conv_bn_b2))
     # layer 3 (deconv)
     output = tanh(deconv(h2, conv_w3, subsample=(2, 2), border_mode=(2, 2))+conv_b3.dimshuffle('x', 0, 'x', 'x'))
     return output
Example #30
def gen(Z, Y, w, w2, w3, wx):
    yb = Y.dimshuffle(0, 1, 'x', 'x')
    Z = T.concatenate([Z, Y], axis=1)
    h = relu(batchnorm(T.dot(Z, w)))
    h = T.concatenate([h, Y], axis=1)
    h2 = relu(batchnorm(T.dot(h, w2)))
    h2 = h2.reshape((h2.shape[0], ngf*2, npx_, npx_))
    h2 = conv_cond_concat(h2, yb)
    h3 = relu(batchnorm(deconv(h2, w3, subsample=(2, 2), border_mode=(2, 2))))
    h3 = conv_cond_concat(h3, yb)
    x = sigmoid(deconv(h3, wx, subsample=(2, 2), border_mode=(2, 2)))
    return x
Example #31
def discrim(X, Y, w, w2, w3, wy):
    yb = Y.dimshuffle(0, 1, 'x', 'x')
    X = conv_cond_concat(X, yb)
    h = lrelu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2)))
    h = conv_cond_concat(h, yb)
    h2 = lrelu(batchnorm(dnn_conv(h, w2, subsample=(2, 2), border_mode=(2, 2))))
    h2 = T.flatten(h2, 2)
    h2 = T.concatenate([h2, Y], axis=1)
    h3 = lrelu(batchnorm(T.dot(h2, w3)))
    h3 = T.concatenate([h3, Y], axis=1)
    y = sigmoid(T.dot(h3, wy))
    return y
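
In these conditional examples Y is broadcast over the spatial dimensions (yb) and concatenated onto every layer. Y itself is typically a one-hot label matrix; a small sketch with assumed sizes follows.

# Sketch: building a one-hot conditioning matrix Y for the conditional gen()/discrim()
# examples; nbatch, ny and labels are assumptions.
import numpy as np
import theano

nbatch, ny = 128, 10
labels = np.random.randint(0, ny, size=nbatch)
Y_np = np.zeros((nbatch, ny), dtype=theano.config.floatX)
Y_np[np.arange(nbatch), labels] = 1.0   # one 1 per row, selecting the class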
Example #32
def gen(Z, Y, w, w2, w3, wx):
    yb = Y.dimshuffle(0, 1, 'x', 'x')
    Z = T.concatenate([Z, Y], axis=1)
    h = relu(batchnorm(T.dot(Z, w)))
    h = T.concatenate([h, Y], axis=1)
    h2 = relu(batchnorm(T.dot(h, w2)))
    h2 = h2.reshape((h2.shape[0], ngf * 2, temp, temp))
    h2 = conv_cond_concat(h2, yb)
    h3 = relu(batchnorm(deconv(h2, w3, subsample=(2, 2), border_mode=(2, 2))))
    h3 = conv_cond_concat(h3, yb)
    x = sigmoid(deconv(h3, wx, subsample=(2, 2), border_mode=(2, 2)))
    return x
Example #33
def model(X,
    h2_u, h3_u,
    h2_s, h3_s,
    w, w2, g2, b2, w3, g3, b3, wy
    ):
    h = lrelu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2)))
    h2 = lrelu(batchnorm(dnn_conv(h, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2, u=h2_u, s=h2_s))
    h3 = lrelu(batchnorm(dnn_conv(h2, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3, u=h3_u, s=h3_s))
    h = T.flatten(dnn_pool(h, (4, 4), (4, 4), mode='max'), 2)
    h2 = T.flatten(dnn_pool(h2, (2, 2), (2, 2), mode='max'), 2)
    h3 = T.flatten(dnn_pool(h3, (1, 1), (1, 1), mode='max'), 2)
    f = T.concatenate([h, h2, h3], axis=1)
    return [f]
Example #34
def model(X,
    h2_u, h3_u,
    h2_s, h3_s,
    w, w2, g2, b2, w3, g3, b3, wy
    ):
    h = lrelu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2)))
    h2 = lrelu(batchnorm(dnn_conv(h, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2, u=h2_u, s=h2_s))
    h3 = lrelu(batchnorm(dnn_conv(h2, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3, u=h3_u, s=h3_s))
    h = T.flatten(dnn_pool(h, ws=(4, 4), stride=(4, 4), mode='max'), 2)
    h2 = T.flatten(dnn_pool(h2, ws=(2, 2), stride=(2, 2), mode='max'), 2)
    h3 = T.flatten(dnn_pool(h3, ws=(1, 1), stride=(1, 1), mode='max'), 2)
    f = T.concatenate([h, h2, h3], axis=1)
    return [f]
Example #35
def discrim(X, Y, w, w2, w3, wy):
    yb = Y.dimshuffle(0, 1, 'x', 'x')
    X = conv_cond_concat(X, yb)
    h = lrelu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2)))
    h = conv_cond_concat(h, yb)
    h2 = lrelu(batchnorm(dnn_conv(h, w2, subsample=(2, 2),
                                  border_mode=(2, 2))))
    h2 = T.flatten(h2, 2)
    h2 = T.concatenate([h2, Y], axis=1)
    h3 = lrelu(batchnorm(T.dot(h2, w3)))
    h3 = T.concatenate([h3, Y], axis=1)
    y = sigmoid(T.dot(h3, wy))
    return y
Example #36
def gen(Z, Y, w, w2, w3, wx):
    #Z: (nbatch, nz) = (128, 100)
    #Y: (nbatch, ny) = (128, 10)
    #w: (nz+ny, ngfc) = (110, 1024)
    #w2: (ngfc+ny, ngf*2*7*7) = (1024+10, 64*2*7*7) = (1034, 6272)
    #w3: (ngf*2+ny, ngf, 5, 5) = (128+10, 64, 5, 5 ) = (138, 64, 5, 5)
    #wx: (ngf+ny, nc, 5, 5) = (64+10, 1, 5, 5) = (74, 1, 5, 5)
    print('\n@@@@ gen()')
    printVal('Y', Y)
    printVal('w', w)  #matrix
    printVal('w2', w2)  #matrix
    printVal('w3', w3)  #tensor
    printVal('wx', wx)  #tensor
    # Reorder Y's dimensions: the numeric arguments are dimension indices, 'x' inserts a broadcastable axis
    #(G1)
    yb = Y.dimshuffle(0, 1, 'x', 'x')
    # yb is a 4-D tensor
    printVal('yb', yb)
    # Concatenate the matrices Z and Y horizontally: the usual conditional-GAN form.

    #(G2)
    Z = T.concatenate([Z, Y], axis=1)  # Z: (128, 110)

    #(G3)
    # Batch-normalize Z*w, then apply ReLU
    t1 = T.dot(Z, w)  #full connect : t1: (128, 1024)
    printVal('t1', t1)
    h = relu(batchnorm(t1))
    # h: (128, 1024)
    #(G4)
    h = T.concatenate([h, Y], axis=1)  # h: (128, 1034)
    #(G5)
    h2 = relu(batchnorm(T.dot(h, w2)))  # NOT full connect
    #(G6)
    h2 = h2.reshape((h2.shape[0], ngf * 2, 7, 7))

    #(G7)
    h3, yb2 = conv_cond_concat2(h2, yb)  #XXX

    #(G8) Deconvolution: per the paper, used in place of spatial pooling
    d = deconv(h3, w3, subsample=(2, 2), border_mode=(2, 2))
    printVal('d', d)  # (128, 64, 14, 14)
    #h3 = relu(batchnorm(deconv(h2, w3, subsample=(2, 2), border_mode=(2, 2))))
    #(G9)
    h4 = relu(batchnorm(d))
    #(G10)
    h5, yb3 = conv_cond_concat2(h4, yb)
    #(G11)
    x = sigmoid(deconv(h5, wx, subsample=(2, 2), border_mode=(2, 2)))
    return x, yb, yb2, d, h3, h5
Example #37
def bnorm_statistics(X, w, w2, g2, b2, w3, g3, b3, wy):
    h = lrelu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2)))

    h2 = dnn_conv(h, w2, subsample=(2, 2), border_mode=(2, 2))
    h2_u, h2_s = mean_and_var(h2)
    h2 = lrelu(batchnorm(h2, g=g2, b=b2))

    h3 = dnn_conv(h2, w3, subsample=(2, 2), border_mode=(2, 2))
    h3_u, h3_s = mean_and_var(h3)
    h3 = lrelu(batchnorm(h3, g=g3, b=b3))

    h_us = [h2_u, h3_u]
    h_ss = [h2_s, h3_s]
    return h_us, h_ss
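
The means and variances collected here are what the model() functions in Examples #33/#34 consume through batchnorm's u and s arguments. A sketch of that hand-off follows; the minibatch iterable and the averaging of per-batch statistics are assumptions.

# Sketch: averaging collected batchnorm statistics and feeding them to model().
import numpy as np
import theano
import theano.tensor as T

X = T.tensor4('X')
h_us, h_ss = bnorm_statistics(X, w, w2, g2, b2, w3, g3, b3, wy)
f_stats = theano.function([X], h_us + h_ss)        # [h2_u, h3_u, h2_s, h3_s]

stats = [f_stats(xb) for xb in minibatches]        # minibatches: assumed iterable of arrays
h2_u, h3_u, h2_s, h3_s = [
    theano.shared(np.mean([s[i] for s in stats], axis=0).astype(theano.config.floatX))
    for i in range(4)]

f_features = theano.function(
    [X], model(X, h2_u, h3_u, h2_s, h3_s, w, w2, g2, b2, w3, g3, b3, wy)[0])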
Example #38
def bnorm_statistics(X, w, w2, g2, b2, w3, g3, b3, wy):
    h = lrelu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2)))

    h2 = dnn_conv(h, w2, subsample=(2, 2), border_mode=(2, 2))
    h2_u, h2_s = mean_and_var(h2)
    h2 = lrelu(batchnorm(h2, g=g2, b=b2))

    h3 = dnn_conv(h2, w3, subsample=(2, 2), border_mode=(2, 2))
    h3_u, h3_s = mean_and_var(h3)
    h3 = lrelu(batchnorm(h3, g=g3, b=b3))

    h_us = [h2_u, h3_u]
    h_ss = [h2_s, h3_s]
    return h_us, h_ss
Example #39
 def apply(self, input, rand_vals=None):
     """
     Apply this generator module to some input.
     """
     batch_size = input.shape[0]
     bm = int((self.filt_dim - 1) / 2)  # use "same" mode convolutions
     ss = self.us_stride  # stride for "learned upsampling"
     if self.use_pooling:
         # "unpool" the input if desired
         input = input.repeat(ss, axis=2).repeat(ss, axis=3)
     # get shape for random values that will augment input
     rand_shape = (batch_size, self.rand_chans, input.shape[2],
                   input.shape[3])
     if self.use_rand:
         # augment input with random channels
         if rand_vals is None:
             if self.rand_type == 'normal':
                 rand_vals = self.rng.normal(size=rand_shape, avg=0.0, std=1.0, \
                                             dtype=theano.config.floatX)
             else:
                 rand_vals = self.rng.uniform(size=rand_shape, low=-1.0, high=1.0, \
                                              dtype=theano.config.floatX)
         rand_vals = rand_vals.reshape(rand_shape)
         # stack random values on top of input
         full_input = T.concatenate([rand_vals, input], axis=1)
     else:
         # don't augment input with random channels
         full_input = input
     # apply first convolution, perhaps with fractional striding
     if self.use_pooling:
         h1 = dnn_conv(full_input,
                       self.w1,
                       subsample=(1, 1),
                       border_mode=(bm, bm))
     else:
         # apply first conv layer (with fractional stride for upsampling)
         h1 = deconv(full_input,
                     self.w1,
                     subsample=(ss, ss),
                     border_mode=(bm, bm))
     if self.apply_bn_1:
         h1 = batchnorm(h1, g=self.g1, b=self.b1)
     h1 = relu(h1)
     # apply second conv layer
     h2 = dnn_conv(h1, self.w2, subsample=(1, 1), border_mode=(bm, bm))
     if self.apply_bn_2:
         h2 = batchnorm(h2, g=self.g2, b=self.b2)
     h2 = relu(h2)
     return h2
Example #40
def make_conv_layer(X,
                    input_size,
                    output_size,
                    input_filters,
                    output_filters,
                    name,
                    index,
                    weights=None,
                    filter_sz=5):
    is_deconv = output_size >= input_size
    w_size = (input_filters, output_filters, filter_sz,
              filter_sz) if is_deconv else (output_filters, input_filters,
                                            filter_sz, filter_sz)

    if weights is None:
        w = gifn(w_size, '%sw%i' % (name, index))
        g = gain_ifn((output_filters), '%sg%i' % (name, index))
        b = bias_ifn((output_filters), '%sb%i' % (name, index))
    else:
        w, g, b = weights

    conv_method = deconv if is_deconv else dnn_conv
    activation = relu if is_deconv else lrelu
    sub = output_size // input_size if is_deconv else input_size // output_size
    if filter_sz == 3:
        bm = 1
    else:
        bm = 2
    layer = activation(
        batchnorm(conv_method(X, w, subsample=(sub, sub),
                              border_mode=(bm, bm)),
                  g=g,
                  b=b))
    return layer, [w, g, b]
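
A sketch of how make_conv_layer might be chained; the sizes and the 'd_' name prefix are assumptions, and the initializers gifn/gain_ifn/bias_ifn are expected to be in scope as in the surrounding repository.

# Sketch: chaining make_conv_layer; sizes and names are assumptions.
import theano.tensor as T

X = T.tensor4('X')   # assumed (batch, 3, 64, 64)
h1, p1 = make_conv_layer(X,  64, 32, 3,   64,  name='d_', index=1)  # 64 -> 32: conv + lrelu
h2, p2 = make_conv_layer(h1, 32, 16, 64,  128, name='d_', index=2)  # 32 -> 16: conv + lrelu
h3, p3 = make_conv_layer(h2, 16, 32, 128, 64,  name='d_', index=3)  # 16 -> 32: deconv + relu
params = p1 + p2 + p3    # [w, g, b] triples collected for the optimizer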
Example #41
    def apply(self, batch_size=None, rand_vals=None):
        """
        Apply this generator module. Pass _either_ batch_size or rand_vals.
        """
        assert not ((batch_size is None) and
                    (rand_vals is None)), "need either batch_size or rand_vals"
        if rand_vals is None:
            rand_shape = (batch_size, self.rand_dim)
            if self.rand_type == 'normal':
                rand_vals = self.rng.normal(size=rand_shape, avg=0.0, std=1.0, \
                                            dtype=theano.config.floatX)
            else:
                rand_vals = self.rng.uniform(size=rand_shape, low=-1.0, high=1.0, \
                                             dtype=theano.config.floatX)
        else:
            rand_shape = (rand_vals.shape[0], self.rand_dim)
        rand_vals = rand_vals.reshape(rand_shape)
        # transform random values linearly
        h1 = T.dot(rand_vals, self.w1)
        if self.apply_bn:
            h1 = batchnorm(h1, g=self.g1, b=self.b1)
        if self.final_relu:
            h1 = relu(h1)
        return h1


Example #42
 def encoder_feature_function(input_data):
     # layer 0 (conv)
     h0 = dnn_conv(input_data, conv_w0, subsample=(2, 2), border_mode=(2, 2))
     h0 = relu(batchnorm(h0, g=bn_w0, b=bn_b0))
     # layer 1 (conv)
     h1 = dnn_conv(        h0, conv_w1, subsample=(2, 2), border_mode=(2, 2))
     h1 = relu(batchnorm(h1, g=bn_w1, b=bn_b1))
     # layer 2 (conv)
     h2 = dnn_conv(        h1, conv_w2, subsample=(2, 2), border_mode=(2, 2))
     h2 = relu(batchnorm(h2, g=bn_w2, b=bn_b2))
     # layer 3 (conv)
     h3 = dnn_conv(        h2, conv_w3, subsample=(2, 2), border_mode=(2, 2))
     h3 = relu(batchnorm(h3, g=bn_w3, b=bn_b3))
     # feature
     feature = T.flatten(h3, 2)
     return feature
Example #43
def make_conv_layer(X, input_size, output_size, input_filters, 
                    output_filters, name, index,
                    weights = None, filter_sz = 5):
    
    is_deconv = output_size >= input_size

    w_size = (input_filters, output_filters, filter_sz, filter_sz) \
            if is_deconv else (output_filters, input_filters, filter_sz, filter_sz)
    
    if weights is None:
        w = gifn(w_size, '%sw%i' %(name, index))
        g = gain_ifn((output_filters), '%sg%i' %(name, index))
        b = bias_ifn((output_filters), '%sb%i' %(name, index))
    else:
        w,g,b = weights
    
    conv_method = deconv if is_deconv else dnn_conv
    activation = relu if is_deconv else lrelu
    
    sub = output_size // input_size if is_deconv else input_size // output_size
    
    if filter_sz == 3:
        bm = 1
    else:
        bm = 2
    
    layer = activation(batchnorm(conv_method(X, w, subsample=(sub, sub), border_mode=(bm, bm)), g=g, b=b))
    
    return layer, [w,g,b]
Example #44
 def encoder_function(input_data):
     # layer 0 (conv)
     h0 = dnn_conv(input_data, conv_w0, subsample=(2, 2), border_mode=(2, 2))
     h0 = relu(batchnorm(h0, g=bn_w0, b=bn_b0))
     # layer 1 (conv)
     h1 = dnn_conv(h0, conv_w1, subsample=(2, 2), border_mode=(2, 2))
     h1 = relu(batchnorm(h1, g=bn_w1, b=bn_b1))
     # layer 2 (conv)
     h2 = dnn_conv(h1, conv_w2, subsample=(2, 2), border_mode=(2, 2))
     h2 = relu(batchnorm(h2, g=bn_w2, b=bn_b2))
     # layer 3 (conv)
     h3 = dnn_conv(h2, conv_w3, subsample=(2, 2), border_mode=(2, 2))
     h3 = T.flatten(relu(batchnorm(h3, g=bn_w3, b=bn_b3)), 2)
     # layer output
     hidden_data = T.dot(h3, hidden_w) + hidden_b
     return hidden_data
Example #45
def gen_postlearn(_z, _params, n_layers=3, n_f=128, init_sz=4):
    output = []
    [gw0, gg0, gb0] = _params[0:3]
    hs = []
    h0_o = T.dot(_z, gw0)
    output = [h0_o]
    h0 = relu(batchnorm(h0_o, g=gg0, b=gb0))
    h1 = h0.reshape((h0.shape[0], n_f * 2**n_layers, init_sz, init_sz))
    hs.extend([h0, h1])
    for n in range(n_layers):
        [w, g, b] = _params[3 * (n + 1):3 * (n + 2)]
        hin = hs[-1]
        h_o = deconv(hin, w, subsample=(2, 2), border_mode=(2, 2))
        hout = relu(batchnorm(h_o, g=g, b=b))
        hs.append(hout)
        output.append(h_o)
    x = tanh(deconv(hs[-1], _params[-1], subsample=(2, 2), border_mode=(2, 2)))
    return x, output
Example #46
def gen(Z, Y):
    yb = Y.dimshuffle(0, 1, 'x', 'x')
    Z = T.concatenate([Z, Y], axis=1)
    h = relu(batchnorm(T.dot(Z, gw), g=gg, b=gb))
    h = h.reshape((h.shape[0], ngf * 4, 4, 4))
    h = conv_cond_concat(h, yb)
    h2 = relu(
        batchnorm(deconv(h, gw2, subsample=(2, 2), border_mode=(2, 2)),
                  g=gg2,
                  b=gb2))
    h2 = conv_cond_concat(h2, yb)
    h3 = relu(
        batchnorm(deconv(h2, gw3, subsample=(2, 2), border_mode=(2, 2)),
                  g=gg3,
                  b=gb3))
    h3 = conv_cond_concat(h3, yb)
    x = tanh(deconv(h3, gw4, subsample=(2, 2), border_mode=(2, 2)))
    return x
Example #47
def encoder(X, w1, g1, b1, w2, g2, b2, w3, g3, b3, w4, g4, b4, wz):
    filter_shape = (Channel[1], Channel[0], kernal[0], kernal[0], kernal[0])
    Dl1 = lrelu(batchnorm(conv(X, w1, filter_shape=filter_shape), g=g1, b=b1))

    filter_shape = (Channel[2], Channel[1], kernal[1], kernal[1], kernal[1])
    Dl2 = lrelu(batchnorm(conv(Dl1, w2, filter_shape=filter_shape), g=g2,
                          b=b2))

    filter_shape = (Channel[3], Channel[2], kernal[2], kernal[2], kernal[2])
    Dl3 = lrelu(batchnorm(conv(Dl2, w3, filter_shape=filter_shape), g=g3,
                          b=b3))

    filter_shape = (Channel[4], Channel[3], kernal[3], kernal[3], kernal[3])
    Dl4 = lrelu(batchnorm(conv(Dl3, w4, filter_shape=filter_shape), g=g4,
                          b=b4))
    Dl4 = T.flatten(Dl4, 2)
    DlZ = sigmoid(T.dot(Dl4, wz))
    return DlZ
Example #48
 def apply(self, input, rand_vals=None):
     """
     Apply this generator module to some input.
     """
     batch_size = input.shape[0]
     bm = int((self.filt_dim - 1) / 2) # use "same" mode convolutions
     ss = self.us_stride               # stride for "learned upsampling"
     if self.use_pooling:
         # "unpool" the input if desired
         input = input.repeat(ss, axis=2).repeat(ss, axis=3)
     # get shape for random values that will augment input
     rand_shape = (batch_size, self.rand_chans, input.shape[2], input.shape[3])
     if self.use_rand:
         # augment input with random channels
         if rand_vals is None:
             if self.rand_type == 'normal':
                 rand_vals = self.rng.normal(size=rand_shape, avg=0.0, std=1.0, \
                                             dtype=theano.config.floatX)
             else:
                 rand_vals = self.rng.uniform(size=rand_shape, low=-1.0, high=1.0, \
                                              dtype=theano.config.floatX)
         rand_vals = rand_vals.reshape(rand_shape)
         # stack random values on top of input
         full_input = T.concatenate([rand_vals, input], axis=1)
     else:
         # don't augment input with random channels
         full_input = input
     # apply first convolution, perhaps with fractional striding
     if self.use_pooling:
         h1 = dnn_conv(full_input, self.w1, subsample=(1, 1), border_mode=(bm, bm))
     else:
         # apply first conv layer (with fractional stride for upsampling)
         h1 = deconv(full_input, self.w1, subsample=(ss, ss), border_mode=(bm, bm))
     if self.apply_bn_1:
         h1 = batchnorm(h1, g=self.g1, b=self.b1)
     h1 = relu(h1)
     # apply second conv layer
     h2 = dnn_conv(h1, self.w2, subsample=(1, 1), border_mode=(bm, bm))
     if self.apply_bn_2:
         h2 = batchnorm(h2, g=self.g2, b=self.b2)
     h2 = relu(h2)
     return h2
Example #49
def gen(_z, _params, n_layers=3, n_f=128, init_sz=4, nc=3):
    [gw0, gg0, gb0] = _params[0:3]
    hs = []
    h0 = relu(batchnorm(T.dot(_z, gw0), g=gg0, b=gb0))
    h1 = h0.reshape((h0.shape[0], n_f * 2**n_layers, init_sz, init_sz))
    hs.extend([h0, h1])
    for n in range(n_layers):
        [w, g, b] = _params[3 * (n + 1):3 * (n + 2)]
        hin = hs[-1]
        hout = relu(
            batchnorm(deconv(hin, w, subsample=(2, 2), border_mode=(2, 2)),
                      g=g,
                      b=b))
        hs.append(hout)
    x = deconv(hs[-1], _params[-1], subsample=(2, 2), border_mode=(2, 2))

    if nc == 3:
        x_f = tanh(x)
    if nc == 1:
        x_f = sigmoid(x)
    return x_f
Example #50
 def residual_block(inputs, output_channels, stride, scope):
     with tf.variable_scope(scope):
         net = ops.conv3d(inputs,
                          3,
                          output_channels,
                          stride,
                          use_bias=False,
                          scope='conv_1')
         if (FLAGS.GAN_type == 'GAN'):
             net = ops.batchnorm(net, FLAGS.is_training)
         net = ops.prelu_tf(net)
         net = ops.conv3d(net,
                          3,
                          output_channels,
                          stride,
                          use_bias=False,
                          scope='conv_2')
         if (FLAGS.GAN_type == 'GAN'):
             net = ops.batchnorm(net, FLAGS.is_training)
         net = net + inputs
     return net
Example #51
 def generator_function(hidden_data, is_train=True):
     # layer 0 (linear)
     h0     = T.dot(hidden_data, linear_w0)
     h0     = h0 + t_rng.normal(size=h0.shape, std=0.01, dtype=t_floatX)
     h0     = relu(batchnorm(X=h0, g=linear_bn_w0, b=linear_bn_b0))
     h0     = h0.reshape((h0.shape[0], num_gen_filters0, init_image_size, init_image_size))
     # layer 1 (deconv)
     h1     = deconv(h0, conv_w1, subsample=(2, 2), border_mode=(2, 2))
     h1     = h1 + t_rng.normal(size=h1.shape, std=0.01, dtype=t_floatX)
     h1     = relu(batchnorm(h1, g=conv_bn_w1, b=conv_bn_b1))
     # layer 2 (deconv)
     h2     = deconv(h1, conv_w2, subsample=(2, 2), border_mode=(2, 2))
     h2     = h2 + t_rng.normal(size=h2.shape, std=0.01, dtype=t_floatX)
     h2     = relu(batchnorm(h2, g=conv_bn_w2, b=conv_bn_b2))
     # layer 3 (deconv)
     h3     = deconv(h2, conv_w3, subsample=(2, 2), border_mode=(2, 2))
     h3     = h3 + t_rng.normal(size=h3.shape, std=0.01, dtype=t_floatX)
     h3     = relu(batchnorm(h3, g=conv_bn_w3, b=conv_bn_b3))
     # layer 4 (deconv)
     output = tanh(deconv(h3, conv_w4, subsample=(2, 2), border_mode=(2, 2))+conv_b4.dimshuffle('x', 0, 'x', 'x'))
     return output
Example #52
def transitionLayer(layer_inputs, output_channel, is_training):
    net = layer_inputs
    net = batchnorm(net, is_training)
    net = prelu_tf(net)
    net = conv2(net,
                1,
                output_channel,
                stride=1,
                use_bias=False,
                scope='conv1x1')

    return net
Example #53
def gen(Z, w, g, b, w2, g2, b2, w3, g3, b3, w4, g4, b4, w5, g5, b5, w6, g6, b6,
        wx):
    h = relu(batchnorm(T.dot(Z, w), g=g, b=b))
    h = h.reshape((h.shape[0], ngf * 4, 4, 4))
    h2 = relu(
        batchnorm(deconv(h, w2, subsample=(2, 2), border_mode=(1, 1)),
                  g=g2,
                  b=b2))
    h3 = relu(
        batchnorm(deconv(h2, w3, subsample=(1, 1), border_mode=(1, 1)),
                  g=g3,
                  b=b3))
    h4 = relu(
        batchnorm(deconv(h3, w4, subsample=(2, 2), border_mode=(1, 1)),
                  g=g4,
                  b=b4))
    h5 = relu(
        batchnorm(deconv(h4, w5, subsample=(1, 1), border_mode=(1, 1)),
                  g=g5,
                  b=b5))
    h6 = relu(
        batchnorm(deconv(h5, w6, subsample=(2, 2), border_mode=(1, 1)),
                  g=g6,
                  b=b6))
    x = tanh(deconv(h6, wx, subsample=(1, 1), border_mode=(1, 1)))
    return x
Example #54
def gen_test(_z,
             _params,
             _batchnorm,
             n_layers=3,
             n_f=128,
             init_sz=4,
             nc=3,
             use_tanh=False):
    if use_tanh:
        _z = tanh(_z)
    # gw0 : weight of dense layer(0)
    # gg0 , gb0 : params of batchnorm layer
    [gw0, gg0, gb0] = _params[0:3]
    hs = []
    u = _batchnorm[0]
    s = _batchnorm[n_layers + 1]
    # Clip z => Dense => BatchNorm => ReLU
    h0 = relu(
        batchnorm(T.dot(T.clip(_z, -1.0, 1.0), gw0), u=u, s=s, g=gg0, b=gb0))
    # reshape to 4D
    h1 = h0.reshape((h0.shape[0], n_f * 2**n_layers, init_sz, init_sz))
    hs.extend([h0, h1])
    for n in range(n_layers):
        [w, g, b] = _params[3 * (n + 1):3 * (n + 2)]
        hin = hs[-1]
        u = _batchnorm[n + 1]
        s = _batchnorm[n + n_layers + 2]
        hout = relu(
            batchnorm(deconv(hin, w, subsample=(2, 2), border_mode=(2, 2)),
                      u=u,
                      s=s,
                      g=g,
                      b=b))
        hs.append(hout)
    x = deconv(hs[-1], _params[-1], subsample=(2, 2), border_mode=(2, 2))
    if nc == 3:
        x_f = tanh(x)
    if nc == 1:
        x_f = sigmoid(x)
    return x_f
Example #55
def converter( IS, w1, w2, g2, b2, w3, g3, b3, w4, g4, b4, w5, g5, b5,
        w5d, g5d, b5d, w4d, g4d, b4d, w3d, g3d, b3d, w2d, g2d, b2d, w1d ):
    h1 = lrelu( dnn_conv( IS, w1, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ) )
    h2 = lrelu( batchnorm( dnn_conv( h1, w2, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g2, b = b2 ) )
    h3 = lrelu( batchnorm( dnn_conv( h2, w3, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g3, b = b3 ) )
    h4 = lrelu( batchnorm( dnn_conv( h3, w4, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g4, b = b4 ) )
    h5 = lrelu( batchnorm( dnn_conv( h4, w5, subsample = ( 1, 1 ), border_mode = ( 0, 0 ) ), g = g5, b = b5 ) )
    h5d = relu( batchnorm( dnn_conv( h5, w5d, subsample = ( 1, 1 ), border_mode = ( 0, 0 ) ), g = g5d, b = b5d ) )
    h5d = h5d.reshape( ( h5d.shape[0], nf * 8, 4, 4 ) )
    h4d = relu( batchnorm( deconv( h5d, w4d, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g4d, b = b4d ) )
    h3d = relu( batchnorm( deconv( h4d, w3d, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g3d, b = b3d ) )
    h2d = relu( batchnorm( deconv( h3d, w2d, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g2d, b = b2d ) )
    h1d = tanh( deconv( h2d, w1d, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ) )
    y = h1d
    return y
Example #56
def disc_test(_x, _params, _batchnorm, n_layers=3):
    w = _params[0]
    h0 = lrelu(dnn_conv(_x, w, subsample=(2, 2), border_mode=(2, 2)))
    hs = [h0]
    for n in range(n_layers):
        hin = hs[-1]
        w, g, b = _params[1 + 3 * n:1 + 3 * (n + 1)]
        u = _batchnorm[n]
        s = _batchnorm[n + n_layers]
        hout = lrelu(batchnorm(dnn_conv(hin, w, subsample=(2, 2), border_mode=(2, 2)), u=u, s=s, g=g, b=b))
        hs.append(hout)
    h = T.flatten(hs[-1], 2)
    y = sigmoid(T.dot(h, _params[-1]))
    return y
Example #57
    def discriminator_block(inputs, output_channel, kernel_size, stride,
                            scope):
        with tf.variable_scope(scope):
            net = ops.conv3d(inputs,
                             kernel_size,
                             output_channel,
                             stride,
                             use_bias=False,
                             scope='conv1')
            if (FLAGS.GAN_type == 'GAN'):
                net = ops.batchnorm(net, FLAGS.is_training)
            net = ops.lrelu(net, 0.2)

        return net
Example #58
def predict_test(_x, _params, _batchnorm, n_layers=3):
    w = _params[0]
    h0 = lrelu(dnn_conv(_x, w, subsample=(2, 2), border_mode=(2, 2)))
    hs = [h0]
    for n in range(n_layers):
        hin = hs[-1]
        w, g, b = _params[1 + 3 * n:1 + 3 * (n + 1)]
        u = _batchnorm[n]
        s = _batchnorm[n + n_layers]
        hout = lrelu(batchnorm(dnn_conv(hin, w, subsample=(2, 2), border_mode=(2, 2)), u=u, s=s, g=g, b=b))
        hs.append(hout)
    h = T.flatten(hs[-1], 2)
    y = tanh(T.dot(h, _params[-1]))
    return y
Example #59
def gen(Z, Y, w, w2, w3, wx):
    print('\n@@@@ gen()')
    printVal('Z', Z)  # matrix
    #printVal( 'Y', Y )  # matrix
    printVal('w', w)  # matrix
    printVal('w2', w2)  # matrix

    printVal('w3', w3)  # tensor
    printVal('wx', wx)  # tensor
    # Reorder Y's dimensions: the numeric arguments are dimension indices, 'x' inserts a broadcastable axis.
    # The total number of elements is unchanged by this reordering.
    yb = Y.dimshuffle(0, 1, 'x', 'x')
    # yb is a 4-D tensor
    #printVal('yb', yb)
    # Concatenate the matrices Z and Y horizontally
    Z = T.concatenate([Z, Y], axis=1)  # matrix
    # Batch-normalize Z*w (fully connected), then apply ReLU
    tmp_a = T.dot(Z, w)  # dot(matrix, matrix)->matrix
    printVal('dot(Z,w) -> tmp_a', tmp_a)
    h = relu(batchnorm(T.dot(Z, w)))  #CCC
    h = T.concatenate([h, Y], axis=1)  #CCC

    printVal('h', h)  # matrix
    h2 = relu(batchnorm(T.dot(h, w2)))  #CCC
    printVal('h2', h2)  #h2:matrix
    h2r = h2.reshape((h2.shape[0], GEN_NUM_FILTER * 2, 7, 7))  #CCC
    printVal('h2r', h2r)  #h2r:tensor
    h2ry = conv_cond_concat(h2r, yb)  #
    printVal('h2ry', h2ry)  #h2:tensor
    # Deconvolution: per the paper, used in place of spatial pooling
    d = deconv(h2ry, w3, subsample=(2, 2), border_mode=(2, 2))
    printVal('d', d)
    #h3 = relu(batchnorm(deconv(h2, w3, subsample=(2, 2), border_mode=(2, 2))))
    h3 = relu(batchnorm(d))
    h3 = conv_cond_concat(h3, yb)
    x = sigmoid(deconv(h3, wx, subsample=(2, 2), border_mode=(2, 2)))
    return x, h2
Example #60
def predict(_x, _params, n_layers=3):
    w = _params[0]
    h0 = lrelu(dnn_conv(_x, w, subsample=(2, 2), border_mode=(2, 2)))
    hs = [h0]
    for n in range(n_layers):
        hin = hs[-1]
        w, g, b = _params[1 + 3 * n:1 + 3 * (n + 1)]
        hout = lrelu(
            batchnorm(dnn_conv(hin, w, subsample=(2, 2), border_mode=(2, 2)),
                      g=g,
                      b=b))
        hs.append(hout)
    h = T.flatten(hs[-1], 2)
    y = tanh(T.dot(h, _params[-1]))
    return y
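
predict() above uses per-minibatch batchnorm statistics, while predict_test() from Example #58 reuses the same _params but reads precomputed running means/variances from _batchnorm. A minimal sketch of the pairing, with _params and _batchnorm assumed to already exist as lists of shared variables:

# Sketch: train-time vs test-time use of the same parameters; _params / _batchnorm assumed.
import theano
import theano.tensor as T

x = T.tensor4('x')
f_train = theano.function([x], predict(x, _params))                   # batch statistics
f_test  = theano.function([x], predict_test(x, _params, _batchnorm))  # stored u/s statistics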