def gen_function_partial(self, layer, H, w, g, b, u, s, w2, g2, b2, u2, s2, w3, g3, b3, u3, s3, w4, g4, b4, u4, s4, wx):
        if layer==1:
            h = H
            x2, updates2 = running_batchnorm(deconv(h, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2,
                                             running_u=u2, running_s=s2, momentum=self.bn_momentum, test_mode=True)
        if layer<=2:
            if layer==2:
                h2 = H
            else:
                h2 = relu(x2)
            x3, updates3 = running_batchnorm(deconv(h2, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3,
                                             running_u=u3, running_s=s3, momentum=self.bn_momentum, test_mode=True)

        if layer<=3:
            if layer==3:
                h3 = H
            else:
                h3 = relu(x3)
            x4, updates4 = running_batchnorm(deconv(h3, w4, subsample=(2, 2), border_mode=(2, 2)), g=g4, b=b4,
                                             running_u=u4, running_s=s4, momentum=self.bn_momentum, test_mode=True)

        if layer==4:
            h4 = H
        else:
            h4 = relu(x4)

        x = tanh(deconv(h4, wx, subsample=self.visible_subsamp, border_mode=self.visible_bmode))
        return x
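All of these snippets call a deconv helper that is not shown. In Radford's dcgan_code, from which most of these generators derive, it is built by taking the gradient of a dummy cuDNN convolution; the sketch below follows that definition and assumes the legacy Theano CUDA backend, so treat it as illustrative rather than drop-in.

from theano.sandbox.cuda.basic_ops import gpu_contiguous, gpu_alloc_empty
from theano.sandbox.cuda.dnn import GpuDnnConvDesc, GpuDnnConvGradI

def deconv(X, w, subsample=(1, 1), border_mode=(0, 0), conv_mode='conv'):
    # set up a dummy convolutional forward pass and use its gradient
    # w.r.t. the input as the "deconvolution" (fractionally strided conv)
    img = gpu_contiguous(X)
    kerns = gpu_contiguous(w)
    out = gpu_alloc_empty(img.shape[0], kerns.shape[1],
                          img.shape[2] * subsample[0],
                          img.shape[3] * subsample[1])
    desc = GpuDnnConvDesc(border_mode=border_mode, subsample=subsample,
                          conv_mode=conv_mode)(out.shape, kerns.shape)
    return GpuDnnConvGradI()(kerns, img, out, desc)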
Example #2
def gen(Z, w, g, b, w2, g2, b2, w3, g3, b3, wx):
    h0 = relu(batchnorm(T.dot(Z, w), g=g, b=b))
    h0 = h0.reshape((h0.shape[0], ngf*4, 2, 2))
    h1 = relu(batchnorm(deconv(h0, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2))
    h2 = relu(batchnorm(deconv(h1, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3))
    x = tanh(deconv(h2, wx, subsample=(2, 2), border_mode=(2, 2)))
    return x
Example #3
def gen_test_tanh(_z, _params, _pls, n_layers=3, n_f=128, init_sz=4):
    tan_z = tanh(_z)
    [gw0, gg0, gb0] = _params[0:3]
    hs = []
    u = _pls[0]
    s = _pls[n_layers + 1]
    h0 = relu(
        batchnorm(T.dot(T.clip(tan_z, -1.0, 1.0), gw0), u=u, s=s, g=gg0,
                  b=gb0))
    h1 = h0.reshape((h0.shape[0], n_f * 2**n_layers, init_sz, init_sz))
    hs.extend([h0, h1])
    for n in range(n_layers):
        [w, g, b] = _params[3 * (n + 1):3 * (n + 2)]
        hin = hs[-1]
        u = _pls[n + 1]
        s = _pls[n + n_layers + 2]
        hout = relu(
            batchnorm(deconv(hin, w, subsample=(2, 2), border_mode=(2, 2)),
                      u=u,
                      s=s,
                      g=g,
                      b=b))
        hs.append(hout)
    x = tanh(deconv(hs[-1], _params[-1], subsample=(2, 2), border_mode=(2, 2)))
    return x
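gen_test_tanh reads the batchnorm running statistics from the flat list _pls: running means sit at indices 0..n_layers and running standard deviations at indices n_layers+1..2*n_layers+1, in layer order. A sketch of a compatible packing (the per-layer sizes assume the channel schedule used above):

import numpy as np
import theano

def pack_pls(n_layers=3, n_f=128, init_sz=4):
    # one statistics vector per batchnormed layer: the dense layer first,
    # then each deconv layer's output channels
    sizes = ([n_f * 2 ** n_layers * init_sz * init_sz] +
             [n_f * 2 ** (n_layers - 1 - n) for n in range(n_layers)])
    us = [theano.shared(np.zeros(sz, dtype=theano.config.floatX)) for sz in sizes]
    ss = [theano.shared(np.ones(sz, dtype=theano.config.floatX)) for sz in sizes]
    return us + ss  # so u = _pls[k] and s = _pls[n_layers + 1 + k]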
Example #4
def decoder( z, w1, g1, b1, w2, g2, b2, w3, g3, b3, w4, g4, b4, w5 ):
    h1 = relu( batchnorm( T.dot( z, w1 ), g = g1, b = b1 ) )
    h1 = h1.reshape( (h1.shape[ 0 ], nf * 8, 4, 4 ) ) 
    h2 = relu( batchnorm( deconv( h1, w2, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g2, b = b2 ) )
    h3 = relu( batchnorm( deconv( h2, w3, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g3, b = b3 ) )
    h4 = relu( batchnorm( deconv( h3, w4, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g4, b = b4 ) )
    t = tanh( deconv( h4, w5, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ) )
    return t
Example #5
def gen(Z, w, w2, w3, gwx):
    h = relu(batchnorm(T.dot(Z, w)))
    h2 = relu(batchnorm(T.dot(h, w2)))
    h2 = h2.reshape((h2.shape[0], ngf * 2, 7, 7))
    h3 = relu(batchnorm(deconv(h2, w3, subsample=(2, 2), border_mode=(2, 2))))

    x = sigmoid(deconv(h3, gwx, subsample=(2, 2), border_mode=(2, 2)))
    return x
Example #6
def gen(Z, w, g, b, w2, g2, b2, w3, g3, b3, w4, g4, b4, wx):
    h = relu(batchnorm(T.dot(Z, w), g=g, b=b))
    h = h.reshape((h.shape[0], ngf*8, 4, 4))
    h2 = relu(batchnorm(deconv(h, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2))
    h3 = relu(batchnorm(deconv(h2, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3))
    h4 = relu(batchnorm(deconv(h3, w4, subsample=(2, 2), border_mode=(2, 2)), g=g4, b=b4))
    x = tanh(deconv(h4, wx, subsample=(2, 2), border_mode=(2, 2)))
    return x
Example #8
 def generator_function(hidden_data, is_train=True):
     h0_0   = relu(batchnorm(X=T.dot(hidden_data, linear_w0_0), g=bn_w0_0, b=bn_b0_0))
     h0_1   = relu(batchnorm(X=T.dot(h0_0,        linear_w0_1), g=bn_w0_1, b=bn_b0_1))
     h0     = h0_1.reshape((h0_1.shape[0], num_gen_filters0, init_image_size, init_image_size))
     h1     = relu(batchnorm(deconv(h0, conv_w1, subsample=(2, 2), border_mode=(2, 2)), g=bn_w1, b=bn_b1))
     h2     = relu(batchnorm(deconv(h1, conv_w2, subsample=(2, 2), border_mode=(2, 2)), g=bn_w2, b=bn_b2))
     h3     = relu(batchnorm(deconv(h2, conv_w3, subsample=(2, 2), border_mode=(2, 2)), g=bn_w3, b=bn_b3))
     output = tanh(deconv(h3, conv_w4, subsample=(2, 2), border_mode=(2, 2))+conv_b4.dimshuffle('x', 0, 'x', 'x'))
     return output
Example #9
def converter( Z, w5d, g5d, b5d, w4d, g4d, b4d, w3d, g3d, b3d, w2d, g2d, b2d, w1d ):
    h5d = relu( batchnorm( dnn_conv( Z, w5d, subsample = ( 1, 1 ), border_mode = ( 0, 0 ) ), g = g5d, b = b5d ) )
    h5d = h5d.reshape( ( h5d.shape[ 0 ], nf * 8, 4, 4 ) )
    h4d = relu( batchnorm( deconv( h5d, w4d, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g4d, b = b4d ) )
    h3d = relu( batchnorm( deconv( h4d, w3d, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g3d, b = b3d ) )
    h2d = relu( batchnorm( deconv( h3d, w2d, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g2d, b = b2d ) )
    h1d = tanh( deconv( h2d, w1d, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ) )
    y = h1d
    return y
Example #10
File: job.py Project: mehdidc/dcgan
 def gen(Z, w, w2, w3, wx, use_batchnorm=True):
     if use_batchnorm:
         batchnorm_ = batchnorm
     else:
         batchnorm_ = lambda x:x
     h = relu(batchnorm_(T.dot(Z, w)))
     h2 = relu(batchnorm_(T.dot(h, w2)))
     h2 = h2.reshape((h2.shape[0], ngf*2, 7, 7))
     h3 = relu(batchnorm_(deconv(h2, w3, subsample=(2, 2), border_mode=(2, 2))))
     x = sigmoid(deconv(h3, wx, subsample=(2, 2), border_mode=(2, 2)))
     return x
Example #11
 def generator_function(hidden_data, is_train=True):
     # layer 0 (linear)
     h0     = relu(batchnorm(X=T.dot(hidden_data, linear_w0), g=linear_bn_w0, b=linear_bn_b0))
     h0     = h0.reshape((h0.shape[0], num_gen_filters0, init_image_size, init_image_size))
     # layer 1 (deconv)
     h1     = relu(batchnorm(deconv(h0, conv_w1, subsample=(2, 2), border_mode=(2, 2)), g=conv_bn_w1, b=conv_bn_b1))
     # layer 2 (deconv)
     h2     = relu(batchnorm(deconv(h1, conv_w2, subsample=(2, 2), border_mode=(2, 2)), g=conv_bn_w2, b=conv_bn_b2))
     # layer 3 (deconv)
     output = tanh(deconv(h2, conv_w3, subsample=(2, 2), border_mode=(2, 2))+conv_b3.dimshuffle('x', 0, 'x', 'x'))
     return output
Example #12
def gen(Z, Y, w, w2, w3, wx):
    yb = Y.dimshuffle(0, 1, 'x', 'x')
    Z = T.concatenate([Z, Y], axis=1)
    h = relu(batchnorm(T.dot(Z, w)))
    h = T.concatenate([h, Y], axis=1)
    h2 = relu(batchnorm(T.dot(h, w2)))
    h2 = h2.reshape((h2.shape[0], ngf * 2, temp, temp))
    h2 = conv_cond_concat(h2, yb)
    h3 = relu(batchnorm(deconv(h2, w3, subsample=(2, 2), border_mode=(2, 2))))
    h3 = conv_cond_concat(h3, yb)
    x = sigmoid(deconv(h3, wx, subsample=(2, 2), border_mode=(2, 2)))
    return x
Example #13
def gen(Z, Y, w, w2, w3, wx):
    yb = Y.dimshuffle(0, 1, 'x', 'x')
    Z = T.concatenate([Z, Y], axis=1)
    h = relu(batchnorm(T.dot(Z, w)))
    h = T.concatenate([h, Y], axis=1)
    h2 = relu(batchnorm(T.dot(h, w2)))
    h2 = h2.reshape((h2.shape[0], ngf*2, npx_, npx_))
    h2 = conv_cond_concat(h2, yb)
    h3 = relu(batchnorm(deconv(h2, w3, subsample=(2, 2), border_mode=(2, 2))))
    h3 = conv_cond_concat(h3, yb)
    x = sigmoid(deconv(h3, wx, subsample=(2, 2), border_mode=(2, 2)))
    return x
Example #14
 def generator_function(hidden_data, is_train=True):
     # layer 0 (linear)
     h0 = tanh(entropy_exp(X=T.dot(hidden_data, linear_w0), g=linear_bn_w0, b=linear_bn_b0))
     h0 = h0.reshape((h0.shape[0], num_gen_filters0, init_image_size, init_image_size))
     # layer 1 (deconv)
     h1 = tanh(entropy_exp(deconv(h0, conv_w1, subsample=(2, 2), border_mode=(2, 2)), g=conv_bn_w1, b=conv_bn_b1))
     # layer 2 (deconv)
     h2 = tanh(entropy_exp(deconv(h1, conv_w2, subsample=(2, 2), border_mode=(2, 2)), g=conv_bn_w2, b=conv_bn_b2))
     # layer 3 (deconv)
     h3 = tanh(entropy_exp(deconv(h2, conv_w3, subsample=(2, 2), border_mode=(2, 2)), g=conv_bn_w3, b=conv_bn_b3))
     # layer 4 (deconv)
     output = tanh(deconv(h3, conv_w4, subsample=(2, 2), border_mode=(2, 2)) + conv_b4.dimshuffle("x", 0, "x", "x"))
     return output
Example #15
def gen(Z, Y, w, w2, w3, wx):
    #Z: (nbatch, nz) = (128, 100)
    #Y: (nbatch, ny) = (128, 10)
    #w: (nz+ny, ngfc) = (110, 1024)
    #w2: (ngfc+ny, ngf*2*7*7) = (1024+10, 64*2*7*7) = (1034, 6272)
    #w3: (ngf*2+ny, ngf, 5, 5) = (128+10, 64, 5, 5 ) = (138, 64, 5, 5)
    #wx: (ngf+ny, nc, 5, 5) = (64+10, 1, 5, 5) = (74, 1, 5, 5)
    print('\n@@@@ gen()')
    printVal('Y', Y)
    printVal('w', w)  #matrix
    printVal('w2', w2)  #matrix
    printVal('w3', w3)  #tensor
    printVal('wx', wx)  #tensor
    # Reorder the dimensions of Y: the numeric arguments are dimension indices; 'x' inserts a broadcastable axis.
    #(G1)
    yb = Y.dimshuffle(0, 1, 'x', 'x')
    # yb is a 4-D tensor
    printVal('yb', yb)
    # Concatenate matrices Z and Y horizontally: the usual conditional-GAN construction.

    #(G2)
    Z = T.concatenate([Z, Y], axis=1)  # Z: (128, 110)

    #(G3)
    # Batch-normalize Z*w, then apply ReLU
    t1 = T.dot(Z, w)  #full connect : t1: (128, 1024)
    printVal('t1', t1)
    h = relu(batchnorm(t1))
    # h: (128, 1024)
    #(G4)
    h = T.concatenate([h, Y], axis=1)  # h: (128, 1034)
    #(G5)
    h2 = relu(batchnorm(T.dot(h, w2)))  # NOT full connect
    #(G6)
    h2 = h2.reshape((h2.shape[0], ngf * 2, 7, 7))

    #(G7)
    h3, yb2 = conv_cond_concat2(h2, yb)  #XXX

    #(G8) Deconvolution: per the paper, applied in place of spatial pooling
    d = deconv(h3, w3, subsample=(2, 2), border_mode=(2, 2))
    printVal('d', d)  # (128, 64, 14, 14)
    #h3 = relu(batchnorm(deconv(h2, w3, subsample=(2, 2), border_mode=(2, 2))))
    #(G9)
    h4 = relu(batchnorm(d))
    #(G10)
    h5, yb3 = conv_cond_concat2(h4, yb)
    #(G11)
    x = sigmoid(deconv(h5, wx, subsample=(2, 2), border_mode=(2, 2)))
    return x, yb, yb2, d, h3, h5
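conv_cond_concat (and the conv_cond_concat2 variant used here, which evidently also returns the broadcast labels) tiles the one-hot label tensor yb across the spatial grid and stacks it onto the feature maps. A sketch of the standard dcgan_code helper; the 2-variant's exact body is an assumption inferred from the calls above:

def conv_cond_concat(x, y):
    # concatenate conditioning tensor y onto x along the channel axis,
    # broadcasting y over x's spatial dimensions
    return T.concatenate(
        [x, y * T.ones((x.shape[0], y.shape[1], x.shape[2], x.shape[3]))],
        axis=1)

def conv_cond_concat2(x, y):
    # hypothetical variant matching the calls above: also hand back
    # the spatially tiled labels for inspection
    y_tiled = y * T.ones((x.shape[0], y.shape[1], x.shape[2], x.shape[3]))
    return T.concatenate([x, y_tiled], axis=1), y_tiled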
Example #16
def converter( IS, w1, w2, g2, b2, w3, g3, b3, w4, g4, b4, w5, g5, b5,
        w5d, g5d, b5d, w4d, g4d, b4d, w3d, g3d, b3d, w2d, g2d, b2d, w1d ):
    h1 = lrelu( dnn_conv( IS, w1, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ) )
    h2 = lrelu( batchnorm( dnn_conv( h1, w2, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g2, b = b2 ) )
    h3 = lrelu( batchnorm( dnn_conv( h2, w3, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g3, b = b3 ) )
    h4 = lrelu( batchnorm( dnn_conv( h3, w4, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g4, b = b4 ) )
    h5 = lrelu( batchnorm( dnn_conv( h4, w5, subsample = ( 1, 1 ), border_mode = ( 0, 0 ) ), g = g5, b = b5 ) )
    h5d = relu( batchnorm( dnn_conv( h5, w5d, subsample = ( 1, 1 ), border_mode = ( 0, 0 ) ), g = g5d, b = b5d ) )
    h5d = h5d.reshape( ( h5d.shape[0], nf * 8, 4, 4 ) )
    h4d = relu( batchnorm( deconv( h5d, w4d, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g4d, b = b4d ) )
    h3d = relu( batchnorm( deconv( h4d, w3d, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g3d, b = b3d ) )
    h2d = relu( batchnorm( deconv( h3d, w2d, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g2d, b = b2d ) )
    h1d = tanh( deconv( h2d, w1d, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ) )
    y = h1d
    return y
Example #17
 def gen_function(self, test_mode, Z, w, g, b, u, s, w2, g2, b2, u2, s2, w3, g3, b3, u3, s3, w4, g4, b4, u4, s4, wx):
     x1, updates1 = running_batchnorm(T.dot(Z, w), g=g, b=b, running_u=u, running_s=s, momentum=self.bn_momentum,
                                      test_mode=test_mode)
     h = relu(x1)
     h = h.reshape((h.shape[0], self.ngf*16, self.last_shape[0], self.last_shape[1]))
     x2, updates2 = running_batchnorm(deconv(h, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2,
                                      running_u=u2, running_s=s2, momentum=self.bn_momentum, test_mode=test_mode)
     h2 = relu(x2)
     x3, updates3 = running_batchnorm(deconv(h2, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3,
                                      running_u=u3, running_s=s3, momentum=self.bn_momentum, test_mode=test_mode)
     h3 = relu(x3)
     x4, updates4 = running_batchnorm(deconv(h3, w4, subsample=(2, 2), border_mode=(2, 2)), g=g4, b=b4,
                                      running_u=u4, running_s=s4, momentum=self.bn_momentum, test_mode=test_mode)
     h4 = relu(x4)
     x = tanh(deconv(h4, wx, subsample=self.visible_subsamp, border_mode=self.visible_bmode))
     return x, updates1 + updates2 + updates3 + updates4
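In training mode the concatenated updates carry the running-mean/std accumulators from each running_batchnorm and are meant to be attached to the compiled function. A hypothetical wiring (Z as the symbolic input, gen_params the list of shared weights):

# hypothetical usage: compile so that batchnorm statistics update per call
x_gen, bn_updates = model.gen_function(False, Z, *gen_params)  # test_mode=False
_train_gen = theano.function([Z], x_gen, updates=bn_updates)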
Example #18
 def decoder_function(hidden_data):
     # layer 0 (linear)
     h0 = T.dot(hidden_data, linear_w0) + linear_b0
     h0 = h0.reshape((h0.shape[0], num_gen_filters0, init_image_size, init_image_size))
     # layer 1 (deconv)
     h1 = deconv(relu(h0), conv_w1, subsample=(2, 2), border_mode=(2, 2)) + conv_b1.dimshuffle("x", 0, "x", "x")
     # layer 2 (deconv)
     h2 = deconv(relu(h1), conv_w2, subsample=(2, 2), border_mode=(2, 2)) + conv_b2.dimshuffle("x", 0, "x", "x")
     # layer 3 (deconv)
     h3 = deconv(relu(h2), conv_w3, subsample=(2, 2), border_mode=(2, 2)) + conv_b3.dimshuffle("x", 0, "x", "x")
     # layer_output (deconv)
     h4 = deconv(relu(h3), conv_w4, subsample=(2, 2), border_mode=(2, 2)) + conv_b4.dimshuffle("x", 0, "x", "x")
     output = tanh(h4)
     return [
         [hidden_data, T.flatten(h0, 2), T.flatten(h1, 2), T.flatten(h2, 2), T.flatten(h3, 2), T.flatten(h4, 2)],
         output,
     ]
Example #19
 def decoder_feature_function(hidden_data):
     # layer 0 (linear)
     h0 = T.dot(hidden_data, linear_w0) + linear_b0
     h0 = h0.reshape((h0.shape[0], num_gen_filters0, init_image_size, init_image_size))
     # layer 1 (deconv)
     h1 = deconv(relu(h0), conv_w1, subsample=(2, 2), border_mode=(2, 2)) + conv_b1.dimshuffle('x', 0, 'x', 'x')
     # layer 2 (deconv)
     h2 = deconv(relu(h1), conv_w2, subsample=(2, 2), border_mode=(2, 2)) + conv_b2.dimshuffle('x', 0, 'x', 'x')
     # layer 3 (deconv)
     h3 = deconv(relu(h2), conv_w3, subsample=(2, 2), border_mode=(2, 2)) + conv_b3.dimshuffle('x', 0, 'x', 'x')
     # feature
     feature = relu(h3)
     return [[T.flatten(h0,2),
              T.flatten(h1,2),
              T.flatten(h2,2),
              T.flatten(h3,2)],
             feature]
Example #20
def gen_postlearn(_z, _params, n_layers=3, n_f=128, init_sz=4):
    [gw0, gg0, gb0] = _params[0:3]
    hs = []
    h0_o = T.dot(_z, gw0)
    output = [h0_o]
    h0 = relu(batchnorm(h0_o, g=gg0, b=gb0))
    h1 = h0.reshape((h0.shape[0], n_f * 2**n_layers, init_sz, init_sz))
    hs.extend([h0, h1])
    for n in range(n_layers):
        [w, g, b] = _params[3 * (n + 1):3 * (n + 2)]
        hin = hs[-1]
        h_o = deconv(hin, w, subsample=(2, 2), border_mode=(2, 2))
        hout = relu(batchnorm(h_o, g=g, b=b))
        hs.append(hout)
        output.append(h_o)
    x = tanh(deconv(hs[-1], _params[-1], subsample=(2, 2), border_mode=(2, 2)))
    return x, output
Example #21
def gen(Z, Y):
    yb = Y.dimshuffle(0, 1, 'x', 'x')
    Z = T.concatenate([Z, Y], axis=1)
    h = relu(batchnorm(T.dot(Z, gw), g=gg, b=gb))
    h = h.reshape((h.shape[0], ngf * 4, 4, 4))
    h = conv_cond_concat(h, yb)
    h2 = relu(
        batchnorm(deconv(h, gw2, subsample=(2, 2), border_mode=(2, 2)),
                  g=gg2,
                  b=gb2))
    h2 = conv_cond_concat(h2, yb)
    h3 = relu(
        batchnorm(deconv(h2, gw3, subsample=(2, 2), border_mode=(2, 2)),
                  g=gg3,
                  b=gb3))
    h3 = conv_cond_concat(h3, yb)
    x = tanh(deconv(h3, gw4, subsample=(2, 2), border_mode=(2, 2)))
    return x
Example #22
 def generator_function(hidden_data, is_train=True):
     # layer 0 (linear)
     h0     = T.dot(hidden_data, linear_w0)
     h0     = h0 + t_rng.normal(size=h0.shape, std=0.01, dtype=t_floatX)
     h0     = relu(batchnorm(X=h0, g=linear_bn_w0, b=linear_bn_b0))
     h0     = h0.reshape((h0.shape[0], num_gen_filters0, init_image_size, init_image_size))
     # layer 1 (deconv)
     h1     = deconv(h0, conv_w1, subsample=(2, 2), border_mode=(2, 2))
     h1     = h1 + t_rng.normal(size=h1.shape, std=0.01, dtype=t_floatX)
     h1     = relu(batchnorm(h1, g=conv_bn_w1, b=conv_bn_b1))
     # layer 2 (deconv)
     h2     = deconv(h1, conv_w2, subsample=(2, 2), border_mode=(2, 2))
     h2     = h2 + t_rng.normal(size=h2.shape, std=0.01, dtype=t_floatX)
     h2     = relu(batchnorm(h2, g=conv_bn_w2, b=conv_bn_b2))
     # layer 3 (deconv)
     h3     = deconv(h2, conv_w3, subsample=(2, 2), border_mode=(2, 2))
     h3     = h3 + t_rng.normal(size=h3.shape, std=0.01, dtype=t_floatX)
     h3     = relu(batchnorm(h3, g=conv_bn_w3, b=conv_bn_b3))
     # layer 4 (deconv)
     output = tanh(deconv(h3, conv_w4, subsample=(2, 2), border_mode=(2, 2))+conv_b4.dimshuffle('x', 0, 'x', 'x'))
     return output
Example #23
def gen(_z, _params, n_layers=3, n_f=128, init_sz=4, nc=3):
    [gw0, gg0, gb0] = _params[0:3]
    hs = []
    h0 = relu(batchnorm(T.dot(_z, gw0), g=gg0, b=gb0))
    h1 = h0.reshape((h0.shape[0], n_f * 2**n_layers, init_sz, init_sz))
    hs.extend([h0, h1])
    for n in range(n_layers):
        [w, g, b] = _params[3 * (n + 1):3 * (n + 2)]
        hin = hs[-1]
        hout = relu(
            batchnorm(deconv(hin, w, subsample=(2, 2), border_mode=(2, 2)),
                      g=g,
                      b=b))
        hs.append(hout)
    x = deconv(hs[-1], _params[-1], subsample=(2, 2), border_mode=(2, 2))

    if nc == 3:
        x_f = tanh(x)
    elif nc == 1:
        x_f = sigmoid(x)
    else:
        raise ValueError('nc must be 1 or 3')
    return x_f
Example #24
def gen_test(_z,
             _params,
             _batchnorm,
             n_layers=3,
             n_f=128,
             init_sz=4,
             nc=3,
             use_tanh=False):
    if use_tanh:
        _z = tanh(_z)
    # gw0 : weight of dense layer(0)
    # gg0 , gb0 : params of batchnorm layer
    [gw0, gg0, gb0] = _params[0:3]
    hs = []
    u = _batchnorm[0]
    s = _batchnorm[n_layers + 1]
    # Clip z => Dense => BatchNorm => ReLU
    h0 = relu(
        batchnorm(T.dot(T.clip(_z, -1.0, 1.0), gw0), u=u, s=s, g=gg0, b=gb0))
    # reshape to 4D
    h1 = h0.reshape((h0.shape[0], n_f * 2**n_layers, init_sz, init_sz))
    hs.extend([h0, h1])
    for n in range(n_layers):
        [w, g, b] = _params[3 * (n + 1):3 * (n + 2)]
        hin = hs[-1]
        u = _batchnorm[n + 1]
        s = _batchnorm[n + n_layers + 2]
        hout = relu(
            batchnorm(deconv(hin, w, subsample=(2, 2), border_mode=(2, 2)),
                      u=u,
                      s=s,
                      g=g,
                      b=b))
        hs.append(hout)
    x = deconv(hs[-1], _params[-1], subsample=(2, 2), border_mode=(2, 2))
    if nc == 3:
        x_f = tanh(x)
    elif nc == 1:
        x_f = sigmoid(x)
    else:
        raise ValueError('nc must be 1 or 3')
    return x_f
Example #25
def gen(Z, w, g, b, w2, g2, b2, w3, g3, b3, w4, g4, b4, w5, g5, b5, w6, g6, b6,
        wx):
    h = relu(batchnorm(T.dot(Z, w), g=g, b=b))
    h = h.reshape((h.shape[0], ngf * 4, 4, 4))
    h2 = relu(
        batchnorm(deconv(h, w2, subsample=(2, 2), border_mode=(1, 1)),
                  g=g2,
                  b=b2))
    h3 = relu(
        batchnorm(deconv(h2, w3, subsample=(1, 1), border_mode=(1, 1)),
                  g=g3,
                  b=b3))
    h4 = relu(
        batchnorm(deconv(h3, w4, subsample=(2, 2), border_mode=(1, 1)),
                  g=g4,
                  b=b4))
    h5 = relu(
        batchnorm(deconv(h4, w5, subsample=(1, 1), border_mode=(1, 1)),
                  g=g5,
                  b=b5))
    h6 = relu(
        batchnorm(deconv(h5, w6, subsample=(2, 2), border_mode=(1, 1)),
                  g=g6,
                  b=b6))
    x = tanh(deconv(h6, wx, subsample=(1, 1), border_mode=(1, 1)))
    return x
Example #26
    def sample_generator(hidden_data):
        # linear stage (hidden_size => 2x2x512)
        seed = T.dot(hidden_data, linear_w0) + linear_b0
        seed = seed.reshape((seed.shape[0], num_gen_filters0, init_image_size, init_image_size))
        seed = relu(seed)

        # deconv stage 0 (2x2x512=>4x4x512=>4x4x512=>4x4x512)
        h0_0 =   deconv(      seed, conv_w0_0, subsample=(2, 2), border_mode=(1, 1))+conv_b0_0.dimshuffle('x', 0, 'x', 'x')
        h0_1 = dnn_conv(relu(h0_0), conv_w0_1, subsample=(1, 1), border_mode=(1, 1))+conv_b0_1.dimshuffle('x', 0, 'x', 'x')
        h0_2 = dnn_conv(relu(h0_1), conv_w0_2, subsample=(1, 1), border_mode=(1, 1))+conv_b0_2.dimshuffle('x', 0, 'x', 'x')

        # deconv stage 1 (4x4x512=>8x8x512=>8x8x512=>8x8x512)
        h1_0 =   deconv(relu(h0_2), conv_w1_0, subsample=(2, 2), border_mode=(1, 1))+conv_b1_0.dimshuffle('x', 0, 'x', 'x')
        h1_1 = dnn_conv(relu(h1_0), conv_w1_1, subsample=(1, 1), border_mode=(1, 1))+conv_b1_1.dimshuffle('x', 0, 'x', 'x')
        h1_2 = dnn_conv(relu(h1_1), conv_w1_2, subsample=(1, 1), border_mode=(1, 1))+conv_b1_2.dimshuffle('x', 0, 'x', 'x')

        # deconv stage 2 (8x8x512=>16x16x256=>16x16x256=>16x16x256)
        h2_0 =   deconv(relu(h1_2), conv_w2_0, subsample=(2, 2), border_mode=(1, 1))+conv_b2_0.dimshuffle('x', 0, 'x', 'x')
        h2_1 = dnn_conv(relu(h2_0), conv_w2_1, subsample=(1, 1), border_mode=(1, 1))+conv_b2_1.dimshuffle('x', 0, 'x', 'x')
        h2_2 = dnn_conv(relu(h2_1), conv_w2_2, subsample=(1, 1), border_mode=(1, 1))+conv_b2_2.dimshuffle('x', 0, 'x', 'x')

        # deconv stage 3 (16x16x256=>32x32x128=>32x32x128)
        h3_0 =   deconv(relu(h2_2), conv_w3_0, subsample=(2, 2), border_mode=(1, 1))+conv_b3_0.dimshuffle('x', 0, 'x', 'x')
        h3_1 = dnn_conv(relu(h3_0), conv_w3_1, subsample=(1, 1), border_mode=(1, 1))+conv_b3_1.dimshuffle('x', 0, 'x', 'x')

        # deconv stage 4 (32x32x128=>64x64x64=>64x64x64)
        h4_0 =   deconv(relu(h3_1), conv_w4_0, subsample=(2, 2), border_mode=(1, 1))+conv_b4_0.dimshuffle('x', 0, 'x', 'x')
        h4_1 = dnn_conv(relu(h4_0), conv_w4_1, subsample=(1, 1), border_mode=(1, 1))+conv_b4_1.dimshuffle('x', 0, 'x', 'x')

        # deconv output (64x64x64=>64x64x3)
        output = dnn_conv(relu(h4_1), conv_w5, subsample=(1, 1), border_mode=(1, 1))+conv_b5.dimshuffle('x', 0, 'x', 'x')
        output = tanh(output)
        return [T.flatten(h4_1, 2), T.flatten(h4_0, 2),
                T.flatten(h3_1, 2), T.flatten(h3_0, 2),
                T.flatten(h2_2, 2), T.flatten(h2_1, 2), T.flatten(h2_0, 2),
                T.flatten(h1_2, 2), T.flatten(h1_1, 2), T.flatten(h1_0, 2),
                T.flatten(h0_2, 2), T.flatten(h0_1, 2), T.flatten(h0_0, 2),
                T.flatten(seed, 2),
                output]
Example #27
File: dcgan_theano.py Project: hsab/iGAN
def gen_test(_z, _params, _batchnorm, n_layers=3, n_f=128, init_sz=4, nc=3, use_tanh=False):
    if use_tanh:
        _z = tanh(_z)
    [gw0, gg0, gb0] = _params[0:3]
    hs = []
    u = _batchnorm[0]
    s = _batchnorm[n_layers + 1]
    h0 = relu(batchnorm(T.dot(T.clip(_z, -1.0, 1.0), gw0), u=u, s=s, g=gg0, b=gb0))
    h1 = h0.reshape((h0.shape[0], n_f * 2 ** n_layers, init_sz, init_sz))
    hs.extend([h0, h1])
    for n in range(n_layers):
        [w, g, b] = _params[3 * (n + 1):3 * (n + 2)]
        hin = hs[-1]
        u = _batchnorm[n + 1]
        s = _batchnorm[n + n_layers + 2]
        hout = relu(batchnorm(deconv(hin, w, subsample=(2, 2), border_mode=(2, 2)), u=u, s=s, g=g, b=b))
        hs.append(hout)
    x = deconv(hs[-1], _params[-1], subsample=(2, 2), border_mode=(2, 2))
    if nc == 3:
        x_f = tanh(x)
    elif nc == 1:
        x_f = sigmoid(x)
    else:
        raise ValueError('nc must be 1 or 3')
    return x_f
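Every deconv in this generator uses subsample=(2, 2), so the n_layers hidden stages and the final output stage each double the spatial size. A quick check of the resulting resolution:

def gen_output_size(init_sz=4, n_layers=3):
    # n_layers hidden deconvs plus the output deconv, each doubling H and W
    return init_sz * 2 ** (n_layers + 1)

assert gen_output_size() == 64  # 4 -> 8 -> 16 -> 32 -> 64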
Example #28
def gen(Z, Y, w, w2, w3, wx):
    print('\n@@@@ gen()')
    printVal('Z', Z)  # matrix
    #printVal( 'Y', Y )  # matrix
    printVal('w', w)  # matrix
    printVal('w2', w2)  # matrix

    printVal('w3', w3)  # tensor
    printVal('wx', wx)  # tensor
    # Reorder the dimensions of Y: the numeric arguments are dimension indices; 'x' inserts a broadcastable axis.
    # The total number of elements is unchanged by the reordering.
    yb = Y.dimshuffle(0, 1, 'x', 'x')
    # yb is a 4-D tensor
    #printVal('yb', yb)
    # Concatenate matrices Z and Y horizontally
    Z = T.concatenate([Z, Y], axis=1)  # matrix
    # Batch-normalize Z*w (fully connected), then apply ReLU
    tmp_a = T.dot(Z, w)  # dot(matrix, matrix)->matrix
    printVal('dot(Z,w) -> tmp_a', tmp_a)
    h = relu(batchnorm(T.dot(Z, w)))  #CCC
    h = T.concatenate([h, Y], axis=1)  #CCC

    printVal('h', h)  # matrix
    h2 = relu(batchnorm(T.dot(h, w2)))  #CCC
    printVal('h2', h2)  #h2:matrix
    h2r = h2.reshape((h2.shape[0], GEN_NUM_FILTER * 2, 7, 7))  #CCC
    printVal('h2r', h2r)  #h2r:tensor
    h2ry = conv_cond_concat(h2r, yb)  #
    printVal('h2ry', h2ry)  #h2:tensor
    # Deconvolution: per the paper, applied in place of spatial pooling
    d = deconv(h2ry, w3, subsample=(2, 2), border_mode=(2, 2))
    printVal('d', d)
    #h3 = relu(batchnorm(deconv(h2, w3, subsample=(2, 2), border_mode=(2, 2))))
    h3 = relu(batchnorm(d))
    h3 = conv_cond_concat(h3, yb)
    x = sigmoid(deconv(h3, wx, subsample=(2, 2), border_mode=(2, 2)))
    return x, h2
Example #29
 def apply(self, input, rand_vals=None):
     """
     Apply this generator module to some input.
     """
     batch_size = input.shape[0]
     bm = int((self.filt_dim - 1) / 2)  # use "same" mode convolutions
     ss = self.us_stride  # stride for "learned upsampling"
     if self.use_pooling:
         # "unpool" the input if desired
         input = input.repeat(ss, axis=2).repeat(ss, axis=3)
     # get shape for random values that will augment input
     rand_shape = (batch_size, self.rand_chans, input.shape[2],
                   input.shape[3])
     if self.use_rand:
         # augment input with random channels
         if rand_vals is None:
             if self.rand_type == 'normal':
                 rand_vals = self.rng.normal(size=rand_shape, avg=0.0, std=1.0, \
                                             dtype=theano.config.floatX)
             else:
                 rand_vals = self.rng.uniform(size=rand_shape, low=-1.0, high=1.0, \
                                              dtype=theano.config.floatX)
         rand_vals = rand_vals.reshape(rand_shape)
         # stack random values on top of input
         full_input = T.concatenate([rand_vals, input], axis=1)
     else:
         # don't augment input with random channels
         full_input = input
     # apply first convolution, perhaps with fractional striding
     if self.use_pooling:
         h1 = dnn_conv(full_input,
                       self.w1,
                       subsample=(1, 1),
                       border_mode=(bm, bm))
     else:
         # apply first conv layer (with fractional stride for upsampling)
         h1 = deconv(full_input,
                     self.w1,
                     subsample=(ss, ss),
                     border_mode=(bm, bm))
     if self.apply_bn_1:
         h1 = batchnorm(h1, g=self.g1, b=self.b1)
     h1 = relu(h1)
     # apply second conv layer
     h2 = dnn_conv(h1, self.w2, subsample=(1, 1), border_mode=(bm, bm))
     if self.apply_bn_2:
         h2 = batchnorm(h2, g=self.g2, b=self.b2)
     h2 = relu(h2)
     return h2
Example #30
def gen(Z, w, g, b, w2, g2, b2, w3, g3, b3, w4, g4, b4, w5, g5, b5, w6, g6, b6, wx):
    h = relu(batchnorm(T.dot(Z, w), g=g, b=b))
    h = h.reshape((h.shape[0], ngf*4, 4, 4))
    h2 = relu(batchnorm(deconv(h, w2, subsample=(2, 2), border_mode=(1, 1)), g=g2, b=b2))
    h3 = relu(batchnorm(deconv(h2, w3, subsample=(1, 1), border_mode=(1, 1)), g=g3, b=b3))
    h4 = relu(batchnorm(deconv(h3, w4, subsample=(2, 2), border_mode=(1, 1)), g=g4, b=b4))
    h5 = relu(batchnorm(deconv(h4, w5, subsample=(1, 1), border_mode=(1, 1)), g=g5, b=b5))
    h6 = relu(batchnorm(deconv(h5, w6, subsample=(2, 2), border_mode=(1, 1)), g=g6, b=b6))
    x = tanh(deconv(h6, wx, subsample=(1, 1), border_mode=(1, 1)))
    return x
Example #31
def discrim(X):
    current_input = dropout(X, 0.3)
    ### encoder ###
    cv1 = relu(
        dnn_conv(current_input, aew1, subsample=(1, 1), border_mode=(1, 1)))
    cv2 = relu(
        batchnorm(dnn_conv(cv1, aew2, subsample=(4, 4), border_mode=(2, 2)),
                  g=aeg2,
                  b=aeb2))
    cv3 = relu(
        batchnorm(dnn_conv(cv2, aew3, subsample=(1, 1), border_mode=(1, 1)),
                  g=aeg3,
                  b=aeb3))
    cv4 = relu(
        batchnorm(dnn_conv(cv3, aew4, subsample=(4, 4), border_mode=(2, 2)),
                  g=aeg4,
                  b=aeb4))
    cv5 = relu(
        batchnorm(dnn_conv(cv4, aew5, subsample=(1, 1), border_mode=(1, 1)),
                  g=aeg5,
                  b=aeb5))
    cv6 = relu(
        batchnorm(dnn_conv(cv5, aew6, subsample=(4, 4), border_mode=(0, 0)),
                  g=aeg6,
                  b=aeb6))

    ### decoder ###
    dv6 = relu(
        batchnorm(deconv(cv6, aew6, subsample=(4, 4), border_mode=(0, 0)),
                  g=aeg6t,
                  b=aeb6t))
    dv5 = relu(
        batchnorm(deconv(dv6, aew5, subsample=(1, 1), border_mode=(1, 1)),
                  g=aeg5t,
                  b=aeb5t))
    dv4 = relu(
        batchnorm(deconv(dv5, aew4, subsample=(4, 4), border_mode=(2, 2)),
                  g=aeg4t,
                  b=aeb4t))
    dv3 = relu(
        batchnorm(deconv(dv4, aew3, subsample=(1, 1), border_mode=(1, 1)),
                  g=aeg3t,
                  b=aeb3t))
    dv2 = relu(
        batchnorm(deconv(dv3, aew2, subsample=(4, 4), border_mode=(2, 2)),
                  g=aeg2t,
                  b=aeb2t))
    dv1 = tanh(deconv(dv2, aew1, subsample=(1, 1), border_mode=(1, 1)))

    rX = dv1

    mse = T.sqrt(T.sum(T.abs_(T.flatten(X - rX, 2)), axis=1)) + T.sqrt(
        T.sum(T.flatten((X - rX)**2, 2), axis=1))  # L1 and L2 loss
    return T.flatten(cv6, 2), rX, mse
Example #32
 def apply(self, input, rand_vals=None):
     """
     Apply this generator module to some input.
     """
     batch_size = input.shape[0]
     bm = int((self.filt_dim - 1) / 2) # use "same" mode convolutions
     ss = self.us_stride               # stride for "learned upsampling"
     if self.use_pooling:
         # "unpool" the input if desired
         input = input.repeat(ss, axis=2).repeat(ss, axis=3)
     # get shape for random values that will augment input
     rand_shape = (batch_size, self.rand_chans, input.shape[2], input.shape[3])
     if self.use_rand:
         # augment input with random channels
         if rand_vals is None:
             if self.rand_type == 'normal':
                 rand_vals = self.rng.normal(size=rand_shape, avg=0.0, std=1.0, \
                                             dtype=theano.config.floatX)
             else:
                 rand_vals = self.rng.uniform(size=rand_shape, low=-1.0, high=1.0, \
                                              dtype=theano.config.floatX)
         rand_vals = rand_vals.reshape(rand_shape)
         # stack random values on top of input
         full_input = T.concatenate([rand_vals, input], axis=1)
     else:
         # don't augment input with random channels
         full_input = input
     # apply first convolution, perhaps with fractional striding
     if self.use_pooling:
         h1 = dnn_conv(full_input, self.w1, subsample=(1, 1), border_mode=(bm, bm))
     else:
         # apply first conv layer (with fractional stride for upsampling)
         h1 = deconv(full_input, self.w1, subsample=(ss, ss), border_mode=(bm, bm))
     if self.apply_bn_1:
         h1 = batchnorm(h1, g=self.g1, b=self.b1)
     h1 = relu(h1)
     # apply second conv layer
     h2 = dnn_conv(h1, self.w2, subsample=(1, 1), border_mode=(bm, bm))
     if self.apply_bn_2:
         h2 = batchnorm(h2, g=self.g2, b=self.b2)
     h2 = relu(h2)
     return h2
Example #33
 def convolve(self, input, **kwargs):
     return deconv(input, self.W, subsample=(2, 2), border_mode='half')
Example #34
 def decoder_green_function(feature_data):
     decoder_green = deconv(feature_data, green_w, subsample=(2, 2), border_mode=(2, 2)) + green_b.dimshuffle('x', 0, 'x', 'x')
     return decoder_green
Example #36
 def decoder_red_function(feature_data):
     decoder_red = deconv(feature_data, red_w, subsample=(2, 2), border_mode=(2, 2)) + red_b.dimshuffle('x', 0, 'x', 'x')
     return decoder_red
Example #37
def deconv_and_depool(X, w, s=2, b=None, activation=T.nnet.relu):
    return activation(deconv(depool(X, s), w, s, b))
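depool is not defined in this excerpt; a minimal sketch, assuming the same nearest-neighbour "unpooling" used by the use_pooling branch of apply() above:

def depool(X, factor=2):
    # repeat each pixel factor times along both spatial axes
    return X.repeat(factor, axis=2).repeat(factor, axis=3)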
Example #38
## encode layer
e_layer_sizes = [128, 32, 8]
e_filter_sizes = [1, 256, 1024]

eX, e_params, e_layers = make_conv_set(X, e_layer_sizes, e_filter_sizes, "e")

## generative layer
g_layer_sizes = [8, 16, 32, 64, 128]
g_num_filters = [1024, 512, 256, 256, 128]

g_out, g_params, g_layers = make_conv_set(eX, g_layer_sizes, g_num_filters,
                                          "g")
gwx = gifn((128, 2, 5, 5), 'gwx')
g_params += [gwx]
gX = tanh(deconv(g_out, gwx, subsample=(1, 1), border_mode=(2, 2)))

## discrim layer(s)

df1 = 128
d_layer_sizes = [128, 64, 32, 16, 8]
d_filter_sizes = [2, df1, 2 * df1, 4 * df1, 8 * df1]

dwy = difn((df1 * 8 * 10 * 8, 1), 'dwy')


def discrim(input, name, weights=None):
    d_out, disc_params, d_layers = make_conv_set(input, d_layer_sizes,
                                                 d_filter_sizes, name,
                                                 weights=weights)
    d_flat = T.flatten(d_out, 2)
    disc_params += [dwy]
    y = sigmoid(T.dot(d_flat, dwy))
    return y, disc_params, d_layers
Example #39
def discrim(X, Y):
    def classifier(H, Y):
        p_y_given_x = T.nnet.softmax(T.dot(H, logistic_w) + logistic_b)
        neg_lik = -T.sum(T.mul(T.log(p_y_given_x), Y), axis=1)
        return neg_lik, p_y_given_x

    current_input = dropout(X, 0.2)
    ### encoder ###
    cv1 = relu(
        dnn_conv(current_input, aew1, subsample=(1, 1), border_mode=(1, 1)))
    cv2 = relu(
        batchnorm(dnn_conv(cv1, aew2, subsample=(2, 2), border_mode=(0, 0)),
                  g=aeg2,
                  b=aeb2))
    cv3 = relu(
        batchnorm(dnn_conv(cv2, aew3, subsample=(1, 1), border_mode=(1, 1)),
                  g=aeg3,
                  b=aeb3))
    cv4 = relu(
        batchnorm(dnn_conv(cv3, aew4, subsample=(4, 4), border_mode=(0, 0)),
                  g=aeg4,
                  b=aeb4))
    cv5 = relu(
        batchnorm(dnn_conv(cv4, aew5, subsample=(1, 1), border_mode=(1, 1)),
                  g=aeg5,
                  b=aeb5))
    cv6 = relu(
        batchnorm(dnn_conv(cv5, aew6, subsample=(4, 4), border_mode=(0, 0)),
                  g=aeg6,
                  b=aeb6))

    ### decoder ###
    dv6 = relu(
        batchnorm(deconv(cv6, aew6, subsample=(4, 4), border_mode=(0, 0)),
                  g=aeg6t,
                  b=aeb6t))
    dv5 = relu(
        batchnorm(deconv(dv6, aew5, subsample=(1, 1), border_mode=(1, 1)),
                  g=aeg5t,
                  b=aeb5t))
    dv4 = relu(
        batchnorm(deconv(dv5, aew4, subsample=(4, 4), border_mode=(0, 0)),
                  g=aeg4t,
                  b=aeb4t))
    dv3 = relu(
        batchnorm(deconv(dv4, aew3, subsample=(1, 1), border_mode=(1, 1)),
                  g=aeg3t,
                  b=aeb3t))
    dv2 = relu(
        batchnorm(deconv(dv3, aew2, subsample=(2, 2), border_mode=(0, 0)),
                  g=aeg2t,
                  b=aeb2t))
    dv1 = tanh(deconv(dv2, aew1, subsample=(1, 1), border_mode=(1, 1)))

    hidden = T.flatten(cv6, 2)
    rX = dv1
    mse = T.sqrt(T.sum(T.flatten((X - rX)**2, 2), axis=1))

    #mse = T.sqrt(T.sum(T.abs_(T.flatten(X-rX, 2)),axis=1)) + T.sqrt(T.sum(T.flatten((X-rX)**2, 2), axis=1))
    neg_lik, p_y_given_x = classifier(hidden, Y)
    return hidden, p_y_given_x, rX, mse, neg_lik
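For one-hot labels Y, the classifier() above computes the per-example categorical cross-entropy; equivalently, Theano's built-in could be used:

# equivalent to the hand-rolled neg_lik above when Y is one-hot
neg_lik = T.nnet.categorical_crossentropy(p_y_given_x, Y)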
Example #40
def load_model():
    [e_params, g_params, d_params] = pickle.load(open("faces_dcgan.pkl", "rb"))
    gwx = g_params[-1]
    dwy = d_params[-1]
    # inputs
    X = T.tensor4()
    ## encode layer
    e_layer_sizes = [128, 64, 32, 16, 8]
    e_filter_sizes = [3, 256, 256, 512, 1024]
    eX, e_params, e_layers = make_conv_set(X,
                                           e_layer_sizes,
                                           e_filter_sizes,
                                           "e",
                                           weights=e_params)
    ## generative layer
    g_layer_sizes = [8, 16, 32, 64, 128]
    g_num_filters = [1024, 512, 256, 256, 128]
    g_out, g_params, g_layers = make_conv_set(eX,
                                              g_layer_sizes,
                                              g_num_filters,
                                              "g",
                                              weights=g_params)
    g_params += [gwx]
    gX = tanh(deconv(g_out, gwx, subsample=(1, 1), border_mode=(2, 2)))
    ## discrim layer(s)

    df1 = 128
    d_layer_sizes = [128, 64, 32, 16, 8]
    d_filter_sizes = [3, df1, 2 * df1, 4 * df1, 8 * df1]

    def discrim(input, name, weights=None):
        d_out, disc_params, d_layers = make_conv_set(input,
                                                     d_layer_sizes,
                                                     d_filter_sizes,
                                                     name,
                                                     weights=weights)
        d_flat = T.flatten(d_out, 2)

        disc_params += [dwy]
        y = sigmoid(T.dot(d_flat, dwy))

        return y, disc_params, d_layers

    # target outputs
    target = T.tensor4()

    p_real, d_params, d_layers = discrim(target, "d", weights=d_params)
    # we need to make sure the p_gen params are the same as the p_real params
    p_gen, d_params2, d_layers = discrim(gX, "d", weights=d_params)

    ## GAN costs
    d_cost_real = bce(p_real, T.ones(p_real.shape)).mean()
    d_cost_gen = bce(p_gen, T.zeros(p_gen.shape)).mean()
    g_cost_d = bce(p_gen, T.ones(p_gen.shape)).mean()

    ## MSE encoding cost is done on an (averaged) downscaling of the image
    target_pool = max_pool_2d(target, (4, 4),
                              mode="average_exc_pad",
                              ignore_border=True)
    target_flat = T.flatten(target_pool, 2)
    gX_pool = max_pool_2d(gX, (4, 4),
                          mode="average_exc_pad",
                          ignore_border=True)
    gX_flat = T.flatten(gX_pool, 2)
    enc_cost = mse(gX_flat, target_flat).mean()

    ## generator cost is a linear combination of the discriminator cost and the MSE encoding cost
    d_cost = d_cost_real + d_cost_gen
    g_cost = g_cost_d + enc_cost / 10  ## if the enc_cost is weighted too highly it will take a long time to train

    ## N.B. e_cost and e_updates will only try to minimise MSE loss on the autoencoder (for debugging)
    e_cost = enc_cost

    cost = [g_cost_d, d_cost_real, enc_cost]

    elrt = sharedX(0.002)
    lrt = sharedX(lr)
    d_updater = updates.Adam(lr=lrt,
                             b1=b1,
                             regularizer=updates.Regularizer(l2=l2))
    g_updater = updates.Adam(lr=lrt,
                             b1=b1,
                             regularizer=updates.Regularizer(l2=l2))
    e_updater = updates.Adam(lr=elrt,
                             b1=b1,
                             regularizer=updates.Regularizer(l2=l2))

    d_updates = d_updater(d_params, d_cost)
    g_updates = g_updater(e_params + g_params, g_cost)
    e_updates = e_updater(e_params, e_cost)

    print('COMPILING')
    t = time()
    _train_g = theano.function([X, target], cost, updates=g_updates)
    _train_d = theano.function([X, target], cost, updates=d_updates)
    _train_e = theano.function([X, target], cost, updates=e_updates)
    _get_cost = theano.function([X, target], cost)
    print('%.2f seconds to compile theano functions' % (time() - t))
    img_dir = "gen_images/"
    if not os.path.exists(img_dir):
        os.makedirs(img_dir)
    ae_encode = theano.function([X, target], [gX, target])
    return ae_encode
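A hypothetical usage sketch: the pickle name comes from the code above, and the input shape follows the encoder's 3-channel, 128-pixel configuration (batch size chosen arbitrarily):

import numpy as np

ae_encode = load_model()
x_batch = np.random.uniform(-1.0, 1.0, size=(128, 3, 128, 128)).astype('float32')
gX_val, target_val = ae_encode(x_batch, x_batch)  # reconstruction and raw target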
Example #41
X = T.tensor4()

## encode layer
e_layer_sizes = [128, 32, 8]
e_filter_sizes = [1, 256, 1024]

eX, e_params, e_layers = make_conv_set(X, e_layer_sizes, e_filter_sizes, "e")

## generative layer
g_layer_sizes = [8, 16, 32, 64, 128]
g_num_filters = [1024, 512, 256, 256, 128]

g_out, g_params, g_layers = make_conv_set(eX, g_layer_sizes, g_num_filters, "g")
gwx = gifn((128, 2, 5, 5), 'gwx')
g_params += [gwx]
gX = tanh(deconv(g_out, gwx, subsample=(1, 1), border_mode=(2, 2)))

## discrim layer(s)

df1 = 128
d_layer_sizes = [128, 64, 32, 16, 8]
d_filter_sizes = [2, df1, 2 * df1, 4 * df1, 8 * df1]

dwy = difn((df1 * 8 * 10 * 8, 1), 'dwy')

def discrim(input, name, weights=None):
    d_out, disc_params, d_layers = make_conv_set(input, d_layer_sizes, d_filter_sizes, name, weights = weights)
    d_flat = T.flatten(d_out, 2)
    
    disc_params += [dwy]
    y = sigmoid(T.dot(d_flat, dwy))
    return y, disc_params, d_layers
Example #42
 def decoder_blue_function(feature_data):
     decoder_blue = deconv(feature_data, blue_w, subsample=(2, 2), border_mode=(2, 2)) + blue_b.dimshuffle('x', 0, 'x', 'x')
     return decoder_blue