# DCGAN-style discriminator with two sigmoid heads (y, y1). Each stage is a
# strided convolution with optional batchnorm (args.dbn) or a plain bias
# (args.db1 for the first layer), followed by leaky-relu and dropout.
def discrim(X, w, b, w2, g2, b2, w3, g3, b3, w4, g4, b4, wy, wy1):
    h0 = dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2))
    if args.db1:
        h0 += b.dimshuffle('x', 0, 'x', 'x')
    h1 = lrelu(h0)
    h1 = dropout(h1, args.dropout)
    h1 = dnn_conv(h1, w2, subsample=(2, 2), border_mode=(2, 2))
    if args.dbn:
        h1 = batchnorm(h1, g=g2, b=b2)
    else:
        h1 += b2.dimshuffle('x', 0, 'x', 'x')
    h2 = lrelu(h1)
    h2 = dropout(h2, args.dropout)
    h2 = dnn_conv(h2, w3, subsample=(2, 2), border_mode=(2, 2))
    if args.dbn:
        h2 = batchnorm(h2, g=g3, b=b3)
    else:
        h2 += b3.dimshuffle('x', 0, 'x', 'x')
    h3 = lrelu(h2)
    h3 = dropout(h3, args.dropout)
    h3 = dnn_conv(h3, w4, subsample=(2, 2), border_mode=(2, 2))
    if args.dbn:
        h3 = batchnorm(h3, g=g4, b=b4)
    else:
        h3 += b4.dimshuffle('x', 0, 'x', 'x')
    h4 = lrelu(h3)
    h4 = dropout(h4, args.dropout)
    h4 = T.flatten(h4, 2)
    y = sigmoid(T.dot(h4, wy))
    y1 = sigmoid(T.dot(h4, wy1))
    return y, y1
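
These listings rely on DCGAN-style helpers (lrelu, dropout, batchnorm, sigmoid) and on dnn_conv from theano.sandbox.cuda.dnn, none of which are defined here. A minimal sketch of the two simplest helpers, assuming the conventional leak of 0.2 and inverted dropout (the repositories' own versions may differ):

import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams

t_rng = MRG_RandomStreams(seed=42)

def lrelu(x, leak=0.2):
    # leaky rectifier: max(x, leak * x)
    return T.maximum(x, leak * x)

def dropout(x, p=0.):
    # inverted dropout: zero each unit with probability p, rescale the rest
    if p > 0:
        retain_prob = 1 - p
        x = x * t_rng.binomial(x.shape, p=retain_prob, dtype=x.dtype) / retain_prob
    return x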
# Energy-style discriminator: the score y = -relu(h·wy + by) is a
# non-positive energy rather than a sigmoid probability.
def discrim(X, w, w2, g2, b2, w3, g3, b3, wy, by):
    h0 = dropout(relu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2))), p=0.5)
    h1 = dropout(relu(batchnorm(dnn_conv(h0, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2)), p=0.5)
    h2 = dropout(relu(batchnorm(dnn_conv(h1, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3)), p=0.5)
    h2 = T.flatten(h2, 2)
    y = -relu(T.dot(h2, wy)+by)
    return y
# Variant scoring head: squared projections are passed through log(1 + y)
# and combined with positive weights exp(wa).
def discrim(X, w, w2, g2, b2, w3, g3, b3, wy, wa):
    h0 = dropout(relu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2))), p=0.5)
    h1 = dropout(relu(batchnorm(dnn_conv(h0, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2)), p=0.5)
    h2 = dropout(relu(batchnorm(dnn_conv(h1, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3)), p=0.5)
    h2 = T.flatten(h2, 2)
    y = square(T.dot(h2, wy))
    y = T.dot(T.log(1+y), T.exp(wa))
    return y
Example #5
# Convolutional encoder: two conv-and-pool stages, a tanh fully connected
# layer, then a linear projection to the latent code z.
def conv_encoder(X, w1, w2, w4, b4, w_mu, b_mu):
    h1 = conv_and_pool(X, w1, s=2)
    h1 = dropout(h1, 0.3)
    h2 = conv_and_pool(h1, w2, s=2)
    h2 = dropout(h2, 0.3)

    h3 = T.flatten(h2, 2)
    h4 = tanh((T.dot(h3, w4) + b4))
    h4 = dropout(h4, 0.3)

    z = T.dot(h4, w_mu) + b_mu
    return z
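
conv_and_pool is not defined in this listing; a plausible sketch, assuming a same-padded convolution with a rectifier followed by s x s max pooling (a strided-convolution reading is equally possible):

import theano.tensor as T
from theano.sandbox.cuda.dnn import dnn_conv, dnn_pool

def conv_and_pool(x, w, s=2):
    # hypothetical reconstruction: conv, relu, then s x s max pooling
    h = T.nnet.relu(dnn_conv(x, w, subsample=(1, 1), border_mode=(1, 1)))
    return dnn_pool(h, ws=(s, s), stride=(s, s))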
Example #6
# Autoencoding discriminator: a convolutional encoder/decoder pair whose
# reconstruction error serves as the discriminator signal. The aew*, aeg*,
# and aeb* parameters are module-level shared variables.
def discrim(X):
    current_input = dropout(X, 0.3)
    ### encoder ###
    cv1 = relu(
        dnn_conv(current_input, aew1, subsample=(1, 1), border_mode=(1, 1)))
    cv2 = relu(
        batchnorm(dnn_conv(cv1, aew2, subsample=(4, 4), border_mode=(2, 2)),
                  g=aeg2,
                  b=aeb2))
    cv3 = relu(
        batchnorm(dnn_conv(cv2, aew3, subsample=(1, 1), border_mode=(1, 1)),
                  g=aeg3,
                  b=aeb3))
    cv4 = relu(
        batchnorm(dnn_conv(cv3, aew4, subsample=(4, 4), border_mode=(2, 2)),
                  g=aeg4,
                  b=aeb4))
    cv5 = relu(
        batchnorm(dnn_conv(cv4, aew5, subsample=(1, 1), border_mode=(1, 1)),
                  g=aeg5,
                  b=aeb5))
    cv6 = relu(
        batchnorm(dnn_conv(cv5, aew6, subsample=(4, 4), border_mode=(0, 0)),
                  g=aeg6,
                  b=aeb6))

    ### decoder ###
    dv6 = relu(
        batchnorm(deconv(cv6, aew6, subsample=(4, 4), border_mode=(0, 0)),
                  g=aeg6t,
                  b=aeb6t))
    dv5 = relu(
        batchnorm(deconv(dv6, aew5, subsample=(1, 1), border_mode=(1, 1)),
                  g=aeg5t,
                  b=aeb5t))
    dv4 = relu(
        batchnorm(deconv(dv5, aew4, subsample=(4, 4), border_mode=(2, 2)),
                  g=aeg4t,
                  b=aeb4t))
    dv3 = relu(
        batchnorm(deconv(dv4, aew3, subsample=(1, 1), border_mode=(1, 1)),
                  g=aeg3t,
                  b=aeb3t))
    dv2 = relu(
        batchnorm(deconv(dv3, aew2, subsample=(4, 4), border_mode=(2, 2)),
                  g=aeg2t,
                  b=aeb2t))
    dv1 = tanh(deconv(dv2, aew1, subsample=(1, 1), border_mode=(1, 1)))

    rX = dv1

    mse = T.sqrt(T.sum(T.abs_(T.flatten(X - rX, 2)), axis=1)) + T.sqrt(
        T.sum(T.flatten((X - rX)**2, 2), axis=1))  # sqrt of L1 distance plus L2 norm
    return T.flatten(cv6, 2), rX, mse
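
This example returns the flattened bottleneck features, the reconstruction, and the combined reconstruction error, so the caller can use any of the three as a discriminator signal.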
Example #7
def _vgd_gradient(z, num_z, logpxz):
    # The opening of this function is cut off in the listing; the signature
    # is reconstructed from the call site below. Kxy (pairwise kernel
    # matrix), dxkxy (its gradient) and nz (latent dimension) are computed
    # in the omitted lines.
    dz_logpzx = T.grad(T.sum(logpxz), z) - z
    tensor_grad_z = T.reshape(dz_logpzx, (-1, num_z, nz))

    # note the factor of 100 applied to dxkxy
    vgd_grad_tensor = (T.batched_dot(Kxy, tensor_grad_z) + 100 * dxkxy) / T.tile(
        T.mean(Kxy, axis=2).dimshuffle(0, 1, 'x'), (1, 1, nz))

    return vgd_grad_tensor
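
The omitted lines compute Kxy (the pairwise RBF kernel over latent samples) and dxkxy (its gradient), the two ingredients of the Stein variational gradient phi(z_i) = (1/n) * sum_j [ k(z_j, z_i) * grad_{z_j} log p(z_j) + grad_{z_j} k(z_j, z_i) ]. A minimal, non-batched sketch of those quantities (rbf_kernel and the bandwidth h are hypothetical names, not from the listing, which batches the computation over the data axis):

import theano.tensor as T

def rbf_kernel(z, h):
    # hypothetical sketch: z is an (n, d) matrix of latent samples
    sq = T.sum(z ** 2, axis=1)
    pdist = sq.dimshuffle(0, 'x') + sq.dimshuffle('x', 0) - 2 * T.dot(z, z.T)
    Kxy = T.exp(-pdist / (2 * h ** 2))
    # dxkxy[i] = sum_j grad_{z_j} k(z_j, z_i), the repulsive term of SVGD
    dxkxy = (z * T.sum(Kxy, axis=1).dimshuffle(0, 'x') - T.dot(Kxy, z)) / (h ** 2)
    return Kxy, dxkxy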


X = T.tensor4('X')
num_z = T.iscalar('num_z')

# functions for generating images

func_x_corrupt = dropout(X, p=drop_p)
func_x_corrupt = T.clip(func_x_corrupt, 1e-6, 1 - 1e-6)

func_z = conv_encoder(func_x_corrupt, *enc_params)
func_res_x, _ = conv_decoder(X, func_z, *dec_params)

# functions for svgd training
x_repeated = T.repeat(X, num_z, axis=0)
x_dropout = dropout(x_repeated, p=drop_p)
x_corrupt = T.clip(x_dropout, 1e-6, 1 - 1e-6)

z = conv_encoder(x_corrupt, *enc_params)
reconstructed_x, logpxz = conv_decoder(x_repeated, z, *dec_params)

z_vgd_grad = 0. - _vgd_gradient(z, num_z, logpxz)
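
# The classifier-augmented variant below adds a softmax head over the
# flattened encoder features; Y is a one-hot label matrix and neg_lik is
# the per-example negative log-likelihood.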
def discrim(X, Y):
    def classifier(H, Y):
        p_y_given_x = T.nnet.softmax(T.dot(H, logistic_w) + logistic_b)
        neg_lik = -T.sum(T.mul(T.log(p_y_given_x), Y), axis=1)
        return neg_lik, p_y_given_x

    current_input = dropout(X, 0.2)
    ### encoder ###
    cv1 = relu(
        dnn_conv(current_input, aew1, subsample=(1, 1), border_mode=(1, 1)))
    cv2 = relu(
        batchnorm(dnn_conv(cv1, aew2, subsample=(2, 2), border_mode=(0, 0)),
                  g=aeg2,
                  b=aeb2))
    cv3 = relu(
        batchnorm(dnn_conv(cv2, aew3, subsample=(1, 1), border_mode=(1, 1)),
                  g=aeg3,
                  b=aeb3))
    cv4 = relu(
        batchnorm(dnn_conv(cv3, aew4, subsample=(4, 4), border_mode=(0, 0)),
                  g=aeg4,
                  b=aeb4))
    cv5 = relu(
        batchnorm(dnn_conv(cv4, aew5, subsample=(1, 1), border_mode=(1, 1)),
                  g=aeg5,
                  b=aeb5))
    cv6 = relu(
        batchnorm(dnn_conv(cv5, aew6, subsample=(4, 4), border_mode=(0, 0)),
                  g=aeg6,
                  b=aeb6))

    ### decoder ###
    dv6 = relu(
        batchnorm(deconv(cv6, aew6, subsample=(4, 4), border_mode=(0, 0)),
                  g=aeg6t,
                  b=aeb6t))
    dv5 = relu(
        batchnorm(deconv(dv6, aew5, subsample=(1, 1), border_mode=(1, 1)),
                  g=aeg5t,
                  b=aeb5t))
    dv4 = relu(
        batchnorm(deconv(dv5, aew4, subsample=(4, 4), border_mode=(0, 0)),
                  g=aeg4t,
                  b=aeb4t))
    dv3 = relu(
        batchnorm(deconv(dv4, aew3, subsample=(1, 1), border_mode=(1, 1)),
                  g=aeg3t,
                  b=aeb3t))
    dv2 = relu(
        batchnorm(deconv(dv3, aew2, subsample=(2, 2), border_mode=(0, 0)),
                  g=aeg2t,
                  b=aeb2t))
    dv1 = tanh(deconv(dv2, aew1, subsample=(1, 1), border_mode=(1, 1)))

    hidden = T.flatten(cv6, 2)
    rX = dv1
    mse = T.sqrt(T.sum(T.flatten((X - rX)**2, 2), axis=1))  # per-example L2 reconstruction error

    # alternative: combined L1 + L2 error, as in the earlier autoencoding example
    #mse = T.sqrt(T.sum(T.abs_(T.flatten(X-rX, 2)),axis=1)) + T.sqrt(T.sum(T.flatten((X-rX)**2, 2), axis=1))
    neg_lik, p_y_given_x = classifier(hidden, Y)
    return hidden, p_y_given_x, rX, mse, neg_lik
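
A minimal usage sketch (not part of the original listing), assuming the parameters referenced above are Theano shared variables and discrim is the classifier-augmented version:

import theano
import theano.tensor as T

X = T.tensor4('X')
Y = T.matrix('Y')

hidden, p_y_given_x, rX, mse, neg_lik = discrim(X, Y)
f_discrim = theano.function([X, Y], [p_y_given_x, rX, mse, neg_lik])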