Example #1
# These snippets assume TF1-style TensorFlow (tf.log, tf.random_uniform);
# `nn`, `layers`, the construct_* helpers, and `final_latent_dimension`
# are module-level names defined in the surrounding codebase.
import tensorflow as tf


def model_spec(x):
    xs = nn.int_shape(x)
    # running total of log|det J| across the flow, one entry per example
    sum_log_det_jacobians = tf.zeros(xs[0])

    # model the logit of x rather than x itself; alpha keeps y away from
    # 0 and 1 so the logit stays finite
    y = x
    alpha = 1e-5
    y = y * (1 - alpha) + alpha * 0.5
    # log|det| of the logit map: d/dy [log(y) - log(1 - y)] = 1/(y(1 - y))
    jac = tf.reduce_sum(-tf.log(y) - tf.log(1 - y), [1, 2, 3])
    y = tf.log(y) - tf.log(1 - y)
    sum_log_det_jacobians += jac

    if len(layers) == 0:
        construct_model_spec()  # lazily build the layer stack on first use

    # construct forward pass
    z = None
    jac = sum_log_det_jacobians

    # TODO change for CIFAR-10

    for layer in layers:
        y, jac, z = layer.forward_and_jacobian(y, jac, z)

    if z is None:
        z = y
    else:
        # append the remaining activations to the latents factored out
        # by intermediate layers
        z = tf.concat([z, y], axis=3)

    # record dimension of the final variable
    global final_latent_dimension
    final_latent_dimension = nn.int_shape(z)

    return z, jac
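
The function returns the latent code and the accumulated log-determinant, which is exactly what the change-of-variables log-likelihood needs. A minimal usage sketch, assuming a standard-Gaussian prior and a 32x32 RGB batch (the prior and the shapes are assumptions for illustration, not part of the snippet above):

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, [16, 32, 32, 3])  # assumed pixel values in (0, 1)
z, log_det = model_spec(x)

# change of variables: log p(x) = log p(z) + log|det J|
log_prior = tf.reduce_sum(-0.5 * (tf.square(z) + np.log(2.0 * np.pi)),
                          axis=[1, 2, 3])
loss = -tf.reduce_mean(log_prior + log_det)  # negative log-likelihood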
Example #2
def model_spec(x):
    counters = {}
    xs = nn.int_shape(x)
    sum_log_det_jacobians = tf.zeros(xs[0])

    # dequantize (Tapani Raiko's trick): map x from [-1, 1] to [0, 1],
    # scale to pixel values, add uniform noise, and renormalize
    y = x * 0.5 + 0.5
    y = y * 255.0
    corruption_level = 1.0
    y = y + corruption_level * tf.random_uniform(xs)
    y = y / (255.0 + corruption_level)

    # model the logit of x rather than x itself
    alpha = 1e-5
    y = y * (1 - alpha) + alpha * 0.5
    jac = tf.reduce_sum(-tf.log(y) - tf.log(1 - y), [1, 2, 3])
    y = tf.log(y) - tf.log(1 - y)
    sum_log_det_jacobians += jac

    if len(layers) == 0:
        construct_model_spec()

    # construct forward pass
    z = None
    jac = sum_log_det_jacobians
    for layer in layers:
        y, jac, z = layer.forward_and_jacobian(y, jac, z)

    if z is None:
        z = y  # no layer factored anything out
    else:
        z = tf.concat([z, y], 3)

    # record dimension of the final variable
    global final_latent_dimension
    final_latent_dimension = nn.int_shape(z)

    return z, jac
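
The per-element term -tf.log(y) - tf.log(1 - y) is the log-derivative of the logit map; the extra log(1 - alpha) from the affine squeeze is dropped here as negligible for alpha = 1e-5 (Example #3 keeps the analogous term). A quick, purely illustrative NumPy check of that identity:

import numpy as np

def logit(y):
    return np.log(y) - np.log(1.0 - y)

y, eps = 0.3, 1e-6
numeric = (logit(y + eps) - logit(y)) / eps    # finite-difference slope
analytic = 1.0 / (y * (1.0 - y))               # d logit / dy
assert abs(numeric - analytic) / analytic < 1e-4
assert np.isclose(np.log(analytic), -np.log(y) - np.log(1.0 - y))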
Example #3
def model_spec(x,
               reuse=True,
               model_type="nice",
               train=False,
               alpha=1e-7,
               init_type="uniform",
               hidden_layers=1000,
               no_of_layers=1,
               batch_norm_adaptive=0):
    counters = {}
    xs = nn.int_shape(x)
    sum_log_det_jacobians = tf.zeros(xs[0])

    # dequantize (Tapani Raiko's trick); x is assumed to be in [0, 1]
    y = x

    y = y * 255.0
    corruption_level = 1.0
    y = y + corruption_level * tf.random_uniform(xs)
    y = y / (255.0 + corruption_level)

    # model the logit of x rather than x itself
    jac = 0

    y = y * (1 - 2 * alpha) + alpha
    # log|det| of the affine squeeze plus the logit map; "nice" inputs
    # are rank-2 [batch, dim], the convolutional variant is rank-4 NHWC
    if model_type == "nice":
        jac = tf.reduce_sum(-tf.log(y) - tf.log(1 - y) + tf.log(1 - 2 * alpha),
                            [1])
    else:
        jac = tf.reduce_sum(-tf.log(y) - tf.log(1 - y) + tf.log(1 - 2 * alpha),
                            [1, 2, 3])
    y = tf.log(y) - tf.log(1 - y)
    sum_log_det_jacobians += jac

    if len(layers) == 0:
        if model_type == "nice":
            construct_nice_spec(init_type=init_type,
                                hidden_layers=hidden_layers,
                                no_of_layers=no_of_layers)
        else:
            construct_model_spec(no_of_layers=no_of_layers,
                                 add_scaling=(batch_norm_adaptive != 0))

    # construct forward pass
    z = None
    jac = sum_log_det_jacobians
    for layer in layers:
        y, jac, z = layer.forward_and_jacobian(y,
                                               jac,
                                               z,
                                               reuse=reuse,
                                               train=train)

    if model_type == "nice":
        z = y  # NICE factors nothing out; the whole output is the latent
    else:
        z = tf.concat(axis=3, values=[z, y])

    # record dimension of the final variable
    global final_latent_dimension
    final_latent_dimension = nn.int_shape(z)

    return z, jac
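
A hypothetical call for the fully connected "nice" variant, where inputs are rank-2 [batch, dim] and the Jacobian is reduced over axis 1; the placeholder shape and the Gaussian prior below are assumptions for illustration:

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, [64, 784])  # e.g. flattened MNIST in [0, 1]
z, log_det = model_spec(x, reuse=False, model_type="nice", train=True)

log_prior = tf.reduce_sum(-0.5 * (tf.square(z) + np.log(2.0 * np.pi)), axis=1)
loss = -tf.reduce_mean(log_prior + log_det)  # negative log-likelihood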