Example #1
    def test_squeeze2d(self):
        # 8x8 single-channel ramp image, batch size 1
        arr = np.reshape(np.arange(0, 1 * 8 * 8 * 1, 1), [1, 8, 8, 1])
        x = tf.constant(arr)
        with self.session():
            squeeze_result = squeeze2d(x).eval()
            squeeze_result_old = squeeze2d_old(x).eval()
        # the rewritten op must agree with the reference implementation
        np.testing.assert_array_equal(squeeze_result, squeeze_result_old)
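For reference, squeeze2d in these projects is the space-to-depth reshape used by RealNVP/Glow: each 2x2 spatial block is folded into the channel axis, so an [N, H, W, C] tensor becomes [N, H/2, W/2, 4C]. A minimal NumPy sketch of that behavior (squeeze2d_ref is a hypothetical stand-in, and the exact channel ordering may differ from a given project's op):

import numpy as np

def squeeze2d_ref(x, factor=2):
    # x: [batch, height, width, channels]; spatial dims must divide by factor
    b, h, w, c = x.shape
    assert h % factor == 0 and w % factor == 0
    x = np.reshape(x, [b, h // factor, factor, w // factor, factor, c])
    x = np.transpose(x, [0, 1, 3, 5, 2, 4])  # gather each factor-by-factor block
    return np.reshape(x, [b, h // factor, w // factor, c * factor * factor])

# the 1x8x8x1 input from the test above becomes 1x4x4x4
print(squeeze2d_ref(np.arange(64).reshape(1, 8, 8, 1)).shape)  # (1, 4, 4, 4)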
Example #2
def split2d(name, z, cfg, objective=0.):
    with tf.variable_scope(name):
        n_z = ops.int_shape(z)[3]
        z1 = z[:, :, :, :n_z // 2]
        z2 = z[:, :, :, n_z // 2:]
        pz = split2d_prior(z1, cfg)
        objective += pz.logp(z2)
        z1 = ops.squeeze2d(z1)
        eps = pz.get_eps(z2)
        return z1, objective, eps
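In the split2d pattern above, half of the channels are factored out at each scale: z2 is scored under a prior whose parameters come from z1, its log-probability is added to the running objective, and get_eps keeps the whitened residual so the flow can be inverted later. A sketch of the prior interface this relies on, assuming a diagonal Gaussian as in Glow (gaussian_diag here is illustrative, not the project's implementation):

import numpy as np
import tensorflow as tf

class gaussian_diag(object):
    def __init__(self, mean, logsd):
        self.mean, self.logsd = mean, logsd

    def logp(self, x):
        # log N(x; mean, exp(logsd)**2), summed over the non-batch axes
        lp = (-0.5 * np.log(2 * np.pi) - self.logsd
              - 0.5 * tf.square(x - self.mean) / tf.exp(2. * self.logsd))
        return tf.reduce_sum(lp, axis=[1, 2, 3])

    def get_eps(self, x):
        # whiten x; sampling inverts this as x = mean + exp(logsd) * eps
        return (x - self.mean) / tf.exp(self.logsd)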
Example #3
File: layers.py Project: gdahia/DLF
def split2d(z, objective=0., hps=None, name=None):
    with tf.variable_scope(name):
        n_z = z.get_shape()[3]
        z1, z2 = tf.split(z, 2, axis=-1)
        pz = split2d_prior(z1, hps=hps)
        obj = pz.logp(z2)
        objective += obj
        z1 = squeeze2d(z1)
        eps = pz.get_eps(z2)
        return z1, objective, eps
Example #4
    def call(self, inputs, determinant=True):
        det_list = []
        z_list = []
        self.z_shape_list = []
        x = inputs
        determinant_accumulator = tf.constant(0.)
        for l in range(0, self.L-1):
            x = ops.squeeze2d(x)
            with tf.name_scope("L_%d"%l):
                for k in range(0, self.K):
                    with tf.name_scope("K_%d"%k):
                        [x, det] = self.blocks[l][k](x, determinant=True)
                        determinant_accumulator += det
                    det_list.append(det)
                with tf.name_scope("extract_latents"):
                    num_channels = x.get_shape().as_list()[-1]
                    self.z_shape_list.append(x[:, :, :, :num_channels//2].get_shape().as_list()[1:])
                    z_list.append(tf.layers.flatten(x[:, :, :, :num_channels//2]))
                    x = x[:, :, :, num_channels//2:]
        x = ops.squeeze2d(x)
        l = max(self.L-1, 0)
        with tf.name_scope("L_%d"%l):
            for k in range(0, self.K):
                with tf.name_scope("K_%d"%k):
                    [x, det] = self.blocks[l][k](x, determinant=True)
                    determinant_accumulator += det
                det_list.append(det)

        self.z_shape_list.append(x.get_shape().as_list()[1:])
        z_list.append(tf.layers.flatten(x))

        # combine z values
        z_list.reverse()
        z = tf.concat(z_list, axis=-1, name="combine_multiscale_latents")
        # combine
        determinants = tf.stack(det_list, name="determinants")
        if determinant:
            return [z, determinant_accumulator]
        return z
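call() above is the multi-scale architecture: at every level except the last, half of the channels are flattened into z_list and dropped from further processing, with their shapes recorded in z_shape_list. Because z_list is reversed before the concat, walking the recorded shapes in reverse splits the flat latent back into per-level tensors. A hypothetical helper sketching that inversion (unflatten_latents is assumed, not part of the project):

import numpy as np

def unflatten_latents(z, z_shape_list):
    # z: [batch, total_dims], as produced by the reversed concat in call()
    tensors, offset = [], 0
    for shape in reversed(z_shape_list):
        n = int(np.prod(shape))
        tensors.append(np.reshape(z[:, offset:offset + n], [-1] + list(shape)))
        offset += n
    return tensors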
Example #5
    def encode(self, inputs, labels, condition=None):
        ## Dequantization by adding uniform noise
        with tf.variable_scope("preprocess"):
            self.y = tf.one_hot(labels,
                                depth=self.num_classes,
                                dtype=tf.float32)

            inputs = tf.cast(inputs, 'float32')
            self.height, self.width, self.channels = inputs.get_shape(
            ).as_list()[1:]
            if self.hps.num_bits_x < 8:
                inputs = tf.floor(inputs / 2**(8 - self.hps.num_bits_x))
            inputs = inputs / self.num_bins - 0.5
            inputs = inputs + tf.random_uniform(tf.shape(inputs), 0,
                                                1. / self.num_bins)

            objective = tf.zeros(tf.shape(inputs)[0])

            objective += -np.log(self.num_bins) * np.prod(
                ops.shape(inputs)[1:])
            inputs = squeeze2d(inputs)

        ## Encoder
        if self.hps.conditioning and condition is None:
            condition = self.y
            # with tf.variable_scope("cond_preprocess"):
            #     condition = tf.layers.dense(condition, units=10, use_bias=False)
        z, objective, eps = codec(inputs,
                                  cond=condition,
                                  objective=objective,
                                  hps=self.hps,
                                  reverse=False)

        ## Prior
        with tf.variable_scope("prior"):
            self.hps.top_shape = z.get_shape().as_list()[1:]
            logp, sample, get_eps = prior(self.y, self.hps)
            obj = logp(z)
            eps.append(get_eps(z))
            objective += obj
            self.objective = -objective

            # Class-label prediction from the latent representation
            if self.hps.ycond:
                z_y = tf.reduce_mean(z, axis=[1, 2])
                self.logits = linear_zeros(z_y,
                                           self.num_classes,
                                           name="classifier")
        return eps
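The preprocessing block scales inputs to [-0.5, 0.5) and adds uniform noise of width 1/num_bins; the matching correction charges log(num_bins) nats per subpixel to the objective. Assuming the usual Glow convention num_bins = 2 ** num_bits_x, the size of that term for a 32x32x3 image:

import numpy as np

h, w, c = 32, 32, 3                   # illustrative image shape
num_bins = 2 ** 8                     # num_bits_x = 8
print(-np.log(num_bins) * h * w * c)  # ~ -17034.8 nats per image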
Example #6
    def _f_loss(self, x, y):

        with tf.variable_scope('model', reuse=tf.AUTO_REUSE):
            y_onehot = tf.cast(tf.one_hot(y, self.cfg.n_y, 1, 0), 'float32')

            # Discrete -> Continuous
            objective = tf.zeros_like(x, dtype='float32')[:, 0, 0, 0]

            z = x  # + tf.random_uniform(tf.shape(x), 0, 1./self.cfg.n_bins)
            objective += - np.log(self.cfg.n_bins) * \
                np.prod(ops.int_shape(z)[1:])

            # Encode
            z = ops.squeeze2d(z, 2)  # > 16x16x12
            z, objective, eps = self.encoder(z, objective)

            # Prior
            self.cfg.top_shape = ops.int_shape(z)[1:]
            logp, _, _ = prior("prior", y_onehot, self.cfg)

            objective += logp(z)

            # Generative loss
            nobj = - objective
            bits_x = nobj / (np.log(2.) * int(x.get_shape()[1]) * int(
                x.get_shape()[2]) * int(x.get_shape()[3]))  # bits per subpixel

            # Predictive loss
            if self.cfg.weight_y > 0 and self.cfg.ycond:
                assert(False)
                # Classification loss
                h_y = tf.reduce_mean(z, axis=[1, 2])
                y_logits = ops.dense(
                    "classifier", h_y, self.cfg.n_y, has_bn=False)
                bits_y = tf.nn.softmax_cross_entropy_with_logits_v2(
                    labels=y_onehot, logits=y_logits) / np.log(2.)

                # Classification accuracy
                y_predicted = tf.argmax(y_logits, 1, output_type=tf.int32)
                classification_error = 1 - \
                    tf.cast(tf.equal(y_predicted, y), tf.float32)
            else:
                bits_y = tf.zeros_like(bits_x)
                classification_error = tf.ones_like(bits_x)

        return bits_x, bits_y, classification_error, eps
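nobj above is a negative log-likelihood in nats; dividing by log(2) converts nats to bits, and dividing by the subpixel count H*W*C gives bits per subpixel, the usual density-model metric. A quick numeric check with illustrative values:

import numpy as np

nats = 10000.0                              # hypothetical -log p(x) for one image
bits_x = nats / (np.log(2.) * 32 * 32 * 3)  # a 32x32x3 image has 3072 subpixels
print(round(bits_x, 3))                     # 4.696 bits per subpixel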
Example #7
def split2d(z, objective=0., hps=None, name=None):
    '''
    :param z: the inputs after the squeeze operation
    :param objective: accumulated log-likelihood objective
    :param hps: hyperparameter list
    :param name:
    :return:
    '''
    with tf.variable_scope(name):
        # 2019.11.6
        n_z = z.get_shape()[3]
        # n_z is the number of channels of the inputs
        z1, z2 = tf.split(z, 2, axis=-1)
        pz = split2d_prior(z1, hps=hps)
        obj = pz.logp(z2)
        objective += obj
        z1 = squeeze2d(z1)
        eps = pz.get_eps(z2)
        return z1, objective, eps
Example #8
    def f_encode(self, x, y):
        with tf.variable_scope('model', reuse=tf.AUTO_REUSE):
            y_onehot = tf.cast(tf.one_hot(y, self.cfg.n_y, 1, 0), 'float32')

            # Discrete -> Continuous
            objective = tf.zeros_like(x, dtype='float32')[:, 0, 0, 0]
            z = x + tf.random_uniform(tf.shape(x), 0, 1. / self.cfg.n_bins)
            objective += - np.log(self.cfg.n_bins) * \
                np.prod(ops.int_shape(z)[1:])

            # Encode
            z = ops.squeeze2d(z, 2)  # > 16x16x12
            z, objective, eps = self.encoder(z, objective)

            # Prior
            self.cfg.top_shape = ops.int_shape(z)[1:]
            logp, _, _eps = prior("prior", y_onehot, self.cfg)
            objective += logp(z)
            eps.append(_eps(z))
        return eps
Example #9
File: model.py Project: gdahia/DLF
    def encode(self, inputs, labels, condition=None):
        ## Dequantization by adding uniform noise
        with tf.variable_scope("preprocess"):
            self.y = tf.one_hot(labels, depth=self.num_classes, dtype=tf.float32)

            inputs = tf.cast(inputs, 'float32')
            self.height, self.width, self.channels = inputs.get_shape().as_list()[1:]
            if self.hps.num_bits_x < 8:
                inputs = tf.floor(inputs/2**(8-self.hps.num_bits_x))
            inputs = inputs / self.num_bins - 0.5
            inputs = inputs + tf.random_uniform(tf.shape(inputs), 0, 1./self.num_bins)

            objective = tf.zeros(tf.shape(inputs)[0])
            objective += -np.log(self.num_bins) * np.prod(ops.shape(inputs)[1:])
            inputs = squeeze2d(inputs)

        ## Encoder
        if self.hps.conditioning and condition is None:
            condition = self.y
            # with tf.variable_scope("cond_preprocess"):
            #     condition = tf.layers.dense(condition, units=10, use_bias=False)
        z, objective, eps = codec(inputs, cond=condition, objective=objective, hps=self.hps, reverse=False)

        ## Prior
        with tf.variable_scope("prior"):
            self.hps.top_shape = z.get_shape().as_list()[1:]
            logp, sample, get_eps = prior(self.y, self.hps)
            obj = logp(z)
            eps.append(get_eps(z))
            objective += obj
            self.objective = -objective

            # Class-label prediction from the latent representation
            if self.hps.ycond:
                z_y = tf.reduce_mean(z, axis=[1, 2])
                self.logits = linear_zeros(z_y, self.num_classes, name="classifier")
        return eps
Example #10
File: model.py Project: Echo002/Lab
    def encode(self, inputs, labels, condition=None):
        # line268
        # Dequantization by adding uniform noise
        with tf.variable_scope("preprocess"):
            # One-hot encode the labels
            self.y = tf.one_hot(labels,
                                depth=self.num_classes,
                                dtype=tf.float32)

            inputs = tf.cast(inputs, 'float32')
            # tf.cast() converts the data type
            self.height, self.width, self.channels = inputs.get_shape(
            ).as_list()[1:]
            # num_bits_x: bit depth of the inputs; num_bins: number of quantization bins
            if self.hps.num_bits_x < 8:
                inputs = tf.floor(inputs / 2**(8 - self.hps.num_bits_x))
            inputs = inputs / self.num_bins - 0.5
            # Add uniform dequantization noise to the inputs
            inputs = inputs + tf.random_uniform(tf.shape(inputs), 0,
                                                1. / self.num_bins)

            # objective: running log-likelihood, one scalar per example
            objective = tf.zeros(tf.shape(inputs)[0])

            # np.prod: product of all elements in the array
            objective += -np.log(self.num_bins) * np.prod(
                ops.shape(inputs)[1:])
            # Apply the squeeze operation
            print("before entering squeeze2d, inputs.shape =", inputs.shape)
            inputs = squeeze2d(inputs)
            # inputs has shape [batch, height, width, channels]

        # Encoder
        # Default the conditioning signal to the one-hot labels
        if self.hps.conditioning and condition is None:
            condition = self.y
            # with tf.variable_scope("cond_preprocess"):
            #     condition = tf.layers.dense(condition, units=10, use_bias=False)
        print("before inter model.codec, inputs.shape=" + inputs.shape())
        z, objective, eps = codec(inputs,
                                  cond=condition,
                                  objective=objective,
                                  hps=self.hps,
                                  reverse=False)
        # line 11

        # Prior
        with tf.variable_scope("prior"):
            self.hps.top_shape = z.get_shape().as_list()[1:]
            logp, sample, get_eps = prior(self.y, self.hps)
            obj = logp(z)
            eps.append(get_eps(z))
            objective += obj
            self.objective = -objective

            # Class-label prediction from the latent representation
            if self.hps.ycond:
                z_y = tf.reduce_mean(z, axis=[1, 2])
                self.logits = linear_zeros(z_y,
                                           self.num_classes,
                                           name="classifier")
        return eps
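Both encode variants finish by scoring z under a top-level prior that returns a (logp, sample, get_eps) triple. A minimal standard-normal sketch of that interface (prior_sketch is hypothetical; the projects' prior additionally conditions on the one-hot labels and hps):

import numpy as np
import tensorflow as tf

def prior_sketch(top_shape):
    # standard-normal top prior; real projects learn mean and logsd,
    # optionally as a function of the labels
    h, w, c = top_shape
    mean = tf.zeros([1, h, w, c])
    logsd = tf.zeros([1, h, w, c])

    def logp(z):
        lp = (-0.5 * np.log(2 * np.pi) - logsd
              - 0.5 * tf.square(z - mean) / tf.exp(2. * logsd))
        return tf.reduce_sum(lp, axis=[1, 2, 3])

    def sample(eps):
        # deterministic reconstruction from a stored eps
        return mean + tf.exp(logsd) * eps

    def get_eps(z):
        return (z - mean) / tf.exp(logsd)

    return logp, sample, get_eps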