Code example #1
    def __init__(self,
                 src_path,
                 tgt_path,
                 test=False,
                 src_vocab_size=None,
                 tgt_vocab_size=None,
                 sw2i=None,
                 tw2i=None):
        src, tgt = get_dataset(src_path, tgt_path)

        if test:
            svocab = list(sw2i.keys())
            tvocab = list(tw2i.keys())
            si2w = {v: k for k, v in sw2i.items()}
            ti2w = {v: k for k, v in tw2i.items()}
        else:
            svocab, sw2i, si2w = build_vocab(utils.flatten(src),
                                             src_vocab_size)
            tvocab, tw2i, ti2w = build_vocab(utils.flatten(tgt),
                                             tgt_vocab_size)

        # src_p, tgt_p = [], []
        # for s, t in zip(src, tgt):
        #     src_p.append(utils.prepare_sequence(s + ['</s>'],
        #                                         sw2i).view(1, -1))
        #     tgt_p.append(utils.prepare_sequence(t + ['</s>'],
        #                                         tw2i).view(1, -1))

        self.svocab = svocab
        self.tvocab = tvocab
        self.sw2i = sw2i
        self.tw2i = tw2i
        self.si2w = si2w
        self.ti2w = ti2w
        # self.src = src_p
        # self.tgt = tgt_p
        self.src = [' '.join(s) for s in src]
        self.tgt = [' '.join(t) for t in tgt]
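The test flag lets a test split reuse the vocabulary built from the training split instead of building its own. A minimal usage sketch; the class name TranslationDataset and the file paths are hypothetical, since the snippet omits the enclosing class statement:

# Hypothetical usage: build the training set first, then reuse its
# vocabulary mappings (sw2i/tw2i) when constructing the test set.
train_data = TranslationDataset('train.src', 'train.tgt',
                                src_vocab_size=30000,
                                tgt_vocab_size=30000)
test_data = TranslationDataset('test.src', 'test.tgt',
                               test=True,
                               sw2i=train_data.sw2i,
                               tw2i=train_data.tw2i)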
Code example #2
def discriminator(x,
                  convolutional=True,
                  filter_sizes=[5, 5, 5, 5],
                  activation=tf.nn.relu,
                  n_filters=[100, 100, 100, 100]):
    """Summary

    Parameters
    ----------
    x : TYPE
        Description
    convolutional : bool, optional
        Description
    filter_sizes : list, optional
        Description
    activation : TYPE, optional
        Description
    n_filters : list, optional
        Description

    Returns
    -------
    name : TYPE
        Description
    """
    encoding = encoder(
        x=x,
        convolutional=convolutional,
        dimensions=n_filters,
        filter_sizes=filter_sizes,
        activation=activation)

    # flatten, then linear to 1 value
    res = utils.flatten(encoding['z'], name='flatten')
    if res.get_shape().as_list()[-1] > 1:
        res = utils.linear(res, 1)[0]

    return {
        'logits': res,
        'probs': tf.nn.sigmoid(res),
        'Ws': encoding['Ws'],
        'hs': encoding['hs']
    }
Code example #3
def VAEGAN(input_shape=[None, 784],
           n_filters=[64, 64, 64],
           filter_sizes=[4, 4, 4],
           n_hidden=32,
           n_code=2,
           activation=tf.nn.tanh,
           convolutional=False,
           variational=False):
    """Summary

    Parameters
    ----------
    input_shape : list, optional
        Description
    n_filters : list, optional
        Description
    filter_sizes : list, optional
        Description
    n_hidden : int, optional
        Description
    n_code : int, optional
        Description
    activation : TYPE, optional
        Description
    convolutional : bool, optional
        Description
    variational : bool, optional
        Description

    Returns
    -------
    name : TYPE
        Description
    """
    # network input / placeholders for train (bn)
    x = tf.placeholder(tf.float32, input_shape, 'x')
    z_samp = tf.placeholder(tf.float32, [None, n_code], 'z_samp')

    with tf.variable_scope('encoder'):
        encoding = encoder(x=x,
                           n_hidden=n_hidden,
                           convolutional=convolutional,
                           dimensions=n_filters,
                           filter_sizes=filter_sizes,
                           activation=activation)

        with tf.variable_scope('variational'):
            z, z_mu, z_log_sigma, loss_z = variational_bayes(h=encoding['z'],
                                                             n_code=n_code)

    shapes = encoding['shapes'].copy()
    shapes.reverse()
    n_filters_decoder = n_filters.copy()
    n_filters_decoder.reverse()
    n_filters_decoder += [input_shape[-1]]

    with tf.variable_scope('generator'):
        decoding_actual = decoder(z=z,
                                  shapes=shapes,
                                  n_hidden=n_hidden,
                                  convolutional=convolutional,
                                  dimensions=n_filters_decoder,
                                  filter_sizes=filter_sizes,
                                  activation=activation)

    with tf.variable_scope('generator', reuse=True):
        decoding_sampled = decoder(z=z_samp,
                                   shapes=shapes,
                                   n_hidden=n_hidden,
                                   convolutional=convolutional,
                                   dimensions=n_filters_decoder,
                                   filter_sizes=filter_sizes,
                                   activation=activation)

    with tf.variable_scope('discriminator'):
        D_real = discriminator(x,
                               filter_sizes=filter_sizes,
                               n_filters=n_filters,
                               activation=activation)

    with tf.variable_scope('discriminator', reuse=True):
        D_fake = discriminator(decoding_actual['x_tilde'],
                               filter_sizes=filter_sizes,
                               n_filters=n_filters,
                               activation=activation)

    with tf.variable_scope('discriminator', reuse=True):
        D_samp = discriminator(decoding_sampled['x_tilde'],
                               filter_sizes=filter_sizes,
                               n_filters=n_filters,
                               activation=activation)

    with tf.variable_scope('loss'):
        # Weights influence of content/style of decoder
        gamma = tf.placeholder(tf.float32, name='gamma')

        # Discriminator_l Log Likelihood Loss
        loss_D_llike = 0
        for h_fake, h_real in zip(D_fake['hs'][3:], D_real['hs'][3:]):
            loss_D_llike += tf.reduce_sum(
                0.5 * tf.squared_difference(utils.flatten(h_fake),
                                            utils.flatten(h_real)), 1)

        # GAN Loss
        eps = 1e-12
        loss_real = tf.reduce_sum(tf.log(D_real['probs'] + eps), 1)
        loss_fake = tf.reduce_sum(tf.log(1 - D_fake['probs'] + eps), 1)
        loss_samp = tf.reduce_sum(tf.log(1 - D_samp['probs'] + eps), 1)

        loss_GAN = (loss_real + loss_fake + loss_samp) / 3.0

        loss_enc = tf.reduce_mean(loss_z + loss_D_llike)
        loss_gen = tf.reduce_mean(gamma * loss_D_llike - loss_GAN)
        loss_dis = -tf.reduce_mean(loss_GAN)

    return {
        'x': x,
        'z': z,
        'x_tilde': decoding_actual['x_tilde'],
        'z_samp': z_samp,
        'x_tilde_samp': decoding_sampled['x_tilde'],
        'loss_real': loss_real,
        'loss_fake': loss_fake,
        'loss_samp': loss_samp,
        'loss_GAN': loss_GAN,
        'loss_D_llike': loss_D_llike,
        'loss_enc': loss_enc,
        'loss_gen': loss_gen,
        'loss_dis': loss_dis,
        'gamma': gamma
    }
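The three losses returned above are typically minimized by three separate optimizers, each restricted to the variables of its own sub-network. A minimal training-setup sketch, assuming the variable scopes created above ('encoder', 'generator', 'discriminator') and a hypothetical learning rate:

# One optimizer per sub-network, each updating only the variables
# created under the corresponding variable scope.
model = VAEGAN(convolutional=True, variational=True)
opt_enc = tf.train.AdamOptimizer(1e-4).minimize(
    model['loss_enc'],
    var_list=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'encoder'))
opt_gen = tf.train.AdamOptimizer(1e-4).minimize(
    model['loss_gen'],
    var_list=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'generator'))
opt_dis = tf.train.AdamOptimizer(1e-4).minimize(
    model['loss_dis'],
    var_list=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'discriminator'))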
Code example #4
def encoder(x,
            n_hidden=None,
            dimensions=[],
            filter_sizes=[],
            convolutional=False,
            activation=tf.nn.relu,
            output_activation=tf.nn.sigmoid):
    """Summary

    Parameters
    ----------
    x : TYPE
        Description
    n_hidden : None, optional
        Description
    dimensions : list, optional
        Description
    filter_sizes : list, optional
        Description
    convolutional : bool, optional
        Description
    activation : TYPE, optional
        Description
    output_activation : TYPE, optional
        Description

    Returns
    -------
    name : TYPE
        Description
    """
    if convolutional:
        x_tensor = utils.to_tensor(x)
    else:
        x_tensor = tf.reshape(tensor=x, shape=[-1, dimensions[0]])
        dimensions = dimensions[1:]
    current_input = x_tensor

    Ws = []
    hs = []
    shapes = []
    for layer_i, n_output in enumerate(dimensions):
        with tf.variable_scope(str(layer_i)):
            shapes.append(current_input.get_shape().as_list())
            if convolutional:
                h, W = utils.conv2d(x=current_input,
                                    n_output=n_output,
                                    k_h=filter_sizes[layer_i],
                                    k_w=filter_sizes[layer_i],
                                    padding='SAME')
            else:
                h, W = utils.linear(x=current_input, n_output=n_output)
            h = activation(h)
            Ws.append(W)
            hs.append(h)

        current_input = h

    shapes.append(h.get_shape().as_list())

    with tf.variable_scope('flatten'):
        flattened = utils.flatten(current_input)

    with tf.variable_scope('hidden'):
        if n_hidden:
            h, W = utils.linear(flattened, n_hidden, name='linear')
            h = activation(h)
        else:
            h = flattened

    return {'z': h, 'Ws': Ws, 'hs': hs, 'shapes': shapes}
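The returned 'shapes' list records the input shape of each layer; the VAE/VAEGAN builders above reverse it so the decoder can mirror the encoder. A quick illustration of that contract, assuming the convolutional path and CADL-style utils (the exact shapes depend on utils.conv2d's default stride):

x = tf.placeholder(tf.float32, [None, 784])
enc = encoder(x, n_hidden=32, convolutional=True,
              dimensions=[64, 64], filter_sizes=[4, 4])
# enc['z'] is the [batch, 32] hidden code; enc['shapes'] holds one input
# shape per layer plus the final output shape, e.g. something like
# [[None, 28, 28, 1], [None, 14, 14, 64], [None, 7, 7, 64]] for stride 2.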
Code example #5
def VAE(input_shape=[None, 784],
        n_filters=[64, 64, 64],
        filter_sizes=[4, 4, 4],
        n_hidden=32,
        n_code=2,
        activation=tf.nn.tanh,
        convolutional=False,
        variational=False):
    """Summary

    Parameters
    ----------
    input_shape : list, optional
        Description
    n_filters : list, optional
        Description
    filter_sizes : list, optional
        Description
    n_hidden : int, optional
        Description
    n_code : int, optional
        Description
    activation : TYPE, optional
        Description
    convolutional : bool, optional
        Description
    variational : bool, optional
        Description

    Returns
    -------
    name : TYPE
        Description
    """
    # network input / placeholders for train (bn)
    x = tf.placeholder(tf.float32, input_shape, 'x')

    with tf.variable_scope('encoder'):
        encoding = encoder(x=x,
                           n_hidden=n_hidden,
                           convolutional=convolutional,
                           dimensions=n_filters,
                           filter_sizes=filter_sizes,
                           activation=activation)

    if variational:
        with tf.variable_scope('variational'):
            z, z_mu, z_log_sigma, loss_z = variational_bayes(h=encoding['z'],
                                                             n_code=n_code)
    else:
        z = encoding['z']
        loss_z = None

    shapes = encoding['shapes'].copy()
    shapes.reverse()
    n_filters = n_filters.copy()
    n_filters.reverse()
    n_filters += [input_shape[-1]]

    with tf.variable_scope('generator'):
        decoding = decoder(z=z,
                           shapes=shapes,
                           n_hidden=n_hidden,
                           dimensions=n_filters,
                           filter_sizes=filter_sizes,
                           convolutional=convolutional,
                           activation=activation)

    x_tilde = decoding['x_tilde']
    x_flat = utils.flatten(x)
    x_tilde_flat = utils.flatten(x_tilde)

    # -log(p(x|z))
    loss_x = tf.reduce_sum(tf.squared_difference(x_flat, x_tilde_flat), 1)
    return {
        'loss_x': loss_x,
        'loss_z': loss_z,
        'x': x,
        'z': z,
        'Ws': encoding['Ws'],
        'hs': decoding['hs'],
        'x_tilde': x_tilde
    }
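Unlike the later VAE variant that returns a single 'cost' tensor, this one returns the reconstruction and latent losses separately and leaves the combination to the caller. A minimal sketch, assuming variational=True so that loss_z is a tensor rather than None:

vae = VAE(convolutional=True, variational=True)
cost = tf.reduce_mean(vae['loss_x'] + vae['loss_z'])
train_op = tf.train.AdamOptimizer(1e-3).minimize(cost)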
Code example #6
def test_flatten():
    assert(utils.flatten(
        tf.constant(np.zeros((3, 100, 100, 3)))).get_shape().as_list() == [3, 30000])
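The test pins down the contract of utils.flatten for tensors: every dimension except the batch dimension is collapsed, so (3, 100, 100, 3) becomes (3, 30000). A minimal implementation consistent with this test (a sketch, not the library's actual code) might be:

def flatten(x, name=None):
    # Collapse [batch, d1, d2, ...] into [batch, d1 * d2 * ...].
    dims = x.get_shape().as_list()[1:]
    return tf.reshape(x, [-1, int(np.prod(dims))], name=name)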
Code example #7
File: models.py  Project: mariolew/TF-FaceDetection
def fcn_24_detect(threshold, dropout=False, activation=tf.nn.relu):

    imgs = tf.placeholder(tf.float32, [None, 24, 24, 3])
    labels = tf.placeholder(tf.float32, [None, 1])
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')

    net_12 = fcn_12_detect(0.16, activation=activation)
    with tf.variable_scope('net_24'):
        conv1, _ = utils.conv2d(x=imgs,
                                n_output=64,
                                k_w=5,
                                k_h=5,
                                d_w=1,
                                d_h=1,
                                name="conv1")
        conv1 = activation(conv1)
        pool1 = tf.nn.max_pool(conv1,
                               ksize=[1, 3, 3, 1],
                               strides=[1, 2, 2, 1],
                               padding="SAME",
                               name="pool1")
        ip1, W1 = utils.conv2d(x=pool1,
                               n_output=128,
                               k_w=12,
                               k_h=12,
                               d_w=1,
                               d_h=1,
                               padding="VALID",
                               name="ip1")
        ip1 = activation(ip1)
        net_12_ip1 = net_12['features']
        # TF 1.x argument order: values first, then axis
        concat = tf.concat([ip1, net_12_ip1], 3)
        if dropout:
            concat = tf.nn.dropout(concat, keep_prob)
        ip2, W2 = utils.conv2d(x=concat,
                               n_output=1,
                               k_w=1,
                               k_h=1,
                               d_w=1,
                               d_h=1,
                               name="ip2")

        pred = tf.nn.sigmoid(utils.flatten(ip2))
        target = utils.flatten(labels)

        regularizer = 8e-3 * (tf.nn.l2_loss(W1) + 100 * tf.nn.l2_loss(W2))

        loss = tf.reduce_mean(
            tf.div(
                tf.add(
                    -tf.reduce_sum(target * tf.log(pred + 1e-9), 1),
                    -tf.reduce_sum((1 - target) * tf.log(1 - pred + 1e-9), 1)),
                2)) + regularizer
        cost = tf.reduce_mean(loss)

        thresholding_24 = tf.cast(tf.greater(pred, threshold), "float")
        recall_24 = tf.reduce_sum(
            tf.cast(
                tf.logical_and(tf.equal(thresholding_24, tf.constant([1.0])),
                               tf.equal(target, tf.constant([1.0]))),
                "float")) / tf.reduce_sum(target)

        correct_prediction = tf.equal(
            tf.cast(tf.greater(pred, threshold), tf.int32),
            tf.cast(target, tf.int32))
        acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        return {
            'net_12': net_12,
            'imgs': imgs,
            'labels': labels,
            'imgs_12': net_12['imgs'],
            'labels_12': net_12['labels'],
            'keep_prob': keep_prob,
            'keep_prob_12': net_12['keep_prob'],
            'cost': cost,
            'pred': pred,
            'accuracy': acc,
            'features': concat,
            'recall': recall_24,
            'thresholding': thresholding_24
        }
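The hand-rolled loss above is, up to the small epsilon and the division by two, a per-sample binary cross-entropy between target and pred. With access to the pre-sigmoid activations it could be written more stably with the built-in op (a sketch of replacement lines for the body above, not the project's code):

# Numerically stabler equivalent using the raw logits from ip2:
logits = utils.flatten(ip2)
bce = tf.reduce_sum(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=target, logits=logits), 1)
loss = tf.reduce_mean(bce / 2) + regularizer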
Code example #8
File: vae.py  Project: Liubinggunzu/CADL
def VAE(input_shape=[None, 784],
        n_filters=[64, 64, 64],
        filter_sizes=[4, 4, 4],
        n_hidden=32,
        n_code=2,
        activation=tf.nn.tanh,
        dropout=False,
        denoising=False,
        convolutional=False,
        variational=False):
    """(Variational) (Convolutional) (Denoising) Autoencoder.

    Uses tied weights.

    Parameters
    ----------
    input_shape : list, optional
        Shape of the input to the network. e.g. for MNIST: [None, 784].
    n_filters : list, optional
        Number of filters for each layer.
        If convolutional=True, this refers to the total number of output
        filters to create for each layer, with each layer's number of output
        filters as a list.
        If convolutional=False, then this refers to the total number of neurons
        for each layer in a fully connected network.
    filter_sizes : list, optional
        Only applied when convolutional=True.  This refers to the ksize (height
        and width) of each convolutional layer.
    n_hidden : int, optional
        Only applied when variational=True.  This refers to the first fully
        connected layer prior to the variational embedding, directly after
        the encoding.  After the variational embedding, another fully connected
        layer is created with the same size prior to decoding.  Set to 0 to
        not use an additional hidden layer.
    n_code : int, optional
        Only applied when variational=True.  This refers to the number of
        latent Gaussians to sample for creating the inner most encoding.
    activation : function, optional
        Activation function to apply to each layer, e.g. tf.nn.relu
    dropout : bool, optional
        Whether or not to apply dropout.  If using dropout, you must feed a
        value for 'keep_prob', as returned in the dictionary.  1.0 means no
        dropout is used.  0.0 means every connection is dropped.  Sensible
        values are between 0.5-0.8.
    denoising : bool, optional
        Whether or not to apply denoising.  If using denoising, you must feed a
        value for 'corrupt_prob', as returned in the dictionary.  1.0 means no
        corruption is used.  0.0 means every feature is corrupted.  Sensible
        values are between 0.5-0.8.
    convolutional : bool, optional
        Whether or not to use a convolutional network or else a fully connected
        network will be created.  This effects the n_filters parameter's
        meaning.
    variational : bool, optional
        Whether or not to create a variational embedding layer.  This will
        create a fully connected layer after the encoding, if `n_hidden` is
        greater than 0, then will create a multivariate gaussian sampling
        layer, then another fully connected layer.  The size of the fully
        connected layers are determined by `n_hidden`, and the size of the
        sampling layer is determined by `n_code`.

    Returns
    -------
    model : dict
        {
            'cost': Tensor to optimize.
            'Ws': All weights of the encoder.
            'x': Input Placeholder
            'z': Inner most encoding Tensor (latent features)
            'y': Reconstruction of the Decoder
            'keep_prob': Amount to keep when using Dropout
            'corrupt_prob': Amount to corrupt when using Denoising
            'train': Set to True when training/Applies to Batch Normalization.
        }
    """
    # network input / placeholders for train (bn) and dropout
    x = tf.placeholder(tf.float32, input_shape, 'x')
    phase_train = tf.placeholder(tf.bool, name='phase_train')
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    corrupt_prob = tf.placeholder(tf.float32, [1])

    # apply noise if denoising
    x_ = (utils.corrupt(x) * corrupt_prob + x * (1 - corrupt_prob)) if denoising else x

    # 2d -> 4d if convolution
    x_tensor = utils.to_tensor(x_) if convolutional else x_
    current_input = x_tensor

    Ws = []
    shapes = []

    # Build the encoder
    for layer_i, n_output in enumerate(n_filters):
        with tf.variable_scope('encoder/{}'.format(layer_i)):
            shapes.append(current_input.get_shape().as_list())
            if convolutional:
                h, W = utils.conv2d(x=current_input,
                                    n_output=n_output,
                                    k_h=filter_sizes[layer_i],
                                    k_w=filter_sizes[layer_i])
            else:
                h, W = utils.linear(x=current_input,
                                    n_output=n_output)
            h = activation(batch_norm(h, phase_train, 'bn' + str(layer_i)))
            if dropout:
                h = tf.nn.dropout(h, keep_prob)
            Ws.append(W)
            current_input = h

    shapes.append(current_input.get_shape().as_list())

    with tf.variable_scope('variational'):
        if variational:
            dims = current_input.get_shape().as_list()
            flattened = utils.flatten(current_input)

            if n_hidden:
                h = utils.linear(flattened, n_hidden, name='W_fc')[0]
                h = activation(batch_norm(h, phase_train, 'fc/bn'))
                if dropout:
                    h = tf.nn.dropout(h, keep_prob)
            else:
                h = flattened

            z_mu = utils.linear(h, n_code, name='mu')[0]
            z_log_sigma = 0.5 * utils.linear(h, n_code, name='log_sigma')[0]

            # Sample from noise distribution p(eps) ~ N(0, 1)
            epsilon = tf.random_normal(
                tf.stack([tf.shape(x)[0], n_code]))

            # Sample from posterior
            z = z_mu + tf.multiply(epsilon, tf.exp(z_log_sigma))

            if n_hidden:
                h = utils.linear(z, n_hidden, name='fc_t')[0]
                h = activation(batch_norm(h, phase_train, 'fc_t/bn'))
                if dropout:
                    h = tf.nn.dropout(h, keep_prob)
            else:
                h = z

            size = dims[1] * dims[2] * dims[3] if convolutional else dims[1]
            h = utils.linear(h, size, name='fc_t2')[0]
            current_input = activation(batch_norm(h, phase_train, 'fc_t2/bn'))
            if dropout:
                current_input = tf.nn.dropout(current_input, keep_prob)

            if convolutional:
                current_input = tf.reshape(
                    current_input, tf.stack([
                        tf.shape(current_input)[0],
                        dims[1],
                        dims[2],
                        dims[3]]))
        else:
            z = current_input

    shapes.reverse()
    # copy before reversing so the caller's (possibly default) list
    # is not mutated across calls
    n_filters = list(n_filters)
    n_filters.reverse()
    Ws.reverse()

    n_filters += [input_shape[-1]]

    # %%
    # Decoding layers
    for layer_i, n_output in enumerate(n_filters[1:]):
        with tf.variable_scope('decoder/{}'.format(layer_i)):
            shape = shapes[layer_i + 1]
            if convolutional:
                h, W = utils.deconv2d(x=current_input,
                                      n_output_h=shape[1],
                                      n_output_w=shape[2],
                                      n_output_ch=shape[3],
                                      n_input_ch=shapes[layer_i][3],
                                      k_h=filter_sizes[layer_i],
                                      k_w=filter_sizes[layer_i])
            else:
                h, W = utils.linear(x=current_input,
                                    n_output=n_output)
            h = activation(batch_norm(h, phase_train, 'dec/bn' + str(layer_i)))
            if dropout:
                h = tf.nn.dropout(h, keep_prob)
            current_input = h

    y = current_input
    x_flat = utils.flatten(x)
    y_flat = utils.flatten(y)

    # l2 loss
    loss_x = tf.reduce_sum(tf.squared_difference(x_flat, y_flat), 1)

    if variational:
        # variational lower bound, kl-divergence
        loss_z = -0.5 * tf.reduce_sum(
            1.0 + 2.0 * z_log_sigma -
            tf.square(z_mu) - tf.exp(2.0 * z_log_sigma), 1)

        # add l2 loss
        cost = tf.reduce_mean(loss_x + loss_z)
    else:
        # just optimize l2 loss
        cost = tf.reduce_mean(loss_x)

    return {'cost': cost, 'Ws': Ws,
            'x': x, 'z': z, 'y': y,
            'keep_prob': keep_prob,
            'corrupt_prob': corrupt_prob,
            'train': phase_train}
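The docstring above spells out which placeholders must be fed at run time; a minimal training-step sketch under those rules (the batch data and hyperparameters are hypothetical):

import numpy as np

ae = VAE(convolutional=True, variational=True, dropout=True, denoising=True)
train_op = tf.train.AdamOptimizer(1e-3).minimize(ae['cost'])

batch_x = np.random.rand(64, 784)  # stand-in for a real data batch
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    _, c = sess.run([train_op, ae['cost']], feed_dict={
        ae['x']: batch_x,
        ae['train']: True,          # batch norm in training mode
        ae['keep_prob']: 0.8,       # dropout: keep 80% of activations
        ae['corrupt_prob']: [0.5],  # denoising: corrupt half the input
    })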
Code example #9

best_descs = ['haddad_desc', 'all', 'vib_100']
nice_names = {'none_opt': 'default parameters (q2)',
              'k_opt': 'k_best optimized (q2)',
              'reg_opt': 'C optimized (q2)',
              'both_opt': 'both optimized (q2)',
              }
line_max = 0.9
ticks = [0, 0.2, 0.4, 0.6, 0.8, 0.9]
ticklabels = ['0', '.2', '.4', '.6', '.8', '']
vals = [out_res[k] for k in best_descs]
reference_name = 'none_opt'
to_compare = ['k_opt', 'reg_opt'] #, 'both_opt']
titles = ['c)', 'd)']
for i, pick_type in enumerate(to_compare):

    reference = np.array(utils.flatten([r[reference_name] for r in vals]))
    improved = np.array(utils.flatten([r[pick_type] for r in vals]))
    # only plot values which reach genscore > 0 after paramsearch
    reference = reference[improved > 0]
    improved = improved[improved > 0]
    # don't care how negative it was before search, all < 0 are equally bad
    reference[reference < 0] = 0
    improved[improved < 0] = 0

#    fig = plt.figure(figsize=(2.23, 2))
    ax = plt.subplot(gs[0, i])  # fig.add_subplot(1, 3, i+1)
    ax.plot(reference, improved, 'ko', alpha=0.6, markersize=4)
    ax.plot([0, line_max-0.05], [0, line_max-0.05], color='0.5')
    ax.set_ylabel(nice_names[pick_type])
    plt.axis('scaled')
    ax.set_xlim([-0.05, line_max])
Code example #10
def deepID(input_shape=[None, 39, 39, 1],
           n_filters=[20, 40, 60, 80],
           filter_sizes=[4, 3, 3, 2],
           activation=tf.nn.relu,
           dropout=False):
    """DeepID.

    Uses tied weights.

    Parameters
    ----------
    input_shape : list, optional
        Shape of the input to the network. e.g. for MNIST: [None, 784].
    n_filters : list, optional
        Number of filters for each layer.
        If convolutional=True, this refers to the total number of output
        filters to create for each layer, with each layer's number of output
        filters as a list.
        If convolutional=False, then this refers to the total number of neurons
        for each layer in a fully connected network.
    filter_sizes : list, optional
        Only applied when convolutional=True.  This refers to the ksize (height
        and width) of each convolutional layer.
    activation : function, optional
        Activation function to apply to each layer, e.g. tf.nn.relu
    dropout : bool, optional
        Whether or not to apply dropout.  If using dropout, you must feed a
        value for 'keep_prob', as returned in the dictionary.  1.0 means no
        dropout is used.  0.0 means every connection is dropped.  Sensible
        values are between 0.5-0.8.

    Returns
    -------
    model : dict
        {
            'cost': Tensor to optimize.
            'Ws': All weights of the encoder.
            'x': Input Placeholder
            'z': Inner most encoding Tensor (latent features)
            'y': Reconstruction of the Decoder
            'keep_prob': Amount to keep when using Dropout
            'corrupt_prob': Amount to corrupt when using Denoising
            'train': Set to True when training/Applies to Batch Normalization.
        }
    """
    # network input / placeholders for train (bn) and dropout
    x = tf.placeholder(tf.float32, input_shape, 'x')
    y = tf.placeholder(tf.float32, [None, 10], 'y')
    phase_train = tf.placeholder(tf.bool, name='phase_train')
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')

    # 2d -> 4d if convolution
    x_tensor = utils.to_tensor(x)
    current_input = x_tensor

    Ws = []
    shapes = []

    # Build the encoder
    shapes.append(current_input.get_shape().as_list())
    conv1, W = utils.conv2d(x=x_tensor,
                            n_output=n_filters[0],
                            k_h=filter_sizes[0],
                            k_w=filter_sizes[0],
                            d_w=1,
                            d_h=1,
                            name='conv1')
    Ws.append(W)
    # conv1 = activation(batch_norm(conv1, phase_train, 'bn1'))
    conv1 = activation(conv1)

    pool1 = tf.nn.max_pool(conv1,
                           ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1],
                           padding='SAME',
                           name='pool1')

    conv2, W = utils.conv2d(x=pool1,
                            n_output=n_filters[1],
                            k_h=filter_sizes[1],
                            k_w=filter_sizes[1],
                            d_w=1,
                            d_h=1,
                            name='conv2')
    Ws.append(W)
    # conv2 = activation(batch_norm(conv2, phase_train, 'bn2'))
    conv2 = activation(conv2)

    pool2 = tf.nn.max_pool(conv2,
                           ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1],
                           padding='SAME',
                           name='pool2')

    conv3, W = utils.conv2d(x=pool2,
                            n_output=n_filters[2],
                            k_h=filter_sizes[2],
                            k_w=filter_sizes[2],
                            d_w=1,
                            d_h=1,
                            name='conv3')
    Ws.append(W)
    # conv3 = activation(batch_norm(conv3, phase_train, 'bn3'))
    conv3 = activation(conv3)

    pool3 = tf.nn.max_pool(conv3,
                           ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1],
                           padding='SAME',
                           name='pool3')

    conv4, W = utils.conv2d(x=pool3,
                            n_output=n_filters[3],
                            k_h=filter_sizes[3],
                            k_w=filter_sizes[3],
                            d_w=1,
                            d_h=1,
                            name='conv4')
    Ws.append(W)
    # conv4 = activation(batch_norm(conv4, phase_train, 'bn4'))
    conv4 = activation(conv4)

    pool3_flat = utils.flatten(pool3)
    conv4_flat = utils.flatten(conv4)
    # TF 1.x argument order: values first, then axis
    concat = tf.concat([pool3_flat, conv4_flat], 1, name='concat')

    ip1, W = utils.linear(concat, 120, name='ip1')
    Ws.append(W)
    ip1 = activation(ip1)
    if dropout:
        ip1 = tf.nn.dropout(ip1, keep_prob)

    ip2, W = utils.linear(ip1, 10, name='ip2')
    Ws.append(W)
    # ip2 = activation(ip2)

    p_flat = utils.flatten(ip2)
    y_flat = utils.flatten(y)

    regularizers = 5e-4 * (tf.nn.l2_loss(Ws[-1]) + tf.nn.l2_loss(Ws[-2]))
    # l2 loss
    loss_x = tf.reduce_sum(tf.squared_difference(p_flat, y_flat), 1)
    cost = tf.reduce_mean(loss_x) + regularizers
    prediction = tf.reshape(p_flat, (-1, 5, 2))  # presumably 5 (x, y) landmark pairs

    return {
        'cost': cost,
        'Ws': Ws,
        'x': x,
        'y': y,
        'pred': prediction,
        'keep_prob': keep_prob,
        'train': phase_train
    }
Code example #11
File: vaegan.py  Project: pkmital/CADL
def VAEGAN(input_shape=[None, 784],
           n_filters=[64, 64, 64],
           filter_sizes=[4, 4, 4],
           n_hidden=32,
           n_code=2,
           activation=tf.nn.tanh,
           convolutional=False,
           variational=False):
    """Summary

    Parameters
    ----------
    input_shape : list, optional
        Description
    n_filters : list, optional
        Description
    filter_sizes : list, optional
        Description
    n_hidden : int, optional
        Description
    n_code : int, optional
        Description
    activation : TYPE, optional
        Description
    convolutional : bool, optional
        Description
    variational : bool, optional
        Description

    Returns
    -------
    name : TYPE
        Description
    """
    # network input / placeholders for train (bn)
    x = tf.placeholder(tf.float32, input_shape, 'x')
    z_samp = tf.placeholder(tf.float32, [None, n_code], 'z_samp')

    with tf.variable_scope('encoder'):
        encoding = encoder(
            x=x,
            n_hidden=n_hidden,
            convolutional=convolutional,
            dimensions=n_filters,
            filter_sizes=filter_sizes,
            activation=activation)

        with tf.variable_scope('variational'):
            z, z_mu, z_log_sigma, loss_z = variational_bayes(
                h=encoding['z'], n_code=n_code)

    shapes = encoding['shapes'].copy()
    shapes.reverse()
    n_filters_decoder = n_filters.copy()
    n_filters_decoder.reverse()
    n_filters_decoder += [input_shape[-1]]

    with tf.variable_scope('generator'):
        decoding_actual = decoder(
            z=z,
            shapes=shapes,
            n_hidden=n_hidden,
            convolutional=convolutional,
            dimensions=n_filters_decoder,
            filter_sizes=filter_sizes,
            activation=activation)

    with tf.variable_scope('generator', reuse=True):
        decoding_sampled = decoder(
            z=z_samp,
            shapes=shapes,
            n_hidden=n_hidden,
            convolutional=convolutional,
            dimensions=n_filters_decoder,
            filter_sizes=filter_sizes,
            activation=activation)

    with tf.variable_scope('discriminator'):
        D_real = discriminator(
            x,
            filter_sizes=filter_sizes,
            n_filters=n_filters,
            activation=activation)

    with tf.variable_scope('discriminator', reuse=True):
        D_fake = discriminator(
            decoding_actual['x_tilde'],
            filter_sizes=filter_sizes,
            n_filters=n_filters,
            activation=activation)

    with tf.variable_scope('discriminator', reuse=True):
        D_samp = discriminator(
            decoding_sampled['x_tilde'],
            filter_sizes=filter_sizes,
            n_filters=n_filters,
            activation=activation)

    with tf.variable_scope('loss'):
        # Weights influence of content/style of decoder
        gamma = tf.placeholder(tf.float32, name='gamma')

        # Discriminator_l Log Likelihood Loss
        loss_D_llike = 0
        for h_fake, h_real in zip(D_fake['hs'][3:], D_real['hs'][3:]):
            loss_D_llike += tf.reduce_sum(0.5 * tf.squared_difference(
                utils.flatten(h_fake), utils.flatten(h_real)), 1)

        # GAN Loss
        eps = 1e-12
        loss_real = tf.reduce_sum(tf.log(D_real['probs'] + eps), 1)
        loss_fake = tf.reduce_sum(tf.log(1 - D_fake['probs'] + eps), 1)
        loss_samp = tf.reduce_sum(tf.log(1 - D_samp['probs'] + eps), 1)

        loss_GAN = (loss_real + loss_fake + loss_samp) / 3.0

        loss_enc = tf.reduce_mean(loss_z + loss_D_llike)
        loss_gen = tf.reduce_mean(gamma * loss_D_llike - loss_GAN)
        loss_dis = -tf.reduce_mean(loss_GAN)

    return {
        'x': x,
        'z': z,
        'x_tilde': decoding_actual['x_tilde'],
        'z_samp': z_samp,
        'x_tilde_samp': decoding_sampled['x_tilde'],
        'loss_real': loss_real,
        'loss_fake': loss_fake,
        'loss_samp': loss_samp,
        'loss_GAN': loss_GAN,
        'loss_D_llike': loss_D_llike,
        'loss_enc': loss_enc,
        'loss_gen': loss_gen,
        'loss_dis': loss_dis,
        'gamma': gamma
    }
Code example #12
File: vaegan.py  Project: pkmital/CADL
def encoder(x,
            n_hidden=None,
            dimensions=[],
            filter_sizes=[],
            convolutional=False,
            activation=tf.nn.relu,
            output_activation=tf.nn.sigmoid):
    """Summary

    Parameters
    ----------
    x : TYPE
        Description
    n_hidden : None, optional
        Description
    dimensions : list, optional
        Description
    filter_sizes : list, optional
        Description
    convolutional : bool, optional
        Description
    activation : TYPE, optional
        Description
    output_activation : TYPE, optional
        Description

    Returns
    -------
    name : TYPE
        Description
    """
    if convolutional:
        x_tensor = utils.to_tensor(x)
    else:
        x_tensor = tf.reshape(tensor=x, shape=[-1, dimensions[0]])
        dimensions = dimensions[1:]
    current_input = x_tensor

    Ws = []
    hs = []
    shapes = []
    for layer_i, n_output in enumerate(dimensions):
        with tf.variable_scope(str(layer_i)):
            shapes.append(current_input.get_shape().as_list())
            if convolutional:
                h, W = utils.conv2d(
                    x=current_input,
                    n_output=n_output,
                    k_h=filter_sizes[layer_i],
                    k_w=filter_sizes[layer_i],
                    padding='SAME')
            else:
                h, W = utils.linear(x=current_input, n_output=n_output)
            h = activation(h)
            Ws.append(W)
            hs.append(h)

        current_input = h

    shapes.append(h.get_shape().as_list())

    with tf.variable_scope('flatten'):
        flattened = utils.flatten(current_input)

    with tf.variable_scope('hidden'):
        if n_hidden:
            h, W = utils.linear(flattened, n_hidden, name='linear')
            h = activation(h)
        else:
            h = flattened

    return {'z': h, 'Ws': Ws, 'hs': hs, 'shapes': shapes}
Code example #13
File: vaegan.py  Project: pkmital/CADL
def VAE(input_shape=[None, 784],
        n_filters=[64, 64, 64],
        filter_sizes=[4, 4, 4],
        n_hidden=32,
        n_code=2,
        activation=tf.nn.tanh,
        convolutional=False,
        variational=False):
    """Summary

    Parameters
    ----------
    input_shape : list, optional
        Description
    n_filters : list, optional
        Description
    filter_sizes : list, optional
        Description
    n_hidden : int, optional
        Description
    n_code : int, optional
        Description
    activation : TYPE, optional
        Description
    convolutional : bool, optional
        Description
    variational : bool, optional
        Description

    Returns
    -------
    name : TYPE
        Description
    """
    # network input / placeholders for train (bn)
    x = tf.placeholder(tf.float32, input_shape, 'x')

    with tf.variable_scope('encoder'):
        encoding = encoder(
            x=x,
            n_hidden=n_hidden,
            convolutional=convolutional,
            dimensions=n_filters,
            filter_sizes=filter_sizes,
            activation=activation)

    if variational:
        with tf.variable_scope('variational'):
            z, z_mu, z_log_sigma, loss_z = variational_bayes(
                h=encoding['z'], n_code=n_code)
    else:
        z = encoding['z']
        loss_z = None

    shapes = encoding['shapes'].copy()
    shapes.reverse()
    n_filters = n_filters.copy()
    n_filters.reverse()
    n_filters += [input_shape[-1]]

    with tf.variable_scope('generator'):
        decoding = decoder(
            z=z,
            shapes=shapes,
            n_hidden=n_hidden,
            dimensions=n_filters,
            filter_sizes=filter_sizes,
            convolutional=convolutional,
            activation=activation)

    x_tilde = decoding['x_tilde']
    x_flat = utils.flatten(x)
    x_tilde_flat = utils.flatten(x_tilde)

    # -log(p(x|z))
    loss_x = tf.reduce_sum(tf.squared_difference(x_flat, x_tilde_flat), 1)
    return {
        'loss_x': loss_x,
        'loss_z': loss_z,
        'x': x,
        'z': z,
        'Ws': encoding['Ws'],
        'hs': decoding['hs'],
        'x_tilde': x_tilde
    }
Code example #14
def VAE(input_shape=[None, 784],
        n_filters=[64, 64, 64],
        filter_sizes=[4, 4, 4],
        n_hidden=32,
        n_code=2,
        activation=tf.nn.tanh,
        dropout=False,
        denoising=False,
        convolutional=False,
        variational=False):
    """(Variational) (Convolutional) (Denoising) Autoencoder.
    Uses tied weights.

    Parameters
    ----------
    input_shape : list, optional
        Shape of the input to the network. e.g. for MNIST: [None, 784].
    n_filters : list, optional
        Number of filters for each layer.
        If convolutional=True, this refers to the total number of output
        filters to create for each layer, with each layer's number of output
        filters as a list.
        If convolutional=False, then this refers to the total number of neurons
        for each layer in a fully connected network.
    filter_sizes : list, optional
        Only applied when convolutional=True.  This refers to the ksize (height
        and width) of each convolutional layer.
    n_hidden : int, optional
        Only applied when variational=True.  This refers to the first fully
        connected layer prior to the variational embedding, directly after
        the encoding.  After the variational embedding, another fully connected
        layer is created with the same size prior to decoding.  Set to 0 to
        not use an additional hidden layer.
    n_code : int, optional
        Only applied when variational=True.  This refers to the number of
        latent Gaussians to sample for creating the inner most encoding.
    activation : function, optional
        Activation function to apply to each layer, e.g. tf.nn.relu
    dropout : bool, optional
        Whether or not to apply dropout.  If using dropout, you must feed a
        value for 'keep_prob', as returned in the dictionary.  1.0 means no
        dropout is used.  0.0 means every connection is dropped.  Sensible
        values are between 0.5-0.8.
    denoising : bool, optional
        Whether or not to apply denoising.  If using denoising, you must feed a
        value for 'corrupt_prob', as returned in the dictionary.  1.0 means no
        corruption is used.  0.0 means every feature is corrupted.  Sensible
        values are between 0.5-0.8.
    convolutional : bool, optional
        Whether or not to use a convolutional network or else a fully connected
        network will be created.  This effects the n_filters parameter's
        meaning.
    variational : bool, optional
        Whether or not to create a variational embedding layer.  This will
        create a fully connected layer after the encoding, if `n_hidden` is
        greater than 0, then will create a multivariate gaussian sampling
        layer, then another fully connected layer.  The size of the fully
        connected layers are determined by `n_hidden`, and the size of the
        sampling layer is determined by `n_code`.

    Returns
    -------
    model : dict
        {
            'cost': Tensor to optimize.
            'Ws': All weights of the encoder.
            'x': Input Placeholder
            'z': Inner most encoding Tensor (latent features)
            'y': Reconstruction of the Decoder
            'keep_prob': Amount to keep when using Dropout
            'corrupt_prob': Amount to corrupt when using Denoising
            'train': Set to True when training/Applies to Batch Normalization.
        }
    """
    # network input / placeholders for train (bn) and dropout
    x = tf.placeholder(tf.float32, input_shape, 'x')
    phase_train = tf.placeholder(tf.bool, name='phase_train')
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    corrupt_prob = tf.placeholder(tf.float32, [1])

    # apply noise if denoising
    x_ = (utils.corrupt(x) * corrupt_prob + x *
          (1 - corrupt_prob)) if denoising else x

    # 2d -> 4d if convolution
    x_tensor = utils.to_tensor(x_) if convolutional else x_
    current_input = x_tensor

    Ws = []
    shapes = []

    # Build the encoder
    for layer_i, n_output in enumerate(n_filters):
        with tf.variable_scope('encoder/{}'.format(layer_i)):
            shapes.append(current_input.get_shape().as_list())
            if convolutional:
                h, W = utils.conv2d(x=current_input,
                                    n_output=n_output,
                                    k_h=filter_sizes[layer_i],
                                    k_w=filter_sizes[layer_i])
            else:
                h, W = utils.linear(x=current_input, n_output=n_output)
            h = activation(batch_norm(h, phase_train, 'bn' + str(layer_i)))
            if dropout:
                h = tf.nn.dropout(h, keep_prob)
            Ws.append(W)
            current_input = h

    shapes.append(current_input.get_shape().as_list())

    with tf.variable_scope('variational'):
        if variational:
            dims = current_input.get_shape().as_list()
            flattened = utils.flatten(current_input)

            if n_hidden:
                h = utils.linear(flattened, n_hidden, name='W_fc')[0]
                h = activation(batch_norm(h, phase_train, 'fc/bn'))
                if dropout:
                    h = tf.nn.dropout(h, keep_prob)
            else:
                h = flattened

            z_mu = utils.linear(h, n_code, name='mu')[0]
            z_log_sigma = 0.5 * utils.linear(h, n_code, name='log_sigma')[0]

            # Sample from noise distribution p(eps) ~ N(0, 1)
            epsilon = tf.random_normal(tf.stack([tf.shape(x)[0], n_code]))

            # Sample from posterior
            z = z_mu + tf.multiply(epsilon, tf.exp(z_log_sigma))

            if n_hidden:
                h = utils.linear(z, n_hidden, name='fc_t')[0]
                h = activation(batch_norm(h, phase_train, 'fc_t/bn'))
                if dropout:
                    h = tf.nn.dropout(h, keep_prob)
            else:
                h = z

            size = dims[1] * dims[2] * dims[3] if convolutional else dims[1]
            h = utils.linear(h, size, name='fc_t2')[0]
            current_input = activation(batch_norm(h, phase_train, 'fc_t2/bn'))
            if dropout:
                current_input = tf.nn.dropout(current_input, keep_prob)

            if convolutional:
                current_input = tf.reshape(
                    current_input,
                    tf.stack([
                        tf.shape(current_input)[0], dims[1], dims[2], dims[3]
                    ]))
        else:
            z = current_input

    shapes.reverse()
    # copy before reversing so the caller's (possibly default) list
    # is not mutated across calls
    n_filters = list(n_filters)
    n_filters.reverse()
    Ws.reverse()

    n_filters += [input_shape[-1]]

    # %%
    # Decoding layers
    for layer_i, n_output in enumerate(n_filters[1:]):
        with tf.variable_scope('decoder/{}'.format(layer_i)):
            shape = shapes[layer_i + 1]
            if convolutional:
                h, W = utils.deconv2d(x=current_input,
                                      n_output_h=shape[1],
                                      n_output_w=shape[2],
                                      n_output_ch=shape[3],
                                      n_input_ch=shapes[layer_i][3],
                                      k_h=filter_sizes[layer_i],
                                      k_w=filter_sizes[layer_i])
            else:
                h, W = utils.linear(x=current_input, n_output=n_output)
            h = activation(batch_norm(h, phase_train, 'dec/bn' + str(layer_i)))
            if dropout:
                h = tf.nn.dropout(h, keep_prob)
            current_input = h

    y = current_input
    x_flat = utils.flatten(x)
    y_flat = utils.flatten(y)

    # l2 loss
    loss_x = tf.reduce_sum(tf.squared_difference(x_flat, y_flat), 1)

    if variational:
        # variational lower bound, kl-divergence
        loss_z = -0.5 * tf.reduce_sum(
            1.0 + 2.0 * z_log_sigma - tf.square(z_mu) -
            tf.exp(2.0 * z_log_sigma), 1)

        # add l2 loss
        cost = tf.reduce_mean(loss_x + loss_z)
    else:
        # just optimize l2 loss
        cost = tf.reduce_mean(loss_x)

    return {
        'cost': cost,
        'Ws': Ws,
        'x': x,
        'z': z,
        'y': y,
        'keep_prob': keep_prob,
        'corrupt_prob': corrupt_prob,
        'train': phase_train
    }
Code example #15
File: models.py  Project: mariolew/TF-FaceDetection
def fcn_48_cal(dropout=False, activation=tf.nn.relu):

    imgs = tf.placeholder(tf.float32, [None, 48, 48, 3])
    labels = tf.placeholder(tf.float32, [None])
    # keep probability for the optional dropout branch below
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')

    with tf.variable_scope('cal_48'):
        conv1, _ = utils.conv2d(x=imgs,
                                n_output=64,
                                k_w=5,
                                k_h=5,
                                d_w=1,
                                d_h=1,
                                name="conv1")
        conv1 = activation(conv1)
        pool1 = tf.nn.max_pool(conv1,
                               ksize=[1, 3, 3, 1],
                               strides=[1, 2, 2, 1],
                               padding="SAME",
                               name="pool1")
        conv2, _ = utils.conv2d(x=pool1,
                                n_output=64,
                                k_w=5,
                                k_h=5,
                                d_w=1,
                                d_h=1,
                                name="conv2")
        ip1, W1 = utils.conv2d(x=conv2,
                               n_output=256,
                               k_w=24,
                               k_h=24,
                               d_w=1,
                               d_h=1,
                               padding="VALID",
                               name="ip1")
        ip1 = activation(ip1)
        if dropout:
            ip1 = tf.nn.dropout(ip1, keep_prob)
        ip2, W2 = utils.conv2d(x=ip1,
                               n_output=45,
                               k_w=1,
                               k_h=1,
                               d_w=1,
                               d_h=1,
                               name="ip2")

        pred = utils.flatten(ip2)
        # target = utils.flatten(labels)
        # label_shape = labels.get_shape().as_list()
        # target = tf.reshape(labels,[label_shape[0]])
        target = labels

        cross_entropy = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=pred, labels=tf.cast(target, tf.int64)))
        regularizer = 8e-3 * (tf.nn.l2_loss(W1) + 100 * tf.nn.l2_loss(W2))

        loss = cross_entropy + regularizer

        correct_prediction = tf.equal(tf.argmax(pred, 1),
                                      tf.cast(target, tf.int64))
        acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        return {
            'cost': loss,
            'pred': pred,
            'accuracy': acc,
            'target': target,
            'imgs': imgs,
            'labels': labels
        }
Code example #16
def VAE(input_shape=[None, 784],
        output_shape=[None, 784],
        n_filters=[64, 64, 64],
        filter_sizes=[4, 4, 4],
        n_hidden=32,
        n_code=2,
        activation=tf.nn.tanh,
        dropout=False,
        denoising=False,
        convolutional=False,
        variational=False,
        softmax=False,
        classifier='alexnet_v2'):
    """(Variational) (Convolutional) (Denoising) Autoencoder.

    Uses tied weights.

    Parameters
    ----------
    input_shape : list, optional
        Shape of the input to the network. e.g. for MNIST: [None, 784].
    n_filters : list, optional
        Number of filters for each layer.
        If convolutional=True, this refers to the total number of output
        filters to create for each layer, with each layer's number of output
        filters as a list.
        If convolutional=False, then this refers to the total number of neurons
        for each layer in a fully connected network.
    filter_sizes : list, optional
        Only applied when convolutional=True.  This refers to the ksize (height
        and width) of each convolutional layer.
    n_hidden : int, optional
        Only applied when variational=True.  This refers to the first fully
        connected layer prior to the variational embedding, directly after
        the encoding.  After the variational embedding, another fully connected
        layer is created with the same size prior to decoding.  Set to 0 to
        not use an additional hidden layer.
    n_code : int, optional
        Only applied when variational=True.  This refers to the number of
        latent Gaussians to sample for creating the inner most encoding.
    activation : function, optional
        Activation function to apply to each layer, e.g. tf.nn.relu
    dropout : bool, optional
        Whether or not to apply dropout.  If using dropout, you must feed a
        value for 'keep_prob', as returned in the dictionary.  1.0 means no
        dropout is used.  0.0 means every connection is dropped.  Sensible
        values are between 0.5-0.8.
    denoising : bool, optional
        Whether or not to apply denoising.  If using denoising, you must feed a
        value for 'corrupt_rec', as returned in the dictionary.  1.0 means no
        corruption is used.  0.0 means every feature is corrupted.  Sensible
        values are between 0.5-0.8.
    convolutional : bool, optional
        Whether or not to use a convolutional network or else a fully connected
        network will be created.  This effects the n_filters parameter's
        meaning.
    variational : bool, optional
        Whether or not to create a variational embedding layer.  This will
        create a fully connected layer after the encoding, if `n_hidden` is
        greater than 0, then will create a multivariate gaussian sampling
        layer, then another fully connected layer.  The size of the fully
        connected layers are determined by `n_hidden`, and the size of the
        sampling layer is determined by `n_code`.

    Returns
    -------
    model : dict
        {
            'cost': Tensor to optimize.
            'Ws': All weights of the encoder.
            'x': Input Placeholder
            'z': Inner most encoding Tensor (latent features)
            'y': Reconstruction of the Decoder
            'keep_prob': Amount to keep when using Dropout
            'corrupt_rec': Amount to corrupt when using Denoising
            'train': Set to True when training/Applies to Batch Normalization.
        }
    """
    # network input / placeholders for train (bn) and dropout
    x = tf.placeholder(tf.float32, input_shape, 'x')
    t = tf.placeholder(tf.float32, output_shape, 't')
    label = tf.placeholder(tf.int32, [None], 'label')
    phase_train = tf.placeholder(tf.bool, name='phase_train')
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    corrupt_rec = tf.placeholder(tf.float32, name='corrupt_rec')
    corrupt_cls = tf.placeholder(tf.float32, name='corrupt_cls')

    # input of the reconstruction network
    # np.tanh(2) = 0.964
    # corruption amount is controlled by the fed corrupt_rec value
    # (1.0 = no corruption), so only the denoising flag is checked here
    current_input1 = utils.corrupt(x)*corrupt_rec + x*(1-corrupt_rec) \
        if denoising else x
    current_input1.set_shape(x.get_shape())
    # 2d -> 4d if convolution
    current_input1 = utils.to_tensor(current_input1) \
        if convolutional else current_input1

    Ws = []
    shapes = []

    # Build the encoder
    for layer_i, n_output in enumerate(n_filters):
        with tf.variable_scope('encoder/{}'.format(layer_i)):
            shapes.append(current_input1.get_shape().as_list())
            if convolutional:
                h, W = utils.conv2d(x=current_input1,
                                    n_output=n_output,
                                    k_h=filter_sizes[layer_i],
                                    k_w=filter_sizes[layer_i])
            else:
                h, W = utils.linear(x=current_input1, n_output=n_output)
            h = activation(batch_norm(h, phase_train, 'bn' + str(layer_i)))
            if dropout:
                h = tf.nn.dropout(h, keep_prob)
            Ws.append(W)
            current_input1 = h

    shapes.append(current_input1.get_shape().as_list())

    with tf.variable_scope('variational'):
        if variational:
            dims = current_input1.get_shape().as_list()
            flattened = utils.flatten(current_input1)

            if n_hidden:
                h = utils.linear(flattened, n_hidden, name='W_fc')[0]
                h = activation(batch_norm(h, phase_train, 'fc/bn'))
                if dropout:
                    h = tf.nn.dropout(h, keep_prob)
            else:
                h = flattened

            z_mu = utils.linear(h, n_code, name='mu')[0]
            z_log_sigma = 0.5 * utils.linear(h, n_code, name='log_sigma')[0]
            # modified by yidawang
            # s, u, v = tf.svd(z_log_sigma)
            # z_log_sigma = tf.matmul(
            #        tf.matmul(u, tf.diag(s)), tf.transpose(v))
            # end yidawang

            # Sample from noise distribution p(eps) ~ N(0, 1)
            epsilon = tf.random_normal(tf.stack([tf.shape(x)[0], n_code]))

            # Sample from posterior
            z = z_mu + tf.multiply(epsilon, tf.exp(z_log_sigma))

            if n_hidden:
                h = utils.linear(z, n_hidden, name='fc_t')[0]
                h = activation(batch_norm(h, phase_train, 'fc_t/bn'))
                if dropout:
                    h = tf.nn.dropout(h, keep_prob)
            else:
                h = z

            size = dims[1] * dims[2] * dims[3] if convolutional else dims[1]
            h = utils.linear(h, size, name='fc_t2')[0]
            current_input1 = activation(batch_norm(h, phase_train, 'fc_t2/bn'))
            if dropout:
                current_input1 = tf.nn.dropout(current_input1, keep_prob)

            if convolutional:
                current_input1 = tf.reshape(
                    current_input1,
                    tf.stack([
                        tf.shape(current_input1)[0], dims[1], dims[2], dims[3]
                    ]))
        else:
            z = current_input1

    shapes.reverse()
    # copy before reversing so the caller's (possibly default) list
    # is not mutated across calls
    n_filters = list(n_filters)
    n_filters.reverse()
    Ws.reverse()

    n_filters += [input_shape[-1]]

    # %%
    # Decoding layers
    for layer_i, n_output in enumerate(n_filters[1:]):
        with tf.variable_scope('decoder/{}'.format(layer_i)):
            shape = shapes[layer_i + 1]
            if convolutional:
                h, W = utils.deconv2d(x=current_input1,
                                      n_output_h=shape[1],
                                      n_output_w=shape[2],
                                      n_output_ch=shape[3],
                                      n_input_ch=shapes[layer_i][3],
                                      k_h=filter_sizes[layer_i],
                                      k_w=filter_sizes[layer_i])
            else:
                h, W = utils.linear(x=current_input1, n_output=n_output)
            h = activation(batch_norm(h, phase_train, 'dec/bn' + str(layer_i)))
            if dropout:
                h = tf.nn.dropout(h, keep_prob)
            current_input1 = h

    y = current_input1
    t_flat = utils.flatten(t)
    y_flat = utils.flatten(y)

    # l2 loss
    loss_x = tf.reduce_mean(
        tf.reduce_sum(tf.squared_difference(t_flat, y_flat), 1))
    loss_z = 0

    if variational:
        # Variational lower bound, kl-divergence
        loss_z = tf.reduce_mean(-0.5 * tf.reduce_sum(
            1.0 + 2.0 * z_log_sigma - tf.square(z_mu) -
            tf.exp(2.0 * z_log_sigma), 1))

        # Add l2 loss
        cost_vae = tf.reduce_mean(loss_x + loss_z)
    else:
        # Just optimize l2 loss
        cost_vae = tf.reduce_mean(loss_x)

    # Alexnet for clasification based on softmax using TensorFlow slim
    if softmax:
        axis = list(range(len(x.get_shape())))
        # use tf.cond to branch on the phase_train placeholder at run time
        # (a Python `is True` test against a tensor would never branch)
        mean1, variance1 = tf.cond(phase_train,
                                   lambda: tf.nn.moments(t, axis),
                                   lambda: tf.nn.moments(x, axis))
        mean2, variance2 = tf.nn.moments(y, axis)
        var_prob = variance2 / variance1

        # Input of the classification network; corruption amount is
        # controlled by the fed corrupt_cls value (1.0 = no corruption)
        current_input2 = utils.corrupt(x)*corrupt_cls + \
            x*(1-corrupt_cls) \
            if denoising else x
        current_input2.set_shape(x.get_shape())
        current_input2 = utils.to_tensor(current_input2) \
            if convolutional else current_input2

        y_concat = tf.concat([current_input2, y], 3)
        with tf.variable_scope('deconv/concat'):
            shape = shapes[layer_i + 1]
            if convolutional:
                # Here we set the input of classification network is
                # the twice of
                # the input of the reconstruction network
                # 112->224 for alexNet and 150->300 for inception v3 and v4
                y_concat, W = utils.deconv2d(
                    x=y_concat,
                    n_output_h=y_concat.get_shape()[1] * 2,
                    n_output_w=y_concat.get_shape()[1] * 2,
                    n_output_ch=y_concat.get_shape()[3],
                    n_input_ch=y_concat.get_shape()[3],
                    k_h=3,
                    k_w=3)
                Ws.append(W)

        # The following are optional networks for classification network
        if classifier == 'squeezenet':
            predictions, net = squeezenet.squeezenet(y_concat, num_classes=13)
        elif classifier == 'zigzagnet':
            predictions, net = squeezenet.zigzagnet(y_concat, num_classes=13)
        elif classifier == 'alexnet_v2':
            predictions, end_points = alexnet.alexnet_v2(y_concat,
                                                         num_classes=13)
        elif classifier == 'inception_v1':
            predictions, end_points = inception.inception_v1(y_concat,
                                                             num_classes=13)
        elif classifier == 'inception_v2':
            predictions, end_points = inception.inception_v2(y_concat,
                                                             num_classes=13)
        elif classifier == 'inception_v3':
            predictions, end_points = inception.inception_v3(y_concat,
                                                             num_classes=13)

        label_onehot = tf.one_hot(label, 13, axis=-1, dtype=tf.int32)
        cost_s = tf.losses.softmax_cross_entropy(label_onehot, predictions)
        cost_s = tf.reduce_mean(cost_s)
        acc = tf.nn.in_top_k(predictions, label, 1)
    else:
        predictions = tf.one_hot(label, 13, 1, 0)
        label_onehot = tf.one_hot(label, 13, 1, 0)
        cost_s = 0
        acc = 0
        # the summaries and the return dict below reference var_prob, so
        # give it a neutral value when the softmax head is disabled
        var_prob = tf.constant(0.0)
    # Using Summaries for Tensorboard
    tf.summary.scalar('cost_vae', cost_vae)
    tf.summary.scalar('cost_s', cost_s)
    tf.summary.scalar('loss_x', loss_x)
    tf.summary.scalar('loss_z', loss_z)
    tf.summary.scalar('corrupt_rec', corrupt_rec)
    tf.summary.scalar('corrupt_cls', corrupt_cls)
    tf.summary.scalar('var_prob', var_prob)
    merged = tf.summary.merge_all()

    return {
        'cost_vae': cost_vae,
        'cost_s': cost_s,
        'loss_x': loss_x,
        'loss_z': loss_z,
        'Ws': Ws,
        'x': x,
        't': t,
        'label': label,
        'label_onehot': label_onehot,
        'predictions': predictions,
        'z': z,
        'y': y,
        'acc': acc,
        'keep_prob': keep_prob,
        'corrupt_rec': corrupt_rec,
        'corrupt_cls': corrupt_cls,
        'var_prob': var_prob,
        'train': phase_train,
        'merged': merged
    }
Code example #17

fig = plt.figure()
plot_res = {desc: {'ps': [], 'corrs': []} for desc in to_compare}
for desc in to_compare:

    for glom in res[reference]:
        corr, p = stats.pearsonr(res[reference][glom]['predictions'],
                                 res[desc][glom]['predictions'])
        plot_res[desc]['corrs'].append(corr)
        plot_res[desc]['ps'].append(p)

for desc, pres in plot_res.items():
    print('{} mean r: {:.2f}'.format(desc, np.mean(pres['corrs'])))


ax = fig.add_subplot(1,2,1)
ax.hist(utils.flatten([v['ps'] for v in plot_res.values()]))

fig = plt.figure(figsize=(3.35, 1.8))
marker = ['o', 's', 'd']
xticks = [0, 0.2, 0.4, 0.6, 0.8]
xticklabels = ['0', '.2', '.4', '.6', '.8']
ax = fig.add_subplot(1,2,1)
ax.plot([-0.05, 0.8], [-0.05, 0.8], color='0.6')
col_ref = []
col_comp = []
for i, (desc, pres) in enumerate(plot_res.items()):
    compare_scores = np.array([res[desc][g]['score'] for g in res[desc]])
    ref_scores = np.array([res[reference][g]['score'] for g in res[reference]])
    compare_scores[compare_scores < 0] = 0
    ref_scores[ref_scores < 0] = 0
    col_ref.extend(ref_scores)