def read_and_decode(filename_queue, batch_size):

    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        # 'height' and 'char_opinion' have no defaults (required); 'word_opinion' falls back to -1 padding.
        features={
            'height':
            tf.FixedLenFeature([], tf.int64),
            'word_opinion':
            tf.FixedLenFeature([Hp.w_maxlen * 6],
                               dtype=tf.int64,
                               default_value=[-1] * Hp.w_maxlen * 6),
            'char_opinion':
            tf.FixedLenFeature([], tf.string)
        })

    char_opinion = tf.decode_raw(features['char_opinion'], tf.uint8)
    height = tf.cast(features['height'], tf.int32)
    word_opinion = tf.cast(features['word_opinion'], tf.int32)

    char_opinion = tf.reshape(char_opinion, tf.stack([6, Hp.c_maxlen]))
    word_opinion = tf.reshape(word_opinion, tf.stack([6, Hp.w_maxlen]))
    words, chars = tf.train.batch([word_opinion, char_opinion],
                                  batch_size=batch_size,
                                  capacity=3 * batch_size,
                                  num_threads=1)

    return (words, chars)
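A minimal usage sketch (TF 1.x queue API), assuming a hypothetical 'train.tfrecords' file and the same Hp hyper-parameter object used above:

filename_queue = tf.train.string_input_producer(['train.tfrecords'])
words, chars = read_and_decode(filename_queue, batch_size=16)

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    w, c = sess.run([words, chars])  # shapes: (16, 6, Hp.w_maxlen), (16, 6, Hp.c_maxlen)
    coord.request_stop()
    coord.join(threads)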
Example 2
def chamfer_loss(A, B):
    # A, B: batched point clouds of shape (batch, num_points, dims).
    # The final reduction requires A and B to have the same batch size and number of points.
    r = tf.reduce_sum(A * A, 2)
    r = tf.reshape(r, [int(r.shape[0]), int(r.shape[1]), 1])
    r2 = tf.reduce_sum(B * B, 2)
    r2 = tf.reshape(r2, [int(r2.shape[0]), int(r2.shape[1]), 1])
    # Pairwise squared distances: |a|^2 - 2 a.b + |b|^2
    t = (r - 2 * tf.matmul(A, tf.transpose(B, perm=[0, 2, 1]))
         + tf.transpose(r2, perm=[0, 2, 1]))
    # Average the nearest-neighbour distances in both directions.
    return tf.reduce_mean((tf.reduce_min(t, axis=1) + tf.reduce_min(t, axis=2)) / 2.0)
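A quick hedged smoke-test sketch: the shapes below are made up, and both point clouds share the same number of points, as the final reduction above requires:

A = tf.random_normal([4, 100, 3])
B = tf.random_normal([4, 100, 3])
loss = chamfer_loss(A, B)  # scalar tensor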
Example 3
def sg_softmax(x, opt):
    dim = x.get_shape().ndims
    assert dim >= 2, 'softmax needs rank 2 at least'
    if dim == 2:
        return tf.nn.softmax(x, name=opt.name)
    else:
        ori_shape = [-1] + x.get_shape().as_list()[1:]
        new_shape = (-1, ori_shape[-1])
        return tf.reshape(
            tf.nn.softmax(tf.reshape(x, new_shape), name=opt.name), ori_shape)
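A hedged chained-usage sketch (sugartensor registers sg_* ops as tensor methods, as in calls elsewhere in this listing); the 3-D input is made up:

x = tf.ones([8, 10, 5])
y = x.sg_softmax()  # softmax over the last axis, shape (8, 10, 5) preserved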
Example 4
        def ln(xx, opt):
            if opt.ln:
                # calc layer mean, variance for final axis
                mean, variance = tf.nn.moments(xx, axes=[len(xx.get_shape()) - 1])

                # apply layer normalization ( explicit broadcasting needed )
                broadcast_shape = [-1] + [1] * (len(xx.get_shape()) - 1)
                xx = (xx - tf.reshape(mean, broadcast_shape)) \
                         / tf.reshape(tf.sqrt(variance + tf.sg_eps), broadcast_shape)

                # apply learned scale and shift (gamma and beta come from the enclosing scope)
                return gamma * xx + beta
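For context, a hedged sketch of what this closure expects from its enclosing scope; the names and size below are assumptions, not the original definitions. gamma and beta are learned scale/shift parameters and tf.sg_eps is sugartensor's small epsilon constant:

dim = 128  # hypothetical size of xx's last axis
gamma = tf.get_variable('gamma', [dim], initializer=tf.ones_initializer())
beta = tf.get_variable('beta', [dim], initializer=tf.zeros_initializer())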
Example 5
def rnn_classify(x, num_classes, is_test=False):
    with tf.sg_context(name='rnn_classify'):
        fw_cell = tf.nn.rnn_cell.MultiRNNCell(
            [lstm_cell(is_test) for _ in range(num_blocks)],
            state_is_tuple=True)
        bw_cell = tf.nn.rnn_cell.MultiRNNCell(
            [lstm_cell(is_test) for _ in range(num_blocks)],
            state_is_tuple=True)

        words_used_in_sent = tf.sign(
            tf.reduce_max(tf.abs(x), reduction_indices=2))
        length = tf.cast(
            tf.reduce_sum(words_used_in_sent, reduction_indices=1), tf.int32)

        outputs, _ = tf.nn.bidirectional_dynamic_rnn(fw_cell,
                                                     bw_cell,
                                                     x,
                                                     dtype=tf.float32,
                                                     sequence_length=length)
        output = tf.concat(outputs, 2).sg_reshape(shape=[-1, 2 * latent_dim])

        prediction = output.sg_dense(dim=num_classes, name='dense')
        res = tf.reshape(prediction,
                         [x.get_shape().as_list()[0], -1, num_classes])

    return res
Example 6
        def symbols_to_logits_fn(ids, dec_state):
            dec = []
            dec_c, dec_h = [], []
            # (batch x beam_size x decoded_seq)
            ids = tf.reshape(ids, [Hp.batch_size, beam_size, -1])
            print("dec_state ", dec_state[0].get_shape().as_list())
            for ind in range(beam_size):
                with tf.variable_scope('dec_lstm', reuse=ind > 0
                                       or reuse_vars):
                    w_input = ids[:, ind, -1].sg_lookup(emb=emb_word)
                    dec_state0 = tf.contrib.rnn.LSTMStateTuple(
                        c=dec_state.c[:, ind, :], h=dec_state.h[:, ind, :])
                    dec_out, dec_state_i = dec_cell(w_input, dec_state0)
                    dec_out = tf.expand_dims(dec_out, 1)
                dec_i = dec_out.sg_conv1d_gpus(size=1,
                                               dim=Hp.word_vs,
                                               name="out_conv",
                                               act="linear",
                                               dev=dev,
                                               reuse=ind > 0 or reuse_vars)

                dec.append(tf.squeeze(dec_i, 1))
                dec_c.append(dec_state_i[0])
                dec_h.append(dec_state_i[1])
            return tf.stack(dec, 1), tf.contrib.rnn.LSTMStateTuple(
                tf.stack(dec_c, 1), tf.stack(dec_h, 1))
Example 7
def read_from_tfrecords(tfFileDirName,
                        varNames,
                        sizeBatch,
                        shape,
                        shuffle=True,
                        rs=888):
    """
    example:
    read_from_tfrecords('./Data/digits.tfrecords',['x','y'],32,[[28,28],[1]])

    return: a list of tensors (this function should only be used inside TensorFlow code).
    """
    varNames = list(varNames)
    tmp = [np.asarray(i, dtype=np.int32) for i in shape]
    shape = []
    for i in tmp:
        if np.sum(np.shape(i)) > 1:
            shape.append(list(i))
        else:
            shape.append([int(i)])
    print(shape)
    filename_queue = tf.train.string_input_producer([tfFileDirName])
    print(filename_queue)
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    tmpFeatures = {}
    for ii in varNames:
        tmpFeatures[ii] = tf.FixedLenFeature([], tf.string)
    tmpFeatures = tf.parse_single_example(serialized_example,
                                          features=tmpFeatures)
    tmpVar = []
    for i in range(len(varNames)):
        ii = varNames[i]
        tmp = tf.decode_raw(tmpFeatures[ii], tf.float32)
        tmp = tf.reshape(tmp, shape=list(shape[i]))
        tmpVar.append(tmp)
    print(tmpVar)

    # Trouble caused here
    if shuffle:
        tmpBatch = tf.train.shuffle_batch(tmpVar,
                                          sizeBatch,
                                          capacity=sizeBatch * 128,
                                          min_after_dequeue=sizeBatch * 32,
                                          name=None,
                                          seed=rs)
    else:
        tmpBatch = tf.train.batch(tmpVar,
                                  sizeBatch,
                                  capacity=sizeBatch * 128,
                                  name=None)

    print(tmpBatch)
    return tmpBatch
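A hedged sketch of driving the queue (TF 1.x), reusing the example call from the docstring above:

x, y = read_from_tfrecords('./Data/digits.tfrecords', ['x', 'y'], 32, [[28, 28], [1]])
with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    xs, ys = sess.run([x, y])  # numpy arrays of shape (32, 28, 28) and (32, 1)
    coord.request_stop()
    coord.join(threads)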
Example 9
def sg_reshape(tensor, opt):
    r"""Reshapes a tensor.
    
    See `tf.reshape()` in tensorflow.

    Args:
      tensor: A `Tensor` (automatically given by chain).
      opt:
        shape: A tuple/list of integers. The destination shape.
        name: If provided, it replaces the current tensor's name.

    Returns:
      A `Tensor`.
    """
    assert opt.shape is not None, 'shape is mandatory.'
    return tf.reshape(tensor, opt.shape, name=opt.name)
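A hedged chained-usage sketch, matching the sugartensor call style used elsewhere in this listing (e.g. .sg_reshape(shape=[-1, 2 * latent_dim])); the input tensor is made up:

x = tf.ones([8, 4, 4])
y = x.sg_reshape(shape=[-1, 16])  # shape: (8, 16)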
Example 10
def sg_flatten(tensor, opt):
    r"""Reshapes a tensor to `batch_size x -1`.
    
    See `tf.reshape()` in tensorflow.

    Args:
      tensor: A `Tensor` (automatically given by chain).
      opt:
        name: If provided, it replaces current tensor's name.

    Returns:
      A 2-D tensor.

    """
    dim = np.prod(tensor.get_shape().as_list()[1:])
    return tf.reshape(tensor, [-1, dim], name=opt.name)
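A hedged chained-usage sketch; the 4-D feature map below is an assumption:

feat = tf.ones([32, 7, 7, 64])
flat = feat.sg_flatten()  # shape: (32, 3136)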
Example 11
def sg_hinge(tensor, opt):
    assert opt.target is not None, 'target is mandatory.'

    # default margin
    opt += tf.sg_opt(margin=1)

    # reshape target
    shape = tensor.get_shape().as_list()
    broadcast_shape = [-1] + [1] * (len(shape) - 2) + [shape[-1]]
    target = tf.cast(tf.reshape(opt.target, broadcast_shape), tf.sg_floatx)

    # hinge loss
    out = tf.identity(tf.maximum(opt.margin - target * tensor, 0), 'hinge')

    # add summary
    tf.sg_summary_loss(out)

    return out
Example 12
def sg_hinge(tensor, opt):
    r"""Returns hinge loss between `tensor` and `target`.
    
    Args:
      tensor: A `Tensor`.
      opt:
        target: A `Tensor`. Labels.
        margin: An int. Maximum margin. Default is 1.
        name: A `string`. A name to display in the tensor board web UI.
      
    Returns:
      A `Tensor`.
    
    For example,
    
    ```
    tensor = [[30, 10, 40], [13, 30, 42]]
    target = [[0, 0, 1], [0, 1, 0]]
    tensor.sg_hinge(target=target, one_hot=True) =>     [[ 1.  1.  0.]
                                                         [ 1.  0.  1.]]
    ```
    """
    assert opt.target is not None, 'target is mandatory.'

    # default margin
    opt += tf.sg_opt(margin=1)

    # reshape target
    shape = tensor.get_shape().as_list()
    broadcast_shape = [-1] + [1] * (len(shape) - 2) + [shape[-1]]
    target = tf.cast(tf.reshape(opt.target, broadcast_shape), tf.sg_floatx)

    # hinge loss
    out = tf.identity(tf.maximum(opt.margin - target * tensor, 0), 'hinge')

    # add summary
    tf.sg_summary_loss(out, name=opt.name)

    return out
Example 13
pngName = 'sample.png'

png = tf.read_file(pngName)
#png.thumbnail(size, Image.ANTIALIAS)
#png = tf.resize(png1, (14,14))
myPNG = tf.image.decode_png(png)
#myPNG = tf.image.resize_images(myPNG1,size)
#myPNG = tf.image.resize_bicubic(myPNG1, (14,14))
#zy = tf.expand_dims(myPNG,3)
#x = tf.reshape(myPNG, [1,28,28,1])
#y = tf.to_float(myPNG, name ='ToFloat')
#x = tf.reshape(y, [1,28,28,1])

y = convert_image('sample.png')
x = tf.reshape(y, [1, 28, 28, 1])

print("y:")
print(x)
# corrupted image
x_small = tf.image.resize_bicubic(x, (14, 14))
x_bicubic = tf.image.resize_bicubic(x_small, (28, 28)).sg_squeeze()
x_nearest = tf.image.resize_images(
    x_small, (28, 28), tf.image.ResizeMethod.NEAREST_NEIGHBOR).sg_squeeze()

#
# create generator
#
# I've used ESPCN scheme
# http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Shi_Real-Time_Single_Image_CVPR_2016_paper.pdf
#
Example 14
def sg_reshape(tensor, opt):
    assert opt.shape is not None, 'shape is mandatory.'
    return tf.reshape(tensor, opt.shape, name=opt.name)
Example 15
# PARAMS #
num_layers = 3
activation = tf.nn.relu
batchsize = 8
w_dropout = 0.4
path_to_train_data = "__data/stage1_train"
path_to_test_data = "__data/stage1_test"

img_dat = util.crawl_path(path_to_train_data)

graph = tf.Graph()
with graph.as_default():
    #batchsize*x_max*y_max*rgb(3)
    features = tf.placeholder(tf.float32, [None, None, None, 3])
    labels = tf.placeholder(tf.int32, [None, None])
    input_layer = tf.reshape(features, [-1, 2048, 2048, 3])
    training = tf.placeholder(tf.bool)

    conv1 = tf.layers.conv2d(inputs=input_layer,
                             filters=32,
                             kernel_size=[11, 11],
                             padding="same",
                             activation=tf.nn.relu)
    print('conv1', conv1.get_shape())
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[4, 4], strides=2)
    print('pool1', pool1.get_shape())
    conv2 = tf.layers.conv2d(inputs=pool1,  # stack on pool1 rather than re-reading the raw input
                             filters=64,
                             kernel_size=[5, 5],
                             padding="same",
                             activation=tf.nn.relu)
Example 16
            inputs = tf.placeholder(tf.float32, [None, None, num_mfccs * 2])
            targets = tf.sparse_placeholder(tf.int32)

        # Stacking rnn cells
        with tf.name_scope('cellStack'):
            stack = tf.contrib.rnn.MultiRNNCell(
                [lstm_cell() for _ in range(num_layers)], state_is_tuple=True)
            outputs, _ = tf.nn.dynamic_rnn(stack,
                                           inputs,
                                           seq_len,
                                           dtype=tf.float32)
        shape = tf.shape(inputs)
        batch_s, TF_max_timesteps = shape[0], shape[1]

        with tf.name_scope('outputs'):
            outputs = tf.reshape(outputs, [-1, num_hidden])

        with tf.name_scope('weights'):
            W = tf.Variable(tf.truncated_normal([num_hidden, num_classes],
                                                stddev=0.1),
                            name='weights')
        with tf.name_scope('biases'):
            b = tf.get_variable("b",
                                initializer=tf.constant(0.,
                                                        shape=[num_classes]))

        with tf.name_scope('logits'):
            logits = tf.matmul(outputs, W) + b
            logits = tf.reshape(logits, [batch_s, -1, num_classes])
            logits = tf.transpose(logits, (1, 0, 2), name="out/logits")
        with tf.name_scope('loss'):
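            # The snippet ends here; given the sparse int32 targets and the
            # time-major logits built above, a CTC loss is the likely
            # continuation (hedged guess, not the original code):
            loss = tf.nn.ctc_loss(targets, logits, seq_len)
            cost = tf.reduce_mean(loss)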
Example 17
    def __init__(self,
                 x,
                 y,
                 num_batch,
                 vocab_size,
                 emb_dim,
                 hidden_dim,
                 max_ep=240,
                 infer_shape=(1, 1),
                 mode="train"):

        self.num_batch = num_batch
        self.emb_dim = emb_dim
        self.hidden_dim = hidden_dim
        self.vocab_size = vocab_size
        self.max_len_infer = 512
        self.max_ep = max_ep

        # reuse = len([t for t in tf.global_variables() if t.name.startswith('gen')]) > 0
        reuse = (mode == 'infer')

        if mode == "train":
            self.x = x
            self.y = y
        elif mode == "infer":
            self.x = tf.placeholder(tf.int32, shape=infer_shape)
            self.y = tf.placeholder(tf.int32, shape=infer_shape)

        with tf.variable_scope("gen_embs", reuse=reuse):
            self.emb_x = tf.get_variable("emb_x",
                                         [self.vocab_size, self.emb_dim])
            self.emb_y = tf.get_variable("emb_y",
                                         [self.vocab_size, self.emb_dim])
            self.X = tf.nn.embedding_lookup(self.emb_x, self.x)
            self.Y = tf.nn.embedding_lookup(self.emb_y, self.y)

        with tf.sg_context(name='gen', reuse=reuse):
            #     self.emb_x = tf.Variable(tf.random_uniform([self.vocab_size, self.emb_dim], 0.0, 1.0), name="emb_x")
            #     self.emb_y = tf.Variable(tf.random_uniform([self.vocab_size, self.emb_dim], 0.0, 1.0), name="emb_y")
            # self.emb_x = tf.sg_emb(name='emb_x', voca_size=self.vocab_size, dim=self.emb_dim)  # (68,16)
            # self.emb_y = tf.sg_emb(name='emb_y', voca_size=self.vocab_size, dim=self.emb_dim)  # (68,16)
            # self.X = self.x.sg_lookup(emb=self.emb_x)  # (8,63,16)
            # self.Y = self.y.sg_lookup(emb=self.emb_y)  # (8,63,16)

            if mode == "train":
                self.lstm_layer = self.X.sg_lstm(in_dim=self.emb_dim,
                                                 dim=self.vocab_size,
                                                 name="lstm")  # (8, 63, 68)
                self.test = self.lstm_layer.sg_softmax(name="testtt")

                print "mazum??"
                print self.test

            elif mode == "infer":
                self.lstm_layer = self.X.sg_lstm(in_dim=self.emb_dim,
                                                 dim=self.vocab_size,
                                                 last_only=True,
                                                 name="lstm")
                self.log_prob = tf.log(self.lstm_layer)

                # next_token: select by distribution probability, preds: select by argmax

                self.multinormed = tf.multinomial(self.log_prob, 1)
                self.next_token = tf.cast(
                    tf.reshape(tf.multinomial(self.log_prob, 1),
                               [1, infer_shape[0]]), tf.int32)
                self.preds = self.lstm_layer.sg_argmax()

        if mode == "train":
            self.loss = self.lstm_layer.sg_ce(target=self.y)
            self.istarget = tf.not_equal(self.y, 0).sg_float()

            self.reduced_loss = (self.loss.sg_sum()) / (
                self.istarget.sg_sum() + 0.0000001)
            tf.sg_summary_loss(self.reduced_loss, "reduced_loss")
Example 18
def cnn(features, labels):
    input_layer = tf.reshape(features, [-1, 28, 3])
Example 19
def sg_flatten(tensor, opt):
    dim = np.prod(tensor.get_shape().as_list()[1:])
    return tf.reshape(tensor, [-1, dim], name=opt.name)
Example 20
def main(argv):
    # set log level to debug
    tf.sg_verbosity(10)

    #
    # hyper parameters
    #

    size = 160, 147
    batch_size = 1  # batch size

    #
    # inputs
    #

    pngName = argv

    png = tf.read_file(pngName)
    #png.thumbnail(size, Image.ANTIALIAS)
    #png = tf.resize(png1, (14,14))
    myPNG = tf.image.decode_png(png)

    y = convert_image(pngName)
    x = tf.reshape(y, [1, 28, 28, 1])

    print(x)
    # corrupted image
    x_small = tf.image.resize_bicubic(x, (14, 14))
    x_bicubic = tf.image.resize_bicubic(x_small, (28, 28)).sg_squeeze()
    x_nearest = tf.image.resize_images(
        x_small, (28, 28),
        tf.image.ResizeMethod.NEAREST_NEIGHBOR).sg_squeeze()

    #
    # create generator
    #
    # I've used ESPCN scheme
    # http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Shi_Real-Time_Single_Image_CVPR_2016_paper.pdf
    #

    # generator network
    with tf.sg_context(name='generator', act='relu', bn=True):
        gen = (x.sg_conv(dim=32).sg_conv().sg_conv(
            dim=4, act='sigmoid',
            bn=False).sg_periodic_shuffle(factor=2).sg_squeeze())

    #
    # run generator
    #
    fileName = "inPython.png"
    fig_name = "genImages/" + fileName
    #fig_name2 = 'asset/train/sample2.png'

    print("start")
    with tf.Session() as sess:
        with tf.sg_queue_context(sess):

            tf.sg_init(sess)

            # restore parameters
            saver = tf.train.Saver()
            #saver.restore(sess, tf.train.latest_checkpoint('asset/train/ckpt'))
            saver.restore(
                sess, tf.train.latest_checkpoint('python/asset/train/ckpt'))

            # run generator
            gt, low, bicubic, sr = sess.run(
                [x.sg_squeeze(), x_nearest, x_bicubic, gen])

            # plot result
            #sr[0].thumbnail(size, Image.ANTIALIAS)
            plt.figure(figsize=(1, 1))
            #plt.set_axis_off()
            hr = plt.imshow(sr[0], 'gray')
            plt.axis('tight')
            plt.axis('off')
            #ax.set_axis_off()
    #ax.thumbnail(size, Image.ANTIALIAS)

    #plt.savefig(fig_name,bbox_inches='tight',pad_inches=0,dpi=600)
        plt.savefig(fig_name, dpi=600)
        #tf.sg_info('Sample image saved to "%s"' % fig_name)
        plt.close()

        ##print (type (sr[0]))
        ##sourceImage = Image.fromarray(np.uint8(sr[0])

    print("done")