Example No. 1
def face_parse(image, label):

    conv = dict(kernel=3, padding='SAME')
    deconv = dict(kernel=3, padding='SAME', bias=True)
    subpixel = dict(kernel=3, factor=2, padding='SAME')

    with tf.default_args(conv=conv, deconv=deconv, subpixel=subpixel):
        net = image
        # encoder: conv-bn-relu blocks, each stage followed by 2x max pooling
        net = net.conv(16).bn().relu().conv(16).bn().relu().maxpool()
        net = net.conv(32).bn().relu().conv(32).bn().relu().maxpool()
        net = net.conv(64).bn().relu().conv(64).bn().relu().maxpool()
        net = net.conv(64).bn().relu().conv(64).bn().relu().maxpool()
        net = net.conv(64).bn().relu()
        # decoder: deconv-bn-relu blocks with x2 subpixel upsampling, ending in 11 class logits
        net = net.deconv(64).bn().relu()
        net = net.subpixel().deconv(64).bn().relu().deconv(64).bn().relu()
        net = net.subpixel().deconv(64).bn().relu().deconv(64).bn().relu()
        net = net.subpixel().deconv(32).bn().relu().deconv(32).bn().relu()
        net = net.subpixel().deconv(16).bn().relu().deconv(11, bias=True)

        prob = net.softmax()
        summary_parse(prob)

        losses = tf.softmax_cross_entropy(net, label, name='losses')
        loss = losses.mean()

    return tf.dic(losses=losses, loss=loss, outputs=[prob])
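For reference, the chained .conv(n).bn().relu() and .subpixel() calls above appear to stand for a 3x3 SAME convolution + batch norm + ReLU and a x2 pixel-shuffle upsample. A rough plain TensorFlow 1.x sketch of those two building blocks (the helper names and exact argument choices below are assumptions, not part of sflow):

import tensorflow as tf  # TF 1.x

def conv_bn_relu(x, filters, training=True):
    # 3x3 SAME conv -> batch norm -> ReLU, the assumed meaning of .conv(n).bn().relu()
    x = tf.layers.conv2d(x, filters, kernel_size=3, padding='SAME', use_bias=False)
    x = tf.layers.batch_normalization(x, training=training)
    return tf.nn.relu(x)

def subpixel(x, factor=2):
    # x2 subpixel (pixel-shuffle) upsampling: move channel blocks into spatial positions
    return tf.depth_to_space(x, factor)

One encoder stage above would then be two conv_bn_relu calls followed by tf.layers.max_pooling2d(x, 2, 2).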
Example No. 2
def dataset_train(batch, shape=None, folder=None):
    import os
    from scipy.io import loadmat

    # load .mat file
    folder = folder or _folder()
    fpath = os.path.join(folder, 'train_32x32.mat')
    data = loadmat(fpath)
    x = data['X']  # images, shape (32, 32, 3, N)
    y = data['y']  # labels in 1..10, where 10 stands for digit 0

    x = x.transpose((3, 0, 1, 2))
    y[y == 10] = 0
    n = x.shape[0]  # total number of samples

    # convert to tensors
    x = tf.convert_to_tensor(x)  # uint8 (73257, 32, 32, 3)
    y = tf.convert_to_tensor(y)  # uint8 (73257, 1)

    # randomly select a batch of indices (with replacement)
    i = tf.random_uniform((batch, ), minval=0, maxval=n, dtype=tf.int32)
    img = tf.gather(x, i).to_float() / 255.
    label = tf.gather(y, i).to_int32()

    if shape is not None:
        img = tf.image.resize_images(img, shape)

    return tf.dic(image=img, label=label)
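The sampling trick above, uniform random indices plus tf.gather, needs nothing beyond stock TensorFlow 1.x. A minimal sketch, assuming x and y are already the transposed SVHN arrays from above:

import tensorflow as tf  # TF 1.x

def random_batch(x, y, batch):
    # Draw `batch` indices uniformly (with replacement) and gather those samples.
    n = tf.shape(x)[0]
    i = tf.random_uniform((batch,), minval=0, maxval=n, dtype=tf.int32)
    img = tf.cast(tf.gather(x, i), tf.float32) / 255.
    label = tf.cast(tf.gather(y, i), tf.int32)
    return img, label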
Example No. 3
def trainset(batch, size=(256, 256), threads=8, sizedown=None, removebg=False):
    import sflow.tf as tf

    # qtrain : queue fed from the training file list
    with tf.name_scope('helen_trainset'):
        # fid = tf.feed.read_line(_trainfile(), shuffle=True)
        line = tf.feed.read_line(_trainfile(), shuffle=True)
        _, fid = tf.decode_csv(line, [[0], ['']], field_delim=',')
        trimed = tf.string_split([fid], delimiter=' ')  # remove space
        # now trimed.values[0] has fid
        fid = trimed.values[0]  # e.g. '100032540_1'

        qtrain = feed_img_label(fid, size, shuffle=True, threads=threads)

        image, label = qtrain.dequeue_many(batch)
        image = image.to_float() / 255.
        image = tf.identity(image, name='image')
        label = label.to_float() / 255.

        if sizedown is not None:
            image = image.sizedown(sizedown)
            label = label.sizedown(sizedown)
        label = normalize_label(label)

        if removebg is True:
            # remove background image
            bg = 1. - label[:, :, :, :1]
            image = image * bg

    return tf.dic(image=image, label=label)
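The file-id parsing above uses standard TensorFlow string ops: tf.decode_csv with record_defaults [[0], ['']] reads an integer index plus a string field, and tf.string_split strips the leading space. A stand-alone sketch with a made-up sample line:

import tensorflow as tf  # TF 1.x

line = tf.constant('12, 100032540_1')                  # hypothetical "<index>, <fid>" line
_, fid = tf.decode_csv(line, [[0], ['']], field_delim=',')
fid = tf.string_split([fid], delimiter=' ').values[0]  # drops the leading space

with tf.Session() as sess:
    print(sess.run(fid))                               # b'100032540_1'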
Example No. 4
def testset(batch, size=(256, 256), threads=8, sizedown=None, removebg=False):
    import sflow.tf as tf

    with tf.name_scope('helen_testset'):
        # qtest : from testing.txt, comma-separated format
        line = tf.feed.read_line(_testfile())
        _, fid = tf.decode_csv(line, [[0], ['']], field_delim=',')
        trimed = tf.string_split([fid], delimiter=' ')  # remove space
        # now trimed.values[0] has fid
        fid = trimed.values[0]  # e.g. '100032540_1'
        qtest = feed_img_label(fid,
                               size,
                               shuffle=False,
                               rotate=False,
                               threads=threads)

        data = qtest.dequeue_many(batch)
        image = data[0].to_float() / 255.
        label = data[1].to_float() / 255.

        if sizedown is not None:
            image = image.sizedown(sizedown)
            label = label.sizedown(sizedown)
        label = normalize_label(label)

        if removebg is True:
            # remove background image
            bg = 1. - label[:, :, :, :1]
            image = image * bg

    return tf.dic(image=image, label=label)
Example No. 5
def dataset_test(batch, shuffle=True, size=(28, 28), folder=None):

    if not shuffle:
        raise NotImplementedError

    _, (img, label) = _load_data(size, folder)
    with tf.name_scope('dataset_mnist_test'):
        img = tf.constant(img)
        label = tf.constant(label)
        ind = tf.random_uniform((batch, ),
                                minval=0,
                                maxval=img.dims[0],
                                dtype=tf.int32)

        x = tf.gather(img, ind)
        y = tf.gather(label, ind)

        if x.ndim == 3:
            x = x.expand_dims(3)
        if x.dtype != tf.float32:
            x = x.to_float() / 255.

        y = y.to_int32()

    return tf.dic(image=x, label=y)
Example No. 6
def trainset_old(batch,
                 size=(256, 256),
                 threads=8,
                 sizedown=None,
                 removebg=False):
    import sflow.tf as tf

    # qtrain : queue fed from the training file list
    with tf.name_scope('helen_trainset'):
        fid = tf.feed.read_line(_trainfile(), shuffle=True)
        qtrain = feed_img_label(fid, size, shuffle=True, threads=threads)

        image, label = qtrain.dequeue_many(batch)
        image = image.to_float() / 255.
        image = tf.identity(image, name='image')
        label = label.to_float() / 255.

        if sizedown is not None:
            image = image.sizedown(sizedown)
            label = label.sizedown(sizedown)
        label = normalize_label(label)

        if removebg is True:
            # remove background image
            bg = 1. - label[:, :, :, :1]
            image = image * bg

    return tf.dic(image=image, label=label, batch=batch)
Example No. 7
def dataset_emotion(batch, threads=8, shuffle=None, capacity=10, folder=None):
    """

    :param batch:
    :param threads:
    :param shuffle:
    :param capacity:
    :param folder:
    :return: dict(image, label)
    """

    with tf.name_scope('fer2013.emotion'):
        # face image 48x48x1
        img = tf.placeholder(tf.float32, shape=(48, 48, 1), name='image')
        # emotion label
        label = tf.placeholder(tf.int32, shape=(), name='emotion')
        placeholders = [img, label]
        q = tf.feed.gen_producer(placeholders,
                                 gen_random_face(folder),
                                 capacity=capacity,
                                 threads=threads,
                                 shuffle=shuffle)

        d = q.dequeue_many(batch)

    return tf.dic(image=d[0], label=d[1])
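tf.feed.gen_producer above pairs placeholders with a Python generator and drains the result with dequeue_many. Without the sflow helper, the same pattern is the classic TF 1.x placeholder-fed queue; in this sketch the generator is a random stand-in for gen_random_face(folder), so treat it purely as an assumed equivalent:

import threading
import numpy as np
import tensorflow as tf  # TF 1.x

img_ph = tf.placeholder(tf.float32, shape=(48, 48, 1))
label_ph = tf.placeholder(tf.int32, shape=())

q = tf.FIFOQueue(capacity=10, dtypes=[tf.float32, tf.int32],
                 shapes=[(48, 48, 1), ()])
enqueue_op = q.enqueue([img_ph, label_ph])
image_batch, label_batch = q.dequeue_many(8)

def feeder(sess):
    # Stand-in generator: random images and labels instead of gen_random_face(folder).
    while True:
        face = np.random.rand(48, 48, 1).astype(np.float32)
        emotion = np.random.randint(0, 7)  # FER2013 has 7 emotion classes
        sess.run(enqueue_op, feed_dict={img_ph: face, label_ph: emotion})

with tf.Session() as sess:
    threading.Thread(target=feeder, args=(sess,), daemon=True).start()
    images, labels = sess.run([image_batch, label_batch])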
Example No. 8
def attr_dataset(batch, attr, value, size=None, crop=None,
                 threads=8, shuffle=None,
                 partition='train', folder=None, fliplr=False):
    """
    get data set given attribute data
    :param batch:
    :param attr:
    :param value:
    :param size:
    :param crop:
    :param threads:
    :param shuffle:
    :param partition:
    :param folder:
    :param fliplr:
    :return:
    """
    folder = folder or datasetfolder()
    shuffle = shuffle or (partition == 'train')
    attr = attr.lower()
    files = _pair_list_file(attr, folder=folder, partition=partition)
    if not (os.path.exists(files[0]) and os.path.exists(files[1])):
        _prepare_attr_pair_list(attr, folder=folder, partition=partition)

    if value is False:
        filelist = files[0]
    else:
        filelist = files[1]

    imgfolder = os.path.join(folder, 'Img/img_align_celeba/')
    imgfolder = tf.constant(imgfolder)

    with tf.name_scope(None, 'celeba.attr.{0}.{1}'.format(attr, value)):
        line = tf.feed.read_line(filelist)
        line = line.print('line', first_n=10)
        fields = tf.decode_csv(line, [['']] + [[-1]]*40, field_delim=' ')
        fname = fields[0]
        fname = imgfolder + fname
        img = tf.feed.read_image(fname, channels=3)

        attrs = tf.stack(fields[1:])
        attrs = tf.equal(attrs, 1).to_float()

        capacity = 512
        shapes = [(218, 178, 3), (40,)]
        q = tf.feed.queue_producer([img, attrs], capacity, shapes=shapes,
                                   threads=threads, shuffle=shuffle)
        img, attrs = q.dequeue_many(batch)

        img = img.to_float()/255.
        crop = crop or (178, 178)
        if crop is not None:
            img = tf.img.crop_center(crop, img)
        if size is not None:
            img = tf.image.resize_images(img, size)
        if fliplr:
            img = tf.img.rand_fliplr(img, p=0.5)

    return tf.dic(image=img, label=attrs, batch=batch)
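The record_defaults argument [['']] + [[-1]]*40 above tells tf.decode_csv to read one string (the file name) followed by the 40 CelebA attribute flags as integers; tf.equal(attrs, 1) then turns the ±1 coding into a 0/1 float vector. The center crop and random flip also have plain tf.image counterparts; a hedged sketch (tf.img.crop_center and tf.img.rand_fliplr are sflow helpers, the calls below are assumed equivalents for a single HWC image):

import tensorflow as tf  # TF 1.x

def preprocess(img, crop=(178, 178), size=(128, 128)):
    # Center-crop a single HWC image, resize it, and randomly flip left-right.
    img = tf.image.resize_image_with_crop_or_pad(img, crop[0], crop[1])
    img = tf.image.resize_images(img, size)
    return tf.image.random_flip_left_right(img)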
Example No. 9
def dataset_train(batch, **kwargs):
    """
    sketch collection to face photo collection
    :param batch: batch size
    :param kwargs: forwarded to dataset_trainA / dataset_trainB
    :return: dict(A, B, batch)
    """
    a = dataset_trainA(batch, **kwargs)
    b = dataset_trainB(batch, **kwargs)
    return tf.dic(A=a, B=b, batch=batch)
Example No. 10
def attribute_dataset(batch,
                      size=None,
                      crop=None,
                      threads=8,
                      shuffle=None,
                      partition='train',
                      folder=None,
                      fliplr=False):

    folder = folder or datasetfolder()
    # assert size is None  # not implemented size

    attrfile = 'list_attr_{0}.txt'.format(partition)
    shuffle = shuffle or (partition == 'train')
    trainfile = os.path.join(folder, 'Anno', attrfile)
    imgfolder = os.path.join(folder, 'Img/img_align_celeba/')
    # imgfolder = os.path.join(folder, 'process/resized/')

    imgfolder = tf.constant(imgfolder)

    with tf.name_scope('celeba_attribute'):
        line = tf.feed.read_line(trainfile)

        line = line.print('line', first_n=10)

        fields = tf.decode_csv(line, [['']] + [[-1]] * 40, field_delim=' ')
        fname = fields[0]
        fname = imgfolder + fname
        img = tf.feed.read_image(fname, channels=3)

        attrs = tf.stack(fields[1:])
        attrs = tf.equal(attrs, 1).to_float()

        capacity = 512
        shapes = [(218, 178, 3), (40, )]
        q = tf.feed.queue_producer([img, attrs],
                                   capacity,
                                   shapes=shapes,
                                   threads=threads,
                                   shuffle=shuffle)
        img, attrs = q.dequeue_many(batch)
        # img = tf.img.rand_fliplr(img, p=0.5)

        img = img.to_float() / 255.
        if size is not None:
            img = tf.image.resize_images(img, size)
        if crop is not None:
            img = tf.img.crop_center(crop, img)
        if fliplr:
            img = tf.img.rand_fliplr(img, p=0.5)

    return tf.dic(image=img, label=attrs, batch=batch)
Example No. 11
def attr_pair(batch, attr, size=None, crop=None, threads=8,
              shuffle=None, partition='train',
              folder=None, fliplr=False):
    size = size or (128, 128)

    data0 = attr_dataset(batch, attr, value=False, size=size, crop=crop, threads=threads,
                         shuffle=shuffle, partition=partition,
                         folder=folder, fliplr=fliplr)

    data1 = attr_dataset(batch, attr, value=True, size=size, crop=crop, threads=threads,
                         shuffle=shuffle, partition=partition,
                         folder=folder, fliplr=fliplr)

    return tf.dic(x0=data0.image, x1=data1.image)
Example No. 12
def face_parse2(image, label):

    conv = dict(kernel=3, padding='SAME')
    deconv = dict(kernel=3, padding='SAME', bias=True)
    # subpixel = dict(kernel=3, factor=2, padding='SAME')
    maxpool_where = dict(kernel=2)
    unpool_where = dict(kernel=2)

    with tf.default_args(conv=conv,
                         deconv=deconv,
                         maxpool_where=maxpool_where,
                         unpool_where=unpool_where):
        net = image
        wheres = []
        net, where = net.conv(16).bn().relu().conv(16).bn().relu().maxpool_where()
        wheres.append(where)
        net, where = net.conv(32).bn().relu().conv(32).bn().relu().maxpool_where()
        wheres.append(where)
        net, where = net.conv(64).bn().relu().conv(64).bn().relu().maxpool_where()
        wheres.append(where)
        net, where = net.conv(64).bn().relu().conv(64).bn().relu().maxpool_where()
        wheres.append(where)
        net = net.conv(64).bn().relu()
        net = net.deconv(64).bn().relu()
        net = net.unpool_where(wheres.pop()).deconv(64).bn().relu().deconv(64).bn().relu()
        net = net.unpool_where(wheres.pop()).deconv(64).bn().relu().deconv(32).bn().relu()
        net = net.unpool_where(wheres.pop()).deconv(32).bn().relu().deconv(16).bn().relu()
        net = net.unpool_where(wheres.pop()).deconv(16).bn().relu().deconv(11, bias=True)

        net = tf.summary_activation(net, name='logits')
        prob = net.softmax()

        summary_parse(prob)

        # losses = tf.nn.sigmoid_cross_entropy_with_logits(net, label)
        losses = tf.softmax_cross_entropy(net, label, name='losses')
        loss = losses.mean()

    return tf.dic(loss=loss, logits=net, label=label, image=image)
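maxpool_where / unpool_where above return and consume pooling "switches" (the argmax locations), SegNet-style. In stock TensorFlow 1.x the same idea can be sketched with tf.nn.max_pool_with_argmax plus a scatter back to the recorded positions; this is an assumed equivalent, not the sflow implementation, and it needs a TF build that supports include_batch_in_index:

import tensorflow as tf  # TF 1.x (>= 1.14 for include_batch_in_index)

def maxpool_with_switches(x):
    # 2x2 max pooling that also records where each maximum came from.
    return tf.nn.max_pool_with_argmax(
        x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
        padding='SAME', include_batch_in_index=True)

def unpool_with_switches(pooled, argmax, out_shape):
    # Scatter pooled values back to their recorded positions; zeros elsewhere.
    # out_shape is the static (batch, height, width, channels) before pooling.
    flat_size = out_shape[0] * out_shape[1] * out_shape[2] * out_shape[3]
    flat = tf.scatter_nd(tf.reshape(argmax, [-1, 1]),
                         tf.reshape(pooled, [-1]),
                         shape=[flat_size])
    return tf.reshape(flat, out_shape)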
Example No. 13
def attribute(batch, size=None, threads=8):
    """
    Return CelebA attribute datasets for the train and valid partitions.

    :param batch: batch size
    :param size: optional output size for resizing
    :param threads: number of producer threads
    :return: dict(train, valid)
    """

    return tf.dic(
        train=attribute_dataset(batch,
                                size=size,
                                threads=threads,
                                partition='train'),
        valid=attribute_dataset(batch,
                                size=size,
                                threads=threads,
                                partition='valid'),
    )
Example No. 14
def dataset_train(batch, size=(28, 28), folder=None):

    (img, label), _ = _load_data(size, folder)
    with tf.name_scope('dataset_mnist'):
        # train.shape == (60000, 28, 28)

        img = tf.constant(img)
        label = tf.constant(label)
        ind = tf.random_uniform((batch, ),
                                minval=0,
                                maxval=img.dims[0],
                                dtype=tf.int32)

        x = tf.gather(img, ind)
        y = tf.gather(label, ind)

        if x.ndim == 3:
            x = x.expand_dims(3)
        if x.dtype != tf.float32:
            x = x.to_float() / 255.

        y = y.to_int32()

    return tf.dic(image=x, label=y)
Example No. 15
def dataset_train(batch, **kwargs):
    a = dataset_pair(batch, 'train', shuffle=True, **kwargs)
    b = dataset_pair(batch, 'train', shuffle=True, **kwargs)

    return tf.dic(A=a.A, B=b.B, batch=batch)
Example No. 16
def dataset_train(batch, **kwargs):
    a = dataset_trainA(batch, **kwargs)
    b = dataset_trainB(batch, **kwargs)
    return tf.dic(A=a, B=b, batch=batch)
Example No. 17
def dataset_pair_val(batch, **kwargs):
    data = dataset_reader_AB(batch, _folder(), 'val', **kwargs).split(2, axis=2)

    return tf.dic(A=data[0], B=data[1], batch=batch)
Example No. 18
def dataset_valid(batch, **kwargs):
    a = dataset_valA(batch, **kwargs)
    b = dataset_valB(batch, **kwargs)
    return tf.dic(A=a, B=b, batch=batch)
Example No. 19
def dataset_test(batch, **kwargs):
    a = dataset_testA(batch, **kwargs)
    b = dataset_testB(batch, **kwargs)
    return tf.dic(A=a, B=b, batch=batch)
Example No. 20
def dataset_pair(batch, partition, **kwargs):
    shape = (256, 512, 3)
    data = dataset_reader(batch, _folder(), partition, shape=shape, **kwargs)
    a, b = tf.split(data, 2, axis=2)
    return tf.dic(A=a, B=b, batch=batch)
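In the pix2pix-style reader above, each record is a single 256x512 image holding the A and B halves side by side, so tf.split along the width axis (axis=2 of an NHWC batch) separates the pair. A tiny illustration with a dummy batch:

import tensorflow as tf  # TF 1.x

data = tf.zeros((4, 256, 512, 3))   # dummy NHWC batch of A|B composites
a, b = tf.split(data, 2, axis=2)    # two (4, 256, 256, 3) halves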