def testset(batch, size=(256, 256), threads=8, sizedown=None, removebg=False):
    import sflow.tf as tf

    with tf.name_scope('helen_testset'):
        # qtest: lines from testing.txt, comma separated
        line = tf.feed.read_line(_testfile())
        _, fid = tf.decode_csv(line, [[0], ['']], field_delim=',')
        trimmed = tf.string_split([fid], delimiter=' ')  # remove space
        # now trimmed.values[0] holds the fid
        fid = trimmed.values[0]  # ex) '100032540_1'

        qtest = feed_img_label(fid, size, shuffle=False, rotate=False, threads=threads)
        data = qtest.dequeue_many(batch)

        image = data[0].to_float() / 255.
        label = data[1].to_float() / 255.

        if sizedown is not None:
            image = image.sizedown(sizedown)
            label = label.sizedown(sizedown)
        label = normalize_label(label)

        if removebg is True:
            # remove the background region from the image
            bg = 1. - label[:, :, :, :1]
            image = image * bg

        return tf.dic(image=image, label=label)
def dataset(batch, size=None, crop=None, folder=None, capacity=512, threads=8,
            shuffle=None, partition='train', fliplr=False):
    folder = folder or _asset_folder()
    shuffle = shuffle or (partition == 'train')

    attrfile = 'list_attr_{0}.txt'.format(partition)
    trainfile = os.path.join(folder, 'Anno', attrfile)
    imgfolder = os.path.join(folder, 'Img/img_align_celeba/')
    imgfolder = tf.constant(imgfolder)

    with tf.name_scope('celeba'):
        line = tf.feed.read_line(trainfile)
        line = line.print('line', first_n=10)
        fields = tf.decode_csv(line, [['']] + [[-1]] * 40, field_delim=' ')
        fname = fields[0]
        fname = imgfolder + fname
        img = tf.feed.read_image(fname, channels=3)

        shapes = [(218, 178, 3)]
        q = tf.feed.queue_producer([img], capacity, shapes=shapes,
                                   threads=threads, shuffle=shuffle)
        img = q.dequeue_many(batch)

        # face images only; the attribute columns are read but not returned
        img = img.to_float() / 255.
        img = tf.img.crop_center(crop or (178, 178), img)
        img = tf.image.resize_images(img, size or (256, 256))
        if fliplr:
            img = tf.img.rand_fliplr(img, p=0.5)

        return img
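# Usage sketch for dataset() (illustrative only, not part of this module).
# It assumes sflow.tf re-exports the standard TF 1.x Session/queue-runner API and
# that the feed queues above are driven by queue runners; the sflow feed module
# may instead provide its own start helper.
#
#     img = dataset(batch=16, size=(128, 128), fliplr=True)  # [16, 128, 128, 3] in [0, 1]
#     with tf.Session() as sess:
#         coord = tf.train.Coordinator()
#         runners = tf.train.start_queue_runners(sess=sess, coord=coord)
#         batch_images = sess.run(img)
#         coord.request_stop()
#         coord.join(runners)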
def trainset_old(batch, size=(256, 256), threads=8, sizedown=None, removebg=False):
    import sflow.tf as tf

    # qtrain : from train
    with tf.name_scope('helen_trainset'):
        fid = tf.feed.read_line(_trainfile(), shuffle=True)

        qtrain = feed_img_label(fid, size, shuffle=True, threads=threads)
        image, label = qtrain.dequeue_many(batch)

        image = image.to_float() / 255.
        image = tf.identity(image, name='image')
        label = label.to_float() / 255.

        if sizedown is not None:
            image = image.sizedown(sizedown)
            label = label.sizedown(sizedown)
        label = normalize_label(label)

        if removebg is True:
            # remove background image
            bg = 1. - label[:, :, :, :1]
            image = image * bg

        return tf.dic(image=image, label=label, batch=batch)
def trainset(batch, size=(256, 256), threads=8, sizedown=None, removebg=False):
    import sflow.tf as tf

    # qtrain: queue fed from the training list
    with tf.name_scope('helen_trainset'):
        # fid = tf.feed.read_line(_trainfile(), shuffle=True)
        line = tf.feed.read_line(_trainfile(), shuffle=True)
        _, fid = tf.decode_csv(line, [[0], ['']], field_delim=',')
        trimmed = tf.string_split([fid], delimiter=' ')  # remove space
        # now trimmed.values[0] holds the fid
        fid = trimmed.values[0]  # ex) '100032540_1'

        qtrain = feed_img_label(fid, size, shuffle=True, threads=threads)
        image, label = qtrain.dequeue_many(batch)

        image = image.to_float() / 255.
        image = tf.identity(image, name='image')
        label = label.to_float() / 255.

        if sizedown is not None:
            image = image.sizedown(sizedown)
            label = label.sizedown(sizedown)
        label = normalize_label(label)

        if removebg is True:
            # remove the background region from the image
            bg = 1. - label[:, :, :, :1]
            image = image * bg

        return tf.dic(image=image, label=label)
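# Usage sketch for trainset()/testset() (illustrative only; the returned object is
# the tf.dic used above, and the queue threads are assumed to be started by the
# surrounding sflow/TF feed machinery before evaluating the tensors):
#
#     d = trainset(batch=8, size=(256, 256), removebg=True)
#     # d['image']: [8, 256, 256, 3] floats in [0, 1] with background zeroed
#     # d['label']: segmentation channels after normalize_label()
#     t = testset(batch=8, size=(256, 256))  # same keys, unshuffled test queue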
def dataset_test(batch, shuffle=True, size=(28, 28), folder=None):
    if not shuffle:
        raise NotImplementedError

    _, (img, label) = _load_data(size, folder)

    with tf.name_scope('dataset_mnist_test'):
        img = tf.constant(img)
        label = tf.constant(label)

        ind = tf.random_uniform((batch,), minval=0, maxval=img.dims[0], dtype=tf.int32)
        x = tf.gather(img, ind)
        y = tf.gather(label, ind)

        if x.ndim == 3:
            x = x.expand_dims(3)
        if x.dtype != tf.float32:
            x = x.to_float() / 255.
        y = y.to_int32()

        return tf.dic(image=x, label=y)
def dataset_emotion(batch, threads=8, shuffle=None, capacity=10, folder=None):
    """
    :param batch: batch size
    :param threads: number of producer threads
    :param shuffle: whether to shuffle the queue
    :param capacity: queue capacity
    :param folder: dataset folder (optional)
    :return: dict(image, label)
    """
    with tf.name_scope('fer2013.emotion'):
        # face image 48x48x1
        img = tf.placeholder(tf.float32, shape=(48, 48, 1), name='image')
        # emotion label
        label = tf.placeholder(tf.int32, shape=(), name='emotion')

        placeholders = [img, label]
        q = tf.feed.gen_producer(placeholders, gen_random_face(folder),
                                 capacity=capacity, threads=threads, shuffle=shuffle)
        d = q.dequeue_many(batch)

        return tf.dic(image=d[0], label=d[1])
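# Note on gen_random_face(folder): gen_producer feeds the two placeholders above,
# so the generator is expected to yield (image, label) pairs matching their shapes,
# i.e. a (48, 48, 1) float array and a scalar int emotion id (FER2013 has 7 classes).
# A hypothetical minimal generator with that contract (illustrative only, not the
# repo's actual loader):
#
#     def _fake_face_gen():
#         import numpy as np
#         while True:
#             yield np.random.rand(48, 48, 1).astype('float32'), np.random.randint(7)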
def attr_dataset(batch, attr, value, size=None, crop=None, threads=8, shuffle=None,
                 partition='train', folder=None, fliplr=False):
    """
    Get a dataset containing only images with the given value of one attribute.

    :param batch: batch size
    :param attr: attribute name (lower-cased internally)
    :param value: True for images having the attribute, False for images without it
    :param size: output image size, or None to keep the cropped size
    :param crop: center-crop size, defaults to (178, 178)
    :param threads: number of producer threads
    :param shuffle: whether to shuffle; defaults to True for the 'train' partition
    :param partition: dataset partition, e.g. 'train'
    :param folder: dataset root folder
    :param fliplr: apply random horizontal flips if True
    :return: dict(image, label, batch)
    """
    folder = folder or datasetfolder()
    shuffle = shuffle or (partition == 'train')

    attr = attr.lower()
    files = _pair_list_file(attr, folder=folder, partition=partition)
    if not (os.path.exists(files[0]) and os.path.exists(files[1])):
        _prepare_attr_pair_list(attr, folder=folder, partition=partition)

    if value is False:
        filelist = files[0]
    else:
        filelist = files[1]

    imgfolder = os.path.join(folder, 'Img/img_align_celeba/')
    imgfolder = tf.constant(imgfolder)

    with tf.name_scope(None, 'celeba.attr.{0}.{1}'.format(attr, value)):
        line = tf.feed.read_line(filelist)
        line = line.print('line', first_n=10)
        fields = tf.decode_csv(line, [['']] + [[-1]] * 40, field_delim=' ')
        fname = fields[0]
        fname = imgfolder + fname
        img = tf.feed.read_image(fname, channels=3)

        attrs = tf.stack(fields[1:])
        attrs = tf.equal(attrs, 1).to_float()

        capacity = 512
        shapes = [(218, 178, 3), (40,)]
        q = tf.feed.queue_producer([img, attrs], capacity, shapes=shapes,
                                   threads=threads, shuffle=shuffle)
        img, attrs = q.dequeue_many(batch)

        img = img.to_float() / 255.
        img = tf.img.crop_center(crop or (178, 178), img)
        if size is not None:
            img = tf.image.resize_images(img, size)
        if fliplr:
            img = tf.img.rand_fliplr(img, p=0.5)

        return tf.dic(image=img, label=attrs, batch=batch)
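# Usage sketch for attr_dataset() (illustrative only; 'Male' is a real CelebA
# attribute name, lower-cased here, and the positive/negative file lists are built
# on first use by _prepare_attr_pair_list):
#
#     pos = attr_dataset(batch=16, attr='male', value=True, size=(128, 128))
#     neg = attr_dataset(batch=16, attr='male', value=False, size=(128, 128))
#     # pos['image'] / neg['image']: [16, 128, 128, 3] floats in [0, 1]
#     # pos['label'] / neg['label']: [16, 40] attribute indicators in {0, 1}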
def attribute_dataset(batch, size=None, crop=None, threads=8, shuffle=None,
                      partition='train', folder=None, fliplr=False):
    folder = folder or datasetfolder()
    # assert size is None  # not implemented size
    attrfile = 'list_attr_{0}.txt'.format(partition)
    shuffle = shuffle or (partition == 'train')

    trainfile = os.path.join(folder, 'Anno', attrfile)
    imgfolder = os.path.join(folder, 'Img/img_align_celeba/')
    # imgfolder = os.path.join(folder, 'process/resized/')
    imgfolder = tf.constant(imgfolder)

    with tf.name_scope('celeba_attribute'):
        line = tf.feed.read_line(trainfile)
        line = line.print('line', first_n=10)
        fields = tf.decode_csv(line, [['']] + [[-1]] * 40, field_delim=' ')
        fname = fields[0]
        fname = imgfolder + fname
        img = tf.feed.read_image(fname, channels=3)

        attrs = tf.stack(fields[1:])
        attrs = tf.equal(attrs, 1).to_float()

        capacity = 512
        shapes = [(218, 178, 3), (40,)]
        q = tf.feed.queue_producer([img, attrs], capacity, shapes=shapes,
                                   threads=threads, shuffle=shuffle)
        img, attrs = q.dequeue_many(batch)

        # img = tf.img.rand_fliplr(img, p=0.5)
        img = img.to_float() / 255.
        if size is not None:
            img = tf.image.resize_images(img, size)
        if crop is not None:
            img = tf.img.crop_center(crop, img)
        if fliplr:
            img = tf.img.rand_fliplr(img, p=0.5)

        return tf.dic(image=img, label=attrs, batch=batch)
def dataset_train(batch, size=(28, 28), folder=None):
    (img, label), _ = _load_data(size, folder)

    with tf.name_scope('dataset_mnist'):
        # train.shape == (60000, 28, 28)
        img = tf.constant(img)
        label = tf.constant(label)

        ind = tf.random_uniform((batch,), minval=0, maxval=img.dims[0], dtype=tf.int32)
        x = tf.gather(img, ind)
        y = tf.gather(label, ind)

        if x.ndim == 3:
            x = x.expand_dims(3)
        if x.dtype != tf.float32:
            x = x.to_float() / 255.
        y = y.to_int32()

        return tf.dic(image=x, label=y)
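# Usage sketch for dataset_train() (illustrative only; unlike the queue-based
# loaders above, this graph only uses constants and random gathers, so a plain
# TF 1.x session run suffices; tf.dic is assumed to behave like a dict here):
#
#     d = dataset_train(batch=32)
#     with tf.Session() as sess:
#         images, labels = sess.run([d['image'], d['label']])
#         # images: (32, 28, 28, 1) float32 in [0, 1], labels: (32,) int32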