Example 1
def create_bin():
  # Pack the training split of CelebA RGBA images into one binary file so the
  # training loop can read fixed-size records instead of decoding image files.
  args = get_args()
  image_paths = glob('{}/data/CelebA_Segment/*.*'.format(args.root_dir))
  train_image_paths, val_image_paths, _ = utils.make_paths(
      image_paths, os.path.join(args.root_dir, 'data', 'params', args.name, 'image'), args.root_dir)
  # with open('data/CelebA_RGBA.bin', 'wb') as img_f, open('data/CelebA_Landmark.bin', 'wb') as lm_f:
  with open('data/CelebA_RGBA.bin', 'wb') as img_f:
    for p in tqdm(train_image_paths):
      try:
        image = utils.load_image(p, 224, True, False)
        img_f.write(image)
        # lm_f.write(landmark)
        img_f.flush()
        # lm_f.flush()
      except Exception as e:
        print(p)
        print(e)
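The `utils.make_paths` helper itself does not appear on this page. A minimal sketch of the three-way split Examples 1 and 2 rely on, assuming a deterministic ordering and illustrative fractions (`make_paths_sketch`, `val_frac`, and `test_frac` are hypothetical names; the real helper also receives a params directory, unused in this sketch):

def make_paths_sketch(image_paths, param_dir, root_dir,
                      val_frac=0.05, test_frac=0.05):
    # Illustrative only: sort for a stable order, then slice off the
    # validation and test tails. param_dir and root_dir are unused here.
    paths = sorted(image_paths)
    n = len(paths)
    n_val, n_test = int(n * val_frac), int(n * test_frac)
    train = paths[:n - n_val - n_test]
    val = paths[n - n_val - n_test:n - n_test]
    test = paths[n - n_test:]
    return train, val, test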
Example 2
def main():
    args = get_args()
    logger = utils.init_logger()
    logger.info(args)

    np.random.seed(args.seed)
    if not os.path.isdir(args.root_dir):
        # args.root_dir = '.'
        args.root_dir = '/mnt/d/Codes/gcn_face'
    logger.info("Loading data from %s", args.root_dir)

    if args.suffix is None:
        args.suffix = args.model
        if args.gan:
            args.suffix = args.suffix + '_gan'

    refer_mesh = read_obj(
        os.path.join(args.root_dir, 'data', 'bfm09_face_template.obj'))
    # refer_meshes = utils.get_mesh_list(args.name)

    image_paths = glob('{}/data/CelebA_Segment/*.*'.format(args.root_dir))
    _, val_image_paths, test_image_paths = utils.make_paths(
        image_paths,
        os.path.join(args.root_dir, 'data', 'params', args.name, 'image'),
        args.root_dir)

    if args.mode == 'train':
        img_file = open(os.path.join(args.root_dir, 'data', 'CelebA_RGBA.bin'),
                        'rb')
        # lm_file = open(os.path.join(args.root_dir, 'data', 'CelebA_Landmark.bin'), 'rb')
    else:
        img_file = None

    gpu_config = tf.ConfigProto(allow_soft_placement=True,
                                log_device_placement=False)
    # pylint: disable=no-member
    gpu_config.gpu_options.allow_growth = True
    with tf.Graph().as_default() as graph, tf.device('/gpu:0'), tf.Session(
            config=gpu_config) as sess:
        if args.model == 'normal':
            model = NormalModel(args, sess, graph, refer_mesh, image_paths,
                                img_file)
        elif args.model == 'resnet':
            model = ResnetModel(args, sess, graph, refer_mesh, image_paths,
                                img_file)
        else:
            raise ValueError('unsupported model: {}'.format(args.model))

        if args.mode == 'train':
            # if not os.path.exists(os.path.join('checkpoints', args.name)):
            #   os.makedirs(os.path.join('checkpoints', args.name))
            model.fit()
            img_file.close()
            # lm_file.close()
        else:
            if args.input and not os.path.isdir(args.input):
                args.input = None
            if args.input is not None:
                # input_dir = os.path.join('data', 'test', args.input)
                input_dir = args.input
                test_image_paths = [
                    os.path.join(input_dir, x)
                    for x in sorted(os.listdir(input_dir))
                ]
                if args.output is None:
                    test_dir = os.path.join('results', args.input)
            else:
                if args.output is None:
                    test_dir = model.samp_dir + '_test'
            if args.output is not None:
                test_dir = args.output
            if not os.path.isdir(test_dir):
                os.makedirs(test_dir)

            predictor_path = os.path.join(
                'data', 'shape_predictor_68_face_landmarks.dat')
            cropper = utils.ImageCropper(predictor_path, model.img_size)

            test_image = utils.load_images(test_image_paths, model.img_size,
                                           False, False, cropper)

            from face_segment import Segment
            segmenter = Segment()
            alphas = segmenter.segment(test_image)

            test_rgba = np.concatenate([test_image, alphas[..., np.newaxis]],
                                       axis=-1)

            string, results = model.evaluate(test_rgba)
            logger.info(string)

            for i, path in enumerate(test_image_paths):
                model.save_sample(results, i, test_rgba, None, test_dir, i,
                                  False)
                logger.info('Saving results for %s', path)
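`utils.ImageCropper` is also not shown on this page. A hypothetical sketch consistent with how it is used above, assuming dlib and the 68-landmark predictor file loaded in main() (the class and method names are illustrative, not the project's actual API):

import cv2
import dlib

class ImageCropperSketch:
    def __init__(self, predictor_path, img_size):
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor(predictor_path)
        self.img_size = img_size

    def crop(self, image):
        # Fall back to a plain resize when no face is detected.
        dets = self.detector(image, 1)
        if not dets:
            return cv2.resize(image, (self.img_size, self.img_size))
        shape = self.predictor(image, dets[0])
        xs = [shape.part(i).x for i in range(68)]
        ys = [shape.part(i).y for i in range(68)]
        # Square box around the landmarks, clipped to the image bounds.
        cx, cy = (min(xs) + max(xs)) // 2, (min(ys) + max(ys)) // 2
        half = max(max(xs) - min(xs), max(ys) - min(ys)) // 2
        top, bot = max(cy - half, 0), min(cy + half, image.shape[0])
        left, right = max(cx - half, 0), min(cx + half, image.shape[1])
        return cv2.resize(image[top:bot, left:right],
                          (self.img_size, self.img_size))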
Example 3
 def test_path2rootdir(self):
     path_dicts = make_paths(read_yaml(self.yamlfile))
     for path in path_dicts:
         rfile, rdir = path2rootdir(path['path'])
         self.assertTrue(not rfile.IsZombie())
         self.assertTrue(rdir)
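A sketch of what `path2rootdir` appears to do, given the assertions above: split a string such as 'file.root/some/dir' into an open TFile and the TDirectory inside it. The splitting rule is an assumption; only the (file, directory) return contract is pinned down by the test.

from ROOT import TFile  # PyROOT

def path2rootdir_sketch(path):
    # Split 'file.root/some/dir' into the file name and the in-file path.
    fname, _, dirpath = path.partition('.root')
    rfile = TFile.Open(fname + '.root', 'read')
    rdir = rfile.GetDirectory(dirpath.lstrip('/')) if dirpath else rfile
    return rfile, rdir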
Example 4
 def test_make_paths(self):
     path_dicts = make_paths(read_yaml(self.yamlfile))
     # check one generic metadata field
     self.assertTrue(path_dicts[0]['title'])
     for path in path_dicts:
         self.assertTrue(path['path'])
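Together these tests pin down the contract of this project's `make_paths`: it flattens the YAML document into a list of dicts, each carrying a 'path' plus generic metadata such as 'title'. A hypothetical sketch under an assumed YAML layout (the 'paths' key is an assumption):

def make_paths_yaml_sketch(config):
    # config: dict loaded by read_yaml, assumed to hold a list under 'paths'.
    return [
        {'title': entry.get('title', ''), 'path': entry['path']}
        for entry in config.get('paths', [])
    ]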
Example 5
  def __init__(self, args, sess, graph, refer_mesh, image_paths, img_file):
    self.sess = sess
    self.graph = graph
    mesh_shape = list(refer_mesh['vertices'].shape)
    self.gan = args.gan
    self.wide = args.wide
    self.root_dir = args.root_dir
    self.img_file = img_file
    self.stage = args.stage
    if args.mode == 'test':
      self.restore = True
    else:
      self.restore = args.restore

    self.laplacians, self.downsamp_trans, self.upsamp_trans, self.pool_size = utils.init_sampling(
        refer_mesh, os.path.join(args.root_dir, 'data', 'params', args.name), args.name)
    logger.info("Transform Matrices and Graph Laplacians Generated.")
    self.refer_meshes = utils.get_mesh_list(args.name)

    self.bfm = utils.BFM_model(self.root_dir, 'data/bfm2009_face.mat')
    # color = np.ones_like(refer_mesh['vertices'], dtype=np.uint8)
    # color[self.bfm.skin_index] = 0
    # write_obj('test.obj', refer_mesh['vertices'], refer_mesh['faces'], color)

    self.buffer_size = args.buffer_size
    self.workers = args.workers
    self.num_filter = [16, 16, 16, 32]
    self.c_k = 6
    self.cam_z = 34
    self.z_dim = args.nz
    self.num_vert = mesh_shape[0]
    self.vert_dim = 6
    self.drop_rate = args.drop_rate
    self.batch_size = args.batch_size
    self.num_epochs = args.epoch
    self.img_size = args.img_size
    self.learning_rate = args.lr
    self.adv_lambda = args.adv_lambda
    if args.suffix is None:
      self.dir_name = args.name
    else:
      self.dir_name = args.name + '_' + args.suffix
    self.brelu = self.b1relu
    self.pool = self.poolwT
    self.unpool = self.poolwT

    self.dilation_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                     (5, 5)).astype(np.float32)[..., np.newaxis]
    self.erosion_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                    (9, 9)).astype(np.float32)[..., np.newaxis]
    # lm_3d_idx = [
    #     int(x)
    #     for x in open('data/face_landmarks.txt', 'r').readlines()
    #     if len(x.strip()) > 1
    # ]
    # # self.lm_3d_idx = lm_3d_idx[8:9] + lm_3d_idx[17:]
    # self.lm_3d_idx = lm_3d_idx[17:]
    self.lm_3d_idx = self.bfm.landmark[17:]

    train_image_paths, self.val_image_paths, self.test_image_paths = utils.make_paths(
        image_paths, os.path.join(self.root_dir, 'data', 'params', args.name, 'image'),
        self.root_dir)
    self.train_image_paths = np.array(train_image_paths, dtype='object')

    num_train = len(self.train_image_paths)
    logger.info('Number of train data: %d', num_train)
    self.num_batches = num_train // self.batch_size
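    # args.eval semantics: 0 evaluates once per epoch; a fraction in (0, 1)
    # evaluates that fraction of an epoch apart; values >= 1 are taken as a
    # fixed number of batches between evaluations.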
    if args.eval == 0:
      self.eval_frequency = self.num_batches
    elif args.eval < 1:
      self.eval_frequency = int(self.num_batches * args.eval)
    else:
      self.eval_frequency = int(args.eval)
    logger.info('Evaluation frequency: %d', self.eval_frequency)

    self.vert_mean = np.reshape(self.bfm.shapeMU, [-1, 3])

    self.decay_steps = num_train // args.batch_size

    self.regularizers = []
    self.regularization = 5e-4

    self.ckpt_dir = os.path.join('checkpoints', self.dir_name)
    self.summ_dir = os.path.join('summaries', self.dir_name)
    self.samp_dir = os.path.join('samples', self.dir_name)

    self.build_graph()
Example 6
from utils import read_yaml, make_paths, path2rootdir
from pprint import pprint
d = read_yaml('dsk_train_out.yaml')
allp = make_paths(d)
pprint(allp)

from ROOT import TFile

dirs = []
for path in allp:
    rfile, rdir = path2rootdir(path['path'])
    dirs.append(rdir)

print(dirs)
# for d in dirs:
#     d.ls()

# t = path2rootdir('dsk_train_out.root')
Example 7
#!/usr/bin/env python3

# Import packages
import argparse
import os

from tqdm import tqdm
import torch
import torch.nn.functional as F

from net import ResBase, ResClassifier
from data_loader import DatasetGeneratorMultimodal, MyTransformer
from utils import make_paths, add_base_args, default_paths, map_to_device

# Prepare default dataset paths
data_root_source, data_root_target, split_source_train, split_source_test, split_target = make_paths()

# Parse arguments
parser = argparse.ArgumentParser()
add_base_args(parser)
parser.add_argument('--output_path', default='eval_result')
args = parser.parse_args()
default_paths(args)
args.data_root = args.data_root_target
args.test_file = args.test_file_target

device = torch.device('cuda:{}'.format(args.gpu))

# Data loader (center crop, no random flip)
test_transform = MyTransformer([int((256 - 224) / 2), int((256 - 224) / 2)], False)
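# The crop offset is int((256 - 224) / 2) = 16 px per side: the transform
# presumably center-crops a 256x256 resize down to the 224x224 network input.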