def __generate_and_correct_box_file(size_lpi, size_cpi, font):
    font_name = font.replace("\n", "")
    file_name = font_name + '_' + str(size_lpi) + '_' + str(size_cpi)
    file_full_name = file_name + '/' + file_name
    util.create_dir(file_name)
    __generate_ps(size_lpi, size_cpi, font_name)
    __generate_tif(file_full_name)
    __generate_box(file_full_name)
    __correct_box(PATH_ALPH, file_full_name + '.box', file_full_name + '_correct.box')
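All of the snippets on this page rely on a small `create_dir` helper (called as `util.create_dir` here, or imported from `utils.util` in the later examples). Its body is not shown; a minimal sketch, assuming it simply wraps `os.makedirs`:

import os

def create_dir(path):
    # Assumed behavior: create the directory (and any missing parents),
    # doing nothing if it already exists.
    os.makedirs(path, exist_ok=True)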
Example 2
    def save_detections(self, video_meta):
        """ Saves detections to disk
        """

        # Save detections
        output_dir = os.path.join('./output', video_meta['filename'])
        create_dir(output_dir)

        # Save all_recognitions dict
        logger.info('Saving recognitions per class')

        # Interpolate and save recognitions
        self.interpolate_recognition_dict(output_dir, video_meta)

        # Save all_detections dict
        logger.info('Saving all detections in one file')

        output_path = os.path.join(output_dir, 'detect_00_all.csv')

        all_detections_df = pd.DataFrame(data=self.all_detections)
        all_detections_df.to_csv(output_path, index=False)

        # Save per_class_detections dict
        logger.info('Saving detections per class')

        # Fill list with all args to run through save_dicts_for_classes()
        arg_list = []
        for class_id in self.per_class_detections:
            output_path = os.path.join(
                output_dir, 'detect_{}_{}.csv'.format(class_id,
                                                      self.id2cat[class_id]))
            arg_list.append(((class_id, video_meta), output_path))

        with Pool(processes=self.config['settings']['num_workers']) as pool:
            pool.starmap(self.save_dicts_for_classes, arg_list)

        # Clear dicts for next video file
        self.prepare_dicts()
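The per-class CSVs above are written in parallel: `Pool.starmap` unpacks each tuple in `arg_list` into the arguments of `save_dicts_for_classes`. A standalone sketch of the same pattern, with a hypothetical `write_csv` worker and illustrative data (neither is from the original class):

import os
from multiprocessing import Pool

import pandas as pd

def write_csv(rows, output_path):
    # Hypothetical worker: dump one class's detections to its own CSV file.
    pd.DataFrame(data=rows).to_csv(output_path, index=False)

def save_per_class(per_class_detections, output_dir, num_workers=4):
    arg_list = [
        (rows, os.path.join(output_dir, 'detect_{}.csv'.format(class_id)))
        for class_id, rows in per_class_detections.items()
    ]
    # starmap unpacks each (rows, output_path) tuple into write_csv(rows, output_path).
    with Pool(processes=num_workers) as pool:
        pool.starmap(write_csv, arg_list)

In the original method the worker is a bound method, so each argument tuple carries identifiers ((class_id, video_meta), output_path) instead of the row data itself.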
Example 3
def __copy_font_files(path):
    home = util.get_home_dir_name()
    util.create_dir(home + PATH_FONTS)
    util.copy_files(path, home + PATH_FONTS)
Example 4
    def save_result(self):
        """Save 3DMM coef and image"""
        create_dir(os.path.join(self.opt.data_dir, 'render'))
        create_dir(os.path.join(self.opt.data_dir, 'overlay'))
        create_dir(os.path.join(self.opt.data_dir, 'alpha'))
        create_dir(os.path.join(self.opt.data_dir, 'beta'))
        create_dir(os.path.join(self.opt.data_dir, 'delta'))
        create_dir(os.path.join(self.opt.data_dir, 'gamma'))
        create_dir(os.path.join(self.opt.data_dir, 'rotation'))
        create_dir(os.path.join(self.opt.data_dir, 'translation'))

        for i in range(self.opt.batch_size):
            utils.save_image(self.render[i], os.path.join(self.opt.data_dir, 'render', self.image_name[i]))
            utils.save_image(self.overlay[i], os.path.join(self.opt.data_dir, 'overlay', self.image_name[i]))

            # alpha (identity) and beta (texture) are saved from index 0 for every image,
            # apparently because they are shared across the batch; the per-frame
            # coefficients below are indexed by i.
            torch.save(self.alpha[0].detach().cpu(), os.path.join(self.opt.data_dir, 'alpha', self.image_name[i][:-4]+'.pt'))
            torch.save(self.beta[0].detach().cpu(), os.path.join(self.opt.data_dir, 'beta', self.image_name[i][:-4]+'.pt'))
            torch.save(self.delta[i].detach().cpu(), os.path.join(self.opt.data_dir, 'delta', self.image_name[i][:-4]+'.pt'))
            torch.save(self.gamma[i].detach().cpu(), os.path.join(self.opt.data_dir, 'gamma', self.image_name[i][:-4]+'.pt'))
            torch.save(self.rotation[i].detach().cpu(), os.path.join(self.opt.data_dir, 'rotation', self.image_name[i][:-4]+'.pt'))
            torch.save(self.translation[i].detach().cpu(), os.path.join(self.opt.data_dir, 'translation', self.image_name[i][:-4]+'.pt'))
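These per-frame `.pt` files are read back in the next examples via `load_coef` from `utils.util`. Its implementation is not shown here; a plausible sketch, assuming it loads a folder's tensors in sorted filename (frame) order and truncates to the requested number of frames:

import os
import torch

def load_coef(folder, num=None):
    # Assumed behavior: load every saved .pt tensor in filename order,
    # optionally keeping only the first `num` entries.
    files = sorted(f for f in os.listdir(folder) if f.endswith('.pt'))
    if num is not None:
        files = files[:num]
    return [torch.load(os.path.join(folder, f), map_location='cpu') for f in files]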
Example 5
import os
from tqdm import tqdm
from torchvision import utils

from renderer.face_model import FaceModel
from options.options import Options
from utils.util import create_dir, load_coef

if __name__ == '__main__':
    opt = Options().parse_args()

    create_dir(os.path.join(opt.src_dir, 'reenact'))

    alpha_list = load_coef(os.path.join(opt.tgt_dir, 'alpha'), opt.test_num)
    beta_list = load_coef(os.path.join(opt.tgt_dir, 'beta'), opt.test_num)
    delta_list = load_coef(os.path.join(opt.src_dir, 'reenact_delta'),
                           opt.test_num)
    gamma_list = load_coef(os.path.join(opt.tgt_dir, 'gamma'), opt.test_num)
    angle_list = load_coef(os.path.join(opt.tgt_dir, 'rotation'), opt.test_num)
    translation_list = load_coef(os.path.join(opt.tgt_dir, 'translation'),
                                 opt.test_num)

    face_model = FaceModel(data_path=opt.matlab_data_path, batch_size=1)

    for i in tqdm(range(len(delta_list))):
        alpha = alpha_list[i + opt.offset].unsqueeze(0).cuda()
        beta = beta_list[i + opt.offset].unsqueeze(0).cuda()
        delta = delta_list[i].unsqueeze(0).cuda()
        gamma = gamma_list[i + opt.offset].unsqueeze(0).cuda()
        rotation = angle_list[i + opt.offset].unsqueeze(0).cuda()
        translation = translation_list[i + opt.offset].unsqueeze(0).cuda()
Example 6
import os

from tqdm import tqdm

from options.options import Options
from datasets import create_dataset
from models import audio_expression_model
from utils.util import create_dir

if __name__ == '__main__':
    opt = Options().parse_args()  # get training options
    # hard-code some parameters for test
    opt.isTrain = False
    opt.num_threads = 0  # test code only supports num_threads = 0
    opt.batch_size = 1  # test code only supports batch_size = 1
    opt.serial_batches = True  # disable data shuffling; comment this line if results on randomly chosen images are needed.

    create_dir(os.path.join(opt.data_dir, 'reenact_delta'))

    dataset = create_dataset(opt)

    model = audio_expression_model.AudioExpressionModel(opt)
    model.load_network()
    model.eval()

    for i, data in enumerate(tqdm(dataset)):
        model.set_input(data)  # unpack data from data loader
        model.test()  # run inference
        model.save_delta()

        if i >= opt.test_num - 1:
            break
Example 7
import os
import cv2
import numpy as np
from tqdm import tqdm

from options.options import Options
from utils.util import create_dir, get_file_list


if __name__ == '__main__':
    opt = Options().parse_args()

    create_dir(os.path.join(opt.src_dir, 'comp'))

    foregrounds = get_file_list(os.path.join(opt.src_dir, 'images'), suffix='fake')
    backgrounds = get_file_list(os.path.join(opt.tgt_dir, 'crop'))
    masks = get_file_list(os.path.join(opt.tgt_dir, 'mask'))

    for i in tqdm(range(len(foregrounds))):
        fg = cv2.imread(foregrounds[i])
        bg = cv2.imread(backgrounds[i + opt.offset])

        # Feather the mask edges: shrink the foreground region, blur the boundary,
        # and normalize to [0, 1] so it can serve as a soft alpha matte.
        mask = cv2.imread(masks[i + opt.offset])
        mask = cv2.erode(mask, np.ones((3, 3), np.uint8), iterations=9)
        mask = cv2.GaussianBlur(mask, (5, 5), cv2.BORDER_DEFAULT) / 255.0

        comp = mask * fg + (1 - mask) * bg

        cv2.imwrite(os.path.join(opt.src_dir, 'comp', '%05d.png' % (i+1)), comp)

        if i >= opt.test_num:
            break