Code Example #1
def batch_resize(in_dir, out_dir, ratio=0.5):
    ensure_dir(out_dir)

    for ext in (".jpg", ".png"):
        for in_f, out_f in files(in_dir=in_dir,
                                 out_dir=out_dir,
                                 in_ext=ext,
                                 out_ext=ext):
            resize(in_f, out_f, ratio)
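Every batch_* example in this collection leans on the same files() helper, whose definition is never shown. A minimal sketch of the pairing variant, assuming it yields (input_path, output_path) tuples with the directory and extension swapped; the behaviour is inferred from the call sites, not confirmed by the source:

import os

def files(in_dir, out_dir, in_ext, out_ext):
    # Hypothetical reconstruction: yield (input_path, output_path) pairs
    # for every file in in_dir with the given extension; the output path
    # keeps the stem but swaps the directory and extension.
    for name in sorted(os.listdir(in_dir)):
        stem, ext = os.path.splitext(name)
        if ext.lower() == in_ext:
            yield (os.path.join(in_dir, name),
                   os.path.join(out_dir, stem + out_ext))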
Code Example #2
def batch_undistortion(cal_dir=CAL_DIR,
                       in_dir=IMG_DIR,
                       out_dir=UND_DIR,
                       crop=True):
    ensure_dir(out_dir)

    cal_f = files(dir=cal_dir, ext=".jpg")[0]
    mapx, mapy, roi = undistortion_parameters(cal_f)
    for in_f, out_f in files(in_dir=in_dir,
                             out_dir=out_dir,
                             in_ext=".jpg",
                             out_ext=".jpg"):
        undistortion(in_f, out_f, mapx, mapy, roi, crop)
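Neither undistortion_parameters nor undistortion is shown. Since the parameters are named mapx, mapy and roi, they plausibly come from OpenCV's cv2.initUndistortRectifyMap plus the ROI from cv2.getOptimalNewCameraMatrix; a minimal sketch of the per-image step under that assumption:

import cv2

def undistortion(in_f, out_f, mapx, mapy, roi, crop=True):
    # Remap the image through the precomputed undistortion maps,
    # optionally cropping to the valid region of interest.
    img = cv2.imread(in_f)
    und = cv2.remap(img, mapx, mapy, cv2.INTER_LINEAR)
    if crop:
        x, y, w, h = roi
        und = und[y:y + h, x:x + w]
    cv2.imwrite(out_f, und)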
Code Example #3
def batch_raw2jpg(in_dir=RAW_DIR, out_dir=IMG_DIR):
    ensure_dir(out_dir)

    for in_f, out_f in files(in_dir=in_dir,
                             out_dir=out_dir,
                             in_ext=".nef",
                             out_ext=".jpg"):
        raw2jpg(in_f, out_f)
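The .nef extension is Nikon's RAW format. raw2jpg is not shown; a minimal sketch using the rawpy and imageio libraries (an assumption, since the example does not reveal which RAW decoder the project actually uses):

import rawpy
import imageio

def raw2jpg(in_f, out_f):
    # Decode the RAW file with default demosaicing, then save as JPEG.
    with rawpy.imread(in_f) as raw:
        rgb = raw.postprocess()
    imageio.imsave(out_f, rgb)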
Code Example #4
def prepare_annotator(img_dir=IMG_DIR,
                      ann_dir=ANN_DIR,
                      app_dir=APP_DIR,
                      classes=("background", "plant", "panicle")):
    img_out_dir = app_dir + "/data/images"
    for f in files(img_out_dir, ".jpg"):
        os.remove(f)
    for f in files(img_dir, ".jpg"):
        shutil.copy(f, img_out_dir)
        file_message(img_out_dir + "/" + os.path.basename(f))

    ann_out_dir = app_dir + "/data/annotations"
    for f in files(ann_out_dir, ".png"):
        os.remove(f)
    for f in files(ann_dir, ".png"):
        shutil.copy(f, ann_out_dir)
        file_message(ann_out_dir + "/" + os.path.basename(f))

    jsonfile = files(app_dir + "/data", ".json")[0]
    with open(jsonfile, "r") as read_file:
        data = json.load(read_file)
    data["labels"] = classes
    data["imageURLs"] = [
        "data/images/" + os.path.basename(f)
        for f in files(img_out_dir, ".jpg")
    ]
    data["annotationURLs"] = [
        "data/annotations/" + os.path.basename(f)
        for f in files(ann_out_dir, ".png")
    ]
    with open(jsonfile, "w") as write_file:
        json.dump(data, write_file)
    file_message(jsonfile)
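For reference, after prepare_annotator runs, the annotator's JSON config (expressed here as the Python dict that gets dumped) is shaped roughly like this; the 0001/0002 filenames are illustrative, not from the source:

{
    "labels": ["background", "plant", "panicle"],
    "imageURLs": ["data/images/0001.jpg",
                  "data/images/0002.jpg"],
    "annotationURLs": ["data/annotations/0001.png",
                       "data/annotations/0002.png"]
}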
Code Example #5
def batch_threshold_segmentation(limits=([0, 255], [54, 255], [0, 255]),
                                 colour_space="HSV",
                                 in_dir=IMG_DIR,
                                 out_dir=ANN_DIR):
    ensure_dir(out_dir)

    for in_f, out_f in files(in_dir=in_dir,
                             out_dir=out_dir,
                             in_ext=".jpg",
                             out_ext=".png"):
        threshold_segmentation(in_f, out_f, limits, colour_space)
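threshold_segmentation is not shown. Since limits holds a [low, high] pair per channel, a minimal sketch using cv2.inRange, assuming BGR input converted to the requested colour space:

import cv2
import numpy as np

def threshold_segmentation(in_f, out_f, limits, colour_space="HSV"):
    # Keep pixels whose channels all fall inside the per-channel limits.
    img = cv2.imread(in_f)
    if colour_space == "HSV":
        img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    lower = np.array([lo for lo, hi in limits], dtype=np.uint8)
    upper = np.array([hi for lo, hi in limits], dtype=np.uint8)
    mask = cv2.inRange(img, lower, upper)
    cv2.imwrite(out_f, mask)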
Code Example #6
File: python.py  Project: PeithVergil/bugtrax-fabric
def conda_install_requirements(venv):
    """
    Run "pip install -r" on the requirements file.

    :Example:

    fab --config=config.conf python.conda_install_requirements:venv=bugtrax
    """
    # Upload the requirements file.
    put(utils.files('requirements', 'base.txt'), utils.home('base.txt'))
    put(utils.files('requirements', 'prod.txt'), utils.home('prod.txt'))

    # Activate the virtual environment.
    activate = '{0}/bin/activate'.format(utils.home('apps', 'miniconda'))

    with prefix('source {activate} {venv}'.format(venv=venv, activate=activate)):
        run('pip install -r {0}'.format(utils.home('prod.txt')))

    # Remove the uploaded files.
    with cd(utils.home()):
        run('rm {0}'.format(utils.home('base.txt')))
        run('rm {0}'.format(utils.home('prod.txt')))
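Note that both requirements files are uploaded but only prod.txt is installed; this works if prod.txt pulls base.txt in through pip's include syntax, roughly as below (a guess at the layout, which the source does not show):

# prod.txt (hypothetical contents)
-r base.txt
gunicorn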
Code Example #7
def batch_detect_markers(in_dir=UNDCR_DIR, out_dir=ARUCO_DIR, show=True):
    ensure_dir(out_dir)

    aruco_dict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_7X7_1000)
    aruco_params = cv2.aruco.DetectorParameters_create()
    for in_f, out_f in files(in_dir=in_dir,
                             out_dir=out_dir,
                             in_ext=".jpg",
                             out_ext=".pkl"):
        detect_markers(in_f,
                       out_f,
                       aruco_dict=aruco_dict,
                       aruco_params=aruco_params,
                       show=show)
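detect_markers is not shown. A minimal sketch built on the same (pre-4.7) cv2.aruco API the snippet already uses, pickling the detection result since out_ext is .pkl:

import pickle
import cv2

def detect_markers(in_f, out_f, aruco_dict, aruco_params, show=False):
    img = cv2.imread(in_f)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    corners, ids, _rejected = cv2.aruco.detectMarkers(
        gray, aruco_dict, parameters=aruco_params)
    if show:
        cv2.aruco.drawDetectedMarkers(img, corners, ids)
        cv2.imshow("markers", img)
        cv2.waitKey(1)
    with open(out_f, "wb") as fh:
        pickle.dump({"corners": corners, "ids": ids}, fh)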
Code Example #8
def load_data():
    # rumours = {k: {} for k in folders('.')}
    data = {}
    for rumour, r_location in folders(DATA_LOCATION):
        data[rumour] = {}
        for thread, t_location in folders(r_location):
            try:
                replies = files(path.join(t_location, 'replies'))
            except StopIteration:
                replies = []
            # print(rumour, thread, path.join(t_location, 'replies'))
            data[rumour][thread] = {
                "structure": read(path.join(t_location, 'structure.json')),
                "source": tweet(path.join(t_location, 'source-tweet', thread + '.json')),
                "replies": {id[:-5]: tweet(f) for id, f in replies}
            }

    write('data/data.json', data)
    return data
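The folders and files helpers here evidently yield (name, path) pairs, and files raising StopIteration on a missing directory suggests it calls next() on os.walk. A sketch consistent with those call sites (inferred, not from the source):

import os
from os import path

def folders(location):
    # (name, full_path) pairs for each subdirectory.
    for name in sorted(os.listdir(location)):
        p = path.join(location, name)
        if path.isdir(p):
            yield name, p

def files(location):
    # (name, full_path) pairs for each file; next() on os.walk raises
    # StopIteration when the directory does not exist, which matches
    # the except clause in load_data.
    root, _dirs, names = next(os.walk(location))
    return [(name, path.join(root, name)) for name in sorted(names)]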
Code Example #9
import os
import pandas
from lxml import etree
from decimal import Decimal

from utils import files, int_file, pairs


_keys = map(int_file, files('ufrawkey'))
_frames = dict(map(int_file, files('cr2')))

exposure_keys = list()
basemanualcurve_keys = list()
manualcurve_keys = list()


def curve_anchors(tree, curve_name):
    anchors = list()
    for anchorxy in tree.find(curve_name).iterchildren():
        X, Y = anchorxy.text.split()
        anchors.extend([Decimal(X), Decimal(Y)])
    return anchors

for int_, filename in sorted(_keys):
    with open(filename) as keyframe_file:
        contents = keyframe_file.read()
        parser = etree.XMLParser()
        parser.feed(contents)
        tree = parser.close()
        exposure_keys.append(
            (int(int_), Decimal(tree.find('Exposure').text)))
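int_file is imported from utils but not shown. Given that its results are sorted and fed to dict(), it presumably pairs each file with the frame number encoded in its name; a hypothetical version:

import os

def int_file(filename):
    # Hypothetical: "0042.cr2" -> (42, "0042.cr2").
    stem = os.path.splitext(os.path.basename(filename))[0]
    return int(stem), filename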
Code Example #10
def main():
    args = get_parser()

    # source faces
    srcFaces = tqdm(files(args.srcFacePath, ['.jpg']))

    # real faces database
    #ds = image2pilBatch(files(args.faceDatabase, ['.jpg']))

    # face detector
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(args.shapePredictor)

    for i, srcFace in enumerate(srcFaces):
        # load bgr (cv2.imread returns None on failure instead of raising)
        srcFaceBgr = cv2.imread(srcFace)
        if srcFaceBgr is None:
            tqdm.write(f'Fail loading: {srcFace}')
            continue

        # detect landmarks
        srcLms = get_landmarks(detector, predictor,
                               cv2.cvtColor(srcFaceBgr, cv2.COLOR_BGR2RGB))
        if srcLms is None:
            tqdm.write(f'No face: {srcFace}')
            continue

        # find first face whose landmarks are close enough in real face database
        targetRgb = find_one_neighbor(detector, predictor, srcFace, srcLms,
                                      files(args.faceDatabase, ['.jpg']),
                                      args.threshold)
        if targetRgb is None:  # if not found
            tqdm.write(f'No Match: {srcFace}')
            continue

        # if found
        targetBgr = cv2.cvtColor(targetRgb, cv2.COLOR_RGB2BGR)
        hullMask = convex_hull(
            srcFaceBgr.shape,
            srcLms)  # size (h, w, c) mask of face convex hull

        # generate random deform
        anchors, deformedAnchors = random_deform(hullMask.shape[:2], 4, 4)

        # piecewise affine transform and blur
        warped = piecewise_affine_transform(
            hullMask, anchors, deformedAnchors)  # size (h, w) warped mask
        blured = cv2.GaussianBlur(warped, (5, 5), 3)

        # swap
        left, up, right, bot = min(srcLms[:, 0]), min(srcLms[:, 1]), max(
            srcLms[:, 0]), max(srcLms[:, 1])
        targetBgrT = color_transfer(srcFaceBgr[up:bot, left:right, :],
                                    targetBgr)
        resultantFace = forge(srcFaceBgr, targetBgrT, blured)  # forged face

        # save face images
        cv2.imwrite(f'./dump/mask_{i}.jpg', hullMask)
        cv2.imwrite(f'./dump/deformed_{i}.jpg', warped * 255)
        cv2.imwrite(f'./dump/blured_{i}.jpg', blured * 255)
        cv2.imwrite(f'./dump/src_{i}.jpg', srcFaceBgr)
        cv2.imwrite(f'./dump/target_{i}.jpg', targetBgr)
        cv2.imwrite(f'./dump/target_T_{i}.jpg', targetBgrT)
        cv2.imwrite(f'./dump/forge_{i}.jpg', resultantFace)
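forge is not shown in the excerpt. Its arguments (source face, colour-corrected target, blurred mask) suggest a straightforward mask-weighted blend; a minimal sketch under that assumption:

import numpy as np

def forge(src, target, mask):
    # Alpha-blend target over src using the soft mask as the weight.
    if mask.ndim == 2:
        mask = mask[..., None]
    return (mask * target + (1.0 - mask) * src).astype(np.uint8)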
Code Example #11
def _needs_remove(self, directory):
    return any((os.path.splitext(item)[1] in self.supported_suffixes
                and self._has_tags(item)) for item in files(directory))
Code Example #12
def _needs_add(self, directory, force):
    return any((os.path.splitext(item)[1] in self.supported_suffixes and (
        not self._has_tags(item) or force)) for item in files(directory))
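_has_tags is not shown in either snippet. A hypothetical implementation using the mutagen tagging library (an assumption; the project's actual tag reader may differ):

import mutagen

def _has_tags(self, item):
    # True when mutagen can open the file and it carries a tag block.
    audio = mutagen.File(item)
    return audio is not None and bool(audio.tags)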
Code Example #13
def main():
    args = get_parser()
    import dlib

    # source faces
    srcFaces = tqdm(files(args.srcFacePath, ['.jpg']))

    # real faces database
    # ds = image2pilBatch(files(args.faceDatabase, ['.jpg']))

    # face detector
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(args.shapePredictor)

    for i, srcFace in enumerate(srcFaces):
        # load bgr (cv2.imread returns None on failure instead of raising)
        srcFaceBgr = cv2.imread(srcFace)
        if srcFaceBgr is None:
            tqdm.write(f'Fail loading: {srcFace}')
            continue

        # detect landmarks
        srcLms = get_landmarks(detector, predictor,
                               cv2.cvtColor(srcFaceBgr, cv2.COLOR_BGR2RGB))
        if srcLms is None:
            tqdm.write(f'No face: {srcFace}')
            continue

        # find first face whose landmarks are close enough in real face database
        targetRgb = find_one_neighbor(detector, predictor, srcFace, srcLms,
                                      files(args.faceDatabase, ['.jpg']),
                                      args.threshold)
        if targetRgb is None:  # if not found
            tqdm.write(f'No Match: {srcFace}')
            continue

        # if found
        # build the face convex hull
        targetBgr = cv2.cvtColor(targetRgb, cv2.COLOR_RGB2BGR)
        hullMask = convex_hull(
            srcFaceBgr.shape, srcLms
        )  # size (h, w, c) mask of face convex hull, uint8 [0, 255]  # TODO: drop the channel dimension to reduce parameters

        # generate a random deformation
        anchors, deformedAnchors = random_deform(hullMask.shape[:2], 4,
                                                 4)  # known issue: the deformed result should be intersected with the original mask

        # piecewise affine transform
        warped = piecewise_affine_transform(
            hullMask, anchors,
            deformedAnchors)  # size (h, w, c) warped mask, float64 [0, 1.0]
        # confine the warped region to the face area to avoid background influence
        warped *= (hullMask / hullMask.max())

        # Gaussian blur
        blured = cv2.GaussianBlur(warped, (5, 5), 3)

        # colour correction: transfer the colour of the blurred mask region,
        # then combine the colour-corrected face with the original background
        left, up, right, bot = get_roi(warped)  # bounding box of the warped region
        src = (srcFaceBgr[up:bot, left:right, :]).astype(np.uint8)
        tgt = (targetBgr[up:bot, left:right, :]).astype(np.uint8)
        # pdb.set_trace()
        targetBgrT = color_transfer(src, tgt)
        cv2.imwrite('results/transfer/src.jpg', src)
        cv2.imwrite('results/transfer/tgt.jpg', tgt)
        cv2.imwrite('results/transfer/tgtrans.jpg', targetBgrT)
        targetBgr_T = targetBgr * 1  # copy into fresh memory
        targetBgr_T[up:bot, left:right, :] = targetBgrT  # paste the colour-transferred patch back into the image

        # blend
        resultantFace = forge(srcFaceBgr, targetBgr_T, blured)  # forged face

        # blending boundary
        resultantBounding = get_bounding(blured)

        # save face images
        cv2.imwrite(f'./results/mask_{i}.jpg', hullMask)
        cv2.imwrite(f'./results/deformed_{i}.jpg', warped * 255)
        cv2.imwrite(f'./results/blured_{i}.jpg', blured * 255)
        cv2.imwrite(f'./results/src_{i}.jpg', srcFaceBgr)
        cv2.imwrite(f'./results/target_{i}.jpg', targetBgr)
        cv2.imwrite(f'./results/target_T_{i}.jpg', targetBgrT)
        cv2.imwrite(f'./results/forge_{i}.jpg', resultantFace)
        cv2.imwrite(f'./results/bounding_{i}.jpg', resultantBounding * 255)
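get_roi is not shown. It evidently returns the (left, up, right, bottom) bounding box of the warped mask's non-zero area; a sketch under that assumption:

import numpy as np

def get_roi(mask):
    # Bounding box (left, up, right, bottom) of the non-zero region;
    # a multi-channel mask is collapsed across channels first.
    if mask.ndim == 3:
        mask = mask.max(axis=2)
    ys, xs = np.nonzero(mask)
    return xs.min(), ys.min(), xs.max(), ys.max()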
Code Example #14
File: render.py  Project: bmcorser/timelapse-tooling
import os
from multiprocessing import Pool
from subprocess import check_call

from utils import files


def render_jpg(R):
    # pass-through stub in this excerpt; the actual conversion is elided
    return R


if __name__ == '__main__':
    pool = Pool(8)
    raw_files = files('cr2')
    num = len(raw_files)
    for pos, ret in enumerate(pool.imap_unordered(render_jpg, raw_files), 1):
        fmt_string = "{0}/{1} ({2:.3f}%) done"
        print(fmt_string.format(pos, num, (pos / float(num)) * 100))
    pool.close()
    pool.join()
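Given the check_call import and the ufraw keyfiles used elsewhere in this project (see Code Example #9), the real render_jpg body plausibly shells out to ufraw-batch, roughly as follows (an assumption, not confirmed by the source):

def render_jpg(R):
    # Hypothetical body: convert one RAW frame to JPEG via ufraw-batch.
    check_call(['ufraw-batch', '--out-type=jpeg', R])
    return R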