Example #1
File: aittala.py  Project: keunhong/rendkit
    def resize(self, shape):
        self.diffuse_map = resize(self.diffuse_map, shape)
        self.specular_map = resize(self.specular_map, shape)
        self.spec_shape_map = resize(self.spec_shape_map, shape)
        self.normal_map = resize(self.normal_map, shape)

        return self
Example #2
def load_envmap(path, size=(512, 512)):
    if not os.path.exists(path):
        raise FileNotFoundError("{} does not exist.".format(path))
    tic = time()
    ext = os.path.splitext(path)[1]
    shape = (*size, 3)
    cube_faces = np.zeros((6, *shape), dtype=np.float32)
    if os.path.isdir(path):
        for fname in os.listdir(path):
            name = os.path.splitext(fname)[0]
            image = misc.imread(os.path.join(path, fname))
            image = misc.imresize(image, size).astype(np.float32) / 255.0
            cube_faces[_FACE_NAMES[name]] = image
    elif ext == '.pfm':
        array = pfm.pfm_read(path)
        for i, face in enumerate(unstack_cross(array)):
            cube_faces[i] = resize(face, size)[:, :, :3]
    elif ext == '.exr' or ext == '.hdr':
        import cv2
        array = cv2.imread(str(path), -1)
        array = array[:, :, [2, 1, 0]]
        for i, face in enumerate(unstack_cross(array)):
            cube_faces[i] = resize(face, size)[:, :, :3]
    elif ext == '.jpg' or ext == '.png' or ext == '.tiff':
        array = imread(path)
        for i, face in enumerate(unstack_cross(array)):
            cube_faces[i] = resize(face, size)[:, :, :3]
    else:
        raise RuntimeError("Unknown cube map format.")
    logger.info("Loaded envmap from {} ({:.04f}s)".format(path, time() - tic))
    return cube_faces
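
A minimal usage sketch (the path below is hypothetical; the face order follows _FACE_NAMES / unstack_cross from the same module):

faces = load_envmap('/path/to/envmap.exr', size=(512, 512))  # hypothetical path
assert faces.shape == (6, 512, 512, 3)  # six float32 RGB cube faces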
Example #3
File: minc.py  Project: madhawav/kitnn
def combine_probs(prob_maps, image, remap=False, fg_mask=None):
    substances = REMAPPED_SUBSTANCES if remap else SUBSTANCES
    map_scale = 550 / min(image.shape[:2])
    map_sum = np.zeros((int(image.shape[0] * map_scale),
                        int(image.shape[1] * map_scale), len(substances)))
    for prob_map in prob_maps:
        if remap:
            resized_fg_mask = resize(fg_mask, prob_map.shape, order=0)
            prob_map = compute_remapped_probs(prob_map,
                                              fg_mask=resized_fg_mask)
        prob_map = resize(prob_map, map_sum.shape[:2])
        map_sum += prob_map
    return map_sum / len(prob_maps)
Example #4
def main():
    pbar = tqdm(list(args.in_dir.glob('**/*.mdl')))
    for mdl_file in pbar:
        rel_path = Path(
            str(mdl_file).replace(str(args.in_dir), '').lstrip('/'))
        pbar.set_description(f'{rel_path}')
        in_path = args.in_dir / rel_path
        out_path = args.out_dir / rel_path
        out_path.parent.mkdir(parents=True, exist_ok=True)
        shutil.copy2(in_path, out_path)

    pbar = tqdm(
        list(args.in_dir.glob('**/*.png')) +
        list(args.in_dir.glob('**/*.jpg')))
    for png_file in pbar:
        rel_path = Path(
            str(png_file).replace(str(args.in_dir), '').lstrip('/'))
        pbar.set_description(f'{rel_path}')
        in_path = args.in_dir / rel_path
        out_path = args.out_dir / rel_path
        if out_path.exists():
            continue

        out_path.parent.mkdir(parents=True, exist_ok=True)

        image = Image.open(str(in_path))
        image = image.resize((500, 500), resample=Image.LANCZOS)
        image.save(str(out_path))

    pbar = tqdm(list(args.in_dir.glob('**/*.exr')))
    for exr_file in pbar:
        rel_path = Path(
            str(exr_file).replace(str(args.in_dir), '').lstrip('/'))
        pbar.set_description(f'{rel_path}')
        in_path = args.in_dir / rel_path
        out_path = args.out_dir / rel_path
        if out_path.exists():
            continue

        out_path.parent.mkdir(parents=True, exist_ok=True)

        image = load_hdr(in_path)
        image = resize(image, (500, 500))
        save_hdr(out_path, image)
Example #5
File: minc.py  Project: madhawav/kitnn
def compute_probs_crf(image,
                      prob_map,
                      theta_p=0.1,
                      theta_L=10.0,
                      theta_ab=5.0):
    resized_im = np.clip(resize(image, prob_map.shape[:2], order=3), 0, 1)
    image_lab = rgb2lab(resized_im)

    p_y, p_x = np.mgrid[0:image_lab.shape[0], 0:image_lab.shape[1]]

    feats = np.zeros((5, *image_lab.shape[:2]), dtype=np.float32)
    d = min(image_lab.shape[:2])
    feats[0] = p_x / (theta_p * d)
    feats[1] = p_y / (theta_p * d)
    feats[2] = image_lab[:, :, 0] / theta_L
    feats[3] = image_lab[:, :, 1] / theta_ab
    feats[4] = image_lab[:, :, 2] / theta_ab
    crf = densecrf.DenseCRF2D(*prob_map.shape)
    unary = np.rollaxis(-np.log(prob_map), axis=-1).astype(dtype=np.float32,
                                                           order='c')
    crf.setUnaryEnergy(np.reshape(unary, (prob_map.shape[-1], -1)))

    compat = 2 * np.array(
        (
            # f    l    w    m    p    b
            (0.0, 1.0, 1.0, 1.0, 1.0, 3.0),  # fabric
            (1.0, 0.0, 1.0, 1.0, 1.0, 3.0),  # leather
            (1.0, 1.0, 0.0, 1.0, 1.0, 3.0),  # wood
            (1.0, 1.0, 1.0, 0.0, 1.0, 3.0),  # metal
            (1.0, 1.0, 1.0, 1.0, 0.0, 3.0),  # plastic
            (1.5, 1.5, 1.5, 1.5, 1.5, 0.0),  # background
        ),
        dtype=np.float32)

    crf.addPairwiseEnergy(np.reshape(feats, (feats.shape[0], -1)),
                          compat=compat)

    Q = crf.inference(20)
    Q = np.array(Q).reshape((-1, *prob_map.shape[:2]))
    return np.rollaxis(Q, 0, 3)
Example #6
def compute_segment_substances(pair: ExemplarShapePair,
                               return_ids=False,
                               segment_map=None,
                               substance_map=None):
    if segment_map is None:
        segment_map = pair.load_data(
            config.PAIR_SHAPE_CLEAN_SEGMENT_MAP_NAME) - 1
    if substance_map is None:
        substance_map = pair.exemplar.load_data(config.EXEMPLAR_SUBST_MAP_NAME)
        substance_map = resize(substance_map, segment_map.shape, order=0)

    mesh, _ = pair.shape.load()
    seg_substances = compute_substance_ids_by_segment(substance_map,
                                                      segment_map)
    if return_ids:
        seg_substances = dict(seg_substances)
    else:
        seg_substances = {
            mesh.materials[k]: minc.REMAPPED_SUBSTANCES[v]
            for k, v in seg_substances.items()
        }
    return seg_substances
Example #7
def main():
    checkpoint_path = Path(args.checkpoint_path)
    checkpoint_name = checkpoint_path.parent.parent.name
    snapshot_path = checkpoint_path.parent.parent.parent.parent / 'snapshots' / checkpoint_name / 'snapshot.json'

    with snapshot_path.open('r') as f:
        mat_id_to_label = json.load(f)['mat_id_to_label']
        label_to_mat_id = {v: k for k, v in mat_id_to_label.items()}

    with (checkpoint_path.parent / 'model_params.json').open('r') as f:
        params_dict = json.load(f)

    print(f'Loading checkpoint from {checkpoint_path}')
    checkpoint = torch.load(checkpoint_path)

    if not args.out_name:
        # TODO: remove this ugly thing. (There's no reason for the +1 we did.)
        out_name = str(checkpoint['epoch'] - 1)
    else:
        out_name = args.out_name

    model_name = checkpoint_path.parent.name
    out_dir = (checkpoint_path.parent.parent.parent.parent / 'inference' /
               checkpoint_name / model_name / str(out_name))

    model = RendNet3(num_classes=len(label_to_mat_id) + 1,
                     num_roughness_classes=20,
                     num_substances=len(SUBSTANCES),
                     base_model=resnet.resnet18(pretrained=False),
                     output_substance=True,
                     output_roughness=True)
    model.load_state_dict(checkpoint['state_dict'])

    # model = RendNet3.from_checkpoint(checkpoint)
    model.train(False)
    model = model.cuda()

    yy = input(f'Will save to {out_dir!s}, continue? (y/n): ')
    if yy != 'y':
        return

    out_dir.mkdir(exist_ok=True, parents=True)

    print('Loading pairs')
    with session_scope() as sess:
        pairs, count = controllers.fetch_pairs_default(sess)
        materials = sess.query(models.Material).all()
        mat_by_id = {m.id: m for m in materials}

    pbar = tqdm(pairs)
    for pair in pbar:
        out_path = Path(out_dir, f'{pair.id}.json')
        if not args.overwrite and out_path.exists():
            continue

        if not pair.data_exists(config.PAIR_SHAPE_CLEAN_SEGMENT_MAP_NAME_OLD):
            continue
        pbar.set_description(f'Pair {pair.id}')

        exemplar = pair.exemplar
        shape = (224, 224)
        exemplar_im = resize(pair.exemplar.load_cropped_image(), shape)
        # if not exemplar.data_exists(exemplar.get_image_name(shape)):
        #     exemplar_im = resize(pair.exemplar.load_cropped_image(), shape)
        #     exemplar.save_data(exemplar.get_image_name(shape), exemplar_im)
        # else:
        #     exemplar_im = exemplar.load_data(exemplar.get_image_name(shape))

        segment_map = pair.load_data(
            config.PAIR_SHAPE_CLEAN_SEGMENT_MAP_NAME_OLD) - 1

        vis.image(exemplar_im.transpose((2, 0, 1)), win='exemplar-image')

        result_dict = {'pair_id': pair.id, 'segments': {}}

        for seg_id in [s for s in np.unique(segment_map) if s >= 0]:
            seg_mask = (segment_map == seg_id)
            topk_dict = compute_topk(label_to_mat_id, model, exemplar_im,
                                     seg_mask)
            result_dict['segments'][str(seg_id)] = topk_dict

        with out_path.open('w') as f:
            json.dump(result_dict, f, indent=2)
Example #8
File: minc.py  Project: madhawav/kitnn
def resize_image(image, scale=1.0, l_size=256, l_frac=0.233, order=2):
    small_dim_len = l_size / l_frac
    scale_mult = scale * small_dim_len / min(image.shape[:2])
    scale_shape = (int(image.shape[0] * scale_mult),
                   int(image.shape[1] * scale_mult))
    return resize(image, scale_shape, order=order)
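
A minimal usage sketch, assuming resize here is skimage.transform.resize. With the defaults the shorter image side is scaled to about 256 / 0.233 ≈ 1099 px, presumably so that MINC-style patches covering 23.3% of the short side come out at 256 px:

import numpy as np

image = np.random.rand(550, 733, 3)  # dummy RGB image in [0, 1]
resized = resize_image(image)        # defaults: scale=1.0, l_size=256, l_frac=0.233
print(resized.shape[:2])             # roughly (1098, 1464)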
Example #9
def main():
    checkpoint_path = args.checkpoint_path
    base_dir = checkpoint_path.parent.parent.parent.parent
    snapshot_name = checkpoint_path.parent.parent.name
    lmdb_dir = (base_dir / 'lmdb' / snapshot_name)
    with (lmdb_dir / 'meta.json').open('r') as f:
        meta_dict = json.load(f)
        mat_id_to_label = meta_dict['mat_id_to_label']
        label_to_mat_id = {v: k for k, v in mat_id_to_label.items()}

    with (checkpoint_path.parent / 'model_params.json').open('r') as f:
        model_params = json.load(f)

    color_binner = None
    if 'color_hist_space' in model_params:
        color_binner = ColorBinner(
            space=model_params['color_hist_space'],
            shape=tuple(model_params['color_hist_shape']),
            sigma=tuple(model_params['color_hist_sigma']),
        )

    print(f'Loading checkpoint from {checkpoint_path!s}')
    checkpoint = torch.load(checkpoint_path)

    if not args.out_name:
        # TODO: remove this ugly thing. (There's no reason for the +1 we did.)
        out_name = str(checkpoint['epoch'] - 1)
    else:
        out_name = args.out_name

    model_name = checkpoint_path.parent.name
    out_dir = (base_dir / 'inference' / snapshot_name / model_name / out_name)

    model = RendNet3.from_checkpoint(checkpoint)
    model.train(False)
    model = model.cuda()

    yy = input(f'Will save to {out_dir!s}, continue? (y/n): ')
    if yy != 'y':
        return

    out_dir.mkdir(exist_ok=True, parents=True)

    filters = []
    if args.category:
        filters.append(ExemplarShapePair.shape.has(category=args.category))

    print('Loading pairs')
    with session_scope() as sess:
        pairs, count = controllers.fetch_pairs_default(sess, filters=filters)
        materials = sess.query(models.Material).all()
        mat_by_id = {m.id: m for m in materials}

    pairs = [
        pair for pair in pairs
        if args.overwrite or not (Path(out_dir, f'{pair.id}.json').exists())
    ]

    pbar = tqdm(pairs)
    for pair in pbar:
        out_path = Path(out_dir, f'{pair.id}.json')
        if not args.overwrite and out_path.exists():
            continue

        if not pair.data_exists(config.PAIR_SHAPE_CLEAN_SEGMENT_MAP_NAME):
            tqdm.write('Clean segment map does not exist')
            continue
        pbar.set_description(f'Pair {pair.id}')

        exemplar = pair.exemplar
        shape = (224, 224)
        exemplar_im = pair.exemplar.load_cropped_image()
        exemplar_im = skimage.transform.resize(exemplar_im,
                                               shape,
                                               anti_aliasing=True,
                                               order=3,
                                               mode='constant',
                                               cval=1)
        # if not exemplar.data_exists(exemplar.get_image_name(shape)):
        #     exemplar_im = resize(pair.exemplar.load_cropped_image(),
        #                          shape, order=3)
        #     exemplar.save_data(exemplar.get_image_name(shape), exemplar_im)
        # else:
        #     exemplar_im = exemplar.load_data(exemplar.get_image_name(shape))

        segment_map = pair.load_data(
            config.PAIR_SHAPE_CLEAN_SEGMENT_MAP_NAME) - 1
        substance_map = pair.exemplar.load_data(config.EXEMPLAR_SUBST_MAP_NAME)
        substance_map = resize(substance_map, segment_map.shape, order=0)

        vis.image(exemplar_im.transpose((2, 0, 1)), win='exemplar-image')

        result_dict = {'pair_id': pair.id, 'segments': {}}

        subst_id_by_seg_id = compute_segment_substances(
            pair,
            return_ids=True,
            segment_map=segment_map,
            substance_map=substance_map)

        for seg_id in [s for s in np.unique(segment_map) if s >= 0]:
            seg_mask = (segment_map == seg_id)
            topk_dict = compute_topk(
                label_to_mat_id,
                model,
                exemplar_im,
                seg_mask,
                minc_substance=SUBSTANCES[subst_id_by_seg_id[seg_id]],
                color_binner=color_binner,
                mat_by_id=mat_by_id)
            result_dict['segments'][str(seg_id)] = topk_dict

        with out_path.open('w') as f:
            json.dump(result_dict, f, indent=2)
Example #10
def resize_flow(vx, vy, shape):
    scaley = shape[0] / vx.shape[0]
    scalex = shape[1] / vx.shape[1]
    vx = resize(vx * scalex, shape, order=3)
    vy = resize(vy * scaley, shape, order=3)
    return vx, vy
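
A minimal usage sketch, assuming resize here is skimage.transform.resize. The x and y components are multiplied by the corresponding scale factors so the flow vectors remain correct at the new resolution:

import numpy as np

vx = np.random.randn(120, 160).astype(np.float32)  # dummy horizontal flow
vy = np.random.randn(120, 160).astype(np.float32)  # dummy vertical flow
vx2, vy2 = resize_flow(vx, vy, (240, 320))
print(vx2.shape, vy2.shape)  # (240, 320) (240, 320)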