Code example #1
File: Pipeline.py Project: v-wewei/BlenderProc
    def __init__(self,
                 config_path,
                 args,
                 working_dir,
                 temp_dir,
                 avoid_output=False):
        """
        Initializes the pipeline by calling the constructors of all modules mentioned in the config.

        :param config_path: path to the config file
        :param args: arguments which were provided to run.py and are specified in the config file
        :param working_dir: the current working directory, usually the directory where run.py sits
        :param temp_dir: the directory where temporary files are stored during execution
        :param avoid_output: if True, all modules (renderers and writers) skip producing output, which makes debugging easier
        """
        Utility.working_dir = working_dir

        config_parser = ConfigParser(silent=True)
        config = config_parser.parse(Utility.resolve_path(config_path), args)

        # Setup pip packages specified in config
        SetupUtility.setup_pip(config["setup"]["pip"] if "pip" in config["setup"] else [])

        if avoid_output:
            GlobalStorage.add_to_config_before_init("avoid_output", True)

        Utility.temp_dir = Utility.resolve_path(temp_dir)
        os.makedirs(Utility.temp_dir, exist_ok=True)

        self.modules = Utility.initialize_modules(config["modules"])
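
Together with the run.py snippet in code example #2 below, construction and execution follow this pattern. A minimal usage sketch, assuming a valid config file and a writable temp directory (all paths are placeholders, not from the source):

# Minimal usage sketch (hypothetical paths; run() is shown in code example #2)
import os
pipeline = Pipeline(
    config_path="config.yaml",    # path to the pipeline config
    args=[],                      # extra arguments referenced by the config
    working_dir=os.path.dirname(os.path.abspath(__file__)),
    temp_dir="/tmp/blenderproc",  # hypothetical temp location
)
pipeline.run()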
Code example #2
File: run.py Project: eugval/BlenderProc
import os
import sys

# Read args
argv = sys.argv
batch_index_file = None

if '--debug' in argv:
    import pydevd_pycharm
    pydevd_pycharm.settrace('localhost', port=1234, stdoutToServer=True, stderrToServer=True)

if "--batch-process" in argv:
    batch_index_file = argv[argv.index("--batch-process") + 1]

argv = argv[argv.index("--") + 1:]
working_dir = os.path.dirname(os.path.abspath(__file__))

from src.utility.SetupUtility import SetupUtility
# Setup general required pip packages, e.g. pyyaml
SetupUtility.setup_pip([])

from src.main.Pipeline import Pipeline
from src.utility.Utility import Utility

config_path = argv[0]
temp_dir = argv[1]

if batch_index_file is None:
    pipeline = Pipeline(config_path, argv[2:], working_dir, temp_dir)
    pipeline.run()
else:
    with open(Utility.resolve_path(batch_index_file), "r") as f:
        lines = f.readlines()

        for line in lines:
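            # The original snippet is cut off here. A plausible continuation, purely as a
            # sketch (the per-line format of the batch index file is an assumption, not
            # confirmed by the source): run one pipeline per line of arguments.
            pipeline = Pipeline(config_path, line.split(), working_dir, temp_dir)
            pipeline.run()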
Code example #3
from src.utility.SetupUtility import SetupUtility

SetupUtility.setup_pip(["Pillow", "opencv-contrib-python"])

import os
from math import tan

import bpy
import cv2
import numpy as np

from src.main.GlobalStorage import GlobalStorage
from src.renderer.RendererInterface import RendererInterface
from src.utility.BlenderUtility import load_image
from src.utility.SGMUtility import fill_in_fast
from src.utility.SGMUtility import resize
from src.utility.Utility import Utility


class StereoGlobalMatchingWriter(RendererInterface):
    """ Writes depth image generated from the stereo global matching algorithm to file

    **Configuration**:

    .. list-table:: 
        :widths: 25 100 10
        :header-rows: 1

        * - Parameter
          - Description
          - Type
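
For orientation: the algorithm this writer is named after is semi-global (block) matching, which OpenCV (installed by this very snippet) exposes directly. The following illustrates the underlying algorithm only, not the module's code; file names are placeholders:

# Illustration of stereo matching with OpenCV (not this module's code);
# assumes two rectified grayscale images on disk
import cv2
import numpy as np
left = cv2.imread("left.png", cv2.IMREAD_GRAYSCALE)
right = cv2.imread("right.png", cv2.IMREAD_GRAYSCALE)
matcher = cv2.StereoSGBM_create(minDisparity=0, numDisparities=64, blockSize=11)
disparity = matcher.compute(left, right).astype(np.float32) / 16.0  # SGBM returns fixed-point values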
Code example #4
File: WriterUtility.py Project: v-wewei/BlenderProc
import os
from typing import List, Dict, Union, Any, Set, Tuple
from collections import defaultdict

from src.utility.SetupUtility import SetupUtility
SetupUtility.setup_pip(["h5py"])

import numpy as np
import csv
import math
import json

import bpy
import mathutils
import h5py

from src.utility.BlenderUtility import load_image
from src.utility.MathUtility import MathUtility
from src.utility.Utility import Utility
from src.utility.CameraUtility import CameraUtility


class WriterUtility:
    @staticmethod
    def load_registered_outputs(keys: Set[str]) -> Dict[str, List[np.ndarray]]:
        """
        Loads registered outputs with specified keys

        :param keys: set of output_key types to load
        :return: dict of lists of raw loaded outputs. Keys can be 'distance', 'colors', 'normals'
        """
Code example #5
from src.utility.SetupUtility import SetupUtility
SetupUtility.setup_pip(["scikit-image"])

import datetime
from itertools import groupby
import csv
import json
import os
import shutil
import numpy as np
from skimage import measure

import bpy

from src.utility.Utility import Utility


class CocoWriterUtility:
    @staticmethod
    def write(output_dir: str,
              mask_encoding_format="rle",
              supercategory="coco_annotations",
              append_to_existing_output=False,
              segmap_output_key="segmap",
              segcolormap_output_key="segcolormap",
              rgb_output_key="colors"):
        """ Writes coco annotations in the following steps:
        1. Locate the seg images
        2. Locate the rgb maps
        3. Locate the seg mappings
        4. Read color mappings
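
Based on the signature above, a call could look like this. A sketch only; the output keys must have been registered by earlier renderer and segmentation modules, and the output path is a placeholder:

# Usage sketch for the key-based interface shown above
CocoWriterUtility.write(
    output_dir="output/coco",     # hypothetical output location
    mask_encoding_format="rle",   # documented default
    append_to_existing_output=False,
)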
Code example #6
    def extract(mesh_objects: List[MeshObject], compare_angle_degrees: float = 7.5, compare_height: float = 0.15,
                up_vector_upwards: bool = True, height_list_path: str = None, new_name_for_object: str = "Floor",
                should_skip_if_object_is_already_there: bool = False) -> List[MeshObject]:
        """ Extracts floors in the following steps:
        1. Searchs for the specified object.
        2. Splits the surfaces which point upwards at a specified level away.

        :param mesh_objects: Objects to where all polygons will be extracted.
        :param compare_angle_degrees: Maximum difference between the up vector and the current polygon normal in degrees.
        :param compare_height: Maximum difference in Z direction between the polygons median point and the specified height of the room.
        :param up_vector_upwards: If this is True the `up_vec` points upwards -> [0, 0, 1] if not it points downwards: [0, 0, -1] in world coordinates. This vector is used for the `compare_angle_degrees` option.
        :param height_list_path: Path to a file with height values. If none is provided, a ceiling and floor is automatically detected. \
                                 This might fail. The height_list_values can be specified in a list like fashion in the file: [0.0, 2.0]. \
                                 These values are in the same size the dataset is in, which is usually meters. The content must always be \
                                 a list, e.g. [0.0].
        :param new_name_for_object: Name for the newly created object, which faces fulfill the given parameters.
        :param should_skip_if_object_is_already_there: If this is true no extraction will be done, if an object is there, which has the same name as
                                                       name_for_split_obj, which would be used for the newly created object.
        :return: The extracted floor objects.
        """
        # set the up_vector
        up_vec = mathutils.Vector([0, 0, 1])
        if not up_vector_upwards:
            up_vec *= -1.0

        height_list = []
        if height_list_path is not None:
            height_file_path = Utility.resolve_path(height_list_path)
            with open(height_file_path) as file:
                import ast
                height_list = [float(val) for val in ast.literal_eval(file.read())]

        object_names = [obj.name for obj in bpy.context.scene.objects if obj.type == "MESH"]

        def clean_up_name(name: str):
            """
            Cleans up the given name, e.g. from "Floor1" to "floor"

            :param name: given name
            :return: str: cleaned up name
            """
            name = ''.join([i for i in name if not i.isdigit()])  # remove digits
            name = name.lower().replace(".", "").strip()  # lowercase, remove dots, strip whitespace
            return name

        object_names = [clean_up_name(name) for name in object_names]
        if should_skip_if_object_is_already_there and new_name_for_object.lower() in object_names:
            # if should_skip_if_object_is_already_there is True and an object with the same name as the
            # newly split object already exists, then the execution is skipped
            return []

        newly_created_objects = []
        for obj in mesh_objects:
            obj.edit_mode()
            bm = obj.mesh_as_bmesh()
            bpy.ops.mesh.select_all(action='DESELECT')

            if height_list:
                counter = 0
                for height_val in height_list:
                    counter = FloorExtractor.select_at_height_value(bm, height_val, compare_height, up_vec,
                                                                   compare_angle_degrees, obj.get_local2world_mat())

                if counter:
                    obj.update_from_bmesh(bm)
                    bpy.ops.mesh.separate(type='SELECTED')
            else:
                from src.utility.SetupUtility import SetupUtility
                SetupUtility.setup_pip(["scikit-learn"])
                from sklearn.cluster import MeanShift, estimate_bandwidth

                # no height list was provided, try to estimate the height values automatically

                # first get a list of the height values of all face median points whose normals lie
                # inside the defined compare angle range
                list_of_median_poses = [FloorExtractor._get_median_face_pose(f, obj.get_local2world_mat())[2] for f in bm.faces if
                                        FloorExtractor._check_face_angle(f, obj.get_local2world_mat(), up_vec, compare_angle_degrees)]
                if not list_of_median_poses:
                    print("Object with name: {} is skipped no faces were relevant, try with "
                          "flipped up_vec".format(obj.get_name()))
                    list_of_median_poses = [FloorExtractor._get_median_face_pose(f, obj.get_local2world_mat())[2] for f in
                                            bm.faces if FloorExtractor._check_face_angle(f, obj.get_local2world_mat(),
                                                                                         -up_vec, compare_angle_degrees)]
                    if not list_of_median_poses:
                        print("Still no success for: {} skip object.".format(obj.get_name()))
                        bpy.ops.object.mode_set(mode='OBJECT')
                        bpy.ops.object.select_all(action='DESELECT')
                        continue

                    successful_up_vec = -up_vec
                else:
                    successful_up_vec = up_vec

                list_of_median_poses = np.reshape(list_of_median_poses, (-1, 1))
                if np.var(list_of_median_poses) < 1e-4:
                    # All faces are already correct
                    height_value = np.mean(list_of_median_poses)
                else:
                    ms = MeanShift(bandwidth=0.2, bin_seeding=True)
                    ms.fit(list_of_median_poses)

                    # if the up vector is negative the maximum value is searched
                    if up_vector_upwards:
                        height_value = np.min(ms.cluster_centers_)
                    else:
                        height_value = np.max(ms.cluster_centers_)

                counter = FloorExtractor.select_at_height_value(bm, height_value, compare_height, successful_up_vec,
                                                               compare_angle_degrees, obj.get_local2world_mat())

                if counter:
                    obj.update_from_bmesh(bm)
                    bpy.ops.mesh.separate(type='SELECTED')
                selected_objects = bpy.context.selected_objects
                if selected_objects:
                    if len(selected_objects) == 2:
                        selected_objects = [o for o in selected_objects
                                            if o != bpy.context.view_layer.objects.active]
                        selected_objects[0].name = new_name_for_object
                        newly_created_objects.append(MeshObject(selected_objects[0]))
                    else:
                        raise Exception("There is more than one selection after splitting, this should not happen!")
                else:
                    raise Exception("No floor object was constructed!")

            obj.object_mode()

        return newly_created_objects
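
A usage sketch for the method above (a sketch only; room_objects is a hypothetical list of MeshObject instances, e.g. produced by one of the loaders):

# Usage sketch: extract floors from already-loaded room meshes
floors = FloorExtractor.extract(
    mesh_objects=room_objects,    # hypothetical list of MeshObject
    compare_angle_degrees=7.5,
    compare_height=0.15,
    new_name_for_object="Floor",
)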
Code example #7
from src.utility.SetupUtility import SetupUtility
SetupUtility.setup_pip(["scikit-image", "opencv-contrib-python"])

import datetime
from itertools import groupby
import csv
import json
import os
import shutil
import numpy as np
from skimage import measure
from typing import List
import cv2
import bpy

from src.utility.Utility import Utility
from src.utility.LabelIdMapping import LabelIdMapping


class CocoWriterUtility:
    @staticmethod
    def write(output_dir: str,
              instance_segmaps: List[np.ndarray] = [],
              instance_attribute_maps: List[dict] = [],
              colors: List[np.ndarray] = [],
              color_file_format: str = "PNG",
              mask_encoding_format="rle",
              supercategory="coco_annotations",
              append_to_existing_output: bool = True,
              segmap_output_key="segmap",
              segcolormap_output_key="segcolormap",
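
This newer variant takes the rendered data explicitly instead of reading registered outputs. A usage sketch (variable names are placeholders; the maps would typically come from the renderer and segmentation utilities):

# Usage sketch for the list-based interface
CocoWriterUtility.write(
    output_dir="output/coco",
    instance_segmaps=instance_segmaps,                # hypothetical per-frame instance maps
    instance_attribute_maps=instance_attribute_maps,  # hypothetical per-frame attribute dicts
    colors=color_images,                              # hypothetical per-frame RGB arrays
    append_to_existing_output=True,
)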
Code example #8
from collections import defaultdict
from src.utility.SetupUtility import SetupUtility
SetupUtility.setup_pip(["imageio"])

import bpy
import bmesh
import mathutils
from mathutils import Vector

import numpy as np
import imageio

from src.utility.Utility import Utility


def local_to_world(cords, world):
    """
    Returns a cords transformed to the given transformation world matrix

    :param cords: coordinates a tuple of 3 values for x,y,z
    :param world: world matrix <- transformation matrix
    """
    return [world @ Vector(cord) for cord in cords]


def get_bounds(obj):
    """
    :param obj: a mesh object
    :return: [8x[3xfloat]] the object aligned bounding box coordinates in world coordinates
    """
    return local_to_world(obj.bound_box, obj.matrix_world)
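
A short usage sketch for the two helpers above (assumes some mesh object is active in the scene):

# Usage sketch: world-space bounding-box corners of the active object
obj = bpy.context.active_object
corners = get_bounds(obj)                        # 8 corners as mathutils.Vector in world coordinates
lowest_corner = min(corners, key=lambda c: c.z)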
Code example #9
File: OilPaintFilter.py Project: v-wewei/BlenderProc
from src.utility.SetupUtility import SetupUtility
SetupUtility.setup_pip(["scipy"])

import cv2
import numpy as np
from scipy import stats

from src.main.Module import Module
from src.utility.PostProcessingUtility import PostProcessingUtility

class OilPaintFilter(Module):
    """
    Applies the oil paint filter on a single channel image (or more than one channel, where each channel is a replica
    of the other). This could be desired for corrupting rendered depth maps to appear more realistic. Also trims the
    redundant channels if they exist.

    **Configuration**:

    .. list-table:: 
        :widths: 25 100 10
        :header-rows: 1

        * - Parameter
          - Description
          - Type
        * - filter_size
          - Mode filter size, should be an odd number. Default: 5
          - int
        * - edges_only
          - If true, applies the filter on the edges only. For RGB images, they should be represented in uint8
            arrays. Default: True
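
The description above amounts to a mode filter over a sliding window. A minimal sketch of that idea, reusing the numpy and scipy imports from this snippet (a hypothetical helper, not the module's actual implementation):

# Sketch of a mode filter (not the module's code): replace each pixel by the most
# frequent value in its filter_size x filter_size neighborhood
def mode_filter(image: np.ndarray, filter_size: int = 5) -> np.ndarray:
    pad = filter_size // 2
    padded = np.pad(image, pad, mode="edge")
    result = np.empty_like(image)
    for y in range(image.shape[0]):
        for x in range(image.shape[1]):
            window = padded[y:y + filter_size, x:x + filter_size]
            result[y, x] = stats.mode(window, axis=None)[0]
    return result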
Code example #10
from src.utility.SetupUtility import SetupUtility
SetupUtility.setup_pip(["opencv-contrib-python", "pypng==0.0.20"])

import json
import os
import glob
import numpy as np
import shutil
from typing import List
import png
import cv2
import bpy
from mathutils import Matrix

from src.utility.BlenderUtility import get_all_blender_mesh_objects
from src.utility.Utility import Utility
from src.utility.PostProcessingUtility import PostProcessingUtility
from src.utility.WriterUtility import WriterUtility


class BopWriterUtility:
    """ Saves the synthesized dataset in the BOP format. The dataset is split
        into chunks which are saved as individual "scenes". For more details
        about the BOP format, visit the BOP toolkit docs:
        https://github.com/thodan/bop_toolkit/blob/master/docs/bop_datasets_format.md

    """
    @staticmethod
    def _load_json(path, keys_to_int=False):
        """Loads content of a JSON file.
        From the BOP toolkit (https://github.com/thodan/bop_toolkit).
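
The body is cut off here. The BOP toolkit's version of this helper reads the file and optionally converts the top-level keys to int; a sketch to that effect (close to, but not verbatim, the toolkit code):

# Sketch of the helper's behavior (see the BOP toolkit for the original)
def _load_json_sketch(path, keys_to_int=False):
    def convert_keys(d):
        return {int(k) if k.lstrip('-').isdigit() else k: v for k, v in d.items()}
    with open(path, 'r') as f:
        if keys_to_int:
            return json.load(f, object_hook=convert_keys)
        return json.load(f)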
Code example #11
from src.utility.SetupUtility import SetupUtility
SetupUtility.setup_pip(
    ["scikit-image", "pypng==0.0.20", "scipy==1.2.2", "matplotlib==2.2.4"])

import os
import sys
from random import choice
from typing import List

import bpy
import numpy as np
from mathutils import Matrix, Vector

from src.utility.CameraUtility import CameraUtility
from src.utility.MeshObjectUtility import MeshObject
from src.utility.Utility import Utility


class BopLoader:
    @staticmethod
    def load(bop_dataset_path: str,
             temp_dir: str,
             sys_paths: list,
             model_type: str = "",
             cam_type: str = "",
             split: str = "test",
             scene_id: int = -1,
             obj_ids: list = [],
             sample_objects: bool = False,
             num_of_objs_to_sample: int = None,
             obj_instances_limit: int = -1,
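
The signature is cut off here. Based on the parameters shown, a call could look like this (a sketch; paths are placeholders, and that the return value is the list of loaded objects is an assumption):

# Usage sketch (hypothetical paths; remaining parameters keep their defaults)
loaded_objects = BopLoader.load(
    bop_dataset_path="<path-to-bop>/lm",   # hypothetical BOP dataset location
    temp_dir="/tmp/bop",
    sys_paths=["<path-to-bop_toolkit>"],
    split="test",
    scene_id=1,
)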
Code example #12
from src.utility.SetupUtility import SetupUtility
SetupUtility.setup_pip([
    "git+https://github.com/abahnasy/smplx",
    "git+https://github.com/abahnasy/human_body_prior"
])

import glob
import json
import os
import random
from datetime import datetime
from typing import List, Tuple

import bpy
import mathutils
import numpy as np
import torch
from human_body_prior.body_model.body_model import BodyModel

from src.utility.MeshObjectUtility import MeshObject
from src.utility.Utility import Utility
from src.utility.loader.ObjectLoader import ObjectLoader


class AMASSLoader:
    """
    AMASS is a large database of human motion, unifying 15 different optical marker-based motion capture datasets by representing them within a common framework and parameterization. All of the mocap data is converted into realistic 3D human meshes represented by a rigged body model called SMPL, which provides a standard skeletal representation as well as a fully rigged surface mesh. Warning: Only one part of the AMASS database is currently supported by the loader! Please refer to the AMASSLoader example for more information about the currently supported datasets.

    Any human pose recorded in these motions can be reconstructed using the following parameters: `"sub_dataset_identifier"`, `"sequence id"`, `"frame id"` and `"model gender"`. These parameters specify the exact pose to be generated based on the selected mocap dataset and the motion category recorded in that dataset.

    Note: if this module is used with another loader that loads objects with semantic mapping, make sure the other
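
As a purely hypothetical illustration, the pose-selection parameters named above could be collected like this (key spellings follow the docstring, which mixes underscore and space styles):

# Hypothetical illustration of the pose-selection parameters named in the docstring
pose_selection = {
    "sub_dataset_identifier": "CMU",  # placeholder sub-dataset id
    "sequence id": 1,
    "frame id": 0,
    "model gender": "male",
}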
Code example #13
from src.utility.MathUtility import MathUtility
from src.utility.SetupUtility import SetupUtility
SetupUtility.setup_pip(
    ["scikit-image", "pypng==0.0.20", "scipy", "matplotlib", "pytz"])

import os
import sys
from random import choice
from typing import List

import bpy
import numpy as np
from mathutils import Matrix, Vector

from src.utility.CameraUtility import CameraUtility
from src.utility.MeshObjectUtility import MeshObject
from src.utility.Utility import Utility


class BopLoader:
    @staticmethod
    def load(bop_dataset_path: str,
             temp_dir: str,
             sys_paths: list,
             model_type: str = "",
             cam_type: str = "",
             split: str = "test",
             scene_id: int = -1,
             obj_ids: list = [],
             sample_objects: bool = False,