Example No. 1
    def set_transforms(self, transforms):
        if 'local' in transforms:
            m = hou.Matrix4(transforms['local'])
            self.node.setParmTransform(m)
            parm = self.node.parmTransform()
            pre = parm.inverted() * m
            self.node.setPreTransform(pre)
        elif 'world' in transforms:
            m = hou.Matrix4(transforms['world'])
            self.node.setWorldTransform(m)
        else:
            raise ValueError('no acceptable matrix in transforms')
Example No. 2
def wrangle_preworld_medium(obj, wrangler, now):
    """Output a NamedMedium from the input oppath"""

    # Due to PBRT's scene description we can't use the standard wrangle_medium
    # when declaring a medium and its transform/colorspace attached to a
    # camera. This is because we don't have AttributeBegin/End blocks to pop
    # the stack, so when we declare the transform for the medium we need to do
    # so with respect to the camera's coordinate system. We'll extract the
    # translates from the camera to establish a pivot.
    medium = obj.wrangleString(wrangler, "pbrt_exterior", now, [None])[0]

    if not medium:
        return None
    if medium in scene_state.medium_nodes:
        return None
    scene_state.medium_nodes.add(medium)

    medium_vop = BaseNode.from_node(medium)
    if medium_vop is None:
        return None
    if medium_vop.directive != "medium":
        return None

    coord_sys = medium_vop.coord_sys
    if coord_sys:
        cam_xform = hou.Matrix4(
            get_transform(obj, now, invert=False, flipz=False))
        cam_pivot = cam_xform.extractTranslates()
        cam_pivot = hou.hmath.buildTranslate(cam_pivot).inverted()
        xform = hou.Matrix4(coord_sys)
        xform *= cam_pivot

        api.Transform(xform.asTuple())

    colorspace = medium_vop.colorspace
    if colorspace:
        api.ColorSpace(colorspace)

    api.MakeNamedMedium(medium_vop.path, medium_vop.directive_type,
                        medium_vop.paramset)
    api.Identity()
    # Restore Colorspace if one was set on the Medium
    if colorspace:
        scene_cs = []
        if obj.evalString("pbrt_colorspace", now, scene_cs):
            scene_cs = scene_cs[0]
        else:
            scene_cs = "srgb"
        if scene_cs != colorspace:
            api.ColorSpace(scene_cs)

    return medium_vop.path
Example No. 3
def computeDirectionRotates(zDirection):

    zDirection = zDirection.normalized()

    if 1 - abs(AXIS_Y.dot(zDirection)) < EPSILON:
        quat = hou.Quaternion()
        quat.setToVectors(AXIS_Z, zDirection)

        return quat.extractEulerRotates()

    else:

        if zDirection.length() < EPSILON:
            zDirection = AXIS_Z

        z = zDirection

        y = hou.Vector3(0, 1, 0)
        x = y.cross(z)
        y = z.cross(x)

        mat = hou.Matrix4((x[0], x[1], x[2], 0, y[0], y[1], y[2], 0, z[0],
                           z[1], z[2], 0, 0, 0, 0, 1))

        return mat.extractRotates()
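A minimal usage sketch for the function above. The constants AXIS_Y, AXIS_Z and EPSILON are not shown in the snippet, so the definitions below are assumptions:

import hou

AXIS_Y = hou.Vector3(0, 1, 0)   # assumed module-level constants used above
AXIS_Z = hou.Vector3(0, 0, 1)
EPSILON = 1e-6

direction = hou.Vector3(1, 0, 1)
rotates = computeDirectionRotates(direction)

# Rebuilding a rotation matrix from the returned Euler angles (degrees, xyz
# order) should map +Z onto the normalized direction.
rot = hou.hmath.buildRotate(rotates)
print(hou.Vector3(0, 0, 1) * rot)
print(direction.normalized())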
Example No. 4
    def set_world_space_rotation_and_translation_at_time(
            node_obj, time, rotation, translation):
        q = hou.Quaternion()
        q[0] = rotation[0]
        q[1] = rotation[1]
        q[2] = rotation[2]
        q[3] = rotation[3]

        m3 = q.extractRotationMatrix3()
        m4 = hou.Matrix4(1)  # identity, so the homogeneous (3, 3) entry stays 1

        for r in range(3):
            for c in range(3):
                m4.setAt(r, c, m3.at(r, c))

        m4.setAt(3, 0, translation[0])
        m4.setAt(3, 1, translation[1])
        m4.setAt(3, 2, translation[2])

        node_obj.setWorldTransform(m4)

        parms = ["tx", "ty", "tz", "rx", "ry", "rz"]
        for p in parms:
            parm = node_obj.parm(p)
            v = parm.eval()

            k = hou.Keyframe()
            k.setFrame(time)
            k.setValue(v)
            parm.setKeyframe(k)
Example No. 5
def computeDeltaXform(orig_xform, xform):

    if orig_xform.determinant() < EPSILON:

        scale = xform.extractScales()
        scale = [s if abs(s) >= EPSILON else 1 for s in scale]

        elems = xform.asTupleOfTuples()
        orig_elems = orig_xform.asTupleOfTuples()

        axes = [hou.Vector3(row[:3]) for row in orig_elems[:3]]

        is_axis_zero = [axis.length() < EPSILON for axis in axes]
        num_zero_axis = sum(is_axis_zero)

        if num_zero_axis == 1:

            zero_dim = is_axis_zero.index(True)

            alt_axis0 = axes[(zero_dim + 1) % 3]
            alt_axis1 = axes[(zero_dim + 2) % 3]

            axes[zero_dim] = alt_axis0.cross(
                alt_axis1).normalized() * scale[zero_dim]

            orig_xform = hou.Matrix4(
                (axes[0][0], axes[0][1], axes[0][2], 0, axes[1][0], axes[1][1],
                 axes[1][2], 0, axes[2][0], axes[2][1], axes[2][2], 0,
                 orig_elems[3][0], orig_elems[3][1], orig_elems[3][2], 1))

        elif num_zero_axis == 2:

            valid_dim = is_axis_zero.index(False)

            orig_axis = axes[valid_dim]
            target_axis = hou.Vector3(elems[valid_dim][:3])

            rotscale_mat = orig_axis.matrixToRotateTo(target_axis)
            rotscale_mat *= target_axis.length() / orig_axis.length()
            rotscale_mat.setAt(3, 3, 1)

            orig_translate = hou.Vector3(orig_elems[3][:3])
            target_translate = hou.Vector3(elems[3][:3])
            translate = target_translate - orig_translate

            orig_translate_mat = hou.hmath.buildTranslate(orig_translate)
            translate_mat = hou.hmath.buildTranslate(translate)

            return orig_translate_mat.inverted(
            ) * rotscale_mat * orig_translate_mat * translate_mat

        else:
            # Take account of only translate
            orig_translate = hou.Vector3(orig_elems[3][:3])
            target_translate = hou.Vector3(elems[3][:3])
            delta_translate = target_translate - orig_translate

            return hou.hmath.buildTranslate(delta_translate)

    return orig_xform.inverted() * xform
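In the non-degenerate case the function reduces to orig_xform.inverted() * xform, i.e. the delta matrix that carries the original transform onto the target under Houdini's row-vector convention. A quick standalone check of that property (EPSILON is assumed to be a small module-level constant, e.g. 1e-6):

import hou

EPSILON = 1e-6  # assumed constant used by computeDeltaXform above

orig = hou.hmath.buildRotate(0, 45, 0) * hou.hmath.buildTranslate(1, 2, 3)
target = hou.hmath.buildScale(2, 2, 2) * hou.hmath.buildTranslate(5, 0, -2)

delta = computeDeltaXform(orig, target)

# orig * delta should reproduce the target transform (up to float precision).
diff = max(abs(a - b) for a, b in zip((orig * delta).asTuple(), target.asTuple()))
print(diff)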
Example No. 6
    def control(self, node):
        parent = node.parent()
        geo = node.geometry()

        abc_file = parent.parm('abcFile').evalAsString()
        cam_path = parent.parm('cameraPath').evalAsString()

        if abc_file and abc_file != '' and cam_path != '-1' and cam_path != '':
            frame = parent.parm('samplingFrame').evalAsFloat() / hou.fps()

            #Set Transforms
            matrix = hou.Matrix4(
                abc.getWorldXform(abc_file, cam_path, frame)[0])
            trans = matrix.extractTranslates() * parent.evalParm('scaler')
            rotate = matrix.extractRotates()

            geo.setGlobalAttribValue('t', trans)
            geo.setGlobalAttribValue('r', rotate)

            #Set Camera Parameters
            cameraDict = abc.alembicGetCameraDict(abc_file, cam_path, frame)

            #Other Attributes
            geo.setGlobalAttribValue('aspect', cameraDict.get('aspect'))

            geo.setGlobalAttribValue('focal', cameraDict.get('focal'))
            geo.setGlobalAttribValue('aperture', cameraDict.get('aperture'))

            geo.setGlobalAttribValue('shutter', cameraDict.get('shutter'))
            geo.setGlobalAttribValue('focus', cameraDict.get('focus'))
            geo.setGlobalAttribValue('fstop', cameraDict.get('fstop'))
Example No. 7
def fast_global_registration():
    """
    Execute fast global registration

    Based on http://www.open3d.org/docs/tutorial/Advanced/fast_global_registration.html
    """
    node = hou.pwd()
    node_geo = node.geometry()
    node_geo_target = node.inputs()[1].geometry()
    voxel_size = node.parm("voxel_size").eval()
    transform = node.parm("transform").eval()

    has_fpfh_source = bool(node_geo.findPointAttrib("fpfh"))
    has_fpfh_target = bool(node_geo_target.findPointAttrib("fpfh"))

    if not has_fpfh_source or not has_fpfh_target:
        raise hou.NodeError("One of the inputs does not have 'fpfh' attribute.")

    # to numpy
    np_pos_str_source = node_geo.pointFloatAttribValuesAsString("P", float_type=hou.numericData.Float32)
    np_pos_source = np.fromstring(np_pos_str_source, dtype=np.float32).reshape(-1, 3)
    np_fpfh_str_source = node_geo.pointFloatAttribValuesAsString("fpfh", float_type=hou.numericData.Float32)
    np_fpfh_size = node_geo.findPointAttrib("fpfh").size()
    np_fpfh_source = np.fromstring(np_fpfh_str_source, dtype=np.float32).reshape(-1, np_fpfh_size)
    np_fpfh_source = np.swapaxes(np_fpfh_source, 1, 0)

    np_pos_str_target = node_geo_target.pointFloatAttribValuesAsString("P", float_type=hou.numericData.Float32)
    np_pos_target = np.fromstring(np_pos_str_target, dtype=np.float32).reshape(-1, 3)
    np_fpfh_str_target = node_geo_target.pointFloatAttribValuesAsString("fpfh", float_type=hou.numericData.Float32)
    np_fpfh_target = np.fromstring(np_fpfh_str_target, dtype=np.float32).reshape(-1, np_fpfh_size)
    np_fpfh_target = np.swapaxes(np_fpfh_target, 1, 0)

    # to open3d
    source = open3d.PointCloud()
    source.points = open3d.Vector3dVector(np_pos_source.astype(np.float64))

    source_fpfh = open3d.registration.Feature()
    source_fpfh.resize(np_fpfh_source.shape[0], np_fpfh_source.shape[1])
    source_fpfh.data = np_fpfh_source.astype(np.float64)

    target = open3d.PointCloud()
    target.points = open3d.Vector3dVector(np_pos_target.astype(np.float64))

    target_fpfh = open3d.registration.Feature()
    target_fpfh.resize(np_fpfh_target.shape[0], np_fpfh_target.shape[1])
    target_fpfh.data = np_fpfh_target.astype(np.float64)

    # registration
    registration = open3d.registration_fast_based_on_feature_matching(source, target, source_fpfh, target_fpfh, open3d.FastGlobalRegistrationOption(maximum_correspondence_distance = voxel_size * 0.5))

    registration_xform = hou.Matrix4(registration.transformation)
    registration_xform = registration_xform.transposed()

    # to houdini
    if transform:
        node_geo.transform(registration_xform)
    
    node_geo.addAttrib(hou.attribType.Global, "xform", default_value=(0.0,)*16, create_local_variable=False)
    node_geo.setGlobalAttribValue("xform", registration_xform.asTuple())
Example No. 8
def prim_transform(prim):
    """Return a tuple representing the Matrix4 of the transform intrinsic"""
    rot_mat = hou.Matrix3(prim.intrinsicValue("transform"))
    vtx = prim.vertex(0)
    pt = vtx.point()
    pos = pt.position()
    xlate = hou.hmath.buildTranslate(pos)
    return (hou.Matrix4(rot_mat) * xlate).asTuple()
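The returned 16-tuple combines the prim's intrinsic 3x3 transform with the position of its first vertex, so it can be rebuilt into a hou.Matrix4 when needed. A hedged sketch (geo and target_geo are hypothetical hou.Geometry objects, with geo containing at least one packed prim):

prim = geo.prims()[0]
full_xform = hou.Matrix4(prim_transform(prim))

# Bake the packed prim's full transform onto another (writable) geometry.
target_geo.transform(full_xform)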
Example No. 9
    def __init__(self, scene_viewer):
        self.geo = hou.Geometry()
        self.knob_geo = hou.Geometry()
        self.knob_pt = self.knob_geo.createPoint()
        self.scene_viewer = scene_viewer
        self.sphere_verb = hou.sopNodeTypeCategory().nodeVerb("sphere")
        self.sphere_verb.setParms({'type': 2, 'rows': 32, 'cols': 32})

        self.sphere_verb.execute(self.geo, [])

        self.circle_verb = hou.sopNodeTypeCategory().nodeVerb("circle")
        self.circle_verb.setParms({'type': 1, 'divs': 32})

        self.circle_geo = hou.Geometry()
        self.circle_verb.execute(self.circle_geo, [])

        self.drawable = hou.GeometryDrawable(
            self.scene_viewer,
            hou.drawableGeometryType.Face,
            "rollerball",
            params={
                'style': hou.drawableGeometryFaceStyle.Plain,
                'backface_culling': 1,
                'color1': hou.Vector4(0.2, 0.2, 0.2, 0.5),
                'fade_factor': 0.2
            })

        self.knob_drawable = hou.GeometryDrawable(
            self.scene_viewer,
            hou.drawableGeometryType.Point,
            "rollerball_knob",
            params={
                'style': hou.drawableGeometryPointStyle.SmoothCircle,
                'color1': hou.Vector4(1.0, 1.0, 1.0, 1.0),
                'radius': 6,
                'fade_factor': 0.5
            })
        self.knob_drawable.setGeometry(self.knob_geo)
        self.drawable.setGeometry(self.geo)

        self.is_behind = False
        self.is_ball = True

        self.circle_N = hou.Vector3(0, 1, 0)

        self.start_transform = hou.Matrix4(1)
        self.start_vec = hou.Vector3()
        self.start_mouse_x = 0.0
        self.start_mouse_y = 0.0
        self.start_ray_dir = hou.Vector3(0, 0, 0)
        self.start_ray_origin = hou.Vector3(0, 0, 0)

        # vector for twist
        self.primary_axis = hou.Vector3(0, 0, 1)
        # vector for bend
        self.secondary_axis = hou.Vector3(1, 0, 0)

        self.axis = hou.Vector3(0, 0, 1)
Example No. 10
def get_motionblur_xforms(obj, now, mblur_parms):
    times = xform_mbsamples(obj, now, **mblur_parms)
    xforms = []
    for t in times:
        xform = []
        obj.evalFloat("space:world", t, xform)
        xform = list(hou.Matrix4(xform).transposed().asTuple())
        xforms += [xform]
    return xforms, times
Example No. 11
    def build_geo(self):
        # Pre build bgeos
        for archive in self.archives:
            bgeo = self.element.obj2bgeo(archive)

        archive_data = self.load_json_file()
        all_geo = hou.Geometry()
        name_atr = all_geo.findPrimAttrib('name')
        if not name_atr:
            name_atr = all_geo.addAttrib(hou.attribType.Prim,
                                         'name',
                                         '',
                                         create_local_variable=False)
        for obj in archive_data:
            logging.info('Creating %i Prims for %s', len(archive_data[obj]),
                         obj)
            # Load a packed prim
            bgeo = self.element.obj2bgeo(obj)

            # Copy Packed Prims
            if self.copy_packed_prims:
                bgeo_gdp = hou.Geometry()
                diskprim = bgeo_gdp.createPacked('PackedDisk')
                diskprim.setIntrinsicValue('unexpandedfilename', bgeo)
                # Now we are going to pack it so we can copy it
                packed_geo = pack_geo(bgeo_gdp)
                for name, xform in archive_data[obj].iteritems():
                    logging.debug('built %s', name)
                    for prim in packed_geo.prims():
                        prim.setAttribValue('name', name)
                        prim.setTransform(hou.Matrix4(xform))
                    all_geo.merge(packed_geo)
                packed_geo.clear()
                # We can't clear this as its still being referenced
                # by the all_geo
                # bgeo_gdp.clear()
            else:
                for name, xform in archive_data[obj].iteritems():
                    logging.debug('built %s', name)
                    diskprim = all_geo.createPacked('PackedDisk')
                    diskprim.setIntrinsicValue('unexpandedfilename', bgeo)
                    diskprim.setAttribValue(name_atr, name)
                    diskprim.setTransform(hou.Matrix4(xform))
        return all_geo
Example No. 12
def heightfield_prim_wrangler(prims,
                              paramset=None,
                              properties=None,
                              override_node=None):
    """Outputs a "heightfield" Shapes for the input geometry

    Args:
        prims (list of hou.Prims): Input prims
        paramset (ParamSet): Any base params to add to the shape. (Optional)
        properties (dict): Dictionary of SohoParms (Optional)
    Returns: None
    """

    for prim in prims:
        resolution = prim.resolution()
        # If the z resolution is not 1 then this really isn't a heightfield
        if resolution[2] != 1:
            continue

        hf_paramset = ParamSet()

        # Similar to trianglemesh, this is more compact and fast; however, it
        # might be a memory issue, and a generator per voxel or per slice
        # might be a better approach.
        voxeldata = array.array("f")
        voxeldata.fromstring(prim.allVoxelsAsString())

        with api.TransformBlock():

            xform = prim_transform(prim)
            xform = hou.Matrix4(xform)
            srt = xform.explode()
            # Here we need to split up the xform mainly so we can manipulate
            # the scale. In particular Houdini's prim xforms maintain a
            # rotation matrix but the z scale is ignored. So here we are
            # setting it directly to 1 as the "Pz" (or voxeldata) will always
            # be the exact height, no scales are applied to the prim xform.
            # We also need to scale up heightfield since in Houdini by default
            # the size is -1,-1,-1 to 1,1,1 where in pbrt its 0,0,0 to 1,1,1
            api.Translate(*srt["translate"])
            rot = srt["rotate"]
            if rot.z():
                api.Rotate(rot[2], 0, 0, 1)
            if rot.y():
                api.Rotate(rot[1], 0, 1, 0)
            if rot.x():
                api.Rotate(rot[0], 1, 0, 0)
            api.Scale(srt["scale"][0] * 2.0, srt["scale"][1] * 2.0, 1.0)
            api.Translate(-0.5, -0.5, 0)
            hf_paramset.add(PBRTParam("integer", "nu", resolution[0]))
            hf_paramset.add(PBRTParam("integer", "nv", resolution[1]))
            hf_paramset.add(PBRTParam("float", "Pz", voxeldata))
            hf_paramset |= paramset
            hf_paramset |= prim_override(prim, override_node)
            api.Shape("heightfield", hf_paramset)
    return
Example No. 13
def get_transform(obj, now, invert=False, flipz=False):
    xform = []
    if not obj.evalFloat('space:world', now, xform):
        return None
    xform = hou.Matrix4(xform)
    if invert:
        xform = xform.inverted()
    if flipz:
        xform = xform*hou.hmath.buildScale(1, 1, -1)
    return list(xform.asTuple())
Example No. 14
def clearWorldRotates(n):
    parent = n.inputs()[0]
    n.parm("keeppos").set(1)
    n.setInput(0, None)

    n.setParmTransform(n.parmTransform() * n.preTransform())
    n.parmTuple("r").set((0, 0, 0))
    n.setPreTransform(n.parmTransform())
    n.setParmTransform(hou.Matrix4(1))

    n.setInput(0, parent)
Example No. 15
def get_transform(obj, now, invert=False, flipx=False, flipy=False, flipz=False):
    xform = []
    if not obj.evalFloat("space:world", now, xform):
        return None
    xform = hou.Matrix4(xform)
    if invert:
        xform = xform.inverted()
    x = -1 if flipx else 1
    y = -1 if flipy else 1
    z = -1 if flipz else 1
    xform = xform * hou.hmath.buildScale(x, y, z)
    return list(xform.asTuple())
Example No. 16
def computeDeltaXform(origXform, xform):

    if origXform.determinant() < EPSILON:
        # if any scale equals zero, just calculate the translation
        t = origXform.extractTranslates()

        origXform = hou.Matrix4(xform.asTuple())
        origXform.setAt(3, 0, t[0])
        origXform.setAt(3, 1, t[1])
        origXform.setAt(3, 2, t[2])

    return origXform.inverted() * xform
Example No. 17
    def test_buildInstanceOrient(self):
        TARGET = hou.Matrix4(
            ((0.33212996389891691, 0.3465703971119134, -0.87725631768953083,
              0.0), (-0.53068592057761732, 0.83754512635379064,
                     0.1299638989169675, 0.0),
             (0.77978339350180514, 0.42238267148014441, 0.46209386281588438,
              0.0), (-1.0, 2.0, 4.0, 1.0)))

        mat = hou.hmath.buildInstance(hou.Vector3(-1, 2, 4),
                                      orient=hou.Quaternion(
                                          0.3, -1.7, -0.9, -2.7))

        self.assertEqual(mat, TARGET)
Example No. 18
    def test_buildInstance(self):
        TARGET = hou.Matrix4(
            ((1.0606601717798214, -1.0606601717798214, 0.0, 0.0),
             (0.61237243569579436, 0.61237243569579436, -1.2247448713915889,
              0.0), (0.86602540378443882, 0.86602540378443882,
                     0.86602540378443882, 0.0), (-1.0, 2.0, 4.0, 1.0)))

        mat = hou.hmath.buildInstance(hou.Vector3(-1, 2, 4),
                                      hou.Vector3(1, 1, 1),
                                      pscale=1.5,
                                      up=hou.Vector3(1, 1, -1))

        self.assertEqual(mat, TARGET)
Example No. 19
def cpSetNormal(normal_vector, construction_plane=None):
    if construction_plane is None:
        construction_plane = toolutils.sceneViewer().constructionPlane()
    # revert center offset
    t = hou.hmath.buildTranslate(hou.Vector3([-2, -2, 0]))
    construction_plane.setTransform(t.inverted() * construction_plane.transform())
    # process
    existing_rotation = hou.Matrix4(construction_plane.transform().extractRotationMatrix3())
    rotation = existing_rotation * cpNormal(construction_plane).matrixToRotateTo(normal_vector)
    translation = hou.hmath.buildTranslate(cpOrigin(construction_plane))
    construction_plane.setTransform(rotation * translation)
    # apply center offset
    construction_plane.setTransform(t * construction_plane.transform())
Example No. 20
def getReferencePlaneXform(scene_viewer):
    viewport = scene_viewer.curViewport()
    cplane = scene_viewer.constructionPlane()

    if cplane.isVisible():
        xform = cplane.transform()
    elif viewport.type() == hou.geometryViewportType.Perspective:
        xform = scene_viewer.referencePlane().transform()
    else:
        rot = viewport.viewTransform().extractRotationMatrix3()
        xform = hou.Matrix4(rot)

    return xform
Example No. 21
    def build_geo(self):
        element_json = './json/{0}/{0}.json'.format(self.json_data['element'])
        element = Element(element_json)
        element_data = self.load_json_file()
        all_geo = hou.Geometry()
        for variant, elements in element_data.iteritems():
            variant_geo = element.build_element_geo(variant)
            for variant_name, xform in elements.iteritems():
                logging.debug('Copied element %s', variant)
                for prim in variant_geo.prims():
                    prim.setAttribValue('name', variant_name)
                    prim.setTransform(hou.Matrix4(xform))
                all_geo.merge(variant_geo)
            variant_geo.clear()
        return all_geo
Example No. 22
def xform_to_api_srt(xform, scale=True, rotate=True, trans=True):
    xform = hou.Matrix4(xform)
    srt = xform.explode()
    if trans:
        api.Translate(*srt['translate'])
    if rotate:
        # NOTE, be wary of -180 to 180 flips
        rot = srt['rotate']
        if rot.z():
            api.Rotate(rot[2], 0, 0, 1)
        if rot.y():
            api.Rotate(rot[1], 0, 1, 0)
        if rot.x():
            api.Rotate(rot[0], 1, 0, 0)
    if scale:
        api.Scale(*srt['scale'])
    return
Example No. 23
def mts_objects(obj, now, mat):
    """Add objects to mts_proxy... dict:
    dict = {'/obj/geo1/facet1': 
    (string_matrix4x4, proxy_file_path), 
     ...}
    """
    soppath = []
    if not obj.evalString('object:soppath', now, soppath):
        return
    geo = SohoGeometry(soppath[0], now)
    if geo.Handle < 0:
        return       
    plist = obj.evaluate(objectParms, now)
    xform = plist['space:world'].Value
    
    if len(xform) != 16:
        return  
    xform = hou.Matrix4(xform)
    xform = mts_matrix(xform)
    mts_proxy_files[soppath[0]]  = (xform, saveGeometry(soppath[0], now, mts_proxy_path), mat)
Example No. 24
    def setObjectMatrix(self, object, matrice, hostmatrice=None):
        """
        set a matrix to an hostObject
    
        @type  object: hostObject
        @param object: the object who receive the transformation 
        @type  hostmatrice: hou.Matrix4 
        @param hostmatrice: transformation matrix in host format
        @type  matrice: list/Matrix
        @param matrice: transformation matrix in epmv/numpy format
        """

        if hostmatrice is not None:
            #set the instance matrice (hou.Matrix4)
            object.setWorldTransform(hostmatrice)
        if matrice is not None:
            #convert the matrice in host format
            hm = hou.Matrix4(matrice.tolist())
            #set the instance matrice
            object.setWorldTransform(hm)
Example No. 25
def set_transform(light):
    hmatrix = hou.Matrix4()
    hmatrix.setAt(0, 0, light['matrix'][0])
    hmatrix.setAt(0, 1, light['matrix'][1])
    hmatrix.setAt(0, 2, light['matrix'][2])
    hmatrix.setAt(0, 3, light['matrix'][3])
    hmatrix.setAt(1, 0, light['matrix'][4])
    hmatrix.setAt(1, 1, light['matrix'][5])
    hmatrix.setAt(1, 2, light['matrix'][6])
    hmatrix.setAt(1, 3, light['matrix'][7])
    hmatrix.setAt(2, 0, light['matrix'][8])
    hmatrix.setAt(2, 1, light['matrix'][9])
    hmatrix.setAt(2, 2, light['matrix'][10])
    hmatrix.setAt(2, 3, light['matrix'][11])
    hmatrix.setAt(3, 0, light['matrix'][12])
    hmatrix.setAt(3, 1, light['matrix'][13])
    hmatrix.setAt(3, 2, light['matrix'][14])
    hmatrix.setAt(3, 3, light['matrix'][15])

    return hmatrix
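The sixteen setAt() calls above fill the matrix element by element from a flat row-major list. Since the hou.Matrix4 constructor also accepts a 16-element sequence, an equivalent and more compact version (assuming light['matrix'] uses the same row-major layout) would be:

def set_transform(light):
    # hou.Matrix4 fills from a flat 16-element sequence row by row, matching
    # the explicit setAt() calls above.
    return hou.Matrix4(light['matrix'])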
Example No. 26
def get_look_at_matrix(selfPos, targetPos, upDirection):
    zaxis = hou.Vector3(targetPos - selfPos).normalized()
    xaxis = upDirection.cross(zaxis).normalized()
    yaxis = zaxis.cross(xaxis)
    resultMatrix = hou.Matrix4()
    resultMatrix.setAt(0, 0, xaxis.x())
    resultMatrix.setAt(0, 1, yaxis.x())
    resultMatrix.setAt(0, 2, zaxis.x())
    resultMatrix.setAt(0, 3, 0)
    resultMatrix.setAt(1, 0, xaxis.y())
    resultMatrix.setAt(1, 1, yaxis.y())
    resultMatrix.setAt(1, 2, zaxis.y())
    resultMatrix.setAt(1, 3, 0)
    resultMatrix.setAt(2, 0, xaxis.z())
    resultMatrix.setAt(2, 1, yaxis.z())
    resultMatrix.setAt(2, 2, zaxis.z())
    resultMatrix.setAt(2, 3, 0)
    resultMatrix.setAt(3, 0, 0)  # -xaxis.dot(selfPos)
    resultMatrix.setAt(3, 1, 0)  # -yaxis.dot(selfPos)
    resultMatrix.setAt(3, 2, 0)  # -zaxis.dot(selfPos)
    resultMatrix.setAt(3, 3, 1)
    return resultMatrix
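A small usage sketch for the helper above: build an aim matrix from an eye position, a target and an up vector, then read the orientation back as Euler angles.

eye = hou.Vector3(0, 2, 5)
target = hou.Vector3(0, 0, 0)
up = hou.Vector3(0, 1, 0)

look_at = get_look_at_matrix(eye, target, up)
# The translation entries are left at zero above, so the matrix is a pure
# orientation and can be converted straight to Euler angles (degrees).
print(look_at.extractRotates())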
Example No. 27
def qapply(q, v):
    return v * hou.Matrix4(q.extractRotationMatrix3())
Example No. 28
        max_tries = 10
        better_zoom_findable = True
        while ((best_visible_points != max_visible_points
                or better_zoom_findable) and tries < max_tries):
            visible_points_samples = []
            zoom_out_samples = []
            for sample in range(sample_size):
                if sample == 0:
                    zoom_out = best_zoom_out
                else:
                    zoom_out = random.uniform(0, zoom_range_max)
                visible_points = 0
                camera_normal = plane_normal * zoom_out
                new_translation = hou.Matrix4(
                    (1, 0, 0, boundary_center[0] + camera_normal[0], 0, 1, 0,
                     boundary_center[1] + camera_normal[1], 0, 0, 1,
                     boundary_center[2] + camera_normal[2], 0, 0, 0,
                     1)).transposed()
                hou.session.reset_camera(camera)
                camera.setWorldTransform(rotation_x * rotation_y *
                                         new_translation)

                for point in points:
                    uv_coord = point.attribValue("uv")
                    if (uv_coord[0] >= 0 and uv_coord[0] <= 1
                            and uv_coord[1] >= 0 and uv_coord[1] <= 1
                            and not all(v == 0 for v in uv_coord)):
                        visible_points += 1
                visible_points_samples.append(visible_points)
                zoom_out_samples.append(zoom_out)
Example No. 29
def create_cam():

    seq = hou.ui.selectFile(title="Select first frame of EXR Sequence",
                            collapse_sequences=True,
                            pattern="*.exr",
                            width=800,
                            height=600)

    if seq == "":
        print "Cancelled..."
        sys.exit()
    seq = hou.expandString(seq)

    fps = hou.ui.readInput("Set FPS for Image Sequence",
                           buttons=(
                               'OK',
                               'Cancel',
                           ),
                           default_choice=0,
                           close_choice=1,
                           title=None,
                           initial_contents="60")

    if fps[0] == 1:
        print "Cancelled..."
        sys.exit()
    else:
        fps = int(fps[1])

    print "\nStart Camera creation from EXR Sequence..."
    print "First Frame:"
    print seq
    print "\n"

    #create copnet
    img = hou.node("/img").createNode("img", "img")
    cop = img.createNode("file", "read_seq")
    cop.parm("nodename").set(0)

    fileparm = cop.parm("filename1")
    fileparm.set("")

    split = os.path.split(seq)
    dir = split[0]
    start = split[1].split("_")[0]
    temp = split[1].split("_")[-1]
    mid = temp.split(".")[0]
    end = temp.split(".")[-1]

    pad = len(mid)
    start_frame = int(mid)
    num_frames = len(os.listdir(dir))
    end_frame = num_frames - 1

    print "Start Frame: {}".format(start_frame)
    print "End Frame: {}".format(end_frame)
    print "Number of Frames: {}".format(num_frames)
    print "FPS: {}\n".format(fps)

    #houdini scene setup
    hou.hscript("fps {}".format(fps))
    hou.hscript("tset {} {}".format(
        float(-1) / float(fps),
        float(end_frame) / float(fps)))
    hou.hscript("frange {} {}".format(start_frame, end_frame))
    hou.hscript("fcur 0")

    #static metadata
    fileparm.set(seq)

    meta = cop.getMetaDataString("attributes")
    meta = eval(meta)
    width = cop.xRes()
    height = cop.yRes()
    aspect = float(height) / float(width)

    perspective_type = meta[
        "perspective_type"]  #"persp_three_point", "persp_equirectangular",
    stereo_enabled = meta["stereo_enabled"]  #"yes", "no"
    stereo_infinite_correction = int(
        meta["stereo_infinite_correction"])  #[0, 1]

    print "Perspective Type: {}".format(perspective_type)
    print "Stereo Enabled: {}".format(stereo_enabled)
    print "Stereo Infinite Correction: {}".format(stereo_infinite_correction)
    print "Width: {}".format(width)
    print "Height: {}".format(height)
    print "Aspect: {}".format(aspect)

    #create camera
    cam = ""

    vr_cam_typename_to_create = "vrcam"
    cam_aperture_base = 36
    cam_vr_focal = 18

    def create_vr_cam():
        cam = hou.node("/obj").createNode(vr_cam_typename_to_create)
        cam.parm("vrmergemode").set(2)
        cam.parm("vrmergeangle").set(90)
        cam.parm("vreyetoneckdistance").set(0)
        vr_layout_parm = cam.parm("vrlayout")
        if aspect == 0.5:
            vr_layout_parm.set(2)
        if aspect == 1:
            vr_layout_parm.set(1)
        if aspect == 0.25:
            vr_layout_parm.set(0)

        return cam

    if stereo_enabled == "yes":
        if perspective_type == "persp_equirectangular":
            cam = create_vr_cam()
        elif perspective_type == "persp_three_point":
            cam = hou.node("/obj").createNode("stereocamrig")
        else:
            raise Exception(
                "Perspective Type '{}' not supported by Houdini.".format(
                    perspective_type))

    if stereo_enabled == "no":
        if perspective_type == "persp_equirectangular":
            cam = create_vr_cam()
        elif perspective_type == "persp_three_point":
            cam = hou.node("/obj").createNode("cam")
        else:
            raise Exception(
                "Perspective Type '{}' not supported by Houdini.".format(
                    perspective_type))

    #set res
    cam.parm("resx").set(width)
    cam.parm("resy").set(height)

    # start loop
    print "\nStart iterating over frames...\n"

    keys_tx = []
    keys_ty = []
    keys_tz = []

    keys_rx = []
    keys_ry = []
    keys_rz = []

    keys_targetx = []
    keys_targety = []
    keys_targetz = []

    keys_topx = []
    keys_topy = []
    keys_topz = []

    keys_focal = []
    keys_stereo = []

    for i in range(num_frames):
        frame = str(i).zfill(pad)
        file = "{}/{}_{}.{}".format(dir, start, frame, end)
        #print "Current File: {}".format(file)
        fileparm.set(file)

        meta = cop.getMetaDataString("attributes")
        meta = eval(meta)

        #get values from metadata

        #camera position
        translate_x = float(meta["camera.x"])
        translate_y = float(meta["camera.y"])
        translate_z = float(meta["camera.z"])
        translate = hou.Vector3(translate_x, translate_y, translate_z)

        key_tx = hou.Keyframe(translate_x, hou.frameToTime(i))
        keys_tx.append(key_tx)
        key_ty = hou.Keyframe(translate_y, hou.frameToTime(i))
        keys_ty.append(key_ty)
        key_tz = hou.Keyframe(translate_z, hou.frameToTime(i))
        keys_tz.append(key_tz)

        #camera rotation / not correctly exported from mandelbulber
        #thus cam xform matrix calculated from cam vectors
        '''
		#correct mandelbulber meta output
		rotate_x = 90 - float(meta["camera_rotation.y"])
		rotate_y = float(meta["camera_rotation.z"])
		rotate_z = float(meta["camera_rotation.x"])
		'''

        #calculate rotations from cam vectors

        #camera target
        target_x = float(meta["target.x"])
        target_y = float(meta["target.y"])
        target_z = float(meta["target.z"])
        target = hou.Vector3(target_x, target_y, target_z)

        #camera top (up)
        top_x = float(meta["top.x"])
        top_y = float(meta["top.y"])
        top_z = float(meta["top.z"])
        top = hou.Vector3(top_x, top_y, top_z)

        # calculate vectors
        forward = (translate - target).normalized()
        right = top.normalized().cross(forward)
        up = forward.cross(right)

        #build matrix
        right_c = hou.Vector4(right.x(), right.y(), right.z(), 0)
        up_c = hou.Vector4(up.x(), up.y(), up.z(), 0)
        forward_c = hou.Vector4(forward.x(), forward.y(), forward.z(), 0)
        translate_c = hou.Vector4(translate.x(), translate.y(), translate.z(),
                                  1)
        m = hou.Matrix4((right_c, up_c, forward_c, translate_c))

        #extract rotations
        pivot_v = hou.Vector3(0, 0, 0)
        pivot_rotate_v = hou.Vector3(0, 0, 0)
        rotate = m.extractRotates(transform_order='srt',
                                  rotate_order='xyz',
                                  pivot=pivot_v,
                                  pivot_rotate=pivot_rotate_v)
        rotate_x = rotate.x()
        rotate_y = rotate.y()
        rotate_z = rotate.z()

        key_rx = hou.Keyframe(rotate_x, hou.frameToTime(i))
        keys_rx.append(key_rx)
        key_ry = hou.Keyframe(rotate_y, hou.frameToTime(i))
        keys_ry.append(key_ry)
        key_rz = hou.Keyframe(rotate_z, hou.frameToTime(i))
        keys_rz.append(key_rz)

        fov = float(meta["fov"])
        #calculate focal length based on fov and "cam_aperture_base"
        focal = cam_aperture_base / (2 * math.tan(math.radians(fov) / 2))
        key_focal = hou.Keyframe(focal, hou.frameToTime(i))
        keys_focal.append(key_focal)

        stereo_eye_distance = 2 * float(meta["stereo_eye_distance"])
        key_stereo = hou.Keyframe(stereo_eye_distance, hou.frameToTime(i))
        keys_stereo.append(key_stereo)

        #print "\nFrame: {}".format(frame)
        #print "Translate: ({}, {}, {})".format(translate_x, translate_y, translate_z)
        #print "Rotate: ({}, {}, {})".format(rotate_x, rotate_y, rotate_z)
        #print "Stereo Distance: {}".format(stereo_eye_distance)

    #set keyframes
    parm_tx = cam.parm("tx")
    parm_tx.setKeyframes(keys_tx)
    parm_ty = cam.parm("ty")
    parm_ty.setKeyframes(keys_ty)
    parm_tz = cam.parm("tz")
    parm_tz.setKeyframes(keys_tz)

    parm_rx = cam.parm("rx")
    parm_rx.setKeyframes(keys_rx)
    parm_ry = cam.parm("ry")
    parm_ry.setKeyframes(keys_ry)
    parm_rz = cam.parm("rz")
    parm_rz.setKeyframes(keys_rz)

    parm_aperture = cam.parm("aperture")
    parm_aperture.set(cam_aperture_base)
    parm_focal = cam.parm("focal")

    if perspective_type == "persp_equirectangular":
        parm_focal.set(cam_vr_focal)
        parm_stereo = cam.parm("vreyeseparation")
        if stereo_enabled == "yes":
            parm_stereo.setKeyframes(keys_stereo)
        else:
            parm_stereo.set(0)
    else:
        parm_focal.setKeyframes(keys_focal)
        if stereo_enabled == "yes":
            parm_stereo = cam.parm("interaxial")
            parm_stereo.setKeyframes(keys_stereo)

    #delete img node
    img.destroy()

    #select camera
    cam.setSelected(1, 1)
    print "\nCamera successfully created from Mandelbulber EXR Image Sequence.\n\n"
Example No. 30
        camera = hou.node('/obj/oz_camera_' + str(i))
    else:
        camera = hou.node('/obj').createNode('cam', 'oz_camera_' + str(i))
    if (not fit_fail):
        plane_normal = np.array(
            [a, b, -1]) / math.sqrt(math.pow(a, 2) + math.pow(b, 2) + 1)
        plane_dist = c / math.sqrt(math.pow(a, 2) + math.pow(
            b, 2) + 1)  # TODO: p > 0 or p < 0 half-space origin test
    else:
        plane_normal = np.array([0.001, 1, 0.001]) * (1 if (a >= 0) else -1)
        plane_dist = 0.001
    if boundary_normal.angleTo(hou.Vector3(plane_normal)) > 90:
        plane_normal = -1 * plane_normal

    translation = hou.Matrix4(
        (1, 0, 0, boundary_center[0], 0, 1, 0, boundary_center[1], 0, 0, 1,
         boundary_center[2], 0, 0, 0, 1)).transposed()
    v = math.sqrt(math.pow(plane_normal[0], 2) + math.pow(plane_normal[2], 2))
    rotation_y = hou.Matrix4(
        (plane_normal[2] / v, 0, -1 * plane_normal[0] / v, 0, 0, 1, 0, 0,
         plane_normal[0] / v, 0, plane_normal[2] / v, 0, 0, 0, 0, 1))
    d = math.sqrt(
        math.pow(plane_normal[0], 2) + math.pow(plane_normal[1], 2) +
        math.pow(plane_normal[2], 2))
    rotation_x = hou.Matrix4((1, 0, 0, 0, 0, v / d, -1 * plane_normal[1] / d,
                              0, 0, plane_normal[1] / d, v / d, 0, 0, 0, 0, 1))
    camera.setWorldTransform(rotation_x * rotation_y * translation)
    resolution = (1280, 720)
    camera.parm('resx').set(resolution[0])
    camera.parm('resy').set(resolution[1])
    resolutions_x.append(resolution[0])