コード例 #1
0
def getUVCoord(mesh, face, point, image):
    """ returns UV coordinate of target point in source mesh image texture
    mesh  -- mesh data from source object
    face  -- face object from mesh
    point -- coordinate of target point on source mesh
    image -- image texture for source mesh
    """
    # bail out early when the mesh carries no active UV layer
    active_layer = mesh.uv_layers.active
    if active_layer is None:
        return None
    uv_data = active_layer.data
    # 3D positions of the face corners
    corner_cos = [mesh.vertices[idx].co for idx in face.vertices]
    # UV coordinates of the same corners (indexed via the face loops)
    corner_uvs = [uv_data[li].uv for li in face.loop_indices]
    # barycentric weights of the target point w.r.t. the face corners
    weights = poly_3d_calc(corner_cos, point)
    # weighted sum of the corner UVs gives the point's UV location
    uv_loc = Vector((0, 0))
    for corner_uv, weight in zip(corner_uvs, weights):
        uv_loc += corner_uv * weight
    # wrap the UV location back into [0, 1)
    # TODO: possibly approach this differently? currently, uv verts that are outside the image are wrapped to the other side
    uv_loc = Vector((uv_loc[0] % 1, uv_loc[1] % 1))
    # scale the normalized UV location to pixel indices in the image
    width, height = image.size
    pixel_x = round(uv_loc.x * (width - 1))
    pixel_y = round(uv_loc.y * (height - 1))

    # return resulting uv coordinate
    return Vector((pixel_x, pixel_y))
コード例 #2
0
def _smooth_normal(obj_data, loc, index):
    """Interpolate the vertex normals of polygon *index* at point *loc*.

    obj_data -- mesh data holding vertices, loops and polygons
    loc      -- 3D point on (or near) the polygon, in mesh space
    index    -- index of the polygon whose normals are interpolated

    Returns the barycentric-weighted sum of the polygon's vertex normals
    (not re-normalized by the caller's responsibility).
    """
    vert_indices = [obj_data.loops[i].vertex_index for i in obj_data.polygons[index].loop_indices]
    vert_coords = [obj_data.vertices[i].co for i in vert_indices]
    vert_normals = [obj_data.vertices[i].normal for i in vert_indices]
    # barycentric weights of loc w.r.t. the polygon's corners
    weights = poly_3d_calc(vert_coords, loc)
    # print('weights:', weights)
    # self.report({'INFO'}, str(vert_normals))
    # renamed accumulator (was `sum`) to avoid shadowing the builtin
    smoothed = Vector((0.0, 0.0, 0.0))
    for weight, normal in zip(weights, vert_normals):
        smoothed += weight * normal
    return smoothed
コード例 #3
0
def smooth_normal(obj, loc, index):
    """Interpolate vertex normals of *obj*'s polygon *index* at point *loc*.

    obj   -- Blender object; its evaluated mesh is used
    loc   -- 3D point on (or near) the polygon, in mesh space
    index -- index of the polygon whose normals are interpolated

    Returns the barycentric-weighted normal sum divided by the corner count.
    """
    # Blender 2.7x API: build a temporary evaluated mesh copy.
    # NOTE(review): the temporary mesh is never removed
    # (bpy.data.meshes.remove) -- possible datablock leak; confirm lifecycle.
    obj_data = obj.to_mesh(bpy.context.scene, True, 'PREVIEW')
    vert_indices = [obj_data.loops[i].vertex_index for i in obj_data.polygons[index].loop_indices]
    vert_coords = [obj_data.vertices[i].co for i in vert_indices]
    vert_normals = [obj_data.vertices[i].normal for i in vert_indices]
    # barycentric weights of loc w.r.t. the polygon's corners
    weights = poly_3d_calc(vert_coords, loc)
    # self.report({'INFO'}, str(vert_normals))
    # renamed accumulator (was `sum`) to avoid shadowing the builtin
    smoothed = Vector((0.0, 0.0, 0.0))
    for weight, normal in zip(weights, vert_normals):
        smoothed += weight * normal
    # NOTE(review): barycentric weights already sum to 1, so this division
    # scales the normal down by the corner count -- kept for compatibility.
    return (smoothed / len(weights))
コード例 #4
0
def _smooth_normal(obj, loc, index):
    """Interpolate vertex normals of *obj*'s polygon *index* at point *loc*.

    obj   -- Blender object; its evaluated mesh is used
    loc   -- 3D point on (or near) the polygon, in mesh space
    index -- index of the polygon whose normals are interpolated

    Returns the barycentric-weighted normal sum divided by the corner count.
    """
    # Blender 2.7x API: build a temporary evaluated mesh copy.
    # NOTE(review): the temporary mesh is never removed
    # (bpy.data.meshes.remove) -- possible datablock leak; confirm lifecycle.
    obj_data = obj.to_mesh(bpy.context.scene, True, 'PREVIEW')
    vert_indices = [
        obj_data.loops[i].vertex_index
        for i in obj_data.polygons[index].loop_indices
    ]
    vert_coords = [obj_data.vertices[i].co for i in vert_indices]
    vert_normals = [obj_data.vertices[i].normal for i in vert_indices]
    # barycentric weights of loc w.r.t. the polygon's corners
    weights = poly_3d_calc(vert_coords, loc)
    # self.report({'INFO'}, str(vert_normals))
    # renamed accumulator (was `sum`) to avoid shadowing the builtin
    smoothed = Vector((0.0, 0.0, 0.0))
    for weight, normal in zip(weights, vert_normals):
        smoothed += weight * normal
    # NOTE(review): barycentric weights already sum to 1, so this division
    # scales the normal down by the corner count -- kept for compatibility.
    return (smoothed / len(weights))
コード例 #5
0
def _smooth_normal(obj_data, loc, index):
    """Interpolate the vertex normals of polygon *index* at point *loc*.

    obj_data -- mesh data holding vertices, loops and polygons
    loc      -- 3D point on (or near) the polygon, in mesh space
    index    -- index of the polygon whose normals are interpolated

    Returns the barycentric-weighted sum of the polygon's vertex normals
    (not re-normalized).
    """
    vert_indices = [
        obj_data.loops[i].vertex_index
        for i in obj_data.polygons[index].loop_indices
    ]
    vert_coords = [obj_data.vertices[i].co for i in vert_indices]
    vert_normals = [obj_data.vertices[i].normal for i in vert_indices]
    # barycentric weights of loc w.r.t. the polygon's corners
    weights = poly_3d_calc(vert_coords, loc)
    # print('weights:', weights)
    # self.report({'INFO'}, str(vert_normals))
    # renamed accumulator (was `sum`) to avoid shadowing the builtin
    smoothed = Vector((0.0, 0.0, 0.0))
    for weight, normal in zip(weights, vert_normals):
        smoothed += weight * normal
    return smoothed
コード例 #6
0
ファイル: blob.py プロジェクト: BitByte01/myblendercontrib
def make_blobs(context, gridob, groundob, samples2D, display_radius):
    """Group 2D samples into blobs anchored at the grid object's vertices.

    context        -- Blender context
    gridob         -- grid object whose vertices seed one blob each
    groundob       -- ground object the blobs and samples are projected onto
    samples2D      -- iterable of 2D sample locations (XY)
    display_radius -- radius passed to the blob visualizer
    """
    blob_group_clear(context)
    blobs = []

    # world-to-object matrix of the ground; sample locations are moved into
    # the ground mesh's local space for the barycentric weight calculation
    imat = groundob.matrix_world.inverted()

    # KD-tree over the grid vertices, used for nearest-blob lookups
    blobtree = KDTree(len(gridob.data.vertices))
    for i, v in enumerate(gridob.data.vertices):
        co = gridob.matrix_world * v.co
        # note: only using 2D coordinates, otherwise weights get distorted by z offset
        blobtree.insert((co[0], co[1], 0.0), i)
    blobtree.balance()

    # one blob per grid vertex; None where projection onto the ground fails
    for v in gridob.data.vertices:
        co = gridob.matrix_world * v.co
        ok, loc, nor, poly_index = project_on_ground(groundob, co)
        blobs.append(Blob(loc, nor, poly_index) if ok else None)

    with progress.ProgressContext("Grouping Samples", 0, len(samples2D)):
        mpolys = groundob.data.polygons
        mverts = groundob.data.vertices
        for xy in samples2D:
            progress.progress_add(1)

            # note: use only 2D coordinates for weighting, z component should be 0
            # NOTE(review): `nor` here is left over from the last iteration of
            # the grid-vertex loop above -- looks unintentional; confirm what
            # assign_blob expects for this argument
            index = assign_blob(blobtree, (xy[0], xy[1], 0.0), nor)
            if index < 0:
                continue
            blob = blobs[index]
            if blob is None:
                continue

            # project samples onto the ground object
            ok, sloc, snor, spoly = project_on_ground(groundob,
                                                      xy[0:2] + (0, ))
            if not ok:
                continue

            # calculate barycentric vertex weights on the poly
            poly = mpolys[spoly]
            sverts = list(poly.vertices)
            # note: coordinate space has to be consistent, use sloc in object space
            sweights = poly_3d_calc(tuple(mverts[i].co for i in sverts),
                                    imat * sloc)

            blob.add_sample(sloc, snor, spoly, sverts, sweights)

    # persist the collected blob data as custom properties on the ground object
    blobs_to_customprops(groundob.meadow, blobs)

    make_blob_visualizer(context, groundob, blobs, display_radius, hide=True)
コード例 #7
0
ファイル: blob.py プロジェクト: BitByte01/myblendercontrib
def make_blobs(context, gridob, groundob, samples2D, display_radius):
    """Group 2D samples into blobs anchored at the grid object's vertices.

    context        -- Blender context
    gridob         -- grid object whose vertices seed one blob each
    groundob       -- ground object the blobs and samples are projected onto
    samples2D      -- iterable of 2D sample locations (XY)
    display_radius -- radius passed to the blob visualizer
    """
    blob_group_clear(context)
    blobs = []

    # world-to-object matrix of the ground; sample locations are moved into
    # the ground mesh's local space for the barycentric weight calculation
    imat = groundob.matrix_world.inverted()

    # KD-tree over the grid vertices, used for nearest-blob lookups
    blobtree = KDTree(len(gridob.data.vertices))
    for i, v in enumerate(gridob.data.vertices):
        co = gridob.matrix_world * v.co
        # note: only using 2D coordinates, otherwise weights get distorted by z offset
        blobtree.insert((co[0], co[1], 0.0), i)
    blobtree.balance()

    # one blob per grid vertex; None where projection onto the ground fails
    for v in gridob.data.vertices:
        co = gridob.matrix_world * v.co
        ok, loc, nor, poly_index = project_on_ground(groundob, co)
        blobs.append(Blob(loc, nor, poly_index) if ok else None)

    with progress.ProgressContext("Grouping Samples", 0, len(samples2D)):
        mpolys = groundob.data.polygons
        mverts = groundob.data.vertices
        for xy in samples2D:
            progress.progress_add(1)

            # note: use only 2D coordinates for weighting, z component should be 0
            # NOTE(review): `nor` here is left over from the last iteration of
            # the grid-vertex loop above -- looks unintentional; confirm what
            # assign_blob expects for this argument
            index = assign_blob(blobtree, (xy[0], xy[1], 0.0), nor)
            if index < 0:
                continue
            blob = blobs[index]
            if blob is None:
                continue

            # project samples onto the ground object
            ok, sloc, snor, spoly = project_on_ground(groundob, xy[0:2]+(0,))
            if not ok:
                continue

            # calculate barycentric vertex weights on the poly
            poly = mpolys[spoly]
            sverts = list(poly.vertices)
            # note: coordinate space has to be consistent, use sloc in object space
            sweights = poly_3d_calc(tuple(mverts[i].co for i in sverts), imat * sloc)

            blob.add_sample(sloc, snor, spoly, sverts, sweights)

    # persist the collected blob data as custom properties on the ground object
    blobs_to_customprops(groundob.meadow, blobs)

    make_blob_visualizer(context, groundob, blobs, display_radius, hide=True)
コード例 #8
0
def get_uv_coord(obj: Object,
                 face_idx: int,
                 point: Vector,
                 image: Image,
                 mapping_loc: Vector = None,
                 mapping_scale: Vector = None):
    """ returns UV coordinate of target point in source mesh image texture
    obj           -- source object containing mesh data
    face_idx      -- index of face from mesh
    point         -- coordinate of target point on source mesh
    image         -- image texture for source mesh
    mapping_loc   -- offset uv coord location (from mapping node);
                     defaults to Vector((0, 0))
    mapping_scale -- offset uv coord scale (from mapping node);
                     defaults to Vector((1, 1))
    """
    # None-sentinel defaults: Vector instances are mutable, so a Vector
    # default in the signature would be shared across all calls
    if mapping_loc is None:
        mapping_loc = Vector((0, 0))
    if mapping_scale is None:
        mapping_scale = Vector((1, 1))
    # get active uv layer data for the material at this face
    mat = get_mat_at_face_idx(obj, face_idx)
    uv = get_uv_layer_data(obj, mat)
    # get face from face index
    face = obj.data.polygons[face_idx]
    # get 3D coordinates of face's vertices
    lco = [obj.data.vertices[i].co for i in face.vertices]
    # get uv coordinates of face's vertices
    luv = [uv[i].uv for i in face.loop_indices]
    # calculate barycentric weights for point
    lwts = poly_3d_calc(lco, point)
    # multiply barycentric weights by uv coordinates
    uv_loc = sum((p * w for p, w in zip(luv, lwts)), Vector((0, 0)))
    # ensure uv_loc is in range(0,1); round first to absorb float noise
    uv_loc = Vector((round(uv_loc[0], 5) % 1, round(uv_loc[1], 5) % 1))
    # apply location and scale offset
    uv_loc = vec_div(uv_loc - mapping_loc, mapping_scale)
    # once again ensure uv_loc is in range(0,1)
    uv_loc = Vector((round(uv_loc[0], 5) % 1, round(uv_loc[1], 5) % 1))
    # convert uv_loc in range(0,1) to pixel coordinate
    image_size_x, image_size_y = image.size
    x_co = round(uv_loc.x * (image_size_x - 1))
    y_co = round(uv_loc.y * (image_size_y - 1))
    uv_coord = (x_co, y_co)

    # return resulting uv coordinate
    return Vector(uv_coord)
コード例 #9
0
    if res[1]!=None:
        return res
    else:
        unmatched_correspondences = unmatched_correspondences+1
        return None

unmatched_correspondences = 0
valid_idx = []
for v in garment_vertices:
    # bind garment vertex to closets point in source mesh
    res, loc, nor, idx = source.closest_point_on_mesh(v.co)
    
    # compute barycentric coordinates of the closest point
    face_vertex_index = source_polygons[idx].vertices
    source_face_vertices = [source_vertices[vx].co for vx in face_vertex_index]
    bary_weights = poly_3d_calc(source_face_vertices, loc)
    target_projection_point = Vector((0,0,0))

    # for each vertex in the entailing triangle, 
    # get correspondending point in target    
    check = False
    for i in range(3):
        #co = compute_correspondence(source_vertices[face_vertex_index[i]],source,target)
        
        co = get_corresponding_vertex(face_vertex_index[i])
        
        if co==None:
            #print('correspondence doesnt exist for segment')
            check = True
            break
        else:
コード例 #10
0
# NOTE(review): `input` shadows the Python builtin -- presumably a Blender
# object assigned earlier in the script; consider renaming when refactoring
input_vertices = input.data.vertices

garment_polygons = garment.data.polygons
default_polygons = default.data.polygons

# deformed positions for each garment vertex, built below
deformed_garment_vertices = []

# re-anchor every garment vertex from the "default" body onto the "input" body
for v in garment_vertices:
    # closest point on the default body to this garment vertex
    res, loc, nor, idx = default.closest_point_on_mesh(v.co)

    face_vertex_index = default_polygons[idx].vertices
    # interpolate projected point from default onto input
    default_face_vertices = [
        default_vertices[vx].co for vx in face_vertex_index
    ]
    # barycentric weights of the projected point within the default face
    bary_weights = poly_3d_calc(default_face_vertices, loc)
    input_projection_point = Vector((0, 0, 0))

    # assumes triangulated faces (exactly 3 corners) -- TODO confirm
    for i in range(3):  #vx in face_vertex_index:
        input_projection_point += bary_weights[i] * input_vertices[
            face_vertex_index[i]].co

    scale = 1.0
    # carry the garment's offset from the body surface (v.co - loc) over
    # to the corresponding point on the input body
    deformed_garment_vertices.append(scale * (input_projection_point +
                                              (v.co - loc)))
    """
    fac_vertices = [ default_vertices[v].co for v in default_face_verticesidx].vertices]
    bary_weights = poly_3d_calc(fac_vertices, loc)
    projection_input = Vector((0,0,0))
    i=0
    for vx in body.data.polygons[idx].vertices: