Code Example #1
File: lighting.py Project: mattloper/opendr
def lambertian_spotlight(v, vn, pos, dir, spot_exponent, camcoord=False, camera_t=None, camera_rt=None):
    """
    :param v: vertices
    :param vn: vertex normals
    :param pos: light position
    :param dir: light direction
    :param spot_exponent: spot exponent (a la opengl)
    :param camcoord: if True, then pos and dir are wrt the camera
    :param camera_t: 3-vector indicating translation of camera
    :param camera_rt: 3-vector axis-angle (Rodrigues) rotation of camera
    :return: Vx1 array of brightness
    """

    if camcoord: # Transform pos and dir from camera to world coordinate system
        assert(camera_t is not None and camera_rt is not None)
        from opendr.geometry import Rodrigues
        rot = Rodrigues(rt=camera_rt)
        pos = rot.T.dot(pos-camera_t)
        dir = rot.T.dot(dir)

    dir = dir / ch.sqrt(ch.sum(dir**2.))
    v_minus_light = v - pos.reshape((1,3))
    v_distances = ch.sqrt(ch.sum(v_minus_light**2, axis=1))
    v_minus_light_normed = v_minus_light / v_distances.reshape((-1,1))
    cosangle = v_minus_light_normed.dot(dir.reshape((3,1)))
    light_dot_normal = ch.sum(vn*v_minus_light_normed, axis=1)
    light_dot_normal.label = 'light_dot_normal'
    cosangle.label = 'cosangle'
    result = light_dot_normal.ravel() * cosangle.ravel()**spot_exponent
    result = result / v_distances ** 2.
    result = ch.maximum(result, 0.0)

    return result
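
A minimal usage sketch with hypothetical data (chumpy as ch; shapes shown in comments). Note the snippet dots vn with the normalized light-to-vertex direction, so sign conventions follow that choice:

import numpy as np
import chumpy as ch

v = ch.array(np.random.rand(50, 3))                       # 50 vertices
vn = v / ch.sqrt(ch.sum(v**2, axis=1)).reshape((-1, 1))   # unit "normals"
light_pos = ch.array([0., 0., 2.])
light_dir = ch.array([0., 0., -1.])
b = lambertian_spotlight(v, vn, light_pos, light_dir, spot_exponent=8.)
print(b.r.shape)                  # (50,): per-vertex brightness
print(b.dr_wrt(light_pos).shape)  # (50, 3): differentiable w.r.t. light position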
Code Example #2
import chumpy as ch

def chZonalHarmonics(a):
    # zonal spherical-harmonic coefficients (l = 0, 1, 2) of a spherical cap
    # with half-angle a
    zl0 = -ch.sqrt(ch.pi) * (-1.0 + ch.cos(a))
    zl1 = 0.5 * ch.sqrt(3.0 * ch.pi) * ch.sin(a)**2
    zl2 = -0.5 * ch.sqrt(
        5.0 * ch.pi) * ch.cos(a) * (-1.0 + ch.cos(a)) * (ch.cos(a) + 1.0)
    z = [zl0, zl1, zl2]
    return ch.concatenate(z)
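
Being chumpy expressions, the coefficients stay differentiable in the angle. A quick usage sketch:

import numpy as np
import chumpy as ch

a = ch.Ch([np.pi / 4])
z = chZonalHarmonics(a)
print(z.r)           # three zonal coefficients (l = 0, 1, 2)
print(z.dr_wrt(a))   # 3x1 Jacobian w.r.t. the angle, for free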
Code Example #4
    def chumpy_compute_error(p1,
                             p2,
                             H,
                             Mcor,
                             error=0,
                             return_sigma=False,
                             sigma_precomputed=None):
        """Compute deviation of p1 and p2 from common epipole, given H.
        """
        # Warp p2 estimates with new homography
        p2_new_denom = H[2, 0] * p2[:, 0] + H[2, 1] * p2[:, 1] + H[2, 2]
        p2_new_x = (H[0, 0] * p2[:, 0] + H[0, 1] * p2[:, 1] +
                    H[0, 2]) / p2_new_denom
        p2_new_y = (H[1, 0] * p2[:, 0] + H[1, 1] * p2[:, 1] +
                    H[1, 2]) / p2_new_denom
        p2_new = ch.vstack((p2_new_x, p2_new_y)).T

        # Compute current best FoE
        u = p2_new[:, 0] - p1[:, 0]
        v = p2_new[:, 1] - p1[:, 1]
        T = ch.vstack((-v, u, v * p1[:, 0] - u * p1[:, 1])).T
        U, S, V = ch.linalg.svd(Mcor.dot(T.T.dot(T)).dot(Mcor))
        qv = Mcor.dot(V[-1, :])
        q = qv[:2] / qv[2]

        d = T.dot(qv)

        # Robust error norm. Note: sigma is only defined for error >= 1, so
        # return_sigma assumes one of the robust modes below.
        if error == 0:
            d = d**2
        elif error == 1:
            sigma = np.median(np.abs(d() - np.median(d())))
            d = ch.sqrt(d**2 + sigma**2)
        elif error == 2:
            # Geman-McClure
            sigma = np.median(np.abs(d() - np.median(d()))) * 1.5
            d = d**2 / (d**2 + sigma)
        elif error == 3:
            # Geman-McClure, corrected
            if sigma_precomputed is not None:
                sigma = sigma_precomputed
            else:
                sigma = 1.426 * np.median(
                    np.abs(d() - np.median(d()))) * np.sqrt(3.0)

            d = d**2 / (d**2 + sigma**2)
            # Correction
            d = d * sigma

        elif error == 4:
            # Inverse exponential norm
            sigma = np.median(np.abs(d() - np.median(d())))
            d = -ch.exp(-d**2 / (2 * sigma**2)) * sigma

        err = d.sum()

        if return_sigma:
            return sigma
        else:
            return err
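
The sigma values above are median-absolute-deviation (MAD) scale estimates. A standalone numpy sketch of the idea (1.4826 is the usual Gaussian-consistency factor; the code above uses its own 1.426 * sqrt(3) scaling for the corrected Geman-McClure case):

import numpy as np

def mad_sigma(d, k=1.4826):
    # robust scale: k * median(|d - median(d)|); with k ~ 1.4826 this
    # estimates the standard deviation under Gaussian noise
    return k * np.median(np.abs(d - np.median(d)))

residuals = np.random.randn(10000) * 2.0
print(mad_sigma(residuals))  # close to 2.0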
Code Example #5
File: utils.py Project: tpalczew/InfoGAN-CR
def transformObjectFull(v, vn, chScale, chObjAz, chObjAx, chObjAz2,
                        chPosition):
    if chScale.size == 1:
        scaleMat = geometry.Scale(x=chScale[0], y=chScale[0],
                                  z=chScale[0])[0:3, 0:3]
    elif chScale.size == 2:
        scaleMat = geometry.Scale(x=chScale[0], y=chScale[0],
                                  z=chScale[1])[0:3, 0:3]
    else:
        scaleMat = geometry.Scale(x=chScale[0], y=chScale[1],
                                  z=chScale[2])[0:3, 0:3]

    chRotAzMat = geometry.RotateZ(a=chObjAz)[0:3, 0:3]
    chRotAxMat = geometry.RotateX(a=-chObjAx)[0:3, 0:3]
    chRotAzMat2 = geometry.RotateZ(a=chObjAz2)[0:3, 0:3]

    transformation = ch.dot(
        ch.dot(ch.dot(chRotAzMat, chRotAxMat), chRotAzMat2), scaleMat)

    invTranspModel = ch.transpose(ch.inv(transformation))

    vtransf = []
    vntransf = []
    for mesh_i, mesh in enumerate(v):
        vtransf = vtransf + [ch.dot(v[mesh_i], transformation) + chPosition]
        vndot = ch.dot(vn[mesh_i], invTranspModel)
        vndot = vndot / ch.sqrt(ch.sum(vndot**2, 1))[:, None]
        vntransf = vntransf + [vndot]
    return vtransf, vntransf
Code Example #6
def transformObject(v, vn, chScale, chObjAz, chPosition):
    if chScale.size == 1:
        scaleMat = geometry.Scale(x=chScale[0], y=chScale[0],
                                  z=chScale[0])[0:3, 0:3]
    elif chScale.size == 2:
        scaleMat = geometry.Scale(x=chScale[0], y=chScale[0],
                                  z=chScale[1])[0:3, 0:3]
    else:
        scaleMat = geometry.Scale(x=chScale[0], y=chScale[1],
                                  z=chScale[2])[0:3, 0:3]
    chRotAzMat = geometry.RotateZ(a=chObjAz)[0:3, 0:3]
    chRotAzMatX = geometry.RotateX(a=0)[0:3, 0:3]

    # transformation = scaleMat
    transformation = ch.dot(ch.dot(chRotAzMat, chRotAzMatX), scaleMat)
    invTranspModel = ch.transpose(ch.inv(transformation))

    vtransf = []
    vntransf = []
    for mesh_i, mesh in enumerate(v):
        vtransf = vtransf + [ch.dot(v[mesh_i], transformation) + chPosition]
        vndot = ch.dot(vn[mesh_i], invTranspModel)
        vndot = vndot / ch.sqrt(ch.sum(vndot**2, 1))[:, None]
        vntransf = vntransf + [vndot]
    return vtransf, vntransf
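
Both transform functions map normals with the inverse transpose of the model matrix, which keeps them perpendicular to surfaces under non-uniform scaling (for a pure rotation R, inv(R).T == R, so the distinction vanishes). A small numpy check:

import numpy as np

theta = 0.3
R = np.array([[np.cos(theta), -np.sin(theta), 0.],
              [np.sin(theta),  np.cos(theta), 0.],
              [0., 0., 1.]])
S = np.diag([2.0, 1.0, 1.0])       # non-uniform scale
M = R @ S
n = np.array([1., 0., 0.])         # normal of the plane x = const
t = np.array([0., 1., 0.])         # a tangent of that plane
n_new = np.linalg.inv(M).T @ n     # inverse-transpose transform
print(np.isclose(n_new @ (M @ t), 0.0))  # still perpendicular: True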
Code Example #7
File: lighting.py Project: mattloper/opendr
import chumpy as ch
from chumpy import Ch
from chumpy.utils import col

def LightDotNormal(num_verts):

    normalize_rows = lambda v: v / col(ch.sqrt(ch.sum(v.reshape((-1,3))**2, axis=1)))
    sum_rows = lambda v :  ch.sum(v.reshape((-1,3)), axis=1)

    return Ch(lambda light_pos, v, vn :
        sum_rows(normalize_rows(light_pos.reshape((1,3)) - v.reshape((-1,3))) * vn.reshape((-1,3))))
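
Ch(lambda ...) builds an object whose lambda arguments become settable terms, the same pattern the MeshToScan and PtsToMesh examples below rely on. A hypothetical usage sketch:

import numpy as np
import chumpy as ch

f = LightDotNormal(num_verts=4)
f.light_pos = ch.array([0., 0., 3.])
f.v = ch.array(np.random.rand(4, 3))
f.vn = ch.array(np.tile([0., 0., 1.], (4, 1)))
print(f.r.shape)                     # (4,): one dot product per vertex
print(f.dr_wrt(f.light_pos).shape)   # (4, 3)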
Code Example #8
def pixelLikelihoodRobustRegionCh(image, template, testMask, backgroundModel,
                                  layerPrior, variances):
    sigma = ch.sqrt(variances)
    mask = testMask
    if backgroundModel == 'FULL':
        mask = np.ones(image.shape[0:2])
    repPriors = ch.tile(layerPrior, image.shape[0:2])
    # sum = np.sum(np.log(layerPrior * scipy.stats.norm.pdf(image, location = template, scale=np.sqrt(variances) ) + (1 - repPriors)))

    imshape = image.shape
    from opendr.filters import filter_for
    from opendr.filters import GaussianKernel2D

    blur_mtx = filter_for(imshape[0],
                          imshape[1],
                          imshape[2] if len(imshape) > 2 else 1,
                          kernel=GaussianKernel2D(3, 1))
    blurred_image = MatVecMult(blur_mtx, image).reshape(imshape)
    blurred_template = MatVecMult(blur_mtx, template).reshape(imshape)

    # presumably the blurred template should be compared with the blurred
    # image (blurred_template is otherwise unused)
    probs = ch.exp(-(blurred_image - blurred_template)**2 /
                   (2 * variances)) * (1. / (sigma * np.sqrt(2 * np.pi)))
    foregroundProbs = (probs[:, :, 0] * probs[:, :, 1] *
                       probs[:, :, 2]) * layerPrior + (1 - repPriors)
    return foregroundProbs * mask + (1 - mask)
Code Example #9
def MeshToScan(scan,
               mesh_verts,
               mesh_faces,
               mesh_template_or_sampler,
               rho=lambda x: x,
               normalize=True,
               signed=False):
    """Returns a Ch object whose only dterm is 'mesh_verts'"""

    sampler, n_samples = construct_sampler(mesh_template_or_sampler,
                                           mesh_verts.size // 3)

    norm_const = np.sqrt(n_samples) if normalize else 1

    if signed:
        fn = lambda x: SignedSqrt(rho(x)) / norm_const
    else:
        fn = lambda x: ch.sqrt(rho(x)) / norm_const

    result = Ch(lambda mesh_verts: fn(
        MeshDistanceSquared(sample_verts=mesh_verts,
                            sample_faces=mesh_faces,
                            reference_verts=scan.v,
                            reference_faces=scan.f,
                            sampler=sampler,
                            signed=signed)))

    result.mesh_verts = mesh_verts
    return result
Code Example #10
def PtsToMesh(sample_verts,
              reference_verts,
              reference_faces,
              reference_template_or_sampler,
              rho=lambda x: x,
              normalize=True,
              signed=False):
    """Returns a Ch object whose dterms are 'reference_v' and 'sample_v'"""

    sampler = {'point2sample': sp.eye(sample_verts.size, sample_verts.size)}
    n_samples = sample_verts.size // 3

    norm_const = np.sqrt(n_samples) if normalize else 1

    if signed:
        fn = lambda x: SignedSqrt(rho(x)) / norm_const
    else:
        fn = lambda x: ch.sqrt(rho(x)) / norm_const

    result = Ch(lambda sample_v, reference_v: fn(
        MeshDistanceSquared(sample_verts=sample_v,
                            reference_verts=reference_v,
                            reference_faces=reference_faces,
                            sampler=sampler,
                            signed=signed)))

    result.reference_v = reference_verts
    result.sample_v = sample_verts
    return result
Code Example #11
def transformObject(v, vn, chScale, chObjAz, chObjDisplacement, chObjRotation,
                    targetPosition):

    scaleMat = geometry.Scale(x=chScale[0], y=chScale[1], z=chScale[2])[0:3, 0:3]
    chRotAzMat = geometry.RotateZ(a=-chObjAz)[0:3, 0:3]
    transformation = ch.dot(chRotAzMat, scaleMat)
    invTranspModel = ch.transpose(ch.inv(transformation))

    objDisplacementMat = computeHemisphereTransformation(
        chObjRotation, 0, chObjDisplacement, np.array([0, 0, 0]))

    newPos = objDisplacementMat[0:3, 3]

    vtransf = []
    vntransf = []

    for mesh_i, mesh in enumerate(v):
        vtransf = vtransf + [
            ch.dot(v[mesh_i], transformation) + newPos + targetPosition
        ]
        vndot = ch.dot(vn[mesh_i], invTranspModel)
        vndot = vndot / ch.sqrt(ch.sum(vndot**2, 1))[:, None]
        vntransf = vntransf + [vndot]
    return vtransf, vntransf, newPos
Code Example #12
File: lighting.py Project: vstarlinger/opendr
def LightDotNormal(num_verts):

    normalize_rows = lambda v : v / ch.sqrt(ch.sum(v.reshape((-1,3))**2, axis=1)).reshape((-1,1))
    sum_rows = lambda v :  ch.sum(v.reshape((-1,3)), axis=1)

    return Ch(lambda light_pos, v, vn :
        sum_rows(normalize_rows(light_pos.reshape((1,3)) - v.reshape((-1,3))) * vn.reshape((-1,3))))
Code Example #13
File: generative_models.py Project: tszhang97/opendr
def logPixelLikelihoodErrorCh(sqerrors, testMask, backgroundModel, variances):
    sigma = ch.sqrt(variances)
    # sum = np.sum(np.log(layerPrior * scipy.stats.norm.pdf(image, location = template, scale=np.sqrt(variances) ) + (1 - repPriors)))
    mask = testMask
    if backgroundModel == 'FULL':
        mask = np.ones(sqerrors.shape[0:2])
    logprobs = (-sqerrors / (2. * variances)) - ch.log(sigma * np.sqrt(2.0 * np.pi))
    pixelLogProbs = logprobs[:,:,0] + logprobs[:,:,1] + logprobs[:,:,2]
    return pixelLogProbs * mask
Code Example #14
File: generative_models.py Project: tszhang97/opendr
def pixelLikelihoodRobustCh(image, template, testMask, backgroundModel, layerPrior, variances):
    sigma = ch.sqrt(variances)
    mask = testMask
    if backgroundModel == 'FULL':
        mask = np.ones(image.shape[0:2])
    repPriors = ch.tile(layerPrior, image.shape[0:2])
    # sum = np.sum(np.log(layerPrior * scipy.stats.norm.pdf(image, location = template, scale=np.sqrt(variances) ) + (1 - repPriors)))

    probs = ch.exp( - (image - template)**2 / (2 * variances)) * (1./(sigma * np.sqrt(2 * np.pi)))
    foregroundProbs = (probs[:,:,0] * probs[:,:,1] * probs[:,:,2]) * layerPrior + (1 - repPriors)
    return foregroundProbs * mask + (1-mask)
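
Per the commented-out reference line, each pixel's likelihood mixes a Gaussian inlier term with a uniform outlier term weighted by layerPrior. A scalar sanity check with scipy:

import numpy as np
from scipy.stats import norm

pixel, templ, var, prior = 0.4, 0.5, 0.01, 0.9
p_fg = norm.pdf(pixel, loc=templ, scale=np.sqrt(var))   # Gaussian inlier
robust = prior * p_fg + (1 - prior)                     # + uniform outlier
print(robust)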
Code Example #15
def axis2quat(p):
    angle = ch.sqrt(ch.clip(ch.sum(ch.square(p), 1), 1e-16, 1e16))
    norm_p = p / angle[:, np.newaxis]
    cos_angle = ch.cos(angle / 2)
    sin_angle = ch.sin(angle / 2)
    qx = norm_p[:, 0] * sin_angle
    qy = norm_p[:, 1] * sin_angle
    qz = norm_p[:, 2] * sin_angle
    qw = cos_angle - 1  # offset so the identity rotation maps to the zero vector
    return ch.concatenate(
        [qx[:, np.newaxis], qy[:, np.newaxis], qz[:, np.newaxis],
         qw[:, np.newaxis]], axis=1)
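
A quick numeric check, assuming chumpy as ch: a rotation of pi/2 about x should give (sin(pi/4), 0, 0, cos(pi/4) - 1), and adding the 1 back onto qw recovers a unit quaternion:

import numpy as np
import chumpy as ch

p = ch.array([[np.pi / 2, 0., 0.]])
q = axis2quat(p).r[0].copy()
print(q)                                   # [0.7071, 0, 0, cos(pi/4) - 1]
q[3] += 1
print(np.isclose(np.linalg.norm(q), 1.0))  # True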
Code Example #16
File: generative_models.py Project: tszhang97/opendr
def layerPosteriorsRobustCh(image, template, testMask, backgroundModel, layerPrior, variances):

    sigma = ch.sqrt(variances)
    mask = testMask
    if backgroundModel == 'FULL':
        mask = np.ones(image.shape[0:2])
    repPriors = ch.tile(layerPrior, image.shape[0:2])
    probs = ch.exp(-(image - template)**2 / (2 * variances)) * (1 / (sigma * np.sqrt(2 * np.pi)))
    foregroundProbs = probs[:, :, 0] * probs[:, :, 1] * probs[:, :, 2] * layerPrior
    outlierProbs = ch.Ch(1 - repPriors)
    lik = pixelLikelihoodRobustCh(image, template, testMask, backgroundModel, layerPrior, variances)

    return foregroundProbs * mask / lik, outlierProbs * mask / lik
Code Example #17
scene_io_utils.loadTargetsBlendData()
for teapotIdx, teapotName in enumerate(selection):
    teapot = bpy.data.scenes[teapotName[0:63]].objects['teapotInstance' + str(renderTeapotsList[teapotIdx])]
    teapot.layers[1] = True
    teapot.layers[2] = True
    targetModels = targetModels + [teapot]
    blender_teapots = blender_teapots + [teapot]


v_teapots, f_list_teapots, vc_teapots, vn_teapots, uv_teapots, haveTextures_list_teapots, textures_list_teapots, vflat, varray, center_teapots = scene_io_utils.loadTeapotsOpenDRData(renderTeapotsList, useBlender, unpackModelsFromBlender, targetModels)

azimuth = np.pi
chCosAz = ch.Ch([np.cos(azimuth)])
chSinAz = ch.Ch([np.sin(azimuth)])

# Recover the angle from its (cos, sin) pair via the tangent half-angle
# identity (singular at azimuth = +/-pi). Both chAz and chEl are then
# immediately overridden with fixed values.
chAz = 2*ch.arctan(chSinAz/(ch.sqrt(chCosAz**2 + chSinAz**2) + chCosAz))
chAz = ch.Ch([np.pi/4])
chObjAz = ch.Ch([np.pi/4])
chAzRel = chAz - chObjAz

elevation = 0
chLogCosEl = ch.Ch(np.log(np.cos(elevation)))
chLogSinEl = ch.Ch(np.log(np.sin(elevation)))  # note: log(sin(0)) = -inf
chEl = 2*ch.arctan(ch.exp(chLogSinEl)/(ch.sqrt(ch.exp(chLogCosEl)**2 + ch.exp(chLogSinEl)**2) + ch.exp(chLogCosEl)))
chEl = ch.Ch([0.95993109])
chDist = ch.Ch([camDistance])

chObjAzGT = ch.Ch([np.pi*3/2])
chAzGT = ch.Ch([np.pi*3/2])
chAzRelGT = chAzGT - chObjAzGT
chElGT = ch.Ch(chEl.r[0])
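
The chAz/chEl constructions recover an angle from its (cos, sin) pair with the tangent half-angle identity, avoiding arctan2; it is exact on (-pi, pi) and singular at +/-pi where the denominator vanishes. A numpy check:

import numpy as np

az = np.pi / 3
c, s = np.cos(az), np.sin(az)
az_rec = 2 * np.arctan(s / (np.sqrt(c**2 + s**2) + c))
print(np.isclose(az_rec, az))   # True for az in (-pi, pi)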
Code Example #18
def lift2Dto3D(projPtsGT,
               camMat,
               filename,
               img,
               JVis=np.ones((21, ), dtype=np.float32),
               trans=None,
               beta=None,
               wrist3D=None,
               withPoseCoeff=True,
               weights=1.0,
               relDepGT=None,
               rel3DCoordGT=None,
               rel3DCoordNormGT=None,
               img2DGT=None,
               outDir=None,
               poseCoeffInit=None,
               transInit=None,
               betaInit=None):

    loss = {}

    if withPoseCoeff:
        numComp = 30
        m, poseCoeffCh, betaCh, transCh, fullposeCh = getHandModelPoseCoeffs(
            numComp)

        if poseCoeffInit is not None:
            poseCoeffCh[:] = poseCoeffInit

        if transInit is not None:
            transCh[:] = transInit

        if betaInit is not None:
            betaCh[:] = betaInit

        freeVars = [poseCoeffCh]
        if beta is None:
            freeVars = freeVars + [betaCh]
            loss['shape'] = 1e2 * betaCh
        else:
            betaCh[:] = beta

        if trans is None:
            freeVars = freeVars + [transCh]
        else:
            transCh[:] = trans

        # loss['pose'] = 0.5e2 * poseCoeffCh[3:]/stdPCACoeff[:numComp]

        thetaConstMin, thetaConstMax = Constraints().getHandJointConstraints(
            fullposeCh[3:])
        loss['constMin'] = 5e2 * thetaConstMin
        loss['constMax'] = 5e2 * thetaConstMax
        loss['invalidTheta'] = 1e3 * fullposeCh[Constraints().invalidThetaIDs]

    else:
        m, rotCh, jointsCh, transCh, betaCh = getHandModel()

        thetaConstMin, thetaConstMax = Constraints().getHandJointConstraints(
            jointsCh)
        loss['constMin'] = 5e3 * thetaConstMin
        loss['constMax'] = 5e3 * thetaConstMax
        validTheta = jointsCh[Constraints().validThetaIDs[3:] - 3]

        freeVars = [validTheta, rotCh]

        if beta is None:
            freeVars = freeVars + [betaCh]
            loss['shape'] = 0.5e2 * betaCh
        else:
            betaCh[:] = beta

        if trans is None:
            freeVars = freeVars + [transCh]
        else:
            transCh[:] = trans

    if relDepGT is not None:
        relDepPred = m.J_transformed[:, 2] - m.J_transformed[0, 2]
        loss['relDep'] = (relDepPred - relDepGT) * weights[:, 0] * 5e1

    if rel3DCoordGT is not None:
        rel3DCoordPred = m.J_transformed - m.J_transformed[0:1, :]
        loss['rel3DCoord'] = (rel3DCoordPred - rel3DCoordGT) * np.tile(
            weights[:, 0:1], [1, 3]) * 5e1

    if rel3DCoordNormGT is not None:
        rel3DCoordPred = m.J_transformed[jointsMap][
            1:, :] - m.J_transformed[jointsMap][0:1, :]

        rel3DCoordPred = rel3DCoordPred / ch.expand_dims(
            ch.sqrt(ch.sum(ch.square(rel3DCoordPred), axis=1)), axis=1)
        loss['rel3DCoordNorm'] = (
            1. - ch.sum(rel3DCoordPred * rel3DCoordNormGT, axis=1)) * 1e4

        # loss['rel3DCoordNorm'] = \
        #     (rel3DCoordNormGT*ch.expand_dims(ch.sum(rel3DCoordPred*rel3DCoordNormGT, axis=1), axis=1) - rel3DCoordPred) * 1e2#5e2

    projPts = utilsEval.chProjectPoints(m.J_transformed, camMat,
                                        False)[jointsMap]
    JVis = np.tile(np.expand_dims(JVis, 1), [1, 2])
    loss['joints2D'] = (projPts - projPtsGT) * JVis * weights * 1e0
    loss['joints2DClip'] = clipIden(projPts - projPtsGT) * JVis * weights * 1e1

    if wrist3D is not None:
        dep = wrist3D[2]
        if dep < 0:
            dep = -dep
        loss['wristDep'] = (m.J_transformed[0, 2] - dep) * 1e2

    # vis_mesh(m)

    render = False

    def cbPass(_):
        pass

    print(filename)
    warnings.simplefilter('ignore')

    loss['joints2D'] = loss['joints2D'] * 1e1 / weights  # don't want to use confidence now

    if True:
        ch.minimize({k: loss[k]
                     for k in loss.keys() if k != 'joints2DClip'},
                    x0=freeVars,
                    callback=cbPass,
                    method='dogleg',
                    options={'maxiter': 50})
    else:
        manoVis.dump3DModel2DKpsHand(img,
                                     m,
                                     filename,
                                     camMat,
                                     est2DJoints=projPtsGT,
                                     gt2DJoints=img2DGT,
                                     outDir=outDir)

        freeVars = [poseCoeffCh[:3], transCh]
        ch.minimize({k: loss[k]
                     for k in loss.keys() if k != 'joints2DClip'},
                    x0=freeVars,
                    callback=cbPass,
                    method='dogleg',
                    options={'maxiter': 20})

        manoVis.dump3DModel2DKpsHand(img,
                                     m,
                                     filename,
                                     camMat,
                                     est2DJoints=projPtsGT,
                                     gt2DJoints=img2DGT,
                                     outDir=outDir)
        freeVars = [poseCoeffCh[3:]]
        ch.minimize({k: loss[k]
                     for k in loss.keys() if k != 'joints2DClip'},
                    x0=freeVars,
                    callback=cbPass,
                    method='dogleg',
                    options={'maxiter': 20})

        manoVis.dump3DModel2DKpsHand(img,
                                     m,
                                     filename,
                                     camMat,
                                     est2DJoints=projPtsGT,
                                     gt2DJoints=img2DGT,
                                     outDir=outDir)
        freeVars = [poseCoeffCh, transCh]
        if beta is None:
            freeVars = freeVars + [betaCh]
        ch.minimize({k: loss[k]
                     for k in loss.keys() if k != 'joints2DClip'},
                    x0=freeVars,
                    callback=cbPass,
                    method='dogleg',
                    options={'maxiter': 20})

    if False:
        open3dVisualize(m)
    else:
        manoVis.dump3DModel2DKpsHand(img,
                                     m,
                                     filename,
                                     camMat,
                                     est2DJoints=projPtsGT,
                                     gt2DJoints=img2DGT,
                                     outDir=outDir)

    # vis_mesh(m)

    joints3D = m.J_transformed.r[jointsMap]

    # print(betaCh.r)
    # print((relDepPred.r - relDepGT))

    return (joints3D, poseCoeffCh.r.copy(), betaCh.r.copy(), transCh.r.copy(),
            loss['joints2D'].r.copy(), m.r.copy())
Code Example #19
def f(u):
    # Charbonnier (pseudo-Huber) penalty; EPSILON is defined elsewhere in the
    # original file
    return ch.sqrt(EPSILON**2 + ch.power(u, 2)) - EPSILON
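
This is the Charbonnier (pseudo-Huber) penalty: quadratic near zero, asymptotically linear. A standalone numpy check with an assumed EPSILON:

import numpy as np

EPSILON = 1e-3  # assumed value; defined elsewhere in the original file

def f_np(u):
    return np.sqrt(EPSILON**2 + u**2) - EPSILON

print(f_np(0.0))           # 0.0: no penalty at zero residual
print(f_np(10.0) / 10.0)   # ~1.0: behaves like |u| for large residuals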
Code Example #20
def generateSceneImages(width, height, envMapFilename, envMapMean, phiOffset,
                        chAzGT, chElGT, chDistGT, light_colorGT, chComponentGT,
                        glMode):
    replaceableScenesFile = '../databaseFull/fields/scene_replaceables_backup.txt'
    sceneLines = [line.strip() for line in open(replaceableScenesFile)]
    for sceneIdx in np.arange(len(sceneLines)):
        sceneNumber, sceneFileName, instances, roomName, roomInstanceNum, targetIndices, targetPositions = scene_io_utils.getSceneInformation(
            sceneIdx, replaceableScenesFile)
        sceneDicFile = 'data/scene' + str(sceneNumber) + '.pickle'
        bpy.ops.wm.read_factory_settings()
        scene_io_utils.loadSceneBlendData(sceneIdx, replaceableScenesFile)
        scene = bpy.data.scenes['Main Scene']
        bpy.context.screen.scene = scene
        scene_io_utils.setupScene(scene, roomInstanceNum, scene.world,
                                  scene.camera, width, height, 16, True, False)
        scene.update()
        scene.render.resolution_x = width  #perhaps set resolution in code
        scene.render.resolution_y = height
        scene.render.tile_x = height / 2
        scene.render.tile_y = width
        scene.cycles.samples = 100
        addEnvironmentMapWorld(envMapFilename, scene)
        setEnviornmentMapStrength(0.3 / envMapMean, scene)
        rotateEnviornmentMap(phiOffset, scene)

        if not os.path.exists('scenes/' + str(sceneNumber)):
            os.makedirs('scenes/' + str(sceneNumber))
        for targetIdx, targetIndex in enumerate(targetIndices):
            targetPosition = targetPositions[targetIdx]

            # rendererGT is assumed to exist from a previous iteration or the
            # enclosing scope; on the very first pass this would raise NameError
            rendererGT.clear()
            del rendererGT

            v, f_list, vc, vn, uv, haveTextures_list, textures_list = scene_io_utils.loadSavedScene(
                sceneDicFile)
            # removeObjectData(targetIndex, v, f_list, vc, vn, uv, haveTextures_list, textures_list)
            # addObjectData(v, f_list, vc, vn, uv, haveTextures_list, textures_list,  v_teapots[currentTeapotModel][0], f_list_teapots[currentTeapotModel][0], vc_teapots[currentTeapotModel][0], vn_teapots[currentTeapotModel][0], uv_teapots[currentTeapotModel][0], haveTextures_list_teapots[currentTeapotModel][0], textures_list_teapots[currentTeapotModel][0])
            vflat = [item for sublist in v for item in sublist]
            rangeMeshes = range(len(vflat))
            vch = [ch.array(vflat[mesh]) for mesh in rangeMeshes]
            # vch[0] = ch.dot(vch[0], scaleMatGT) + targetPosition
            if len(vch) == 1:
                vstack = vch[0]
            else:
                vstack = ch.vstack(vch)
            cameraGT, modelRotationGT = setupCamera(vstack, chAzGT, chElGT,
                                                    chDistGT, targetPosition,
                                                    width, height)
            # cameraGT, modelRotationGT = setupCamera(vstack, chAzGT, chElGT, chDistGT, center + targetPosition, width, height)
            vnflat = [item for sublist in vn for item in sublist]
            vnch = [ch.array(vnflat[mesh]) for mesh in rangeMeshes]
            vnchnorm = [
                vnch[mesh] /
                ch.sqrt(vnch[mesh][:, 0]**2 + vnch[mesh][:, 1]**2 +
                        vnch[mesh][:, 2]**2).reshape([-1, 1])
                for mesh in rangeMeshes
            ]
            vcflat = [item for sublist in vc for item in sublist]
            vcch = [ch.array(vcflat[mesh]) for mesh in rangeMeshes]
            vc_list = computeSphericalHarmonics(vnchnorm, vcch, light_colorGT,
                                                chComponentGT)

            rendererGT = TexturedRenderer()
            rendererGT.set(glMode=glMode)
            setupTexturedRenderer(rendererGT, vstack, vch, f_list, vc_list,
                                  vnchnorm, uv, haveTextures_list,
                                  textures_list, cameraGT, frustum, win)
            cv2.imwrite(
                'scenes/' + str(sceneNumber) + '/opendr_' + str(targetIndex) +
                '.png', 255 * rendererGT.r[:, :, [2, 1, 0]])

            placeCamera(scene.camera, -chAzGT[0].r * 180 / np.pi,
                        chElGT[0].r * 180 / np.pi, chDistGT, targetPosition)
            scene.update()
            scene.render.filepath = 'scenes/' + str(
                sceneNumber) + '/blender_' + str(targetIndex) + '.png'
            bpy.ops.render.render(write_still=True)
Code Example #21
 def normalize_rows(v):
     b = ch.sqrt(ch.sum(v.reshape((-1, 3))**2, axis=1)).reshape((-1, 1))
     # b.compute_r() is a plain numpy array, so the norm is treated as a
     # constant: no derivative flows through the normalization
     return v / b.compute_r()
Code Example #22
 def obj_s2m(w, i):
     # weighted scan-to-mesh distance, robustified with Geman-McClure (GMOf)
     from sbody.mesh_distance import ScanToMesh
     return w * ch.sqrt(GMOf(ScanToMesh(scan_flat[i], sv_flat[i], f), sigma[i]))
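
GMOf is the Geman-McClure robustifier from the SMPL/sbody codebase. A minimal chumpy-compatible sketch, assuming it matches the usual definition:

import chumpy as ch

def gmof(x, sigma):
    # Geman-McClure: sigma**2 * x**2 / (x**2 + sigma**2); bounded above by
    # sigma**2, so large residuals saturate instead of dominating the fit
    return (sigma**2) * (x**2 / (x**2 + sigma**2))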
Code Example #23
numVars = chInput.size

recSoftmaxW = ch.Ch(np.random.uniform(0, 1, [nRecComps, numVars]) / numVars)

chRecLogistic = ch.exp(ch.dot(recSoftmaxW, chInput.reshape([numVars, 1])))
chRecSoftmax = chRecLogistic.ravel() / ch.sum(chRecLogistic)

chZRecComps = ch.zeros([numVars, nRecComps])

chZ = ch.zeros([numVars])

recMeans = ch.Ch(np.random.uniform(0, 1, [3, nRecComps]))
recCovars = 0.2
chRecLogLikelihoods = -0.5 * (chZ.reshape([numPixels, 3, 1]) - ch.tile(
    recMeans, [numPixels, 1, 1]))**2 - ch.log(
        (2 * recCovars) * (1 / (ch.sqrt(recCovars) * np.sqrt(2 * np.pi))))

genZCovars = 0.2
chGenComponentsProbs = ch.Ch(gmm.weights_)
chCompMeans = ch.zeros([nComps, 3])

for comp in range(nComps):
    chCompMeans[comp, :] = gmm.means_[comp]

chPZComp = ch.exp(
    -(ch.tile(chZ.reshape([numPixels, 3, 1]), [1, 1, nComps]) -
      chCompMeans.reshape([1, 3, nComps]))**2 /
    (2 * genZCovars)) * (1 / (ch.sqrt(genZCovars) * np.sqrt(2 * np.pi)))

chPZ = ch.dot(chGenComponentsProbs.reshape([1, nComps]),
              chPZComp.reshape([5, numVars]))  # the literal 5 is presumably nComps
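
chRecSoftmax above is a plain softmax over the recognition components, built from ch.exp and ch.sum so it stays differentiable. In isolation:

import numpy as np
import chumpy as ch

logits = ch.Ch(np.array([1.0, 2.0, 3.0]))
e = ch.exp(logits)
softmax = e.ravel() / ch.sum(e)
print(softmax.r, softmax.r.sum())   # normalized weights that sum to 1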
Code Example #24
def diffHog(image, drconv=None, numOrient=9, cwidth=8, cheight=8):
    imagegray = 0.3 * image[:, :, 0] + 0.59 * image[:, :,
                                                    1] + 0.11 * image[:, :, 2]
    sy, sx = imagegray.shape

    # central-difference gradients, zero-padded back to the image size
    gx = imagegray[:, 2:] - imagegray[:, :-2]
    gx = ch.hstack([np.zeros([sy, 1]), gx, np.zeros([sy, 1])])

    gy = imagegray[2:, :] - imagegray[:-2, :]
    gy = ch.vstack([np.zeros([1, sx]), gy, np.zeros([1, sx])])

    gx += 1e-5  # avoid division by zero in arctan(gy / gx) below

    distFilter = np.ones([2 * cheight, 2 * cwidth], dtype=np.uint8)
    distFilter[cheight, cwidth] = 0  # center pixel (np.int was removed in modern NumPy)
    dt = cv2.distanceTransform(distFilter, cv2.DIST_L2, 3)
    distFilter = (dt - dt.max()) / (-dt.max())

    magn = ch.sqrt(gy**2 + gx**2) * 180 / np.sqrt(2)

    angles = ch.arctan(gy / gx) * 180 / np.pi + 90

    # meanOrient = np.linspace(0, 180, numOrient)

    orientations_arr = np.arange(numOrient)

    meanOrient = orientations_arr / numOrient * 180

    fb_resttmp = 1 - ch.abs(
        ch.expand_dims(angles[:, :], 2) -
        meanOrient[1:].reshape([1, 1, numOrient - 1])) * numOrient / 180
    zeros_rest = np.zeros([sy, sx, numOrient - 1, 1])
    fb_rest = ch.max(ch.concatenate([fb_resttmp[:, :, :, None], zeros_rest],
                                    axis=3),
                     axis=3)

    chMinOrient0 = ch.min(ch.concatenate([
        ch.abs(
            ch.expand_dims(angles[:, :], 2) -
            meanOrient[0].reshape([1, 1, 1]))[:, :, :, None],
        ch.abs(180 - ch.expand_dims(angles[:, :], 2) -
               meanOrient[0].reshape([1, 1, 1]))[:, :, :, None]
    ],
                                         axis=3),
                          axis=3)

    zeros_fb0 = np.zeros([sy, sx, 1])
    fb0_tmp = ch.concatenate(
        [1 - chMinOrient0[:, :] * numOrient / 180, zeros_fb0], axis=2)
    fb_0 = ch.max(fb0_tmp, axis=2)

    fb = ch.concatenate([fb_0[:, :, None], fb_rest], axis=2)

    # fb[:,:,0] = ch.max(1 - ch.abs(ch.expand_dims(angles,2) - meanOrient.reshape([1,1,numOrient]))*numOrient/180,0)

    # fb = 1./(1. + ch.exp(1 - ch.abs(ch.expand_dims(angles,2) - meanOrient.reshape([1,1,numOrient]))*numOrient/180))

    Fb = ch.expand_dims(magn, 2) * fb

    if drconv is None:
        drconv = dr_wrt_convolution(Fb[:, :, 0], distFilter)

    Fs_list = [
        convolve2D(x=Fb[:, :, Fbi], filter=distFilter,
                   convolve2DDr=drconv).reshape([Fb.shape[0], Fb.shape[1], 1])
        for Fbi in range(numOrient)
    ]

    # Fs_list = [scipy.signal.convolve2d(Fb[:,:,Fbi], distFilter).reshape([Fb.shape[0], Fb.shape[1],1]) for Fbi in range(numOrient)]
    Fs = ch.concatenate(Fs_list, axis=2)

    # cellCols = np.arange(start=cwidth/2, stop=Fs.shape[1]-cwidth/2 , step=cwidth)
    # cellRows = np.arange(start=cheight/2, stop=Fs.shape[0]-cheight/2 , step=cheight)

    Fcells = Fs[0:Fs.shape[0]:cheight, 0:Fs.shape[1]:cwidth, :]

    epsilon = 1e-5

    v = Fcells / ch.sqrt(ch.sum(Fcells**2) + epsilon)
    # v = Fcells

    # hog, hogim = skimage.feature.hog(imagegray,  orientations=numOrient, pixels_per_cell=(cheight, cwidth), visualise=True)
    hog_image = HogImage(image=image,
                         hog=Fcells,
                         numOrient=numOrient,
                         cwidth=cwidth,
                         cheight=cheight)

    return v, hog_image, drconv
Code Example #25
File: refine_A.py Project: zzz622848/mrflow
def compute_cost(params,
                 p0,
                 u_fwd,
                 v_fwd,
                 A_reference,
                 mu_reference,
                 mu_refine,
                 x,
                 y,
                 sigma=1.0,
                 optimize_H=True,
                 optimize_q=False,
                 optimize_B=True):
    """ Compute structure matching cost using chumpy
    
    Parameters:
    x[0] - x[7]: Coordinates of corner points in second frame
    x[8],x[9]: Q
    x[10]: B
    
    p1 : Coordinates of corner points in first frame
    
    We optimize over all parameters
    """

    params_ch = ch.array(params)

    # Extract corner points
    p1 = params_ch[:8].reshape((4, 2))
    q = params_ch[8:10]
    B = params_ch[10]

    # Convert the ones we do not want to
    if not optimize_B:
        B = B()
    if not optimize_H:
        p1 = np.array(p1)
    if not optimize_q:
        q = np.array(q)

    # Compute unity vectors towards q
    vx = q[0] - x
    vy = q[1] - y
    nrm = ch.maximum(1.0, ch.sqrt((vx**2 + vy**2)))
    vx /= nrm
    vy /= nrm

    if not optimize_q:
        vx = np.copy(vx)
        vy = np.copy(vy)

    # Compute differentiable homography (mapping I1 to I0)
    H_inv = chumpy_get_H(p1, p0)
    if not optimize_H:
        H_inv = np.copy(H_inv)

    # Remove differentiable homography from backward flow
    x_new = x + u_fwd
    y_new = y + v_fwd

    D = H_inv[2, 0] * x_new + H_inv[2, 1] * y_new + H_inv[2, 2]
    u_parallax = (H_inv[0, 0] * x_new + H_inv[0, 1] * y_new +
                  H_inv[0, 2]) / D - x
    v_parallax = (H_inv[1, 0] * x_new + H_inv[1, 1] * y_new +
                  H_inv[1, 2]) / D - y
    r = u_parallax * vx + v_parallax * vy

    # Compute A estimates
    A_refined = r / (B * (r / mu_refine - nrm / mu_refine))

    # The data we want to match
    z_match = A_reference * (mu_refine / mu_reference)
    z = A_refined

    #
    # Use Geman-Mcclure error
    #

    err_per_px = (z - z_match)**2
    err = (sigma * err_per_px / (sigma**2 + err_per_px)).sum()
    # Lorentzian
    # err = (sigma * ch.log(1+0.5 * err_per_px/(sigma**2))).sum()
    derr = err.dr_wrt(params_ch)

    return err, np.copy(derr).flatten()
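
Because compute_cost returns the scalar error together with its flattened gradient, it plugs directly into scipy.optimize.minimize with jac=True. A hypothetical driver, with argument values elided:

import numpy as np
from scipy.optimize import minimize

# params0 = np.concatenate([p1_init.ravel(), q_init, [B_init]])  # 11 values
# res = minimize(compute_cost, params0,
#                args=(p0, u_fwd, v_fwd, A_reference, mu_reference,
#                      mu_refine, x, y),
#                jac=True, method='L-BFGS-B')
# p1_opt = res.x[:8].reshape(4, 2)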