Example 1
    def call(self, inp):
        images, vertexlabel, Js_in = inp
        out_dict = {}
        images = [
            tf.Variable(x, dtype=tf.float32, trainable=False) for x in images
        ]
        vertexlabel = tf.cast(tf.Variable(vertexlabel, trainable=False),
                              tf.int32)
        if FACE:
            Js = [Lambda(lambda j: j[:, :25])(J) for J in Js_in]
        else:
            Js = [
                self.flatten(
                    tf.cast(tf.Variable(x, trainable=False), tf.float32))
                for x in Js_in
            ]

        with tf.device('/gpu:1'):
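            # Shared encoder self.top_ maps each (image, joints) pair to two
            # latents: the first is averaged across views into an offset code;
            # the second is averaged into a shape code and also concatenated
            # with the view's joints to form a per-view pose code.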
            lat_codes = [self.top_([q, j]) for q, j in zip(images, Js)]
            latent_code_offset = self.avg([q[0] for q in lat_codes])
            latent_code_betas = self.avg([q[1] for q in lat_codes])
            latent_code_pose = [
                tf.concat([q[1], x], axis=-1) for q, x in zip(lat_codes, Js)
            ]

        with tf.device('/gpu:2'):
            latent_code_betas = self.lat_betas(latent_code_betas)
            betas = self.betas(latent_code_betas)

            latent_code_pose = [self.lat_pose(x) for x in latent_code_pose]

            pose_trans_init = tf.tile(tf.expand_dims(self.pose_trans, 0),
                                      (K.int_shape(betas)[0], 1))

            poses_ = [
                self.lat_pose_layer(x) + pose_trans_init
                for x in latent_code_pose
            ]
            trans_ = [self.cut_trans(x) for x in poses_]
            trans = [la(i) for la, i in zip(self.trans_layers, trans_)]

            poses_ = [self.cut_poses(x) for x in poses_]
            poses_ = [self.reshape_pose(x) for x in poses_]
            poses = [la(i) for la, i in zip(self.pose_layers, poses_)]

            ##
            out_dict['betas'] = betas
            for i in range(NUM):
                out_dict['pose_{}'.format(i)] = poses[i]
                out_dict['trans_{}'.format(i)] = trans[i]

            latent_code_offset_ShapeMerged = self.latent_code_offset_ShapeMerged(
                latent_code_offset)
            latent_code_offset_ShapeMerged = self.latent_code_offset_ShapeMerged_2(
                latent_code_offset_ShapeMerged)

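            # Decode one garment per garment class from the merged offset
            # code; each garment model returns (vertices, PCA coefficients).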
            garm_model_outputs = [
                fe(latent_code_offset_ShapeMerged) for fe in self.garmentModels
            ]
            garment_verts_all = [fe[0] for fe in garm_model_outputs]
            garment_pca = [fe[1] for fe in garm_model_outputs]
            garment_pca = tf.stack(garment_pca, axis=1)

            ##
            out_dict['pca_verts'] = garment_pca

            # Scatter each garment mesh into the common SMPL vertex indexing,
            # one layer per garment.
            garment_verts_all_scattered = tf.stack(
                [vs(go) for go, vs in zip(garment_verts_all, self.scatters)],
                axis=-1)

            # Get the naked SMPL (zero pose, translation, offsets) to compute
            # garment offsets against
            zero_offsets = K.zeros_like(garment_verts_all_scattered[..., 0])
            zero_poses = [K.zeros_like(p) for p in poses]
            zero_trans = [K.zeros_like(t) for t in trans]

            smpls_base = []
            for i, (p, t) in enumerate(zip(zero_poses, zero_trans)):
                v, _, n, _ = self.smpl(p, betas, t, zero_offsets)
                smpls_base.append(v)
                if i == 0:
                    vertices_naked_ = n

            # Prepend the naked skin as layer 0 of the per-vertex layer stack
            garment_verts_all_scattered = tf.concat(
                [
                    K.expand_dims(vertices_naked_, -1),
                    tf.cast(garment_verts_all_scattered, vertices_naked_.dtype)
                ],
                axis=-1)
            garment_verts_all_scattered = tf.transpose(
                garment_verts_all_scattered, perm=[0, 1, 3, 2])
            clothed_verts = tf.batch_gather(garment_verts_all_scattered,
                                            vertexlabel)
            clothed_verts = tf.squeeze(
                tf.transpose(clothed_verts, perm=[0, 1, 3, 2]))

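            # Per-vertex displacement from the naked body to the clothed
            # surface; fed back into SMPL below as the offset term.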
            offsets_ = clothed_verts - vertices_naked_

            smpls = []
            for i, (p, t) in enumerate(zip(poses, trans)):
                v, t, n, _ = self.smpl(p, betas, t, offsets_)
                smpls.append(v)
                if i == 0:
                    vertices_naked = n
                    vertices_tposed = t

            Js = [
                jl(self.smpl_J([p, betas, t]))
                for jl, p, t in zip(self.J_layers, poses, trans)
            ]
            vertices = tf.concat(
                [tf.expand_dims(smpl, axis=-1) for smpl in smpls], axis=-1)

            ##
            out_dict['vertices'] = vertices
            out_dict['vertices_tposed'] = vertices_tposed
            out_dict['vertices_naked'] = vertices_naked
            out_dict['offsets_h'] = offsets_
            for i in range(NUM):
                out_dict['J_{}'.format(i)] = Js[i]

            vert_cols = tf.reshape(
                tf.gather(self.colormap, tf.reshape(vertexlabel, (-1, ))),
                (-1, config.NVERTS, 3))
            rendered_garms_all = []

        for view in range(NUM):
            rendered_garms_all.append(
                render_colored_batch(
                    vertices[..., view],
                    self.faces,
                    vert_cols,
                    IMG_SIZE,
                    IMG_SIZE,
                    FOCAL_LENGTH,
                    CAMERA_CENTER,
                    np.zeros(3, dtype=np.float32),
                    num_channels=3))

        # (NUM, B, H, W, C) -> (B, H, W, C, NUM): one rendered image per view
        rendered_garms_all = tf.transpose(rendered_garms_all, [1, 2, 3, 4, 0])
        out_dict['rendered'] = rendered_garms_all

        lap = compute_laplacian_diff(vertices_tposed, vertices_naked,
                                     self.faces)
        ##
        out_dict['laplacian'] = lap
        return out_dict
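The least obvious step above is the per-vertex layer selection around
tf.batch_gather: the naked skin is prepended as layer 0 of a
(batch, n_verts, n_layers, 3) stack, and every vertex then picks its
coordinates from the layer named by vertexlabel. A minimal NumPy sketch of
the same gather, assuming vertexlabel has shape (batch, n_verts, 1) and using
toy sizes rather than the real NVERTS:

import numpy as np

# Toy sizes: batch B=1, V=4 vertices, L=2 layers (0 = naked skin,
# 1 = a single garment), 3 coordinates per vertex.
layers = np.arange(1 * 4 * 2 * 3, dtype=np.float32).reshape(1, 4, 2, 3)
vertexlabel = np.array([[[0], [1], [1], [0]]])  # (B, V, 1)

# For every vertex, take the coordinates of the layer its label names;
# this mirrors tf.batch_gather on the transposed (B, V, L, 3) tensor.
clothed = np.take_along_axis(layers, vertexlabel[..., None], axis=2)
clothed = clothed.squeeze(2)  # (B, V, 3)
print(clothed.shape)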
Example 2
    def call(self, inp):
        images, vertexlabel, Js_in = inp
        out_dict = {}
        """
        print('\n'*2 + '='*99)
        print(len(images))    # 8
        print(images[0].shape)# (2, 480, 270, 3)
        print('='*99 + '\n'*2  )
        #raise Exception("nxb stopped execution here.")
        """
        # NOTE: this is the last place I was trying to understand why img1[0] and img1[1] aren't images of the same person from the same view.  -nxb
        # NOTE: BLB told us that test_network.py runs on 2 people simultaneously
        images = [
            tf.Variable(x, dtype=tf.float32, trainable=False) for x in images
        ]  # list of 2 images (as tf.Variable()s)
        vertexlabel = tf.cast(tf.Variable(vertexlabel, trainable=False),
                              tf.int32)
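        # vertexlabel: per-vertex garment-layer index (presumably 0 = bare
        # skin), used later to pick each vertex from the stacked layers.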
        if FACE:
            Js = [Lambda(lambda j: j[:, :25])(J) for J in Js_in]
        else:
            Js = [
                self.flatten(
                    tf.cast(tf.Variable(x, trainable=False), tf.float32))
                for x in Js_in
            ]

        with tf.device('/gpu:0'):
            lat_codes = [self.top_([q, j]) for q, j in zip(images, Js)]
            latent_code_offset = self.avg([q[0] for q in lat_codes])
            latent_code_betas = self.avg([q[1] for q in lat_codes])
            latent_code_pose = [
                tf.concat([q[1], x], axis=-1) for q, x in zip(lat_codes, Js)
            ]

            latent_code_betas = self.lat_betas(latent_code_betas)
            betas = self.betas(latent_code_betas)

            latent_code_pose = [self.lat_pose(x) for x in latent_code_pose]

            pose_trans_init = tf.tile(tf.expand_dims(self.pose_trans, 0),
                                      (K.int_shape(betas)[0], 1))
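            # Tile the learned initial pose/translation vector across the
            # batch so it can be added to every per-view pose prediction.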

            poses_ = [
                self.lat_pose_layer(x) + pose_trans_init
                for x in latent_code_pose
            ]
            trans_ = [self.cut_trans(x) for x in poses_]
            trans = [la(i) for la, i in zip(self.trans_layers, trans_)]

            poses_ = [self.cut_poses(x) for x in poses_]
            poses_ = [self.reshape_pose(x) for x in poses_]
            poses = [la(i) for la, i in zip(self.pose_layers, poses_)]

            ##
            out_dict['betas'] = betas
            for i in range(NUM):
                out_dict['pose_{}'.format(i)] = poses[i]
                out_dict['trans_{}'.format(i)] = trans[i]

            latent_code_offset_ShapeMerged = self.latent_code_offset_ShapeMerged(
                latent_code_offset)
            latent_code_offset_ShapeMerged = self.latent_code_offset_ShapeMerged_2(
                latent_code_offset_ShapeMerged)

            garm_model_outputs = [
                fe(latent_code_offset_ShapeMerged) for fe in self.garmentModels
            ]
            garment_verts_all = [fe[0] for fe in garm_model_outputs]
            garment_pca = [fe[1] for fe in garm_model_outputs]
            garment_pca = tf.stack(garment_pca, axis=1)
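            # PCA coefficients from each garment model, stacked along a new
            # garment axis (axis=1).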

            ##
            out_dict['pca_verts'] = garment_pca

            # Scatter each garment mesh into the common SMPL vertex indexing,
            # one layer per garment.
            garment_verts_all_scattered = tf.stack(
                [vs(go) for go, vs in zip(garment_verts_all, self.scatters)],
                axis=-1)

            ## Get the naked SMPL (zero pose, translation, offsets) to compute
            ## garment offsets against
            zero_offsets = K.zeros_like(garment_verts_all_scattered[..., 0])
            zero_poses = [K.zeros_like(p) for p in poses]
            zero_trans = [K.zeros_like(t) for t in trans]

            smpls_base = []
            for i, (p, t) in enumerate(zip(zero_poses, zero_trans)):
                v, _, n, _ = self.smpl(p, betas, t, zero_offsets)
                smpls_base.append(v)
                if i == 0:
                    vertices_naked_ = n

            ## Prepend the naked skin as layer 0 of the per-vertex layer stack
            garment_verts_all_scattered = tf.concat(
                [
                    K.expand_dims(vertices_naked_, -1),
                    tf.cast(garment_verts_all_scattered, vertices_naked_.dtype)
                ],
                axis=-1)
            garment_verts_all_scattered = tf.transpose(
                garment_verts_all_scattered, perm=[0, 1, 3, 2])
            clothed_verts = tf.batch_gather(garment_verts_all_scattered,
                                            vertexlabel)
            clothed_verts = tf.squeeze(
                tf.transpose(clothed_verts, perm=[0, 1, 3, 2]))

            offsets_ = clothed_verts - vertices_naked_

            smpls = []
            for i, (p, t) in enumerate(zip(poses, trans)):
                v, t, n, _ = self.smpl(p, betas, t, offsets_)
                smpls.append(v)
                if i == 0:
                    vertices_naked = n
                    vertices_tposed = t

        Js = [
            jl(self.smpl_J(p, betas, t))
            for jl, p, t in zip(self.J_layers, poses, trans)
        ]
        #Js = [  jl(self.smpl_J([p, betas, t]))    for jl, p, t in zip(self.J_layers, poses, trans)  ]   # original line (pre-   Mon Feb 10 20:19:03 EST 2020)
        vertices = tf.concat(
            [tf.expand_dims(smpl, axis=-1) for smpl in smpls], axis=-1)

        ##
        out_dict['vertices'] = vertices
        out_dict['vertices_tposed'] = vertices_tposed
        out_dict['vertices_naked'] = vertices_naked
        out_dict['offsets_h'] = offsets_
        for i in range(NUM):
            out_dict['J_{}'.format(i)] = Js[i]

        vert_cols = tf.reshape(
            tf.gather(self.colormap, tf.reshape(vertexlabel, (-1, ))),
            (-1, config.NVERTS, 3))
        rendered_garms_all = []

        for view in range(NUM):
            rendered_garms_all.append(
                render_colored_batch(
                    vertices[..., view],
                    self.faces,
                    vert_cols,
                    IMG_SIZE,
                    IMG_SIZE,
                    FOCAL_LENGTH,
                    CAMERA_CENTER,
                    np.zeros(3, dtype=np.float32),
                    num_channels=3))

        # (NUM, B, H, W, C) -> (B, H, W, C, NUM): one rendered image per view
        rendered_garms_all = tf.transpose(rendered_garms_all, [1, 2, 3, 4, 0])
        out_dict['rendered'] = rendered_garms_all

        lap = compute_laplacian_diff(vertices_tposed, vertices_naked,
                                     self.faces)
        ##
        out_dict['laplacian'] = lap
        return out_dict
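compute_laplacian_diff is not shown in either example; from its arguments it
compares the mesh Laplacian of the clothed T-posed surface against that of
the naked body. A rough uniform-weight sketch of what such a function could
compute (an assumption: the original may use cotangent weights and batched
tensors rather than this hypothetical per-mesh NumPy version):

import numpy as np

def uniform_laplacian_diff(verts_a, verts_b, faces):
    """Difference of uniform-weight Laplacians of two meshes sharing faces.

    verts_a, verts_b: (V, 3) float arrays; faces: (F, 3) int array.
    Hypothetical stand-in for compute_laplacian_diff, not the original code;
    assumes every vertex appears in at least one face.
    """
    n_verts = verts_a.shape[0]
    neighbors = [set() for _ in range(n_verts)]
    for tri in faces:
        for i in range(3):
            a, b = tri[i], tri[(i + 1) % 3]
            neighbors[a].add(b)
            neighbors[b].add(a)

    def laplacian(verts):
        # Uniform Laplacian: mean of the 1-ring neighbors minus the vertex.
        out = np.zeros_like(verts)
        for v, nbrs in enumerate(neighbors):
            out[v] = verts[list(nbrs)].mean(axis=0) - verts[v]
        return out

    return laplacian(verts_a) - laplacian(verts_b)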