Ejemplo n.º 1
0
        def build_discriminator(nf=DISCRIMINATOR_CH, depth=DEPTH, ks=4):
            """Build an image discriminator.

            Args:
                nf: base number of filters for the first conv stage.
                depth: number of downsampling stages.
                ks: convolution kernel size.

            Returns:
                A dm.Model mapping an RGB image of size INPUT_SHAPE to the
                discriminator's validity output.
            """
            inputs = dm.layers.Input(shape=[INPUT_SHAPE, INPUT_SHAPE, 3])
            # Bug fix: the original ignored the `depth` and `ks` arguments and
            # always passed the global DEPTH and the literal 4. Forward the
            # parameters so callers can actually configure the network; the
            # defaults keep the previous behavior.
            validity = dm.networks.Discriminator(inputs,
                                                 nf=nf,
                                                 depth=depth,
                                                 ks=ks)

            return dm.Model(inputs, validity)
Ejemplo n.º 2
0
        def build_img_encoder():
            """Encode an RGB image into a mesh embedding and camera parameters.

            A single 2D encoder produces both codes concatenated; the first
            EMBEDING channels are the mesh code, the remaining CAMERA_PARAM
            channels are squashed by tanh and scaled into (-3, 3).
            """
            image = dm.layers.Input(
                shape=[INPUT_SHAPE, INPUT_SHAPE, 3], name='input_img')

            joint_code = dm.networks.Encoder2D(
                image, EMBEDING + CAMERA_PARAM, depth=4, nf=64)

            # Split the joint code into its two semantic halves.
            mesh_code = dm.layers.Lambda(
                lambda t: t[..., :EMBEDING])(joint_code)
            camera_code = dm.layers.Lambda(
                lambda t: dm.K.tanh(t[..., EMBEDING:]) * 3)(joint_code)

            return dm.Model(image, [mesh_code, camera_code], name='image_encoder')
Ejemplo n.º 3
0
    def build_mesh_encoder():
        """Encode a mesh (N_VERTICES x inputs_channels) into an EMBEDING-d code."""
        mesh = dm.layers.Input(shape=[N_VERTICES, inputs_channels],
                               name='input_mesh')
        # Graph-convolutional encoder driven by the precomputed Laplacians
        # and downsampling matrices.
        code = dm.networks.MeshEncoder(
            mesh,
            EMBEDING,
            graph_laplacians,
            downsampling_matrices,
            filter_list=FILTERS)

        return dm.Model(mesh, code, name='mesh_encoder')
Ejemplo n.º 4
0
    def build_img_encoder():
        """Encode an RGB image into a single EMBEDING-dimensional code."""
        image = dm.layers.Input(shape=[INPUT_SHAPE, INPUT_SHAPE, 3],
                                name='input_img')

        code = dm.networks.Encoder2D(
            image, EMBEDING, depth=4, nf=32)

        return dm.Model(image, code, name='image_encoder')
Ejemplo n.º 5
0
 def build_generator(nf=GENERATOR_CH, depth=DEPTH, name=None, ks=4):
     """Build the image-to-image generator using a ResNet50 backbone.

     NOTE: `depth` and `ks` are only consumed by the alternative backbones
     kept below for reference; the active ResNet path ignores them.
     """
     image_in = dm.layers.Input(shape=[INPUT_SHAPE, INPUT_SHAPE, 3])
     # Alternative backbones kept for reference:
     # unet
     # outputs = dm.networks.UNet(inputs, [INPUT_SHAPE, INPUT_SHAPE, 3], nf=nf, ks=ks)
     # hourglass
     # outputs = dm.networks.Hourglass(inputs, [INPUT_SHAPE, INPUT_SHAPE, 3], nf=64, batch_norm='InstanceNormalization2D')
     image_out = dm.networks.ResNet50(
         image_in, [INPUT_SHAPE, INPUT_SHAPE, 3], nf=nf)
     return dm.Model(image_in, image_out, name=name)
Ejemplo n.º 6
0
        def build_decoder():
            """Decode an EMBEDING-dimensional code back into a mesh."""
            code = dm.layers.Input(shape=[EMBEDING], name='input_embeding')
            # Graph-convolutional decoder driven by the precomputed
            # Laplacians, adjacency and upsampling matrices.
            mesh = dm.networks.MeshDecoder(code,
                                           inputs_channels,
                                           graph_laplacians,
                                           adj_matrices,
                                           upsamling_matrices,
                                           polynomial_order=6,
                                           filter_list=FILTERS)

            return dm.Model(code, mesh, name='decoder')
Ejemplo n.º 7
0
        def build_discriminator(nf=DISCRIMINATOR_CH, depth=DEPTH, ks=4):
            """Build a discriminator with an auxiliary class-prediction head.

            Args:
                nf: base number of filters for the first conv stage.
                depth: number of downsampling stages.
                ks: convolution kernel size.

            Returns:
                A dm.Model mapping an RGB image to [validity, class softmax].
            """
            inputs = dm.layers.Input(shape=[INPUT_SHAPE, INPUT_SHAPE, 3])
            # Bug fix: forward `ks` instead of the hard-coded literal 4 so the
            # parameter is honored (depth was already forwarded correctly).
            # return_endpoints also exposes the last conv feature map, which
            # feeds the auxiliary classifier below.
            validity, conv_feature = dm.networks.Discriminator(
                inputs, nf=nf, depth=depth, ks=ks, return_endpoints=True)
            # Valid-padded conv over the whole DISC_SHAPE feature map collapses
            # it to a 1x1xN_CLASSES prediction, reshaped to a flat vector.
            classes = dm.networks.conv2d(conv_feature,
                                         N_CLASSES,
                                         DISC_SHAPE,
                                         padding='valid',
                                         activation='softmax')
            classes = dm.layers.Reshape([N_CLASSES])(classes)

            return dm.Model(inputs, [validity, classes])
Ejemplo n.º 8
0
 def build_generator(nf=GENERATOR_CH, depth=DEPTH, name=None, ks=4):
     """Build a class-conditional image-to-image generator.

     The target class label is tiled over the spatial grid and concatenated
     with the input image before the ResNet50 backbone.

     NOTE: `depth` and `ks` are only consumed by the alternative backbones
     kept below for reference; the active ResNet path ignores them.
     """
     image_in = dm.layers.Input(shape=[INPUT_SHAPE, INPUT_SHAPE, 3])
     label_in = dm.layers.Input(shape=[N_CLASSES])

     # Broadcast the one-hot label to an INPUT_SHAPE x INPUT_SHAPE map so it
     # can be concatenated channel-wise with the image.
     label_map = dm.layers.RepeatVector(
         INPUT_SHAPE * INPUT_SHAPE)(label_in)
     label_map = dm.layers.Reshape(
         [INPUT_SHAPE, INPUT_SHAPE, N_CLASSES])(label_map)
     conditioned = dm.layers.Concatenate()([image_in, label_map])

     # Alternative backbones kept for reference:
     # unet
     # outputs = dm.networks.UNet(inputs, [INPUT_SHAPE, INPUT_SHAPE, 3], nf=nf, ks=ks)
     # hourglass
     # outputs = dm.networks.Hourglass(inputs, [INPUT_SHAPE, INPUT_SHAPE, 3], nf=64, batch_norm='InstanceNormalization2D')
     image_out = dm.networks.ResNet50(conditioned,
                                      [INPUT_SHAPE, INPUT_SHAPE, 3],
                                      nf=nf,
                                      n_residule=6)
     return dm.Model([image_in, label_in], image_out, name=name)
Ejemplo n.º 9
0
    def model_builder():
        """Build the UV-position regression model (two stacked Hourglass nets),
        optionally augmented with a frozen convolutional autoencoder whose
        encoder provides an extra embedding-space output/loss.

        Returns:
            (train_model, ae_model, encoder_model, decoder_model) when both
            FLAGS.use_ae and FLAGS.ae_path are set; otherwise train_model only.
        """
        optimizer = dm.optimizers.Adam(lr=LR, clipnorm=1., decay=0.)

        if FLAGS.use_ae and FLAGS.ae_path:

            # encoder: image -> 128-d embedding (no batch norm)
            ae_input = dm.layers.Input(shape=[INPUT_SHAPE, INPUT_SHAPE, 3],
                                       name='ae_input')
            embeding_output = dm.networks.Encoder2D(ae_input,
                                                    128,
                                                    depth=8,
                                                    nf=32,
                                                    batch_norm=False)
            encoder_model = dm.Model(inputs=[ae_input],
                                     outputs=[embeding_output],
                                     name='encoder_model')

            # decoder: 128-d embedding -> reconstructed image
            input_embeding = dm.layers.Input(shape=[
                128,
            ],
                                             name='ae_input_embeding')
            ae_output = dm.networks.Decoder2D(input_embeding,
                                              [INPUT_SHAPE, INPUT_SHAPE, 3],
                                              depth=8,
                                              nf=32,
                                              batch_norm=False)
            decoder_model = dm.Model(inputs=[input_embeding],
                                     outputs=[ae_output],
                                     name='decoder_model')

            # combined model: encoder + decoder chained end to end.
            # Frozen after compile so the AE is not updated while training the
            # main model (presumably pre-trained weights are loaded from
            # FLAGS.ae_path elsewhere — cannot confirm from this snippet).
            ae_model = dm.DeepMachine(
                inputs=[ae_input],
                outputs=[decoder_model(encoder_model(ae_input))])
            ae_model.compile(optimizer=optimizer, loss=['mae'])
            ae_model.trainable = False

        # Stage 1: coarse UV-xyz prediction from the raw image.
        input_image = dm.layers.Input(shape=[INPUT_SHAPE, INPUT_SHAPE, 3],
                                      name='input_image')
        uvxyz_prediction = dm.networks.Hourglass(
            input_image, [256, 256, 3],
            nf=64,
            batch_norm='InstanceNormalization2D')

        # Stage 2: refine by conditioning a second hourglass on the image
        # concatenated with the stage-1 prediction.
        merged_input = dm.layers.Concatenate()([input_image, uvxyz_prediction])

        uvxyz_prediction_refine = dm.networks.Hourglass(
            merged_input, [256, 256, 3],
            nf=64,
            batch_norm='InstanceNormalization2D')

        # Both stages are supervised; optionally also supervise the AE
        # embedding of the refined prediction.
        outputs = [uvxyz_prediction, uvxyz_prediction_refine]
        if FLAGS.use_ae and FLAGS.ae_path:
            uvxyz_prediction_ae = encoder_model(uvxyz_prediction_refine)
            outputs.append(uvxyz_prediction_ae)

        train_model = dm.DeepMachine(inputs=input_image, outputs=outputs)

        def weighted_uv_loss(y_true, y_pred):
            # Per-pixel L1 weighted by `weight_mask` — a name captured from
            # the enclosing module scope, not defined in this function
            # (NOTE(review): verify weight_mask's shape broadcasts against
            # [256, 256, 3] predictions).
            loss = dm.K.mean(weight_mask * dm.K.abs(y_true - y_pred))

            return loss

        # 'mae' applies only to the optional third (embedding) output; with
        # two outputs Keras ignores the extra loss entry's weight pairing is
        # positional, so order must match `outputs`.
        train_model.compile(optimizer=optimizer,
                            loss=[weighted_uv_loss, weighted_uv_loss, 'mae'],
                            loss_weights=[1, 1, 1])

        if FLAGS.use_ae and FLAGS.ae_path:
            return train_model, ae_model, encoder_model, decoder_model
        else:
            return train_model