Example #1
    def build_architecture(self):
        x = self.placeholders['shape_batch']
        num_classes = self.num_classes  # number of output classes (e.g. chair vs. table)

        x = layers.conv3d(x, 64, 3, strides=2, padding='same', name='conv1', reuse=self.reuse)
        x = tf.layers.batch_normalization(x, training=self.is_training, name='conv1_batch_norm',
                                          reuse=self.reuse)
        x = layers.relu(x, name='conv1_relu')
        x = layers.conv3d(x, 128, 3, strides=2, padding='same', name='conv2', reuse=self.reuse)
        x = tf.layers.batch_normalization(x, training=self.is_training, name='conv2_batch_norm',
                                          reuse=self.reuse)
        x = layers.relu(x, name='conv2_relu')
        x = layers.conv3d(x, 256, 3, strides=2, padding='same', name='conv3', reuse=self.reuse)
        x = tf.layers.batch_normalization(x, training=self.is_training, name='conv3_batch_norm',
                                          reuse=self.reuse)
        x = layers.relu(x, name='conv3_relu')
        x = layers.avg_pooling3d(x, name='avg_pool4')
        x = layers.dense(x, 128, name='fc5', reuse=self.reuse)
        encoder_output = x
        x = layers.dense(x, num_classes, name='fc6', reuse=self.reuse)
        prob = layers.softmax(x, name='softmax_layer')

        output_dict = {
            'logits': x,
            'probabilities': prob,
            'encoder_output': encoder_output,
        }

        return output_dict
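
The examples in this file call into a project-specific `layers` module rather than TensorFlow directly. Below is a minimal sketch of what those helpers are assumed to do, written against the TensorFlow 1.x API the snippets already use. Only the helpers that appear in these snippets are sketched; the global-average-pool behaviour of `avg_pooling3d` and the callable returned by `leaky_relu` are assumptions inferred from how they are called, and `reshape`/`concat` from Example #3 would be thin wrappers over the corresponding tf ops.

import tensorflow as tf


def conv3d(x, filters, kernel_size, strides=1, padding='same',
           activation=None, name=None, reuse=None):
    # Thin wrapper over tf.layers.conv3d (assumed behaviour).
    return tf.layers.conv3d(x, filters, kernel_size, strides=strides,
                            padding=padding, activation=activation,
                            name=name, reuse=reuse)


def relu(x, name=None):
    return tf.nn.relu(x, name=name)


def leaky_relu(leak=0.2):
    # Returns a callable so one configured activation can be reused
    # across layers, as in Example #3.
    def fn(x, name=None):
        return tf.nn.leaky_relu(x, alpha=leak, name=name)
    return fn


def avg_pooling3d(x, name=None):
    # Assumed to be a global average pool over the three spatial axes,
    # leaving a [batch, channels] tensor that can feed a dense layer.
    return tf.reduce_mean(x, axis=[1, 2, 3], name=name)


def dense(x, units, activation=None, name=None, reuse=None):
    return tf.layers.dense(x, units, activation=activation,
                           name=name, reuse=reuse)


def softmax(x, name=None):
    return tf.nn.softmax(x, name=name)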
Example #2
    def build_architecture(self, inputs_dict):
        x = inputs_dict['shape_batch']
        if cfg.CONST.DATASET == 'shapenet':
            num_classes = 2  # Chair/table classification
        elif cfg.CONST.DATASET == 'primitives':
            train_inputs_dict = open_pickle(cfg.DIR.PRIMITIVES_TRAIN_DATA_PATH)
            val_inputs_dict = open_pickle(cfg.DIR.PRIMITIVES_VAL_DATA_PATH)
            test_inputs_dict = open_pickle(cfg.DIR.PRIMITIVES_TEST_DATA_PATH)
            # Collect the union of category labels across the train/val/test splits
            get_categories = lambda d: list(d['category_matches'].keys())
            categories = (get_categories(train_inputs_dict) + get_categories(val_inputs_dict)
                          + get_categories(test_inputs_dict))
            categories = list(set(categories))
            num_classes = len(categories)
        else:
            raise ValueError('Please select a valid dataset')

        x = layers.conv3d(x, 64, 3, strides=2, padding='same', name='conv1', reuse=self.reuse)
        x = tf.layers.batch_normalization(x, training=self.is_training, name='conv1_batch_norm',
                                          reuse=self.reuse)
        x = layers.relu(x, name='conv1_relu')
        x = layers.conv3d(x, 128, 3, strides=2, padding='same', name='conv2', reuse=self.reuse)
        x = tf.layers.batch_normalization(x, training=self.is_training, name='conv2_batch_norm',
                                          reuse=self.reuse)
        x = layers.relu(x, name='conv2_relu')
        x = layers.conv3d(x, 256, 3, strides=2, padding='same', name='conv3', reuse=self.reuse)
        x = tf.layers.batch_normalization(x, training=self.is_training, name='conv3_batch_norm',
                                          reuse=self.reuse)
        x = layers.relu(x, name='conv3_relu')
        # keep the pre-pooling feature map as an intermediate output
        intermediate_output = x
        x = layers.avg_pooling3d(x, name='avg_pool4')
        x = layers.dense(x, 128, name='fc5', reuse=self.reuse)
        encoder_output = x
        x = layers.dense(x, num_classes, name='fc6', reuse=self.reuse)
        prob = layers.softmax(x, name='softmax_layer')

        output_dict = {
            'logits': x,
            'probabilities': prob,
            'encoder_output': encoder_output,
            'intermediate_output': intermediate_output
        }

        return output_dict
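
The returned `output_dict` is only the forward pass. A hedged sketch of how its `logits` could be wired into a training step is shown below; the function name, the sparse integer labels, and the Adam learning rate are illustrative assumptions, not taken from the example.

import tensorflow as tf


def classification_train_op(output_dict, labels, learning_rate=1e-4):
    """Hypothetical training wiring for the output_dict returned above."""
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=labels, logits=output_dict['logits']))
    # In TF 1.x the batch-norm moving averages are updated through UPDATE_OPS,
    # so those update ops have to run together with the optimizer step.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)
    return loss, train_op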
Example #3
    def build_architecture(self, inputs_dict):
        print('--> building 3D GAN discriminator architecture')

        leaky_relu_fn = layers.leaky_relu(leak=0.2)

        with tf.variable_scope('architecture', reuse=self.reuse):
            x = inputs_dict['shape_batch']
            print('\t\tinput', x.get_shape())

            x = layers.conv3d(x,
                              filters=64,
                              kernel_size=4,
                              strides=2,
                              padding='SAME',
                              activation=None,
                              name='conv1',
                              reuse=self.reuse)
            x = leaky_relu_fn(x, name='conv1_lrelu')
            x = layers.conv3d(x,
                              filters=128,
                              kernel_size=4,
                              strides=2,
                              padding='SAME',
                              activation=None,
                              name='conv2',
                              reuse=self.reuse)
            x = leaky_relu_fn(x, name='conv2_lrelu')
            x = layers.conv3d(x,
                              filters=256,
                              kernel_size=4,
                              strides=2,
                              padding='SAME',
                              activation=None,
                              name='conv3',
                              reuse=self.reuse)
            x = leaky_relu_fn(x, name='conv3_lrelu')

            # Text embedding input
            embedding_fc_dim = 256

            # Add FC layer
            embedding_batch = inputs_dict['text_encoding_without_noise']
            fc_embedding_output = layers.dense(embedding_batch,
                                               embedding_fc_dim,
                                               activation=None,
                                               name='fc_embedding_1')
            fc_embedding_output = leaky_relu_fn(fc_embedding_output,
                                                name='fc_embedding_1_lrelu')

            # Add FC layer
            fc_embedding_output = layers.dense(fc_embedding_output,
                                               embedding_fc_dim,
                                               activation=None,
                                               name='fc_embedding_2')
            fc_embedding_output = leaky_relu_fn(fc_embedding_output,
                                                name='fc_embedding_2_lrelu')

            x = layers.conv3d(x,
                              filters=512,
                              kernel_size=4,
                              strides=2,
                              padding='SAME',
                              activation=None,
                              name='conv4',
                              reuse=self.reuse)
            x = leaky_relu_fn(x, name='conv4_lrelu')

            x = layers.conv3d(x,
                              filters=256,
                              kernel_size=2,
                              strides=2,
                              padding='SAME',
                              activation=None,
                              name='conv5',
                              reuse=self.reuse)
            x = leaky_relu_fn(x, name='conv5_lrelu')

            x = layers.reshape(x, [cfg.CONST.BATCH_SIZE, -1],
                               scope='reshape_to_concat')
            x = layers.concat([x, fc_embedding_output],
                              axis=1,
                              name='concat_text_shape')

            # Add FC layer
            x = layers.dense(x, 128, activation=None, name='fc6')
            x = leaky_relu_fn(x, name='fc6_lrelu')

            # Add FC layer
            x = layers.dense(x, 64, activation=None, name='fc7')
            x = leaky_relu_fn(x, name='fc7_lrelu')

            # Add FC layer
            logits = layers.dense(x, 1, activation=None, name='fc8')

            sigmoid_output = tf.sigmoid(logits)

        return {'sigmoid_output': sigmoid_output, 'logits': logits}
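
Example #3 returns the raw `logits` alongside the sigmoid output, which suggests the usual numerically stable sigmoid cross-entropy formulation. A minimal sketch of a plain real-vs-fake discriminator loss built on those logits follows; the function name is hypothetical and the original model's matched/mismatched text handling is not reproduced here.

import tensorflow as tf


def discriminator_loss(real_logits, fake_logits):
    """Hypothetical real-vs-fake loss on the discriminator 'logits' output."""
    real_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.ones_like(real_logits), logits=real_logits))
    fake_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.zeros_like(fake_logits), logits=fake_logits))
    return real_loss + fake_loss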