Example #1
0
    def __init__(self, dcs: list, input_s: InputLayer, name='MakiSSD'):
        """
        Assembles an SSD model out of a set of detector classifiers.

        Parameters
        ----------
        dcs : list
            Detector classifiers; each one yields a (confidences, offsets)
            pair of MakiTensors via `get_conf_offsets()`.
        input_s : InputLayer
            The input layer of the model.
        name : str
            Name of the model.
        """
        self.dcs = dcs
        self.name = str(name)

        # Gather every tensor the graph consists of and collect the
        # (confidences, offsets) pairs as the model outputs.
        collected_tensors = {}
        output_tensors = []
        for detector in dcs:
            confidences, offsets = detector.get_conf_offsets()
            collected_tensors.update(confidences.get_previous_tensors())
            collected_tensors.update(offsets.get_previous_tensors())
            collected_tensors.update(confidences.get_self_pair())
            collected_tensors.update(offsets.get_self_pair())
            output_tensors.extend((confidences, offsets))

        super().__init__(collected_tensors, output_tensors, [input_s])
        self.input_shape = input_s.get_shape()
        self.batch_sz = self.input_shape[0]

        self._generate_default_boxes()
        self._prepare_inference_graph()
        # Get number of classes. It is needed for Focal Loss
        self._num_classes = self.dcs[0].class_number
        # Training graph is built lazily elsewhere.
        self._training_vars_are_ready = False
Example #2
0
 def __init__(self,
              input: InputLayer,
              output: MakiTensor,
              name='MakiClassificator'):
     """
     Builds a classificator model around a single input/output tensor pair.

     Parameters
     ----------
     input : InputLayer
         The input layer of the model.
     output : MakiTensor
         The output tensor of the model.
     name : str
         Name of the model.
     """
     # `graph_tensors` must contain ALL THE TENSORS the graph consists of.
     # The output tensor is absent from its own `get_previous_tensors()`,
     # hence its self pair is merged in explicitly.
     all_tensors = copy(output.get_previous_tensors())
     all_tensors.update(output.get_self_pair())
     super().__init__(all_tensors, [output], [input])
     self.name = str(name)
     self._batch_sz = input.get_shape()[0]
     self._images = self._input_data_tensors[0]
     self._inference_out = self._output_data_tensors[0]
     # Training graph is built lazily elsewhere.
     self._training_vars_are_ready = False
Example #3
0
def ssp_model():
    """
    Build a small SSP test model: a conv backbone that halves the spatial
    size at every stage, with one head attached after each stage.
    """
    in_x = InputLayer([1, 64, 64, 3], name='input_image')
    feat = ConvLayer(kw=3, kh=3, in_f=3, out_f=64, stride=2, name='conv1_1')(in_x)
    feat = ConvLayer(kw=3, kh=3, in_f=64, out_f=64, stride=2, name='conv1_2')(feat)  # [16, 16]
    heads = [make_head(feat, 1)]

    # Remaining stages: kernel size and layer name per stage.
    # Feature maps shrink [8, 8] -> [4, 4] -> [2, 2] -> [1, 1].
    stage_specs = [(3, 'conv2'), (3, 'conv3'), (3, 'conv4'), (1, 'conv5')]
    for level, (k, conv_name) in enumerate(stage_specs, start=2):
        feat = ConvLayer(kw=k, kh=k, in_f=64, out_f=64, stride=2, name=conv_name)(feat)
        heads.append(make_head(feat, level))

    return SSPModel(
        in_x=in_x,
        heads=heads
    )
Example #4
0
    def setup_model(model_config, gen_layer, sess):
        """
        Instantiates a PEModel from a config dict and prepares it for
        single-image inference with a dynamic spatial size.

        Parameters
        ----------
        model_config : dict
            Holds the architecture path, weights path and the list of
            pretrained layer names (see the ModelAssembler keys used below).
        gen_layer
            Layer whose shape and name seed the new input layer.
        sess
            TensorFlow session to attach to the model.

        Returns
        -------
        PEModel
            The assembled model, with pretrained weights loaded if a
            weights path was supplied.
        """
        # NOTE(review): the returned shape list is mutated in place below —
        # assumes `get_shape()` returns a fresh list, not shared state; confirm.
        input_shape = gen_layer.get_shape()
        input_shape[0] = 1      # batch size of 1 for inference
        input_shape[1] = None   # dynamic image height
        input_shape[2] = None   # dynamic image width

        input_layer = InputLayer(input_shape=input_shape, name=gen_layer.get_name())
        model = PEModel.from_json(model_config[ModelAssembler.ARCH_PATH],
                                  input_tensor=input_layer)
        model.set_session(sess)

        # Load pretrained weights, if any were configured.
        weights_path = model_config[ModelAssembler.WEIGHTS_PATH]
        pretrained_layers = model_config[ModelAssembler.PRETRAINED_LAYERS]
        if weights_path is not None:
            model.load_weights(weights_path, layer_names=pretrained_layers)

        return model
Example #5
0
    def get_embedding(self):
        """Return the stored custom embedding (`_custom_embedding`) as-is."""
        return self._custom_embedding


# For debug
if __name__ == '__main__':
    # Generate points around a circle of radius 0.7 centred at the origin.
    phi = np.linspace(0, 2 * np.pi, num=100)
    x = np.cos(phi) * 0.7 + [0]
    y = np.sin(phi) * 0.7 + [0]
    points = np.stack([x, y], axis=-1)

    from makiflow.layers import InputLayer

    # RUN A SANITY CHECK FIRST
    in_x = InputLayer(input_shape=[1, 3, 3, 100 * 2], name='offsets')
    # Never pass in a numpy array to the `custom_embedding` argument. Always use list.
    # BUGFIX: `points` is an np.ndarray, which violates the warning above —
    # convert it to a plain nested list first.
    coords_ish = SkeletonEmbeddingLayer(embedding_dim=None, name='TestEmbedding',
                                        custom_embedding=points.tolist())(in_x)

    print('Coords MakiTensor', coords_ish)
    print('Coords TfTensor', coords_ish.get_data_tensor())

    # Run the graph on zero offsets: the output should be the embedding itself.
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    coords = sess.run(
        coords_ish.get_data_tensor(),
        feed_dict={
            in_x.get_data_tensor(): np.zeros(shape=[1, 3, 3, 200], dtype='float32')
        }
    )
Example #6
0
    def training_forward(self, X):
        """Run `forward` on `X` with the computation mode set to training."""
        return self.forward(X, computation_mode=MakiRestorable.TRAINING_MODE)

    def to_dict(self):
        """
        Serialize the layer into a JSON-friendly dict: its class name plus
        the parameters needed to rebuild it (name, in_f, kq_dim).
        """
        params = {
            MakiRestorable.NAME: self.get_name(),
            self.IN_F: self._in_f,
            self.KQ_DIM: self._kq_dim,
        }
        return {
            MakiRestorable.FIELD_TYPE: self.__class__.__name__,
            MakiRestorable.PARAMS: params,
        }


# Make these layers discoverable by class name when a model is restored.
MakiBuilder.register_layers({
    layer.__name__: layer
    for layer in (PositionalEncodingLayer, AttentionLayer, SpatialAttentionLayer)
})

if __name__ == '__main__':
    # Quick smoke test: wire the spatial attention layer to a dummy input.
    from makiflow.layers import InputLayer
    out = InputLayer(input_shape=[None, 32, 12, 64], name='name')
    out = SpatialAttentionLayer(in_f=64, name='attention')(out)
    print(out)
    print(out.get_parent_layer())
Example #7
0
    def get_human_indicators(self):
        """Return the stored human-indicators tensor (`_human_indicators`) as-is."""
        return self._human_indicators

    def get_description(self):
        """
        Build a human-readable description: the context string followed by
        the grid size and the bbox configuration.
        """
        return self._context + (
            f'/GridSize={self.get_grid_size()}'
            f'/BboxConfig={self.get_bbox_configuration()}'
        )


# For debug
if __name__ == '__main__':
    from makiflow.layers import InputLayer

    batch_size, n_points = 1, 10

    # Build a head on a 3x3 grid with matching inputs.
    offsets = InputLayer(input_shape=[batch_size, 3, 3, n_points * 2], name='offsets')
    coords = SkeletonEmbeddingLayer(embedding_dim=n_points, name='SkeletonEmbedding')(offsets)
    point_indicators = InputLayer(input_shape=[batch_size, 3, 3, n_points], name='point_indicators')
    human_indicators = InputLayer(input_shape=[batch_size, 3, 3, 1], name='human_indicators')

    head = Head(coords, point_indicators, human_indicators)
    print('Bbox configuration:', head.get_bbox_configuration())
    print('Bbox coordinates:', head.get_bbox())
    print(head.get_description())

    from makiflow.core.debug import DebugContext

    # A mismatching (2x3) spatial shape should be caught by Head's checks.
    with DebugContext('Spatial shape checking.'):
        offsets = InputLayer(input_shape=[batch_size, 2, 3, n_points * 2], name='offsets')
        coords = SkeletonEmbeddingLayer(embedding_dim=n_points, name='SkeletonEmbedding')(offsets)
        Head(coords, point_indicators, human_indicators)
Example #8
0
        # right
        [12, 14],
        [14, 16],
        [16, 18],
        # Additional limbs
        [5, 7],
        [4, 6],
    ]
    """
    kp : tf.Tensor of shape [batch, c, n_people, 2]
            Tensor of keypoints coordinates.
    masks : tf.Tensor of shape [batch, c, n_people, 1]
    """
    im_size = [512, 512]
    paf_sigma = 20
    keypoints = InputLayer(input_shape=[32, 24, 8, 2], name='keypoints')
    # BUGFIX: this layer was also named 'keypoints' (copy-paste slip),
    # which would create two layers with the same name. Give the masks
    # input its own distinct name.
    masks = InputLayer(input_shape=[32, 24, 8, 1], name='masks')

    paf_layer = PAFLayer(im_size=im_size,
                         sigma=paf_sigma,
                         skeleton=CONNECT_INDEXES_FOR_PAFF)
    paf = paf_layer([keypoints, masks])

    # Evaluate the PAF tensor's shape on random inputs.
    sess = tf.Session()
    paf_shape = sess.run(tf.shape(paf.get_data_tensor()),
                         feed_dict={
                             keypoints.get_data_tensor():
                             np.random.randn(32, 24, 8, 2),
                             masks.get_data_tensor():
                             np.random.randn(32, 24, 8, 1)
                         })
Example #9
0
            axes=self._axes
        )

        super().track_loss(dice_loss, DiceTrainer.DICE_LOSS)
        return dice_loss


# Make DiceTrainer constructible from a serialized config via TrainerBuilder.
TrainerBuilder.register_trainer(DiceTrainer)

if __name__ == '__main__':
    from makiflow.models.classificator import Classificator
    from makiflow.layers import InputLayer

    # SEGMENTATION CASE
    print('SEGMENTATION CASE------------------------------------------------------------------------------------------')
    in_x = InputLayer(input_shape=[32, 128, 128, 3], name='input')

    # Identity model (input doubles as output) — enough to exercise the
    # trainer construction paths.
    model = Classificator(in_x=in_x, out_x=in_x)
    trainer_config = {
        "type": "DiceTrainer",
        "params": {
            "axes": [1, 2, 3],
            "eps": 0.000001
        }
    }
    TrainerBuilder.trainer_from_dict(model, None, None, trainer_config)
    trainer = DiceTrainer(model=model, train_inputs=[in_x])
Example #10
0

# CHECK MODEL'S PREDICT
if __name__ == '__main__':
    from .embedding_layer import SkeletonEmbeddingLayer

    # Generate points around a circle
    phi = np.linspace(0, 2 * np.pi, num=100)
    x = np.cos(phi) * 1.0 + [0]
    y = np.sin(phi) * 1.0 + [0]
    points = np.stack([x, y], axis=-1)

    from makiflow.layers import InputLayer

    # RUN A SANITY CHECK FIRST
    in_x = InputLayer(input_shape=[1, 3, 3, 100 * 2], name='offsets')
    # Never pass in a numpy array to the `custom_embedding` argument. Always use list.
    coords = SkeletonEmbeddingLayer(embedding_dim=None,
                                    name='TestEmbedding',
                                    custom_embedding=points)(in_x)

    print('Coords MakiTensor', coords)
    print('Coords TfTensor', coords.get_data_tensor())

    point_indicators = InputLayer(input_shape=[1, 3, 3, 100],
                                  name='point_indicators')
    human_indicators = InputLayer(input_shape=[1, 3, 3, 1],
                                  name='human_indicators')

    from .head import Head