def misconception_fishing_2(input,
                            window_size,
                            depths,
                            strides,
                            objective_function,
                            is_training,
                            pre_count=128,
                            post_count=128,
                            post_layers=1,
                            keep_prob=0.5,
                            internal_keep_prob=0.5,
                            other_objectives=()):
    """Build a misconception tower with a per-timepoint fishing head.

    Args:
        input: tensor of shape [batch, 1, width, depth]; channel 0 of the
            first row holds log-encoded time deltas (log(dt + 1)).
            (Assumed from the indexing below — TODO confirm against caller.)
        window_size, depths, strides: forwarded to `misconception_model`.
        objective_function: object whose `build(outputs, dt)` produces the
            final result.
        is_training: bool (or bool tensor) gating batch norm and dropout.
        pre_count: channels for the per-level 1x1 projection.
        post_count: channels for the post-merge 1x1 layers.
        post_layers: number of post-merge conv layers.
        keep_prob: dropout keep probability before the output conv.
        internal_keep_prob: unused here; kept for interface compatibility.
        other_objectives: extra objectives forwarded to `misconception_model`.

    Returns:
        whatever `objective_function.build` returns.
    """
    # Decode the time deltas, floor them at 12 hours, then average
    # neighbouring pairs so dt aligns with the between-sample intervals.
    dt = tf.exp(input[:, 0, :, 0]) - 1
    dt = tf.maximum(dt, 12 * 60 * 60)
    dt = 0.5 * (dt[:, 1:] + dt[:, :-1])

    _, tower_layers = misconception_model(input,
                                          window_size,
                                          depths,
                                          strides,
                                          other_objectives,
                                          is_training,
                                          sub_count=post_count,
                                          sub_layers=2)

    # Project every tower level to `pre_count` channels, then upsample it
    # back to full temporal resolution (level i was downsampled 2**i times).
    upsampled = [
        utility.repeat_tensor(
            slim.conv2d(level_output,
                        pre_count, [1, 1],
                        activation_fn=tf.nn.relu,
                        normalizer_fn=slim.batch_norm,
                        normalizer_params={'is_training': is_training}),
            2**level)
        for level, level_output in enumerate(tower_layers)
    ]

    # Merge the multi-scale features by summation.
    embedding = tf.add_n(upsampled)

    # All post layers but the last use batch norm; the final one does not.
    for _ in range(post_layers - 1):
        embedding = slim.conv2d(embedding,
                                post_count, [1, 1],
                                activation_fn=tf.nn.relu,
                                normalizer_fn=slim.batch_norm,
                                normalizer_params={'is_training': is_training})
    embedding = slim.conv2d(embedding,
                            post_count, [1, 1],
                            activation_fn=tf.nn.relu,
                            normalizer_fn=None)
    embedding = slim.dropout(embedding, keep_prob, is_training=is_training)

    # 1x1 conv down to a single logit per timepoint; squeeze the singleton
    # height and channel axes to get [batch, width].
    logits = slim.conv2d(embedding,
                         1, [1, 1],
                         activation_fn=None,
                         normalizer_fn=None)
    fishing_outputs = tf.squeeze(logits, squeeze_dims=[1, 3])

    return objective_function.build(fishing_outputs, dt)
# Esempio n. 2 (score: 0) — second scraped snippet follows.
    def misconception_with_fishing_ranges(self, input, mmsis, is_training):
        """Build a misconception tower with an added fishing-range head.

        Builds, as side effects, every objective in
        `self.classification_training_objectives` on the vessel embedding,
        plus `self.fishing_localisation_objective` on the per-timepoint
        fishing outputs. Tower shape is taken from instance attributes
        (`self.levels`, `self.window_size`, `self.stride`,
        `self.feature_depth`).

        Args:
            input: a tensor of size [batch_size, 1, width, depth].
            mmsis: vessel identifiers for the batch; not used in this body.
            is_training: bool (or bool tensor) controlling dropout.

        Returns:
            None. All outputs are consumed by the objectives' `build` calls.
        """
        with slim.arg_scope([slim.fully_connected], activation_fn=tf.nn.elu):
            net = input

            # Then a tower for classification.
            # Keep a copy of each level's input, upsampled back to full
            # temporal resolution (level i is downsampled 2**i times).
            multiscale_layers = []
            for i in range(self.levels):
                with tf.variable_scope("layer_%d" % i):
                    multiscale_layers.append(utility.repeat_tensor(net, 2**i))

                    net = layers.misconception_with_bypass(
                        net, self.window_size, self.stride, self.feature_depth,
                        is_training)

            # TODO: We currently don't use the last year for fishing classification
            # Since we don't use this for vessel classification currently, perhaps
            # we should rememdy that...

            # Vessel-level embedding for the classification objectives.
            net = slim.flatten(net)
            net = slim.dropout(net, 0.5, is_training=is_training)
            net = slim.fully_connected(net, 100)
            net = slim.dropout(net, 0.5, is_training=is_training)

            # Legacy TF concat signature: tf.concat(axis, values) — joins the
            # per-level feature maps along the channel axis.
            concatenated_multiscale_embedding = tf.concat(3, multiscale_layers)

            # 1x1 conv to one logit per timepoint; squeeze the singleton
            # height and channel axes to get [batch, width].
            fishing_outputs = tf.squeeze(slim.conv2d(
                concatenated_multiscale_embedding,
                1, [1, 1],
                activation_fn=None),
                                         squeeze_dims=[1, 3])

            for of in self.classification_training_objectives:
                of.build(net)

            self.fishing_localisation_objective.build(fishing_outputs)