Example #1
    def get_initial_state(self, inputs):
        # Build an all-zeros tensor of shape (samples, units) from the input,
        # without needing to know the batch size statically.
        initial_state = K.zeros_like(inputs)
        initial_state = K.sum(initial_state, axis=(1, 2))
        initial_state = K.expand_dims(initial_state)
        initial_state = K.tile(initial_state, [1, self.units])  # (samples, output_dim)
        # Independent copies of the zero state for the three recurrent states.
        n = K.identity(initial_state)
        d = K.identity(initial_state)
        h = K.identity(initial_state)

        # Initialize `a_max` to a very large negative value (-1e38) in the
        # same dtype as the state tensors.
        dtype = initial_state.dtype.name
        min_value = np.array([1E38]).astype(dtype).item()
        a_max = K.identity(initial_state) - min_value
        h = h + self.cell.recurrent_activation(K.expand_dims(self.cell.initial_attention, axis=0))

        return [n, d, h, a_max]
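
As a rough standalone check of the zero-state construction used above (the input shape and `units` value below are illustrative assumptions, not taken from the original layer), the same chain of backend calls can be run eagerly:

import numpy as np
import tensorflow.keras.backend as K

units = 4
inputs = K.constant(np.random.rand(2, 3, 5))   # (samples, timesteps, features)
state = K.zeros_like(inputs)                   # same shape, all zeros
state = K.sum(state, axis=(1, 2))              # (samples,)
state = K.expand_dims(state)                   # (samples, 1)
state = K.tile(state, [1, units])              # (samples, units)
copy = K.identity(state)                       # a distinct tensor holding the same values
print(K.int_shape(copy))                       # (2, 4)
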
Example #2
def attach_multibox_head(network, source_layer_names,
                         num_priors=4, num_classes=10, activation='softmax'):
    heads = []
    for idx, layer_name in enumerate(source_layer_names):
        source_layer = network.get_layer(layer_name).output

        # Classification
        clf = Conv2D(num_priors * num_classes, (3, 3),
                     padding='same', name=f'clf_head{idx}_logit')(source_layer)
        clf = Reshape((-1, num_classes),
                      name=f'clf_head{idx}_reshape')(clf)
        if activation == 'softmax':
            clf = Softmax(axis=-1, name=f'clf_head{idx}')(clf)
        elif activation == 'sigmoid':
            clf = sigmoid(clf)
        else:
            raise ValueError('activation must be one of {softmax, sigmoid}.')

        # Localization
        loc = Conv2D(num_priors * 4, (3, 3), padding='same',
                     name=f'loc_head{idx}')(source_layer)
        loc = Reshape((-1, 4),
                      name=f'loc_head{idx}_reshape')(loc)
        head = Concatenate(axis=-1, name=f'head{idx}')([clf, loc])
        heads.append(head)

    if len(heads) > 1:
        predictions = Concatenate(axis=1, name='predictions')(heads)
    else:
        predictions = K.identity(heads[0], name='predictions')
    return predictions
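
A hedged usage sketch for the function above (the backbone, the layer names `feat1`/`feat2`, and the input size are made-up assumptions, and the function's own imports such as Conv2D, Reshape, Softmax and Concatenate are assumed to be in scope):

from tensorflow.keras import Input, Model
from tensorflow.keras.layers import Conv2D

inp = Input(shape=(64, 64, 3))
f1 = Conv2D(32, (3, 3), padding='same', name='feat1')(inp)
f2 = Conv2D(64, (3, 3), strides=2, padding='same', name='feat2')(f1)
backbone = Model(inp, f2)

# Each head yields (batch, priors_i, num_classes + 4); heads are then
# concatenated across feature maps along axis 1.
preds = attach_multibox_head(backbone, ['feat1', 'feat2'],
                             num_priors=4, num_classes=10)
ssd = Model(backbone.input, preds)
ssd.summary()
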
Example #3
    def __init__(self,
                 input_tensor,
                 losses,
                 input_range=(0, 255),
                 wrt_tensor=None,
                 norm_grads=True):
        """Creates an optimizer that minimizes weighted loss function.

        Args:
            input_tensor: An input tensor of shape `(samples, channels, image_dims...)` if
                `image_data_format=channels_first`, or `(samples, image_dims..., channels)` if
                `image_data_format=channels_last`.
            losses: List of ([Loss](vis.losses#Loss), weight) tuples.
            input_range: Specifies the input range as a `(min, max)` tuple. This is used to rescale the
                final optimized input to the given range. (Default value=(0, 255))
            wrt_tensor: Short for "with respect to". This instructs the optimizer that the aggregate loss from `losses`
                should be minimized with respect to `wrt_tensor`.
                `wrt_tensor` can be any tensor that is part of the model graph. The default value of None
                means that the loss will simply be minimized with respect to `input_tensor`.
            norm_grads: True to normalize gradients. Normalization avoids very small or large gradients and ensures
                a smooth gradient descent process. If you want the actual gradients
                (for example, for visualizing attention), set this to False.
        """
        self.input_tensor = input_tensor
        self.input_range = input_range
        self.loss_names = []
        self.loss_functions = []
        self.wrt_tensor = self.input_tensor if wrt_tensor is None else wrt_tensor
        if self.input_tensor is self.wrt_tensor:
            self.wrt_tensor_is_input_tensor = True
            self.wrt_tensor = K.identity(self.wrt_tensor)
        else:
            self.wrt_tensor_is_input_tensor = False

        overall_loss = None
        for loss, weight in losses:
            # Perf optimization. Don't build loss function with 0 weight.
            if weight != 0:
                loss_fn = weight * loss.build_loss()
                overall_loss = loss_fn if overall_loss is None else overall_loss + loss_fn
                self.loss_names.append(loss.name)
                self.loss_functions.append(loss_fn)

        # Compute gradient of overall with respect to `wrt` tensor.
        if self.wrt_tensor_is_input_tensor:
            grads = K.gradients(overall_loss, self.input_tensor)[0]
        else:
            grads = K.gradients(overall_loss, self.wrt_tensor)[0]
        if norm_grads:
            grads = K.l2_normalize(grads)

        # The main function to compute various quantities in optimization loop.
        self.compute_fn = K.function(
            [self.input_tensor, K.learning_phase()],
            self.loss_functions + [overall_loss, grads, self.wrt_tensor])
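
The `norm_grads` step above boils down to L2-normalizing the gradient tensor. A small eager sketch of just that step (it uses `tf.GradientTape` in place of `K.gradients`, which only works in graph mode, so this is a substitution rather than the code above):

import tensorflow as tf
import tensorflow.keras.backend as K

x = tf.Variable([[3.0, 4.0]])
with tf.GradientTape() as tape:
    loss = tf.reduce_sum(x * x)      # toy loss
grads = tape.gradient(loss, x)       # [[6., 8.]]
normed = K.l2_normalize(grads)       # [[0.6, 0.8]]: unit L2 norm over all elements
print(normed.numpy())
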
Example #4
def attach_multibox_head(network,
                         source_layer_names,
                         num_priors=4,
                         num_classes=11,  # 10 -> 11 (OpenCV workaround; see note below)
                         activation='softmax'):
    heads = []
    batch_size = 64  # hard-coded because OpenCV cannot recognize the (dynamic) reshape
    for idx, layer_name in enumerate(source_layer_names):
        source_layer = network.get_layer(layer_name).output

        # OpenCV loading error:
        # "Can't create layer \"loc_head2_reshape_2/Shape\" of type \"Shape\""
        # Fix: changed the number of classes from 10 to 11

        w = source_layer.get_shape().as_list()[1]
        h = source_layer.get_shape().as_list()[2]
        print("w : ", w)
        print("h : ", h)
        print("num_priors : ", num_priors)
        # Classification
        clf = Conv2D(num_priors * num_classes, (3, 3),
                     padding='same',
                     name=f'clf_head{idx}_logit')(source_layer)
        print("clf shape입니다 : ", clf.shape)
        clf = tf.reshape(clf,
                         shape=(batch_size, w * h * num_priors, num_classes),
                         name=f'clf_head{idx}_reshape')
        # clf = Reshape((w*h*num_priors, num_classes), name=f'clf_head{idx}_reshape')(clf)  # (-1, num_classes) # w*h*num_priors
        print("clf의 reshape 후입니다 : ", clf.shape)
        if activation == 'softmax':
            clf = Softmax(axis=-1, name=f'clf_head{idx}')(clf)
        elif activation == 'sigmoid':
            clf = sigmoid(clf)
        else:
            raise ValueError('activation must be one of {softmax, sigmoid}.')

        # Localization
        loc = Conv2D(num_priors * 4, (3, 3),
                     padding='same',
                     name=f'loc_head{idx}')(source_layer)
        print("loc의 shape입니다 : ", loc.shape)
        loc = tf.reshape(loc,
                         shape=(batch_size, w * h * num_priors, 4),
                         name=f'loc_head{idx}_reshape')
        # loc = Reshape((w*h*num_priors, 4), name=f'loc_head{idx}_reshape')(loc)  #Reshape((-1, 4),
        print("loc의 reshape 후입니다 : ", loc.shape)
        head = Concatenate(axis=-1, name=f'head{idx}')([clf, loc])
        heads.append(head)

    if len(heads) > 1:
        predictions = Concatenate(axis=1, name='predictions')(heads)
    else:
        predictions = K.identity(heads[0], name='predictions')
    return predictions
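
To illustrate the workaround in this variant (the shapes below are arbitrary assumptions): a Keras `Reshape` keeps the batch dimension symbolic, while `tf.reshape` with a hard-coded `batch_size` writes a fully static shape into the graph, which is what the OpenCV importer needs here.

import tensorflow as tf
from tensorflow.keras.layers import Input, Reshape

x = Input(shape=(8, 8, 44))                  # 44 = num_priors * num_classes (4 * 11)
dynamic = Reshape((-1, 11))(x)               # batch stays None
print(dynamic.shape)                         # (None, 256, 11)
static = tf.reshape(x, (64, 8 * 8 * 4, 11))  # batch baked in as 64
print(static.shape)                          # (64, 256, 11)
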
Example #5
    def call(self, inputs, training=None, mask=None):
        # inputs: [b, t, n]. Score each timestep with a small MLP, softmax the
        # scores over the time axis, then return the weighted sum of timesteps.
        identityInput = K.identity(inputs)
        stacks = K.array_ops.unstack(inputs, axis=1)
        ### [b, n]
        tempList = []
        for oneTimeStepTensor in stacks:
            ### [b, 1]
            bTensor = self.dense0(oneTimeStepTensor)
            bTensor = K.relu(bTensor)
            bTensor = self.dense1(bTensor)
            tempList.append(bTensor)
        ### [b, t, 1]
        stackedTensor = K.array_ops.stack(tempList, axis=1)
        softMaxTensor = K.softmax(stackedTensor, axis=1)
        ### [b, t, 1]
        weightEdTensor = K.math_ops.multiply(identityInput, softMaxTensor)
        # Sum the weighted timesteps back into a single [b, n] tensor.
        unstack = K.array_ops.unstack(weightEdTensor, axis=1)
        return K.math_ops.add_n(unstack)
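
The loop above implements a simple temporal attention: a two-layer scorer per timestep, a softmax over the time axis, and a weighted sum. A compact vectorized rewrite as a sketch (my own version using plain `tf` ops, not the original layer; `hidden=16` is an arbitrary choice):

import tensorflow as tf
from tensorflow.keras import layers

class SimpleTemporalAttention(layers.Layer):
    def __init__(self, hidden=16, **kwargs):
        super().__init__(**kwargs)
        self.dense0 = layers.Dense(hidden, activation='relu')
        self.dense1 = layers.Dense(1)

    def call(self, inputs):                               # inputs: (batch, time, features)
        scores = self.dense1(self.dense0(inputs))         # (batch, time, 1)
        weights = tf.nn.softmax(scores, axis=1)           # softmax over the time axis
        return tf.reduce_sum(inputs * weights, axis=1)    # (batch, features)

print(SimpleTemporalAttention()(tf.random.normal((2, 5, 8))).shape)   # (2, 8)
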
Example #6
    def call(self, inputs, training=None, mask=None):
        batchTensor = tf.stop_gradient(
            tf.nn.embedding_lookup(params=self.iniEmbeddingMatrix, ids=inputs))
        # print(batchTensor.shape)
        # print(positionTensor.shape)
        denseTrans = self.denseTrans(batchTensor)

        thisTransformer = K.identity(denseTrans)
        for i in range(self._transformerLayers):
            thisTransformer = self.transformerList[i](thisTransformer,
                                                      training=training)

        flattenTensor = self.flat(thisTransformer)

        dense0Tensor = self.dense0(flattenTensor)
        bn0Tensor = self.bn0(dense0Tensor, training=training)
        actT = self.pRelu(bn0Tensor)
        dropTensor = self.dropout(actT, training=training)

        dense1Tensor = self.dense1(dropTensor)
        return tf.nn.sigmoid(dense1Tensor)
Example #7
    def call(self, inputs, training=None, mask=None):
        sentence = inputs[0]
        position = inputs[1]
        batchTensor = tf.stop_gradient(tf.nn.embedding_lookup(params=self.iniEmbeddingMatrix, ids=sentence))
        batchTensor = K.stop_gradient(K.math_ops.multiply(batchTensor, K.math_ops.sqrt(self.dModel)))

        positionTensor = self.positionEmbeddingMatrix(position)
        positionTensor = K.math_ops.multiply(positionTensor, K.stop_gradient(K.math_ops.sqrt(self.dModel)))
        # print(batchTensor.shape)
        # print(positionTensor.shape)

        eDropTensor = self.embeddingDrop(K.math_ops.multiply(K.math_ops.add(batchTensor, positionTensor),
                                                             K.stop_gradient(tf.convert_to_tensor(1. / 2., dtype=tf.float32))),
                                         training=training)
        denseTrans = self.denseTrans(eDropTensor)

        thisTransformer = K.identity(denseTrans)
        for i in range(self._transformerLayers):
            thisTransformer = self.transformerList[i](thisTransformer, training=training)

        flattenTensor = self.flat(thisTransformer)
        dense1Tensor = self.dense1(flattenTensor)
        return tf.nn.softmax(dense1Tensor, axis=-1)
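
A toy illustration of the input scaling used above (the dimensions are arbitrary assumptions, and stop_gradient, dropout and the dense projection are omitted): token embeddings and position embeddings are each scaled by sqrt(d_model), summed, and halved to keep the magnitude comparable.

import tensorflow as tf

d_model = 16
tok = tf.random.normal((2, 5, d_model))        # looked-up token embeddings
pos = tf.random.normal((2, 5, d_model))        # position embeddings
scale = tf.sqrt(tf.cast(d_model, tf.float32))
combined = (tok * scale + pos * scale) * 0.5   # core arithmetic of the call() above
print(combined.shape)                          # (2, 5, 16)
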
Example #8
    def call(self, inputs, **kwargs):
        return [
            K.identity(self.bias_context),
            K.identity(self.bias_relative),
        ]
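
A self-contained sketch of the pattern in this last example (the class name, shapes and initializers are assumptions; the real layer defines its weights elsewhere): a layer that creates two bias vectors in `build()` and returns copies of them from `call()` via `K.identity`.

import tensorflow as tf
from tensorflow.keras import layers
import tensorflow.keras.backend as K

class BiasPair(layers.Layer):
    def __init__(self, units, **kwargs):
        super().__init__(**kwargs)
        self.units = units

    def build(self, input_shape):
        self.bias_context = self.add_weight(
            name='bias_context', shape=(self.units,), initializer='zeros')
        self.bias_relative = self.add_weight(
            name='bias_relative', shape=(self.units,), initializer='zeros')
        super().build(input_shape)

    def call(self, inputs, **kwargs):
        # K.identity hands out tensors that copy the current variable values,
        # so downstream ops consume tensors rather than the variables themselves.
        return [K.identity(self.bias_context), K.identity(self.bias_relative)]

ctx, rel = BiasPair(8)(tf.zeros((1, 8)))
print(ctx.shape, rel.shape)    # (8,) (8,)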