Example #1
    def call(self, inputs):
        input_shape = self.in_shape
        if self.data_format == 'channels_first':
            x = K.arange(0, input_shape[1], dtype=K.floatx())
            y = K.arange(0, input_shape[2], dtype=K.floatx())
        else:
            x = K.arange(0, input_shape[0], dtype=K.floatx())
            y = K.arange(0, input_shape[1], dtype=K.floatx())

        x = x / K.max(x)
        y = y / K.max(y)

        loc_x, loc_y = tf.meshgrid(x, y, indexing='ij')

        if self.data_format == 'channels_first':
            loc = K.stack([loc_x, loc_y], axis=0)
        else:
            loc = K.stack([loc_x, loc_y], axis=-1)

        location = K.expand_dims(loc, axis=0)
        if self.data_format == 'channels_first':
            location = K.permute_dimensions(location, pattern=[0, 2, 3, 1])

        location = tf.tile(location, [K.shape(inputs)[0], 1, 1, 1])

        if self.data_format == 'channels_first':
            location = K.permute_dimensions(location, pattern=[0, 3, 1, 2])

        return location
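A shape sketch of what this call returns, assuming channels_last and self.in_shape = (H, W, C) (both attribute names are taken from the snippet above):

# inputs:   (batch, H, W, C)
# location: (batch, H, W, 2), with location[b, i, j] == [i / (H - 1), j / (W - 1)]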
Example #2
    def call(self, input):
        input_shape = backend.shape(input)

        num_rows = input_shape[1]
        num_cols = input_shape[2]

        row_length = backend.cast(num_rows,
                                  'float32') / (2 * self.circle_number)
        col_length = backend.cast(num_cols,
                                  'float32') / (2 * self.circle_number)

        outputs = []
        pool_circle = []
        for jy in range(self.circle_number * 2):
            for ix in range(self.circle_number * 2):
                x1 = ix * col_length
                x2 = ix * col_length + col_length
                y1 = jy * row_length
                y2 = jy * row_length + row_length

                x1 = backend.cast(backend.round(x1), 'int32')
                x2 = backend.cast(backend.round(x2), 'int32')
                y1 = backend.cast(backend.round(y1), 'int32')
                y2 = backend.cast(backend.round(y2), 'int32')

                new_shape = [input_shape[0], y2 - y1, x2 - x1, input_shape[3]]

                x_crop = input[:, y1:y2, x1:x2, :]
                xm = backend.reshape(x_crop, new_shape)
                # collect the flattened crop; pooling happens per circle below
                pool_circle.append(
                    backend.reshape(xm, (input_shape[0], -1, input_shape[3])))

        circle_index = self._circle_index(self.circle_number)
        for cidx in circle_index:
            circle_val = [pool_circle[idx] for idx in cidx]
            if self.pool_mode == 'avg':
                pooled_val = backend.mean(backend.concatenate(circle_val,
                                                              axis=1),
                                          axis=1)
            else:
                pooled_val = backend.max(backend.concatenate(circle_val,
                                                             axis=1),
                                         axis=1)

            outputs.append(pooled_val)

        outputs = backend.concatenate(outputs)
        outputs = backend.reshape(
            outputs,
            (input_shape[0], self.nb_channels * self.num_outputs_per_channel))

        return outputs
Example #3
def filter_boxes(box_confidence, boxes, box_class_probs, threshold=.6):
    """ Filter YOLO boxes based on object and class confidence.

    Parameters
    ----------
    box_confidence: tf.Tensor
        Probability estimate for whether each box contains any object.
    boxes: tf.Tensor
        Bounding boxes.
    box_class_probs: tf.Tensor
        Probability distribution estimate for each box over class labels.
    threshold: float
        Minimum class score required to keep a box.

    Returns
    -------
    _boxes, scores, classes: (tf.Tensor, tf.Tensor, tf.Tensor)

    """

    box_scores = box_confidence * box_class_probs
    box_classes = K.argmax(box_scores, axis=-1)
    box_class_scores = K.max(box_scores, axis=-1)
    prediction_mask = box_class_scores >= threshold

    _boxes = tf.boolean_mask(boxes, prediction_mask)
    scores = tf.boolean_mask(box_class_scores, prediction_mask)
    classes = tf.boolean_mask(box_classes, prediction_mask)

    return _boxes, scores, classes
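A minimal usage sketch with random tensors; the shapes below follow the common YOLO layout of a 13x13 grid, 5 anchors and 80 classes, and are purely illustrative:

import numpy as np
import tensorflow as tf

box_confidence = tf.constant(np.random.rand(13, 13, 5, 1), dtype=tf.float32)
boxes = tf.constant(np.random.rand(13, 13, 5, 4), dtype=tf.float32)
box_class_probs = tf.constant(np.random.rand(13, 13, 5, 80), dtype=tf.float32)

_boxes, scores, classes = filter_boxes(box_confidence, boxes, box_class_probs)
# _boxes: (n, 4), scores: (n,), classes: (n,) for the n boxes above threshold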
Example #4
    def call(self, input_feature):
        kernel_size = 3
        cbam_feature = input_feature

        # pool across the channel axis (channels_last assumed)
        avg_pool = Lambda(lambda x: K.mean(x, axis=3, keepdims=True))(cbam_feature)
        max_pool = Lambda(lambda x: K.max(x, axis=3, keepdims=True))(cbam_feature)

        # stack the two pooled maps and learn a single-channel spatial attention map
        concat = Concatenate(axis=3)([avg_pool, max_pool])
        cbam_feature = Conv2D(filters=1,
                              kernel_size=kernel_size,
                              strides=1,
                              padding='same',
                              kernel_initializer='he_normal',
                              use_bias=False)(concat)

        return K.sigmoid(cbam_feature)
Example #5
def compile_saliency_function(model, act_num):
    input_sig = model.get_input_at(0)
    layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]])
    layer_output = layer_dict[act_num].output
    max_output = K.max(layer_output, axis=2)
    saliency = K.gradients(K.sum(max_output), input_sig)[0]
    return K.function([input_sig, K.learning_phase()], [saliency])
Example #6
    def call(self, inputs, **kwargs):
        backend = retina_net_tensorflow_backend()

        boxes, classification, detections = inputs

        # TODO: support batch size > 1.
        boxes = boxes[0]
        classification = classification[0]
        detections = detections[0]

        scores = K.max(classification, axis=1)

        # selecting the best anchors theoretically improves speed at the cost of a small accuracy drop
        if self.top_k:
            scores, indices = backend.top_k(scores, self.top_k, sorted=False)
            boxes = K.gather(boxes, indices)
            classification = K.gather(classification, indices)
            detections = K.gather(detections, indices)

        indices = backend.non_max_suppression(boxes,
                                              scores,
                                              max_output_size=self.max_boxes,
                                              iou_threshold=self.nms_threshold)

        detections = K.gather(detections, indices)
        return K.expand_dims(detections, axis=0)
Example #7
    def call(self, inputs, mask=None, training=None):
        inputs, relatives, memories, bias_context, bias_relative = inputs
        full = K.concatenate([memories, inputs], axis=1)      # (batch, prev_len + seq_len, units)
        w_q = K.dot(inputs, self.kernel_q)                    # (batch, seq_len, units)
        w_kv = K.dot(full, self.kernel_kv)                    # (batch, prev_len + seq_len, units * 2)
        w_r = K.dot(relatives, self.kernel_r)                 # (batch, prev_len + seq_len, units)
        if self.use_bias:
            w_q = K.bias_add(w_q, self.bias_q)
            w_kv = K.bias_add(w_kv, self.bias_kv)
            w_r = K.bias_add(w_r, self.bias_r)
        if self.activation is not None:
            w_q = self.activation(w_q)
            w_kv = self.activation(w_kv)
            w_r = self.activation(w_r)

        w_k = w_kv[:, :, :self.units]                         # (batch, prev_len + seq_len, units)
        w_v = w_kv[:, :, self.units:]                         # (batch, prev_len + seq_len, units)

        w_qc = K.bias_add(w_q, bias_context)
        w_qc = self._reshape_to_batches(w_qc)                 # (batch * n_head, seq_len, units_head)
        w_k = self._reshape_to_batches(w_k)                   # (batch * n_head, prev_len + seq_len, units_head)
        a_context = K.batch_dot(w_qc, w_k, axes=2)            # (batch * n_head, seq_len, prev_len + seq_len)

        w_qr = K.bias_add(w_q, bias_relative)
        w_qr = self._reshape_to_batches(w_qr)                 # (batch * n_head, seq_len, units_head)
        w_r = self._reshape_to_batches(w_r)                   # (batch * n_head, prev_len + seq_len, units_head)
        a_relative = K.batch_dot(w_qr, w_r, axes=2)           # (batch * n_head, seq_len, prev_len + seq_len)
        a_relative = self._relative_shift(a_relative)         # (batch * n_head, seq_len, prev_len + seq_len)

        att = (a_context + a_relative) / K.sqrt(K.constant(self.units_head, dtype=K.floatx()))
        exp = K.exp(att - K.max(att, axis=-1, keepdims=True))

        q_len, k_len = K.shape(w_q)[1], K.shape(w_k)[1]
        indices = K.expand_dims(K.arange(0, k_len), axis=0)
        upper = K.expand_dims(K.arange(k_len - q_len, k_len), axis=-1)
        exp *= K.expand_dims(K.cast(indices <= upper, K.floatx()), axis=0)
        if mask is not None and mask[0] is not None:
            mask = K.cast(mask[0], K.floatx())
            mask = K.concatenate([K.ones_like(memories[:, :, 0]), mask], axis=1)
            exp *= K.expand_dims(self._reshape_mask(mask), axis=1)

        att = exp / K.sum(exp, axis=-1, keepdims=True)
        if self.att_drop_layer is not None:
            att = self.att_drop_layer(att, training=training)
        w_v = self._reshape_to_batches(w_v)                   # (batch * n_head, prev_len + seq_len, units_head)
        w_o = K.batch_dot(att, w_v)                           # (batch * n_head, seq_len, units_head)

        w_o = self._reshape_from_batches(w_o)                 # (batch, seq_len, units)
        w_o = K.dot(w_o, self.kernel_o)                       # (batch, seq_len, units)
        if self.use_bias:
            w_o = K.bias_add(w_o, self.bias_o)
        if self.activation is not None:
            w_o = self.activation(w_o)

        # Add shape information to tensor when using `tf.keras`
        input_shape = K.int_shape(inputs)
        if input_shape[1] is not None:
            w_o = K.reshape(w_o, (-1,) + input_shape[1:])
        return w_o
Example #8
    def build_loss(self):
        # Infinity norm is the maximum absolute value
        if np.isinf(self.p):
            value = K.max(K.abs(self.img))
        else:
            value = K.pow(K.sum(K.pow(K.abs(self.img), self.p)), 1. / self.p)

        return normalize(self.img, value)
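For reference, the else branch computes the p-norm ||x||_p = (sum_i |x_i|^p)^(1/p); as p grows this tends to max_i |x_i|, which is why the infinite-p case short-circuits to a plain max over absolute values.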
Example #9
    def layer(x):
        x_mean = K.expand_dims(K.mean(x, axis=1), axis=1)
        x_max = K.expand_dims(K.max(x, axis=1), axis=1)
        x = concatenate([x, x_mean, x_max], axis=1)
        x = building_block(filters)(x)
        x = Conv3D(classes, 1, data_format=DATA_FORMAT)(x)

        return x
Example #10
 def loop_body(b, ignore_mask):
     true_box = tf.boolean_mask(y_true[l][b, ..., 0:4],
                                object_mask_bool[b, ..., 0])
     iou = box_iou(pred_box[b], true_box)
     best_iou = K.max(iou, axis=-1)
     ignore_mask = ignore_mask.write(
         b, K.cast(best_iou < ignore_thresh, K.dtype(true_box)))
     return b + 1, ignore_mask
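For context: in the YOLOv3 loss this marks predictions whose best IoU against every ground-truth box falls below ignore_thresh, so anchors with no sufficiently overlapping ground truth are excluded from the objectness penalty.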
Example #11
def compile_saliency_function(net, activation_layer):
    input_img = net.input
    layer_dict = dict([(layer.name, layer) for layer in net.layers[1:]])
    layer_output = layer_dict[activation_layer].output
    max_output = K.max(layer_output, axis=3)
    saliency = K.gradients(K.sum(max_output), input_img)[0]

    return K.function([input_img, K.learning_phase()], [saliency])
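A rough usage sketch. K.gradients and K.learning_phase need graph mode, so this assumes TF1-style execution; the toy model and the layer name 'conv' are made up for illustration:

import numpy as np
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
from tensorflow.keras import layers, models

inp = layers.Input(shape=(28, 28, 1))
x = layers.Conv2D(8, 3, activation='relu', name='conv')(inp)
out = layers.Dense(10, activation='softmax')(layers.Flatten()(x))
net = models.Model(inp, out)

saliency_fn = compile_saliency_function(net, 'conv')
grads = saliency_fn([np.zeros((1, 28, 28, 1), np.float32), 0])[0]  # (1, 28, 28, 1)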
Example #12
def _softmax(x):
    """
    Softmax that works on ND inputs.
    """
    channel_axis = get_channel_axis(K.ndim(x) - 2)
    e = K.exp(x - K.max(x, axis=channel_axis, keepdims=True))
    s = K.sum(e, axis=channel_axis, keepdims=True)
    return e / s
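A quick check with a stand-in for get_channel_axis, assumed here to pick the channel axis from the image data format (the real helper may differ):

import tensorflow as tf
from tensorflow.keras import backend as K

def get_channel_axis(spatial_ndim):  # hypothetical stand-in
    return -1 if K.image_data_format() == 'channels_last' else 1

x = tf.random.normal((2, 8, 8, 3))   # (batch, H, W, channels)
p = _softmax(x)                      # K.sum(p, axis=-1) is ~1.0 everywhere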
Example #13
def mystockloss(y_true, y_pred, e=0.1):
    # scale the absolute error by the maximum over y_true and y_pred
    # (the y_true * 0.0 entry floors that maximum at zero)
    return abs(y_true - y_pred) / 10.0 * K.max([y_true, y_pred, y_true * 0.0])
Example #14
def _view_pool(views):
    """View pooling: element-wise max over the per-view CNN outputs.

    :param views: the NUM_VIEWS outputs of CNN1
    """
    expanded = [K.expand_dims(view, 0) for view in views]
    concated = K.concatenate(expanded, 0)
    reduced = K.max(concated, 0)
    return reduced
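A minimal usage sketch with three fake views, each a (batch, features) tensor:

import tensorflow as tf

views = [tf.random.normal((4, 128)) for _ in range(3)]   # NUM_VIEWS = 3
pooled = _view_pool(views)   # (4, 128): element-wise max over the 3 views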
Example #15
def softmax_with_mask(tensor_and_mask):
    input_tensor, mask_tensor = tensor_and_mask
    min_tensor = K.min(input_tensor, axis=1, keepdims=True)
    positive_tensor = (min_tensor - input_tensor) * mask_tensor
    max_tensor = K.max(positive_tensor, axis=1, keepdims=True)
    exp_tensor = K.exp(positive_tensor - max_tensor)
    masked_tensor = exp_tensor * mask_tensor
    summed_tensor = K.sum(masked_tensor, axis=1, keepdims=True)
    return masked_tensor / (summed_tensor + 1e-10)
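A tiny sketch; note the min_tensor - input_tensor flip means smaller inputs get larger weight, and masked positions end up with exactly zero probability:

import tensorflow as tf
from tensorflow.keras import backend as K

scores = tf.constant([[1.0, 2.0, 3.0]])
mask = tf.constant([[1.0, 1.0, 0.0]])
probs = softmax_with_mask([scores, mask])
# probs ~ [[0.73, 0.27, 0.0]]: index 2 is masked out, index 0 outweighs index 1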
Example #16
def compute_mask_loss(boxes,
                      masks,
                      annotations,
                      masks_target,
                      width,
                      height,
                      iou_threshold=0.5,
                      mask_size=(28, 28)):
    """compute overlap of boxes with annotations"""
    iou = overlap(boxes, annotations)
    argmax_overlaps_inds = K.argmax(iou, axis=1)
    max_iou = K.max(iou, axis=1)

    # filter those with IoU > 0.5
    indices = tf.where(K.greater_equal(max_iou, iou_threshold))
    boxes = tf.gather_nd(boxes, indices)
    masks = tf.gather_nd(masks, indices)
    argmax_overlaps_inds = K.cast(tf.gather_nd(argmax_overlaps_inds, indices), 'int32')
    labels = K.cast(K.gather(annotations[:, 4], argmax_overlaps_inds), 'int32')

    # make normalized boxes
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    boxes = K.stack([
        y1 / (K.cast(height, dtype=K.floatx()) - 1),
        x1 / (K.cast(width, dtype=K.floatx()) - 1),
        (y2 - 1) / (K.cast(height, dtype=K.floatx()) - 1),
        (x2 - 1) / (K.cast(width, dtype=K.floatx()) - 1),
    ], axis=1)

    # crop and resize masks_target
    # append a fake channel dimension
    masks_target = K.expand_dims(masks_target, axis=3)
    masks_target = tf.image.crop_and_resize(
        masks_target,
        boxes,
        argmax_overlaps_inds,
        mask_size
    )
    masks_target = masks_target[:, :, :, 0]  # remove fake channel dimension

    # gather the predicted masks using the annotation label
    masks = tf.transpose(masks, (0, 3, 1, 2))
    label_indices = K.stack([tf.range(K.shape(labels)[0]), labels], axis=1)

    masks = tf.gather_nd(masks, label_indices)

    # compute mask loss
    mask_loss = K.binary_crossentropy(masks_target, masks)
    normalizer = K.shape(masks)[0] * K.shape(masks)[1] * K.shape(masks)[2]
    normalizer = K.maximum(K.cast(normalizer, K.floatx()), 1)
    mask_loss = K.sum(mask_loss) / normalizer

    return mask_loss
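Note on the crop_and_resize call: its third argument is the per-box batch index, so passing argmax_overlaps_inds pairs each kept box with the ground-truth mask of its best-overlapping annotation before resizing it to mask_size.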
Example #17
 def softmax(x, axis=1):
     ndim = K.ndim(x)
     if ndim == 2:
         return K.softmax(x)
     elif ndim > 2:
         e = K.exp(x - K.max(x, axis=axis, keepdims=True))
         s = K.sum(e, axis=axis, keepdims=True)
         return e / s
     else:
         raise ValueError('Cannot apply softmax to a tensor that is 1D')
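For the ndim > 2 branch, a quick check (normalization runs over axis=1 by default):

import tensorflow as tf
from tensorflow.keras import backend as K

x = tf.random.normal((2, 5, 7))
p = softmax(x)
# K.sum(p, axis=1) is ~1.0 for every (batch, last-dim) pair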
Example #18
    def fallback_metric(self, y_true, y_pred):
        #grab the most confident prediction
        predictions = K.max(y_pred, axis=-1)

        #fill a tensor with our threshold_value
        threshold_tensor = tf.fill(tf.shape(predictions), self.threshold)

        #Are we confident in our prediction?
        threshold_high = predictions > threshold_tensor
        threshold_high = tf.cast(threshold_high, tf.int32)

        #Do we have low confidence in our prediction?
        threshold_low = predictions <= threshold_tensor
        threshold_low = tf.cast(threshold_low, tf.int32)

        idx_true = K.argmax(y_true, -1)
        idx_pred = K.argmax(y_pred, -1)

        #For our confident predictions, compare the top prediction to the label of the true value
        high_correct = math_ops.equal(idx_true, idx_pred)
        high_correct = tf.cast(high_correct, tf.int32)

        #For our less confident predictions, grab the top 2 most confident predictions
        _, max_pred = tf.math.top_k(y_pred, k=2)

        #Gather the lineages of those top 2 predictions using the transpose of the hierarchy's adjacency matrix, because the adjacency only points from ancestor to descendant
        lineages = tf.gather(K.transpose(self.hierarchy.A), max_pred)
        lineages = K.cast(lineages, tf.int32)

        #Intersect the two lineages: a node survives only if it lies in the lineage of both top predictions
        fallback = tf.bitwise.bitwise_and(lineages[:, 0], lineages[:, 1])

        #Gather the lineage of the true value
        actual = tf.gather(K.transpose(self.hierarchy.A), K.argmax(y_true))
        actual = K.cast(actual, tf.int32)

        #Multiply the two together
        overlap_score = K.batch_dot(fallback, actual)

        #Are either of the top 2 predictions in the lineage of the true value? If so, overlap_score should be >1 and we count the result as correct
        low_correct = overlap_score > 1
        low_correct = tf.cast(low_correct, tf.int32)
        low_correct = tf.squeeze(low_correct)

        #results for the high confidence predictions
        high_accuracy = tf.math.multiply(threshold_high, high_correct)

        #results for the low confidence predictions
        low_accuracy = tf.math.multiply(threshold_low, low_correct)

        # total accuracy vector
        correct = high_accuracy + low_accuracy

        #return batch accuracy value
        return K.mean(K.cast(correct, tf.float32))
Example #19
  def call(self, x, mask=None):
    """ The actual processing in the layer: Normalize, padd, then convolution.
    """
    input_1, input_2 = x
    input_shape = input_1.shape
    
    # assert input_shape == input_2._keras_shape
    
    self.H = input_shape[1]
    self.W = input_shape[2]
    self.C = input_shape[3]

    # normalization
    if self.use_norm == 'euclidean':
      input_1 = K.l2_normalize(input_1, axis=2)
      input_2 = K.l2_normalize(input_2, axis=2)

    if self.use_norm == 'scaling':
      input_1_min = K.min(input_1, axis=2, keepdims=True)
      input_1_max = K.max(input_1, axis=2, keepdims=True)
      input_1 = (input_1 - input_1_min) / (input_1_max - input_1_min + 0.000001)
  
      input_2_min = K.min(input_2, axis=2, keepdims=True)
      input_2_max = K.max(input_2, axis=2, keepdims=True)
      input_2 = (input_2 - input_2_min) / (input_2_max - input_2_min + 0.000001)

    if self.use_norm == 'standardization':
      input_1 = (input_1 - K.mean(input_1, axis=2, keepdims=True)) + 0.00001
      input_1 = K.l2_normalize(input_1, axis=2)
      input_2 = (input_2 - K.mean(input_2, axis=2, keepdims=True)) + 0.00001
      input_2 = K.l2_normalize(input_2, axis=2)

    # Pad input_1 circularly so that a correlation can be computed for
    # every horizontal position
    padding1 = RangePadding2D(padding=self.W // 2)(input_1)

    # How tf.scan works (explainer, in Chinese): https://zhuanlan.zhihu.com/p/96503559
    out = tf.scan(self.single_sample_corr,
                  elems=[padding1, input_2],
                  initializer=(K.zeros((int(self.H), int(self.W), int(self.output_dim))))
                  )
    return out
Example #20
    def call(self, inputs, **kwargs):
        batch_size, input_len, _ = inputs.shape
        q = K.expand_dims(K.dot(inputs, self.Wq), 2)
        k = K.expand_dims(K.dot(inputs, self.Wk), 1)
        h = tf.tanh(q + k + self.bh)

        e = K.dot(h, self.Wv) + self.ba
        e = tf.reshape(e, shape=(batch_size, input_len, input_len))
        e = K.exp(e - K.max(e, axis=-1, keepdims=True))
        s = K.sum(e, axis=-1, keepdims=True)
        a = e / (s + K.epsilon())
        v = K.batch_dot(a, inputs)
        return v
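A shape sketch, with hidden size u assumed: Wq and Wk are (u, u), Wv is (u, 1) (names from the snippet, shapes inferred); q + k broadcasts to (batch, input_len, input_len, u), e reshapes to (batch, input_len, input_len) attention logits, and the returned v is (batch, input_len, u).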
Example #21
def process_yolo_layer_output(feats, anchors, num_classes, input_shape, image_shape):
    """Process Conv layer output"""
    box_xy, box_wh, box_confidence, box_class_probs = yolo_head(feats, anchors, num_classes, input_shape)

    box_scores = box_confidence * box_class_probs
    highest_score_indexes = K.argmax(box_scores, axis=-1)
    box_classes = K.reshape(highest_score_indexes, [-1])
    highest_scores = K.max(box_scores, axis=-1)
    highest_box_scores = K.reshape(highest_scores, [-1])

    boxes = scale_boxes_to_original_image_size(box_xy, box_wh, image_shape)
    boxes = K.reshape(boxes, [-1, 4])

    return boxes, highest_box_scores, box_classes
Example #22
def compute_fd_loss(boxes, scores, annotations, iou_threshold=0.75):
    """compute the overlap of boxes with annotations"""
    iou = overlap(boxes, annotations)

    max_iou = K.max(iou, axis=1, keepdims=True)
    targets = K.cast(K.greater_equal(max_iou, iou_threshold), K.floatx())

    # compute the loss
    loss = focal(targets, scores)  # alpha=self.alpha, gamma=self.gamma)

    # compute the normalizer: the number of annotations in the image
    normalizer = K.cast(K.shape(annotations)[0], K.floatx())
    normalizer = K.maximum(K.cast_to_floatx(1.0), normalizer)

    return K.sum(loss) / normalizer
Example #23
    def call(self, inputs):

        # inputs_trans = (batch_size, the number of filters, sentence_length)
        inputs_trans = tf.transpose(inputs, [0, 2, 1])

        # at = (batch_size, the number of classes, sentence_length)
        at = tf.matmul(self.Wa, inputs_trans)

        # Softmax
        at = K.exp(at - K.max(at, axis=-1, keepdims=True))
        at = at / K.sum(at, axis=-1, keepdims=True)

        # weighted sum
        # v = (batch_size, the number of classes, the number of filters)
        v = K.batch_dot(at, inputs)

        return v
Example #24
    def call(self, h, mask=None):
        h_shape = K.shape(h)
        d_w, T = h_shape[0], h_shape[1]

        logits = K.dot(h, self.w)  # w^T h
        logits = K.reshape(logits, (d_w, T))
        alpha = K.exp(logits - K.max(logits, axis=-1, keepdims=True))  # exp

        # masked timesteps have zero weight
        if mask is not None:
            mask = K.cast(mask, K.floatx())
            alpha = alpha * mask
        alpha = alpha / K.sum(alpha, axis=1, keepdims=True)  # softmax
        r = K.sum(h * K.expand_dims(alpha), axis=1)  # r = h*alpha^T
        h_star = K.tanh(r)  # h^* = tanh(r)
        if self.return_attention:
            return [h_star, alpha]
        return h_star
Example #25
    def call(self, seq_value_len_list, **kwargs):
        uiseq_embed_list, user_behavior_length = seq_value_len_list
        embedding_size = uiseq_embed_list.shape[-1]
        mask = tf.sequence_mask(user_behavior_length,
                                self.seq_len_max, dtype=tf.float32)

        mask = K.permute_dimensions(mask, [0, 2, 1])
        mask = tf.tile(mask, [1, 1, embedding_size])
        uiseq_embed_list *= mask
        hist = uiseq_embed_list
        if self.mode == "max":
            return K.max(hist, 1, keepdims=True)

        hist = K.sum(hist, 1, keepdims=False)
        if self.mode == "mean":

            hist = tf.div(hist, user_behavior_length)
        hist = tf.expand_dims(hist, axis=1)
        return hist
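Note that tf.div is the TF1 spelling; in TF2 the same masked mean would use tf.math.divide (or the / operator) to divide the summed embeddings by user_behavior_length.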
Example #26
    def call(self, x, mask=None):
        # computes a probability distribution over the timesteps
        # uses 'max trick' for numerical stability
        # reshape is done to avoid issue with Tensorflow
        # and 1-dimensional weights
        logits = K.dot(x, self.W)
        x_shape = K.shape(x)
        logits = K.reshape(logits, (x_shape[0], x_shape[1]))
        ai = K.exp(logits - K.max(logits, axis=-1, keepdims=True))

        # masked timesteps have zero weight
        if mask is not None:
            mask = K.cast(mask, K.floatx())
            ai = ai * mask
        att_weights = ai / (K.sum(ai, axis=1, keepdims=True) + K.epsilon())
        weighted_input = x * K.expand_dims(att_weights)
        result = K.sum(weighted_input, axis=1)
        if self.return_attention:
            return [result, att_weights]
        return result
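A shape walk-through, assuming x is (batch, timesteps, features) and self.W is (features, 1): logits reshape to (batch, timesteps), att_weights sum to one over the unmasked timesteps, and result is the (batch, features) attention-weighted average of x.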
Example #27
    def call(self, inputs):
        if not self.norm_method:
            outputs = inputs

        elif self.norm_method == 'whole_image':
            axes = [3, 4] if self.channel_axis == 1 else [2, 3]
            outputs = inputs - K.mean(inputs, axis=axes, keepdims=True)
            outputs = outputs / K.std(inputs, axis=axes, keepdims=True)

        elif self.norm_method == 'std':
            outputs = inputs - self._average_filter(inputs)
            outputs = outputs / self._window_std_filter(outputs)

        elif self.norm_method == 'max':
            outputs = inputs / K.max(inputs)
            outputs = outputs - self._average_filter(outputs)

        else:
            raise NotImplementedError('"{}" is not a valid norm_method'.format(
                self.norm_method))

        return outputs
Example #28
 def call(self, inputs):
     if self.data_format == 'channels_last':
         return backend.max(inputs, axis=[1, 2])
     else:
         return backend.max(inputs, axis=[2, 3])
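A quick check of the channels_last branch; this reduction is exactly a global max pool, one maximum per channel:

import tensorflow as tf
from tensorflow.keras import backend

x = tf.random.normal((2, 5, 5, 3))
print(backend.max(x, axis=[1, 2]).shape)   # (2, 3)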
Example #29
 def call(self, inputs):
     steps_axis = 1 if self.data_format == 'channels_last' else 2
     return backend.max(inputs, axis=steps_axis)
Example #30
 def call(self, inputs):
   if self.data_format == 'channels_last':
     return backend.max(inputs, axis=[1, 2, 3])
   else:
     return backend.max(inputs, axis=[2, 3, 4])
Example #31
 def call(self, inputs):
   steps_axis = 1 if self.data_format == 'channels_last' else 2
   return backend.max(inputs, axis=steps_axis)
Example #32
 def call(self, inputs):
     return backend.max(inputs, axis=1)
Example #33
 def call(self, inputs):
   return backend.max(inputs, axis=1)