Example #1
def _label_to_one_hot(tens, nb_labels):
    """
    Transform a label nD Tensor to a one-hot 3D Tensor. The input tensor is first
    batch-flattened, and then each batch and each voxel gets a one-hot representation
    """
    y = K.batch_flatten(tens)
    return K.one_hot(y, nb_labels)
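For context, a minimal usage sketch of the helper above (a sketch only, assuming the standalone Keras backend is imported as K and that _label_to_one_hot is in scope; shapes and values are illustrative):

import numpy as np
from keras import backend as K

labels = K.constant(np.random.randint(0, 4, size=(2, 3, 5)), dtype='int32')
one_hot = _label_to_one_hot(labels, nb_labels=4)
print(K.eval(one_hot).shape)  # (2, 15, 4): batch kept, spatial dims flattened, one channel per label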
Example #2
    def call(self, x):
        assert isinstance(x, list)
        inp_a, inp_b = x

        outp_a = K.l2_normalize(inp_a, -1)
        outp_b = K.l2_normalize(inp_b, -1)
        alpha = K.batch_dot(outp_b, outp_a, axes=[2, 2])
        alpha = K.l2_normalize(alpha, 1)
        alpha = K.one_hot(K.argmax(alpha, 1), K.int_shape(inp_a)[1])
        hmax = K.batch_dot(alpha, outp_b, axes=[1, 1])
        kcon = K.eye(K.int_shape(inp_a)[1], dtype='float32')

        m = []
        for i in range(self.output_dim):
            outp_a = inp_a * self.W[i]
            outp_hmax = hmax * self.W[i]
            outp_a = K.l2_normalize(outp_a, -1)
            outp_hmax = K.l2_normalize(outp_hmax, -1)
            outp = K.batch_dot(outp_hmax, outp_a, axes=[2, 2])
            outp = K.sum(outp * kcon, -1, keepdims=True)
            m.append(outp)
        if self.output_dim > 1:
            persp = K.concatenate(m, 2)
        else:
            persp = m
        return [persp, persp]
Example #3
 def mask_logits(self, inputs, mask, clen, mask_value=-1e12):
     shapes = [x if x is not None else -1 for x in inputs.shape.as_list()]
     mask = K.cast(mask, tf.int32)
     mask = K.one_hot(mask[:, 0], shapes[-1])
     mask = 1 - K.cumsum(mask, 1)
     mask = tf.cast(mask, tf.float32)
     mask = tf.tile(tf.expand_dims(mask, axis=1), [1, clen, 1])
     return inputs + mask_value * (1 - mask)
Example #4
 def mask_logits(self, inputs, mask, mask_value=-1e12):
     shapes = [x if x is not None else -1 for x in inputs.shape.as_list()]
     mask = K.cast(mask, tf.int32)
     mask = K.one_hot(mask[:, 0], shapes[-1])
     mask = 1 - K.cumsum(mask, 1)
     mask = tf.cast(mask, tf.float32)
     mask = tf.reshape(mask, [shapes[0], 1, 1, shapes[-1]])
     return inputs + mask_value * (1 - mask)
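For reference, a minimal sketch of the one_hot + cumsum masking trick used in the two mask_logits variants above (assuming the standalone Keras backend as K; lengths and sizes are illustrative):

from keras import backend as K

lengths = K.constant([[3], [5]], dtype='int32')           # valid length per sequence
mask = 1 - K.cumsum(K.one_hot(lengths[:, 0], 6), axis=1)  # 1 for positions before the length, 0 after
print(K.eval(mask))
# [[1. 1. 1. 0. 0. 0.]
#  [1. 1. 1. 1. 1. 0.]]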
Example #5
 def test_one_hot(self):
     input_length = 10
     num_classes = 20
     batch_size = 30
     indices = np.random.randint(0, num_classes, size=(batch_size, input_length))
     oh = np.eye(num_classes)[indices]
     for K in [KTH, KTF]:
         koh = K.eval(K.one_hot(K.variable(indices, dtype='int32'), num_classes))
         assert np.all(koh == oh)
Example #6
def test_sparse_categorical_accuracy_correctness():
    y_a = K.variable(np.random.randint(0, 7, (6,)), dtype=K.floatx())
    y_b = K.variable(np.random.random((6, 7)), dtype=K.floatx())
    # use one_hot embedding to convert sparse labels to equivalent dense labels
    y_a_dense_labels = K.cast(K.one_hot(K.cast(y_a, dtype='int32'), num_classes=7),
                              dtype=K.floatx())
    sparse_categorical_acc = metrics.sparse_categorical_accuracy(y_a, y_b)
    categorical_acc = metrics.categorical_accuracy(y_a_dense_labels, y_b)
    assert np.allclose(K.eval(sparse_categorical_acc), K.eval(categorical_acc))
Example #7
def sparse_accuracy_ignoring_last_label(y_true, y_pred):
    nb_classes = K.int_shape(y_pred)[-1]
    y_pred = K.reshape(y_pred, (-1, nb_classes))

    y_true = K.one_hot(tf.to_int32(K.flatten(y_true)),
                       nb_classes + 1)
    unpacked = tf.unstack(y_true, axis=-1)
    legal_labels = ~tf.cast(unpacked[-1], tf.bool)
    y_true = tf.stack(unpacked[:-1], axis=-1)

    return K.sum(tf.to_float(legal_labels & K.equal(K.argmax(y_true, axis=-1),
                                                    K.argmax(y_pred, axis=-1)))) / K.sum(tf.to_float(legal_labels))
Example #8
 def Mask(self, inputs, seq_len, axis=1, time_dim=1, mode='mul'):
     if seq_len is None:
         return inputs
     else:
         seq_len=K.cast(seq_len,tf.int32)
         mask = K.one_hot(seq_len[:, 0], K.shape(inputs)[time_dim])
         mask = 1 - K.cumsum(mask, 1)
         mask = K.expand_dims(mask, axis)
         if mode == 'mul':
             return inputs * mask
         if mode == 'add':
             return inputs - (1 - mask) * 1e12
Example #9
 def Mask(self, inputs, seq_len, mode='mul'):
     if seq_len is None:
         return inputs
     else:
         mask = K.one_hot(seq_len[:,0], K.shape(inputs)[1])
         mask = 1 - K.cumsum(mask, 1)
         for _ in range(len(inputs.shape)-2):
             mask = K.expand_dims(mask, 2)
         if mode == 'mul':
             return inputs * mask
         if mode == 'add':
             return inputs - (1 - mask) * 1e12
Example #10
def softmax_sparse_crossentropy_ignoring_last_label(y_true, y_pred):
    y_pred = K.reshape(y_pred, (-1, K.int_shape(y_pred)[-1]))
    log_softmax = tf.nn.log_softmax(y_pred)

    y_true = K.one_hot(tf.to_int32(K.flatten(y_true)), K.int_shape(y_pred)[-1]+1)
    unpacked = tf.unstack(y_true, axis=-1)
    y_true = tf.stack(unpacked[:-1], axis=-1)

    cross_entropy = -K.sum(y_true * log_softmax, axis=1)
    cross_entropy_mean = K.mean(cross_entropy)

    return cross_entropy_mean
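The nb_classes + 1 trick above is easy to miss: pixels labelled with the extra "ignore" index fall into the last one-hot channel, which the unstack/stack pair then drops, so those pixels contribute nothing to the loss. A small numpy sketch of that step (values are illustrative):

import numpy as np

nb_classes = 3
labels = np.array([0, 2, 3])               # 3 is the "ignore" label
one_hot = np.eye(nb_classes + 1)[labels]   # shape (3, 4)
kept = one_hot[:, :-1]                     # drop the last channel -> shape (3, 3)
print(kept)
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 0. 0.]]   the ignored pixel becomes an all-zero row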
Example #11
    def call(self, inputs, **kwargs):
        if type(inputs) is list:  # true label is provided with shape = [None, n_classes], i.e. one-hot code.
            assert len(inputs) == 2
            inputs, mask = inputs
        else:  # if no true label, mask by the max length of capsules. Mainly used for prediction
            # compute lengths of capsules
            x = K.sqrt(K.sum(K.square(inputs), -1))
            # generate the mask which is a one-hot code.
            # mask.shape=[None, n_classes]=[None, num_capsule]
            mask = K.one_hot(indices=K.argmax(x, 1), num_classes=x.get_shape().as_list()[1])

        # inputs.shape=[None, num_capsule, dim_capsule]
        # mask.shape=[None, num_capsule]
        # masked.shape=[None, num_capsule * dim_capsule]
        masked = K.batch_flatten(inputs * K.expand_dims(mask, -1))
        return masked
Example #12
    def vae_loss(self, x, x_decoded_mean):
        global weights

        weights = K.variable(weights)
        x = K.cast(x, dtype='int32')
        x = K.one_hot(x, num_classes=NB_WORDS)

        y_pred = x_decoded_mean / K.sum(x_decoded_mean, axis=-1, keepdims=True)
        y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
        # weighted cross-entropy (weights applied to the positive term)
        loss = x * K.log(y_pred) * weights + (1 - x) * K.log(1 - y_pred)
        xent_loss = -K.mean(K.sum(loss, -1), -1)

        kl_loss = -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)

        xent_loss = K.mean(xent_loss)
        kl_loss = K.mean(kl_loss)
        return K.mean(xent_loss + kl_weight * kl_loss)
Example #13
def mean_IoU(y_true, y_pred):
	s = K.shape(y_true)

	# reshape such that w and h dim are multiplied together
	y_true_reshaped = K.reshape( y_true, tf.stack( [-1, s[1]*s[2], s[-1]] ) )
	y_pred_reshaped = K.reshape( y_pred, tf.stack( [-1, s[1]*s[2], s[-1]] ) )

	# correctly classified
	clf_pred = K.one_hot( K.argmax(y_pred_reshaped), nb_classes = s[-1])
	equal_entries = K.cast(K.equal(clf_pred,y_true_reshaped), dtype='float32') * y_true_reshaped

	intersection = K.sum(equal_entries, axis=1)
	union_per_class = K.sum(y_true_reshaped,axis=1) + K.sum(y_pred_reshaped,axis=1)

	iou = intersection / (union_per_class - intersection)
	iou_mask = tf.is_finite(iou)
	iou_masked = tf.boolean_mask(iou,iou_mask)

	return K.mean( iou_masked )
Example #14
def mean_acc(y_true, y_pred):
	s = K.shape(y_true)

	# reshape such that w and h dim are multiplied together
	y_true_reshaped = K.reshape( y_true, tf.stack( [-1, s[1]*s[2], s[-1]] ) )
	y_pred_reshaped = K.reshape( y_pred, tf.stack( [-1, s[1]*s[2], s[-1]] ) )

	# correctly classified
	clf_pred = K.one_hot( K.argmax(y_pred_reshaped), nb_classes = s[-1])
	equal_entries = K.cast(K.equal(clf_pred,y_true_reshaped), dtype='float32') * y_true_reshaped

	correct_pixels_per_class = K.sum(equal_entries, axis=1)
	n_pixels_per_class = K.sum(y_true_reshaped,axis=1)

	acc = correct_pixels_per_class / n_pixels_per_class
	acc_mask = tf.is_finite(acc)
	acc_masked = tf.boolean_mask(acc,acc_mask)

	return K.mean(acc_masked)
Example #15
    def call(self, inputs, **kwargs):
        if type(inputs) is list:  # the input carries a mask with shape [None, n_classes], i.e. a one-hot code per sample.
            assert len(inputs) == 2
            inputs, mask = inputs
        else:  # no mask in the input: generate one from the capsule vector lengths.
            # compute capsule lengths
            x = K.sqrt(K.sum(K.square(inputs), -1))
            # generate the mask as a one-hot code
            # mask.shape=[None, n_classes]=[None, num_capsule]
            mask = K.one_hot(indices=K.argmax(x, 1),
                             num_classes=x.get_shape().as_list()[1])

        # inputs.shape=[None, num_capsule, dim_capsule]
        # mask.shape=[None, num_capsule]
        # masked.shape=[None, num_capsule * dim_capsule]
        masked = K.batch_flatten(inputs * K.expand_dims(mask, -1))
        return masked
Example #16
    def optimizer(self):
        a = K.placeholder(shape=(None, ), dtype='int32')
        y = K.placeholder(shape=(None, ), dtype='float32')

        py_x = self.model.output

        a_one_hot = K.one_hot(a, self.action_size)
        q_value = K.sum(py_x * a_one_hot, axis=1)
        error = K.abs(y - q_value)

        quadratic_part = K.clip(error, 0.0, 1.0)
        linear_part = error - quadratic_part
        loss = K.mean(0.5 * K.square(quadratic_part) + linear_part)

        optimizer = RMSprop(lr=0.00025, epsilon=0.01)
        updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
        train = K.function([self.model.input, a, y], [loss], updates=updates)

        return train
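The quadratic_part / linear_part construction above is the Huber loss with delta = 1: quadratic for errors below 1, linear beyond, which keeps gradients bounded for large TD errors. A quick numpy sketch of the same formula (illustrative only):

import numpy as np

def huber(error, delta=1.0):
    quadratic_part = np.clip(np.abs(error), 0.0, delta)
    linear_part = np.abs(error) - quadratic_part
    return 0.5 * quadratic_part ** 2 + delta * linear_part

print(huber(0.5))  # 0.125 (quadratic region)
print(huber(3.0))  # 2.5   (linear region: 0.5 + (3 - 1))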
Example #17
def softmax_crossentropy(score, class_label, musk):
    '''
    Args:
        score: B, N, num_class
        class_label: B, N - not one hot, but sparse
        musk: B, N - filters out some loss
    Returns:
        loss: scalar
    '''
    score_softmax = K.softmax(score, axis=-1)  # apply softmax
    class_label_onehot = K.one_hot(
        K.cast(class_label,
               "int32"), num_classes=K.int_shape(score)[-1])  # int to one hot
    class_label_onehot = K.cast(class_label_onehot, "float32")  # cast type
    class_loss = K.categorical_crossentropy(class_label_onehot,
                                            score_softmax)  # (B, K1)
    class_loss = K.sum(class_loss * K.cast(musk, 'float32')) / (
        K.sum(K.cast(musk, 'float32')) + K.epsilon())
    return class_loss
Example #18
def crossentropy_without_ambiguous(y_true, y_pred):

    y_pred = K.reshape(y_pred, (-1, K.int_shape(y_pred)[-1]))
    log_softmax = tf.nn.log_softmax(y_pred)

    #    class_weight = [0.2, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0,
    #                    5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0]
    #    log_softmax = log_softmax * np.array(class_weight)

    #    y_true = K.one_hot(tf.to_int32(K.flatten(K.argmax(y_true))), K.int_shape(y_pred)[-1]+1)
    y_true = K.one_hot(tf.to_int32(K.flatten(y_true)),
                       K.int_shape(y_pred)[-1] + 1)
    unpacked = tf.unstack(y_true, axis=-1)
    y_true = tf.stack(unpacked[:-1], axis=-1)

    cross_entropy = -K.sum(y_true * log_softmax, axis=1)
    cross_entropy_mean = K.mean(cross_entropy)

    return cross_entropy_mean
Example #19
def jaccard_dist_discrete(y_true, y_pred):
    smooth = 1e-12
    # amount of classes
    cl = K.shape(y_true)[-1]-1
    # discrete, one hot predictions
    y_pred = K.one_hot(K.argmax(y_pred, axis=-1), cl+1)
    # reshape
    y_true = K.reshape(y_true[...,1:], [-1, cl])
    y_pred = K.reshape(y_pred[...,1:], [-1, cl])
    # multiple-class-problem
    T = K.sum(y_true, axis=0)
    P = K.sum(K.square(y_pred), axis=0)
    PT = K.sum(y_pred * y_true, axis=0)
    denom = P + T - PT
    # average the jaccard-distance 
    jcd = (denom - PT + smooth) / (denom + smooth)
    cl = K.tf.to_float(cl, name='ToFloat')
    result = K.sum(jcd) / cl
    return result
Example #20
 def build(self, shape_input):
     M = shape_input[1] - 1
     self.I = k_back.eye(M)
     init_mu = RandomUniform(minval=0.01, maxval=10)
     init_pfd = RandomUniform(minval=0.01, maxval=10)
     self.mu = self.add_weight('mu',
                               shape=(M, 1),
                               initializer=init_mu,
                               constraint=NonNeg())
     data_p = self.add_weight('data_p',
                              shape=(M, M - 1),
                              initializer=init_pfd,
                              constraint=NonNeg())
     data_p_scaled = data_p / k_back.sum(data_p, axis=1, keepdims=True)
     self.P = k_back.reshape(
         k_back.flatten(data_p_scaled)[None, :] @ k_back.one_hot(
             [j for j in range(M * M) if j % (M + 1) != 0], M * M), (M, M))
     self.odot = (self.P - self.I) * self.mu
     self.is_built = True
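The one_hot matmul in build above is a scatter: it places the M * (M - 1) learned off-diagonal entries into an M x M matrix whose diagonal stays zero (the skipped indices with j % (M + 1) == 0 are exactly the diagonal of the flattened matrix). A small numpy sketch of the same construction (illustrative values, M = 3):

import numpy as np

M = 3
data_p_scaled = np.arange(1, M * (M - 1) + 1, dtype=float).reshape(M, M - 1)
off_diag = [j for j in range(M * M) if j % (M + 1) != 0]
scatter = np.eye(M * M)[off_diag]                            # one-hot rows, shape (M*(M-1), M*M)
P = (data_p_scaled.reshape(1, -1) @ scatter).reshape(M, M)
print(P)
# [[0. 1. 2.]
#  [3. 0. 4.]
#  [5. 6. 0.]]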
Example #21
    def optimizer(self):
        a = K.placeholder(shape=(None,), dtype='int32')
        y = K.placeholder(shape=(None,), dtype='float32')

        prediction = self.model.output

        a_one_hot = K.one_hot(a, self.n_action)
        q_value = K.sum(prediction * a_one_hot, axis=1)
        error = K.abs(y - q_value)

        quadratic_part = K.clip(error, 0.0, 1.0)
        linear_part = error - quadratic_part
        loss = K.mean(0.5 * K.square(quadratic_part) + linear_part)

        optimizer = RMSprop(lr=0.00025, epsilon=0.01)
        updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
        train = K.function([self.model.input, a, y], [loss], updates=updates)

        return train
Example #22
    def optimizer(self):
        a = K.placeholder(shape=(None,), dtype='int32')
        y = K.placeholder(shape=(None,), dtype='float32')

        py_x = self.model.output

        a_one_hot = K.one_hot(a, self.action_size)
        q_value = K.sum(py_x * a_one_hot, axis=1)
        error = K.abs(y - q_value)

        quadratic_part = K.clip(error, 0.0, 1.0)
        linear_part = error - quadratic_part
        loss = K.mean(0.5 * K.square(quadratic_part) + linear_part)

        optimizer = Adam(lr=LEARNING_RATE, epsilon=MIN_GRAD)
        updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
        train = K.function([self.model.input, a, y], [loss], updates=updates)

        return train
Example #23
File: losses.py Project: theislab/LODE
    def focal_loss(self, y_true, y_pred):
        # flatten tensors
        y_pred = K.flatten(y_pred)
        y_true = K.flatten(y_true)

        y_true = K.flatten(K.one_hot(K.cast(y_true, 'int32'), num_classes = self.num_classes))

        # Clip the prediction value to prevent NaN's and Inf's
        epsilon = K.epsilon()
        y_pred = K.clip(y_pred, epsilon, 1. - epsilon)

        # Calculate Cross Entropy
        cross_entropy = -y_true * K.log(y_pred)

        # Calculate Focal Loss
        loss = self.alpha * K.pow(1 - y_pred, self.gamma) * cross_entropy

        # Compute mean loss in mini_batch
        return K.mean(K.sum(loss, axis = -1))
Example #24
    def call(self, inputs, **kwargs):
        if type(inputs) is list:  # true label is provided with shape = [None, n_classes], i.e. one-hot code.
            assert len(inputs) == 2
            inputs, mask = inputs
        else:  # if no true label, mask by the max length of capsules. Mainly used for prediction
            # compute lengths of capsules
            x = K.sqrt(K.sum(K.square(inputs), -1))
            # generate the mask which is a one-hot code.
            # mask.shape=[None, n_classes]=[None, num_capsule]
            mask = K.one_hot(indices=K.argmax(x, 1),
                             num_classes=x.get_shape().as_list()[1])

        # inputs.shape=[None, num_capsule, dim_capsule]
        # mask.shape=[None, num_capsule]
        # masked.shape=[None, num_capsule * dim_capsule]
        masked = K.batch_flatten(inputs * K.expand_dims(mask, -1))
        return masked
Example #25
    def call(self, inputs, mask=None):

        cos_m = math.cos(self.m)
        sin_m = math.sin(self.m)
        mm = sin_m * self.m
        threshold = math.cos(math.pi - self.m)

        # features
        X = inputs[0]
        # 1-D or one-hot label works as mask
        Y_mask = inputs[1]
        # If Y_mask is not in one-hot form, transfer it to one-hot form.
        if Y_mask.shape[-1] == 1:
            Y_mask = K.cast(Y_mask, tf.int32)
            Y_mask = K.reshape(K.one_hot(Y_mask, self.class_num),
                               (-1, self.class_num))

        X_normed = K.l2_normalize(X, axis=1)  # L2 Normalized X
        self.W = K.l2_normalize(self.W, axis=0)  # L2 Normalized Weights

        # cos(theta + m)
        cos_theta = K.dot(X_normed, self.W)
        cos_theta2 = K.square(cos_theta)
        sin_theta2 = 1. - cos_theta2
        sin_theta = K.sqrt(sin_theta2 + K.epsilon())
        cos_tm = self.s * ((cos_theta * cos_m) - (sin_theta * sin_m))

        # This condition controls the theta + m should in range [0, pi]
        #   0 <= theta + m < = pi
        #   -m <= theta <= pi - m
        cond_v = cos_theta - threshold
        cond = K.cast(K.relu(cond_v), dtype=tf.bool)
        keep_val = self.s * (cos_theta - mm)
        cos_tm_temp = tf.where(cond, cos_tm, keep_val)

        # mask by label
        Y_mask += K.epsilon()
        inv_mask = 1. - Y_mask
        s_cos_theta = self.s * cos_theta

        output = K.softmax((s_cos_theta * inv_mask) + (cos_tm_temp * Y_mask))

        return output
Example #26
    def optimizer(self):
        a = K.placeholder(shape=(None,), dtype='int32')
        y = K.placeholder(shape=(None,), dtype='float32')

        # q-values predicted by the train model for the given history
        prediction = self.model.output

        # one-hot encode the action taken in the history
        # e.g. action 0 becomes [1, 0, 0]
        a_one_hot = K.one_hot(a, self.action_size)

        # e.g. [1, 0, 0] * [0.32113, 0.1123, 0.00123] = [0.32113, 0, 0]
        # summing over the last axis then gives 0.32113
        q_value = K.sum(prediction * a_one_hot, axis=1)
        # y is the target value, i.e. reward + discount_factor * np.max(model.predict(next_history))
        # the error is y - q_value; the worse the current q_value estimate, the larger the error
        # q_value above is our predicted value (q hat)
        error = K.abs(y - q_value)


        quadratic_part = K.clip(error, 0.0, 1.0)
        linear_part = error - quadratic_part
        loss = K.mean(0.5 * K.square(quadratic_part) + linear_part)

        optimizer = RMSprop(lr=0.00025, epsilon=0.01)
        # updates is a list
        updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
        train = K.function([self.model.input, a, y], [loss], updates=updates)
        """Instantiates a Keras function.

            # Arguments
                inputs: List of placeholder tensors.
                outputs: List of output tensors.
                updates: List of update ops.
                **kwargs: Passed to `tf.Session.run`.

            # Returns
                Output values as Numpy arrays.

            # Raises
                ValueError: if invalid kwargs are passed in.
            """
        return train
Example #27
    def call(self, inputs, **kwargs):
        if type(inputs) is list:  # the true label is given with shape=[None, n_classes], i.e. as a one-hot code.
            assert len(inputs) == 2
            inputs, mask = inputs
        else:  # with no true label, mask using the capsule with the largest length.
            # compute capsule lengths
            x = K.sqrt(K.sum(K.square(inputs), -1))
            # generate the mask as a one-hot code
            # mask.shape=[None, n_classes]=[None, num_capsule]
            mask = K.one_hot(indices=K.argmax(x, 1),
                             num_classes=x.get_shape().as_list()[1])

        # inputs.shape=[None, num_capsule, dim_capsule]
        # mask.shape=[None, num_capsule]
        # masked.shape=[None, num_capsule * dim_capsule]
        masked = K.batch_flatten(inputs * K.expand_dims(mask, -1))
        return masked
Example #28
def fgsm(model, sess, sample):

    x = cv2.resize(sample, (164, 48))  # resize to a fixed 164x48
    x = np.array([x.transpose(1, 0, 2)])
    x_adv = x
    x_noise = np.zeros_like(x)

    saver = tf.train.Saver()
    if os.path.exists('.metadata/noise.npy'):
        noise = np.load(".metadata/noise.npy")
        x_adv = x_adv + noise

    else:
        for i in range(epochs):
            target = K.one_hot(target_class, 84)
            loss = -1 * K.categorical_crossentropy(target, model.output)
            grads = K.gradients(loss, model.input)
            delta = K.sign(grads[0])
            x_noise = x_noise + delta
            x_adv = x_adv + epsilon * delta
            x_adv = sess.run(x_adv, feed_dict={model.input: x})

            y_pred = model.predict(x_adv)
            y_pred = y_pred[:, 2:, :]
            result, confidence = fastdecode(y_pred)
            #prev_probs.append(preds[0][target_class])

            print("Epoch " + str(i + 1) + ":", end='')
            print(str(result) + '  ' + str(confidence))
            noise = x_adv - x
            pass
        # saver.save(sess, './checkpoint_dir/MyModel')

    plot_img(np2img(x_adv), './images_out/wbox_adv_img.jpg')
    print('\nAdversarial Result:')
    y_pred = model.predict(x_adv)
    y_pred = y_pred[:, 2:, :]
    result, confidence = fastdecode(y_pred)
    print(str(result) + '  ' + str(confidence))
    np.save(".metadata/noise.npy", noise)
    np.save(".metadata/adv.npy", x_adv)
    return [str(result), str(confidence)[:5]]
Example #29
    def viterbi_decoding(self, X, mask=None):
        input_energy = self.activation(K.dot(X, self.kernel) + self.bias)
        if self.use_boundary:
            input_energy = self.add_boundary_energy(input_energy, mask,
                                                    self.left_boundary,
                                                    self.right_boundary)

        argmin_tables = self.recursion(input_energy, mask, return_logZ=False)
        argmin_tables = K.cast(argmin_tables, 'int32')

        # backward to find best path, `initial_best_idx` can be any, as all elements in the last argmin_table are the same
        argmin_tables = K.reverse(argmin_tables, 1)
        initial_best_idx = [
            K.expand_dims(argmin_tables[:, 0, 0])
        ]  # matrix instead of vector is required by tf `K.rnn`
        if K.backend() == 'theano':
            initial_best_idx = [K.T.unbroadcast(initial_best_idx[0], 1)]

        def gather_each_row(params, indices):
            n = K.shape(indices)[0]
            if K.backend() == 'theano':
                return params[K.T.arange(n), indices]
            else:
                indices = K.transpose(K.stack([K.tf.range(n), indices]))
                return K.tf.gather_nd(params, indices)

        def find_path(argmin_table, best_idx):
            next_best_idx = gather_each_row(argmin_table, best_idx[0][:, 0])
            next_best_idx = K.expand_dims(next_best_idx)
            if K.backend() == 'theano':
                next_best_idx = K.T.unbroadcast(next_best_idx, 1)
            return next_best_idx, [next_best_idx]

        _, best_paths, _ = K.rnn(find_path,
                                 argmin_tables,
                                 initial_best_idx,
                                 input_length=K.int_shape(X)[1],
                                 unroll=self.unroll)
        best_paths = K.reverse(best_paths, 1)
        best_paths = K.squeeze(best_paths, 2)

        return K.one_hot(best_paths, self.units)
Example #30
def mean_iou(y_true, y_pred, smooth=None, axis=-1):
    """Jaccard distance for semantic segmentation, also known as the intersection-over-union loss.
    This loss is useful when you have unbalanced numbers of pixels within an image
    because it gives all classes equal weight. However, it is not the de facto
    standard for image segmentation.
    For example, assume you are trying to predict if each pixel is cat, dog, or background.
    You have 80% background pixels, 10% dog, and 10% cat. If the model predicts 100% background
    should it be 80% right (as with categorical cross entropy) or 30% (with this loss)?
    The loss has been modified to have a smooth gradient as it converges on zero.
    This has been shifted so it converges on 0 and is smoothed to avoid exploding
    or vanishing gradients.
    Also see jaccard, which takes a slightly different approach.
    Jaccard = (|X & Y|)/ (|X|+ |Y| - |X & Y|)
            = sum(|A*B|)/(sum(|A|)+sum(|B|)-sum(|A*B|))
    # References
    Csurka, Gabriela & Larlus, Diane & Perronnin, Florent. (2013).
    What is a good evaluation measure for semantic segmentation?.
    BMVC 2013. 10.5244/C.27.32.
    https://en.wikipedia.org/wiki/Jaccard_index
    """
    if smooth is None:
        smooth = K.epsilon()
    pred_shape = K.shape(y_pred)
    true_shape = K.shape(y_true)

    # reshape such that w and h dim are multiplied together
    y_pred_reshaped = K.reshape(y_pred, (-1, pred_shape[-1]))
    y_true_reshaped = K.reshape(y_true, (-1, true_shape[-1]))

    # correctly classified
    clf_pred = K.one_hot(K.argmax(y_pred_reshaped), num_classes=true_shape[-1])
    equal_entries = K.cast(K.equal(clf_pred, y_true_reshaped),
                           dtype='float32') * y_true_reshaped

    intersection = K.sum(equal_entries, axis=1)
    union_per_class = K.sum(y_true_reshaped, axis=1) + K.sum(y_pred_reshaped,
                                                             axis=1)

    # smooth added to avoid dividing by zero
    iou = (intersection + smooth) / ((union_per_class - intersection) + smooth)

    return K.mean(iou)
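A toy numpy check of the docstring's cat/dog/background example (illustrative per-class pixel fractions only, not the tensor pipeline above):

import numpy as np

true = np.array([0.8, 0.1, 0.1])       # pixel fractions: background, dog, cat
pred = np.array([1.0, 0.0, 0.0])       # model predicts background everywhere
intersection = np.minimum(true, pred)  # [0.8, 0.0, 0.0]
union = true + pred - intersection     # [1.0, 0.1, 0.1]
print(intersection.sum())              # 0.8   -> the "80% right" of categorical cross entropy
print((intersection / union).mean())   # ~0.27 -> mean IoU, the roughly 30% case in the docstring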
Example #31
    def sp_index(y_true, y_pred):
        """

        :param y_true:
        :param y_pred:
        :return:
        """
        num_classes = K.int_shape(y_pred)[
            1]  # y_true returns (None, None) for int_shape
        # y_pred returns (None, num_classes)

        true_positives = K.sum(K.cast(
            y_true * K.one_hot(K.argmax(y_pred, axis=1), num_classes),
            dtype='float32'),
                               axis=0)
        possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)), axis=0)
        recall = true_positives / (possible_positives + K.epsilon())
        sp = K.sqrt(K.mean(recall) * K.prod(K.pow(recall, 1 / num_classes)))

        return sp
Example #32
def yolo_boxes_and_scores(feats, anchors, attributes, num_seen, num_classes,
                          input_shape, image_shape):
    """Process Conv layer output"""
    box_xy, box_wh, box_attribute, object_prob = yolo_head(
        feats, anchors, num_seen, input_shape)
    boxes = yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape)
    boxes = K.reshape(boxes, [-1, 4])

    for _ in range(4):
        attributes = K.expand_dims(attributes, 0)
    box_confidence = K.max(object_prob, axis=-1, keepdims=True)

    num_unseen = num_classes - num_seen
    box_class_probs = cosine_similarity(K.expand_dims(box_attribute, -2),
                                        attributes)
    box_class_probs = K.one_hot(K.argmax(box_class_probs[..., num_seen:], -1),
                                num_unseen)
    box_scores = box_confidence * box_class_probs
    box_scores = K.reshape(box_scores, [-1, num_unseen])
    return boxes, box_scores
Example #33
    def build_normal_optimizer(self):
        a = K.placeholder(shape=(None, ), dtype='int32')
        y = K.placeholder(shape=(None, ), dtype='float32')

        behavior = self.behavior_policy.output
        a_one_hot = K.one_hot(a, self.action_size)
        q_value = K.sum(behavior * a_one_hot, axis=1)
        error = K.abs(y - q_value)
        quadratic_part = K.clip(error, 0.0, 1.0)
        linear_part = error - quadratic_part
        loss = K.mean(0.5 * K.square(quadratic_part) + linear_part)

        print(" ##### build_normal_optimizer(input:{}, output:{}) #####\n".
              format(self.behavior_policy.input, self.behavior_policy.output))
        adam = Adam(lr=self.learning_rate, epsilon=self.learning_epsilon)
        updates = adam.get_updates(self.behavior_policy.trainable_weights, [],
                                   loss)
        optim = K.function([self.behavior_policy.input, a, y], [loss],
                           updates=updates)
        return optim
Example #34
def sparse_accuracy_ignoring_last_label(y_true, y_pred):
    '''
    save_prefix = "test"
    save_to_dir = ('~/KerasWorkspace/Keras-FCN/weedSpec1/debug2')
    fname = '{prefix}_{hash}'.format(prefix=save_prefix,
		              hash=np.random.randint(1e4))
    img.save(os.path.join(save_to_dir, 'img_' + fname + '.{format}'.format(format='png')))
    label.save(os.path.join(save_to_dir, 'label_' + fname + '.png'))
    '''

    nb_classes = K.int_shape(y_pred)[-1]
    y_pred = K.reshape(y_pred, (-1, nb_classes))

    y_true = K.one_hot(tf.to_int32(K.flatten(y_true)),
                       nb_classes + 1)
    unpacked = tf.unstack(y_true, axis=-1)
    legal_labels = ~tf.cast(unpacked[-1], tf.bool)
    y_true = tf.stack(unpacked[:-1], axis=-1)

    return K.sum(tf.to_float(legal_labels & K.equal(K.argmax(y_true, axis=-1),
                                                    K.argmax(y_pred, axis=-1)))) / K.sum(tf.to_float(legal_labels))
Example #35
    def call(self, x):
        # Flatten input except for last dimension.
        flat_inputs = K.reshape(x, (-1, self.embedding_dim))

        # Calculate distances of input to embedding vectors.
        distances = (K.sum(flat_inputs**2, axis=1, keepdims=True) -
                     2 * K.dot(flat_inputs, self.w) +
                     K.sum(self.w**2, axis=0, keepdims=True))

        # Retrieve encoding indices.
        encoding_indices = K.argmax(-distances, axis=1)
        encodings = K.one_hot(encoding_indices, self.num_embeddings)
        encoding_indices = K.reshape(encoding_indices, K.shape(x)[:-1])
        quantized = self.quantize(encoding_indices)

        # Metrics.
        #avg_probs = K.mean(encodings, axis=0)
        #perplexity = K.exp(- K.sum(avg_probs * K.log(avg_probs + epsilon)))

        return quantized
Example #36
    def call(self, x):
        # Flatten input except for last dimension.
        flat_inputs = K.reshape(x, (-1, self.embedding_dim))

        # Calculate distances of input to embedding vectors.
        distances = (K.sum(flat_inputs**2, axis=1, keepdims=True) -
                     2 * K.dot(flat_inputs, self.w) +
                     K.sum(self.w**2, axis=0, keepdims=True))

        # Retrieve encoding indices.
        encoding_indices = K.argmax(-distances, axis=1)
        encodings = K.one_hot(encoding_indices, self.num_classes)
        encoding_indices = K.reshape(encoding_indices, K.shape(x)[:-1])
        quantized = self.quantize(encoding_indices)

        e_latent_loss = K.mean((K.stop_gradient(quantized) - x)**2)
        q_latent_loss = K.mean((quantized - K.stop_gradient(x))**2)
        self.add_loss(e_latent_loss + q_latent_loss * self.beta)
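        # straight-through estimator: the forward pass returns the quantized codes,
        # while gradients flow to the encoder output x as if quantization were the identity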

        return K.stop_gradient(quantized - x) + x
Example #37
File: DQN.py Project: yhcao6/DQN
    def optimizer(self):
        action = K.placeholder(shape=(None, ), dtype='int32')
        target_q = K.placeholder(shape=(None, ), dtype='float32')
        action_mask = K.one_hot(action, ACTION_COUNT)

        pred = self.model.output
        q = K.sum(pred * action_mask, axis=1)
        err = K.abs(target_q - q)

        # huber loss
        l2 = K.clip(err, 0.0, 1.0)
        l1 = err - l2
        loss = K.mean(0.5 * K.square(l2) + l1)

        optimizer = RMSprop(lr=LEARING_RATE, epsilon=0.01)
        updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
        train = K.function([self.model.input, action, target_q], [loss],
                           updates=updates)

        return train
Example #38
    def build_optimizer(self):
        action = K.placeholder(shape=(None, ), dtype="int32")
        y = K.placeholder(shape=(None, ), dtype="float32")
        out = self.model.output

        # huber loss
        one_hot = K.one_hot(action, self.action_size)
        q_value = K.sum(out * one_hot, axis=1)
        error = y - q_value
        condition = K.abs(error) < 1.0
        squared_loss = 0.5 * K.square(error)
        linear_loss = K.abs(error) - 0.5
        clipped_error = tf.where(condition, squared_loss, linear_loss)
        loss = K.mean(clipped_error)

        optimizer = RMSprop(lr=self.learning_rate, epsilon=0.01)
        updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
        train_op = K.function([self.model.input, action, y], [loss],
                              updates=updates)
        return train_op
Example #39
    def _focal_loss(label, logit, num_classes=2):
        """
        :param label: (batch_size,)
        :param logit: (batch_size, classes_num)
        """
        label = K.one_hot(label, num_classes=num_classes)
        one_minus_p = tf.where(tf.equal(label, tf.ones_like(label)),
                               label - logit, tf.zeros_like(label))
        fl = -1 * one_minus_p**gamma * tf.log(
            tf.clip_by_value(one_minus_p, 1e-8, 1.0))

        if alpha is None:
            return tf.reduce_sum(fl)

        assert len(alpha) == num_classes
        t_alpha = tf.convert_to_tensor(alpha, dtype=logit.dtype)
        t_alpha = tf.reshape(t_alpha, shape=(1, -1))
        alpha_ = tf.zeros_like(logit, dtype=logit.dtype) + t_alpha
        fl = alpha_ * fl
        return tf.reduce_sum(fl)
Example #40
    def weighted_multinomial_loss(y_true, y_pred):
        """
        :param y_pred: np.array, dimensions should be (n, h, w, q)
        :param y_true: np.array, dimensions should be (n, h, w, q); the soft-encoded ground truth
        Make sure all values are between 0 and 1, and that the sum of soft_encoded = 1
        :return: loss
        """

        v = K.argmax(y_true, axis=3)
        v = K.one_hot(v, 262)
        v = v * weights
        v = K.sum(v, axis=3)

        loss = K.categorical_crossentropy(y_true, y_pred, axis=3)  # Cross entropy
        loss = K.dot(v, loss)
        loss = K.sum(loss, axis=1)  # Sum over all width vectors
        loss = K.sum(loss, axis=1)  # Sum over all height vectors
        loss = K.sum(loss)  # Sum over all images in the batch

        return loss
Example #41
    def call(self, logits):
        logits_ = K.permute_dimensions(logits, (0, 2, 1))
        #[batchsize, 1, MAX_SENTS]

        unif_shape = tf.shape(logits_)[0]
        uniform = tf.random_uniform(shape=(unif_shape, self.k, MAX_SENTS),
                                    minval=np.finfo(
                                        tf.float32.as_numpy_dtype).tiny,
                                    maxval=1.0)

        gumbel = -K.log(-K.log(uniform))
        noisy_logits = (gumbel + logits_) / self.tau0
        samples = K.softmax(noisy_logits)
        samples = K.max(samples, axis=1)
        samples = K.expand_dims(samples, -1)

        discrete_logits = K.one_hot(K.argmax(logits_, axis=-1),
                                    num_classes=MAX_SENTS)
        discrete_logits = K.permute_dimensions(discrete_logits, (0, 2, 1))
        return K.in_train_phase(samples, discrete_logits)
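For reference, the noise added above is standard Gumbel noise, so the train-phase branch is a Gumbel-softmax (Concrete) relaxation of the hard one-hot branch used at test time. A tiny numpy sketch of the sampling identity (illustrative only):

import numpy as np

rng = np.random.default_rng(0)
u = rng.uniform(1e-20, 1.0, size=100000)
g = -np.log(-np.log(u))   # Gumbel(0, 1) samples
print(g.mean())           # ~0.577, the Euler-Mascheroni constant (mean of Gumbel(0, 1))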
Example #42
    def call(self, inputs, mask=None):
        cos_m = math.cos(self.m)
        sin_m = math.sin(self.m)
        mm = sin_m * self.m
        threshold = math.cos(math.pi - self.m)
        # inputs:
        # x: features, y_mask: 1-D or one-hot label works as mask
        x = inputs[0]
        y_mask = inputs[1]
        if y_mask.shape[-1] == 1:
            y_mask = K.cast(y_mask, tf.int32)
            y_mask = K.reshape(K.one_hot(y_mask, self.class_num),
                               (-1, self.class_num))

        # feature norm
        x = K.l2_normalize(x, axis=1)
        # weights norm
        self.W = K.l2_normalize(self.W, axis=0)

        # cos(theta+m)
        cos_theta = K.dot(x, self.W)
        cos_theta2 = K.square(cos_theta)
        sin_theta2 = 1. - cos_theta2
        sin_theta = K.sqrt(sin_theta2 + K.epsilon())
        cos_tm = self.s * ((cos_theta * cos_m) - (sin_theta * sin_m))

        # this condition controls the theta+m should in range [0, pi]
        #      0<=theta+m<=pi
        #     -m<=theta<=pi-m
        cond_v = cos_theta - threshold
        cond = K.cast(K.relu(cond_v), dtype=tf.bool)
        keep_val = self.s * (cos_theta - mm)
        cos_tm_temp = tf.where(cond, cos_tm, keep_val)

        # mask by label
        y_mask += K.epsilon()
        inv_mask = 1. - y_mask
        s_cos_theta = self.s * cos_theta
        output = K.softmax((s_cos_theta * inv_mask) + (cos_tm_temp * y_mask))

        return output
Example #43
    def call(self, inputs, mask=None):
        cos_m = math.cos(self.m)
        sin_m = math.sin(self.m)
        mm = sin_m * self.m
        threshold = math.cos(math.pi - self.m)
        # inputs:
        # x: features, y_mask: 1-D or one-hot label works as mask
        x = inputs[0]
        y_mask = inputs[1]
        if y_mask.shape[-1]==1:
            y_mask = K.cast(y_mask, tf.int32)
            y_mask = K.reshape(K.one_hot(y_mask, self.class_num),(-1, self.class_num))

        # feature norm
        x = K.l2_normalize(x, axis=1)
        # weights norm
        self.W = K.l2_normalize(self.W, axis=0)

        # cos(theta+m)
        cos_theta = K.dot(x, self.W)
        cos_theta2 = K.square(cos_theta)
        sin_theta2 = 1. - cos_theta2
        sin_theta = K.sqrt(sin_theta2 + K.epsilon())
        cos_tm = self.s * ((cos_theta * cos_m) - (sin_theta * sin_m))

        # this condition controls the theta+m should in range [0, pi]
        #      0<=theta+m<=pi
        #     -m<=theta<=pi-m
        cond_v = cos_theta - threshold
        cond = K.cast(K.relu(cond_v), dtype=tf.bool)
        keep_val = self.s * (cos_theta - mm)
        cos_tm_temp = tf.where(cond, cos_tm, keep_val)

        # mask by label
        y_mask += K.epsilon()
        inv_mask = 1. - y_mask
        s_cos_theta = self.s * cos_theta
        output = K.softmax((s_cos_theta * inv_mask) + (cos_tm_temp * y_mask))

        return output
Example #44
def yolo_loss(args,
              anchors,
              num_classes,
              rescore_confidence=False,
              print_loss=False):
    """YOLO localization loss function.

    Parameters
    ----------
    yolo_output : tensor
        Final convolutional layer features.

    true_boxes : tensor
        Ground truth boxes tensor with shape [batch, num_true_boxes, 5]
        containing box x_center, y_center, width, height, and class.

    detectors_mask : array
        0/1 mask for detector positions where there is a matching ground truth.

    matching_true_boxes : array
        Corresponding ground truth boxes for positive detector positions.
        Already adjusted for conv height and width.

    anchors : tensor
        Anchor boxes for model.

    num_classes : int
        Number of object classes.

    rescore_confidence : bool, default=False
        If true then set confidence target to IOU of best predicted box with
        the closest matching ground truth box.

    print_loss : bool, default=False
        If True then use a tf.Print() to print the loss components.

    Returns
    -------
    mean_loss : float
        mean localization loss across minibatch
    """
    (yolo_output, true_boxes, detectors_mask, matching_true_boxes) = args
    num_anchors = len(anchors)
    object_scale = 5
    no_object_scale = 1
    class_scale = 1
    coordinates_scale = 1
    pred_xy, pred_wh, pred_confidence, pred_class_prob = yolo_head(
        yolo_output, anchors, num_classes)

    # Unadjusted box predictions for loss.
    # TODO: Remove extra computation shared with yolo_head.
    yolo_output_shape = K.shape(yolo_output)
    feats = K.reshape(yolo_output, [
        -1, yolo_output_shape[1], yolo_output_shape[2], num_anchors,
        num_classes + 5
    ])
    pred_boxes = K.concatenate(
        (K.sigmoid(feats[..., 0:2]), feats[..., 2:4]), axis=-1)

    # TODO: Adjust predictions by image width/height for non-square images?
    # IOUs may be off due to different aspect ratio.

    # Expand pred x,y,w,h to allow comparison with ground truth.
    # batch, conv_height, conv_width, num_anchors, num_true_boxes, box_params
    pred_xy = K.expand_dims(pred_xy, 4)
    pred_wh = K.expand_dims(pred_wh, 4)

    pred_wh_half = pred_wh / 2.
    pred_mins = pred_xy - pred_wh_half
    pred_maxes = pred_xy + pred_wh_half

    true_boxes_shape = K.shape(true_boxes)

    # batch, conv_height, conv_width, num_anchors, num_true_boxes, box_params
    true_boxes = K.reshape(true_boxes, [
        true_boxes_shape[0], 1, 1, 1, true_boxes_shape[1], true_boxes_shape[2]
    ])
    true_xy = true_boxes[..., 0:2]
    true_wh = true_boxes[..., 2:4]

    # Find IOU of each predicted box with each ground truth box.
    true_wh_half = true_wh / 2.
    true_mins = true_xy - true_wh_half
    true_maxes = true_xy + true_wh_half

    intersect_mins = K.maximum(pred_mins, true_mins)
    intersect_maxes = K.minimum(pred_maxes, true_maxes)
    intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)
    intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1]

    pred_areas = pred_wh[..., 0] * pred_wh[..., 1]
    true_areas = true_wh[..., 0] * true_wh[..., 1]

    union_areas = pred_areas + true_areas - intersect_areas
    iou_scores = intersect_areas / union_areas

    # Best IOUs for each location.
    best_ious = K.max(iou_scores, axis=4)  # Best IOU scores.
    best_ious = K.expand_dims(best_ious)

    # A detector has found an object if IOU > thresh for some true box.
    object_detections = K.cast(best_ious > 0.6, K.dtype(best_ious))

    # TODO: Darknet region training includes extra coordinate loss for early
    # training steps to encourage predictions to match anchor priors.

    # Determine confidence weights from object and no_object weights.
    # NOTE: YOLO does not use binary cross-entropy here.
    no_object_weights = (no_object_scale * (1 - object_detections) *
                         (1 - detectors_mask))
    no_objects_loss = no_object_weights * K.square(-pred_confidence)

    if rescore_confidence:
        objects_loss = (object_scale * detectors_mask *
                        K.square(best_ious - pred_confidence))
    else:
        objects_loss = (object_scale * detectors_mask *
                        K.square(1 - pred_confidence))
    confidence_loss = objects_loss + no_objects_loss

    # Classification loss for matching detections.
    # NOTE: YOLO does not use categorical cross-entropy loss here.
    matching_classes = K.cast(matching_true_boxes[..., 4], 'int32')
    matching_classes = K.one_hot(matching_classes, num_classes)
    classification_loss = (class_scale * detectors_mask *
                           K.square(matching_classes - pred_class_prob))

    # Coordinate loss for matching detection boxes.
    matching_boxes = matching_true_boxes[..., 0:4]
    coordinates_loss = (coordinates_scale * detectors_mask *
                        K.square(matching_boxes - pred_boxes))

    confidence_loss_sum = K.sum(confidence_loss)
    classification_loss_sum = K.sum(classification_loss)
    coordinates_loss_sum = K.sum(coordinates_loss)
    total_loss = 0.5 * (
        confidence_loss_sum + classification_loss_sum + coordinates_loss_sum)
    if print_loss:
        total_loss = tf.Print(
            total_loss, [
                total_loss, confidence_loss_sum, classification_loss_sum,
                coordinates_loss_sum
            ],
            message='yolo_loss, conf_loss, class_loss, box_coord_loss:')

    return total_loss
Example #45
def target_category_loss(x, category_index, nb_classes):
    return tf.multiply(x, K.one_hot([category_index], nb_classes))
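A quick sanity check of the one-liner above (assuming target_category_loss and the TF Keras backend K are in scope; values are illustrative):

from keras import backend as K

scores = K.constant([[0.1, 0.7, 0.2]])
masked = target_category_loss(scores, 1, 3)   # keep only the class-1 score
print(K.eval(masked))                         # -> approximately [[0.  0.7 0. ]]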
Example #46
 def one_hot(self, seq, num_classes):
     return K.one_hot(seq, num_classes) #K.equal(K.reshape(seq, (-1, 1)), K.arange(num_classes))
Example #47
 def one_hot(self, seq, num_classes):
     return K.one_hot(K.reshape(K.cast(seq, "int32"), (-1, 1)), num_classes)