def euclidean_distance(self, x, y):
    return K.sqrt(
        K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True),
                  K.epsilon()))
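In Siamese-style models this distance is usually wrapped in a Lambda layer; the K.maximum(..., K.epsilon()) clamp keeps the argument of sqrt strictly positive so the gradient does not blow up at zero distance. A minimal usage sketch (the encoding size of 128 and the standalone-function form are assumptions, not from the original project):

from tensorflow.keras import layers, Model
import tensorflow.keras.backend as K

def euclidean_distance(tensors):
    # same computation as above, adapted to a plain function over a pair of tensors
    x, y = tensors
    return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True), K.epsilon()))

a = layers.Input(shape=(128,))
b = layers.Input(shape=(128,))
dist = layers.Lambda(euclidean_distance)([a, b])   # shape: (batch, 1)
model = Model([a, b], dist)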
Example #2
def normalize(x):
    """Utility function to normalize a tensor by its L2 norm"""
    return (x + 1e-10) / (K.sqrt(K.mean(K.square(x))) + 1e-10)
Example #3
def euclidean_distance_loss(y_true, y_pred):
    return K.sqrt(K.sum(K.square(y_pred - y_true), axis=-1))
Example #4
def softabsolute(self, X):
    return kb.sqrt(X**2 + self.epsilon)
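kb.sqrt(X**2 + epsilon) is a smooth surrogate for |X|: it matches the absolute value away from zero while staying differentiable at zero. A quick numeric check (the epsilon value is an assumed placeholder for self.epsilon):

import numpy as np

eps = 1e-6                       # assumed stand-in for self.epsilon
x = np.array([-2.0, 0.0, 3.0])
print(np.sqrt(x**2 + eps))       # ~[2.0, 0.001, 3.0]: close to |x|, smooth at x = 0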
Example #5
def call(self, inputs):
    target, wrt = inputs
    grad = K.gradients(target, wrt)[0]
    return K.sqrt(K.sum(K.batch_flatten(K.square(grad)), axis=1, keepdims=True)) - 1
Example #6
    def get_updates(self, loss, params):

        self.updates = []
        self.updates.append(K.update_add(self.state_counter, 1))
        self.updates.append(K.update_add(self.iterator, 1))
        self.updates.append(K.update_add(self.iterations, 1))

        lr = self.lr
        ## lr exponential decay
        if self.initial_decay > 0:
            lr = lr * (1. / (1. + self.decay *
                             K.cast(self.iterations, K.dtype(self.decay))))

        shapes = [K.int_shape(p) for p in params]
        x = [K.update(K.zeros(shape), p) for shape, p in zip(shapes, params)]
        mu = [K.update(K.zeros(shape), p) for shape, p in zip(shapes, params)]

        grads = self.get_gradients(loss, params)
        moments = [
            K.zeros(shape, name='moment_' + str(i))
            for (i, shape) in enumerate(shapes)
        ]

        for x_i, x_prime_i, mu_i, g, m in zip(x, params, mu, grads, moments):

            ## we update x_prime (if we are in Langevin steps we update; otherwise we switch to parameters x_i)
            dx_prime_i = g - self.gamma * (x_i - x_prime_i)
            x_prime_update_i = K.switch(
                K.any(K.stack([
                    K.equal(self.state_counter, 0),
                    K.equal(self.num_steps, self.iterator)
                ],
                              axis=0),
                      axis=0), x_i, x_prime_i - self.sgld_step * dx_prime_i +
                K.sqrt(self.sgld_step) * self.sgld_noise *
                K.random_normal(K.int_shape(x_prime_i)))
            # Apply constraints.
            if getattr(x_prime_i, 'constraint', None) is not None:
                x_prime_update_i = x_prime_i.constraint(x_prime_update_i)
            self.updates.append(K.update(x_prime_i, x_prime_update_i))

            ## We update mu (if we are in Langevin steps we update; otherwise we switch to parameters x_i)
            mu_update_i = K.switch(K.equal(self.state_counter,
                                           0), x_i, (1 - self.alpha) * mu_i +
                                   self.alpha * x_prime_i)
            self.updates.append(K.update(mu_i, mu_update_i))

            ## As described in the paper, we remove gamma from the update because it interferes with the learning-rate annealing
            ## After each outer-loop update we apply an exponential decay on gamma
            ## The following lines concern the outer-loop updates

            ## Nesterov's momentum
            gradient = (x_i - mu_i)
            v = self.momentum * m - lr * gradient  # velocity
            self.updates.append(
                K.update(
                    m, K.switch(K.equal(self.state_counter, self.L + 1), v,
                                m)))
            if self.nesterov:
                new_x_i = x_i + self.momentum * v - lr * gradient
            else:
                new_x_i = x_i + v

            x_i_update = K.switch(K.equal(self.state_counter, self.L + 1),
                                  new_x_i, x_i)
            self.updates.append(K.update(x_i, x_i_update))

            ## Gamma scoping
            gamma_update = K.switch(K.equal(self.state_counter,
                                            self.L + 1), self.gamma,
                                    self.gamma * (1. + self.scoping))
            self.updates.append(K.update(self.gamma, gamma_update))

        counter = K.switch(K.equal(self.state_counter, self.L + 2),
                           K.constant(0, dtype='int64'), self.state_counter)
        self.updates.append(K.update(self.state_counter, counter))
        return self.updates
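The updates above alternate between inner Langevin steps and one outer Nesterov-style update, all gated by K.switch on state_counter: the outer parameter update fires only when the counter equals L + 1, and the counter is reset once it reaches L + 2. A standalone sketch of that counter-gating pattern in eager TensorFlow 2 (values are hypothetical; this is not the optimizer itself):

import tensorflow as tf

L = 3                                             # assumed number of inner steps
counter = tf.Variable(0, dtype=tf.int64)
x = tf.Variable(1.0)

for _ in range(10):
    counter.assign_add(1)
    # the "outer" update fires only when counter == L + 1, mirroring the K.switch above
    x.assign(tf.where(tf.equal(counter, L + 1), x * 0.5, x))
    # the counter resets after the full cycle, as in the final K.switch
    counter.assign(tf.where(tf.equal(counter, L + 2),
                            tf.constant(0, tf.int64), counter))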
Example #7
File: losses.py  Project: htjb/globalemu
def rmse(self):
    return K.sqrt(K.mean(K.square(self.y - self.y_)))
Example #8
	def style_checker(self):
		def eucl_dist_output_shape(shapes):
			shape1, shape2 = shapes
			return (shape1[0], 1)

		def fire_module(prv_lyr, fire_id, squeeze = 3, expand = 4):
			s_id = 'fire' + str(fire_id) + '/'
			sqz = 'sqz1'
			relu = 'relu_'
			exp1 = 'exp1'
			exp3 = 'exp3'

			#squeeze layer
			sqz_layer = Conv2D( squeeze, kernel_size=(1,1), padding='same', name=s_id+sqz )(prv_lyr)
			sqz_layer = Activation( 'relu', name=s_id+relu+sqz )(sqz_layer)

			#expand layer
			#1*1
			exp1_layer = Conv2D( expand, kernel_size=(1,1), padding='same', name=s_id+exp1)(sqz_layer)
			exp1_layer = Activation( 'relu', name=s_id+relu+exp1)(exp1_layer)
			#3*3
			exp3_layer = Conv2D( expand, kernel_size=(3,3), padding='same', name=s_id+exp3)(sqz_layer)
			exp3_layer = Activation( 'relu', name=s_id+relu+exp3)(exp3_layer)

			cnct_layer = concatenate([exp1_layer, exp3_layer])

			return cnct_layer

		def squeezeNet():
			
			inputs = Input(shape=img_shape)

			x = Conv2D(96, kernel_size=(4,4), padding='same', name='conv1' )(inputs)
			x = Activation('relu', name='relu_conv1')(x)
			x = MaxPool2D(pool_size=(3,3), strides=(2,2), name='pool1')(x)

			x = fire_module(x, fire_id=2, squeeze=16, expand=64)
			x = fire_module(x, fire_id=3, squeeze=16, expand=64)
			x = fire_module(x, fire_id=4, squeeze=32, expand=128)		
			x = MaxPool2D(pool_size=(3,3), strides=(2,2), name='pool2')(x)

			x = fire_module(x, fire_id=5, squeeze=32, expand=128)
			x = fire_module(x, fire_id=6, squeeze=48, expand=192)
			x = fire_module(x, fire_id=7, squeeze=48, expand=192)
			x = fire_module(x, fire_id=8, squeeze=64, expand=256)
			x = MaxPool2D(pool_size=(3,3), strides=(2,2), name='pool3')(x)

			x = fire_module(x, fire_id=9, squeeze=64, expand=256)
			x = BatchNormalization()(x)
			x = Conv2D(10, kernel_size=(4,4), padding='same', name='conv10')(x)
			x = Activation('relu', name='relu_conv10')(x)

			x = GlobalAveragePooling2D()(x)
			model = Model(
				inputs = inputs,
				outputs = x,
				name = 'squeezeNet'
				)
			
			return model
		
		input_base = Input(shape=img_shape)
		input_pair = Input(shape=img_shape)

		basemodel = squeezeNet()
		encode_base = basemodel(input_base)
		encode_pair = basemodel(input_pair)

		L2_layer = Lambda( lambda tensor: K.sqrt(K.sum((tensor[0]-tensor[1])**2, axis=1, keepdims=True )),  output_shape=eucl_dist_output_shape)
		L2_distance = L2_layer([encode_base, encode_pair])

		model = Model(
				inputs = [input_base, input_pair],
				outputs= L2_distance,
				name='style_checker'
				)

		return model
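The returned model emits one Euclidean distance per image pair, so it is typically trained with a contrastive-style loss that pulls similar pairs together and pushes dissimilar pairs beyond a margin. A hedged compile sketch (the margin value and the loss choice are assumptions, not taken from the original project):

import tensorflow.keras.backend as K

def contrastive_loss(y_true, y_pred, margin=1.0):
    # y_true: 1 for similar pairs, 0 for dissimilar pairs; y_pred: the L2 distance
    y_true = K.cast(y_true, y_pred.dtype)
    return K.mean(y_true * K.square(y_pred) +
                  (1. - y_true) * K.square(K.maximum(margin - y_pred, 0.)))

# model = ...style_checker()            # the model built above
# model.compile(optimizer='adam', loss=contrastive_loss)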
Example #9
def root_mean_squared_log_error_0(y_true, y_pred):
    return kb.sqrt(
        kb.mean(kb.square(kb.log(y_pred + 1) - kb.log(y_true + 1))) +
        0.00000001)
Example #10
acc = 1. * (np.prod(Y_pred == Y_test, axis=1)).sum() / len(X_test)
print('CNN+Pooling, accuracy without taking confidence into account: {}'.format(acc))
acc = 1. * (np.prod(Y_pred == Y_test, axis=1) * greater).sum() / len(X_test)
print('CNN+Pooling, accuracy taking confidence into account: {}'.format(acc))

# Build the CNN+Capsule classification model
input_image = Input(shape=(None, None, 1))
cnn = Conv2D(64, (3, 3), activation='relu')(input_image)
cnn = Conv2D(64, (3, 3), activation='relu')(cnn)
cnn = AveragePooling2D((2, 2))(cnn)
cnn = Conv2D(128, (3, 3), activation='relu')(cnn)
cnn = Conv2D(128, (3, 3), activation='relu')(cnn)
cnn = Reshape((-1, 128))(cnn)
capsule = Capsule(10, 16, 3, True)(cnn)
output = Lambda(lambda x: K.sqrt(K.sum(K.square(x), 2)),
                output_shape=(10, ))(capsule)

model = Model(inputs=input_image, outputs=output)
model.compile(
    loss=lambda y_true, y_pred: y_true * K.relu(0.9 - y_pred)**2 + 0.25 *
    (1 - y_true) * K.relu(y_pred - 0.1)**2,
    optimizer='adam',
    metrics=['accuracy'])

model.summary()

model.fit(x_train,
          y_train,
          batch_size=batch_size,
          epochs=10,
Example #11
def l2_proj_distance(x, grads, biases):
    return (K.abs(K.sum(grads * x[:, None], axis=-1) + biases) /
            K.sqrt(K.sum(grads * grads, axis=-1)))
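This is the batched point-to-hyperplane distance |w·x + b| / ||w||, with one (w, b) pair per output. A tiny worked check with made-up numbers:

import numpy as np

w = np.array([3.0, 4.0])                     # ||w|| = 5
b = -5.0
x = np.array([1.0, 1.0])
print(abs(w @ x + b) / np.linalg.norm(w))    # |3 + 4 - 5| / 5 = 0.4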
Example #12
def l2_distance(tensors):
    x, y = tensors
    sum_square = K.sum(K.square(x - y), axis=1, keepdims=True)
    return K.sqrt(K.maximum(sum_square, K.epsilon()))
Example #13
def _euclidean_distance(vects):
    x, y = vects
    return K.sqrt(K.sum(K.square(x - y), axis=1, keepdims=True))
Example #14
def rmsse(true, pred):
    assert pred.shape[0] == true.shape[0]
    # min : 0.03571428571428571
    loss_1 = K.sqrt((K.sum(K.square(true[:, 0:1] - pred)) + 1e-18) / K.sum(K.square(true[:, 1:2])))
    return loss_1
Example #15
def target_layer(x):
    import tensorflow.keras.backend as K
    return K.sqrt(x)
Example #16
def rmse(y_true, y_pred):
    return K.sqrt(K.mean(K.square(y_pred - y_true)))
Example #17
    def call(self, inputs, training=None, mask=None):
        input_shape = K.shape(inputs)

        if self.rank == 1:
            input_shape = [input_shape[i] for i in range(3)]
            batch_shape, dim, channels = input_shape

            xx_range = K.tile(K.expand_dims(K.arange(0, dim), axis=0),
                              K.stack([batch_shape, 1]))
            xx_range = K.expand_dims(xx_range, axis=-1)

            xx_channels = K.cast(xx_range, K.dtype(inputs))
            xx_channels = xx_channels / K.cast(dim - 1, K.dtype(inputs))
            xx_channels = (xx_channels * 2) - 1.

            outputs = K.concatenate([inputs, xx_channels], axis=-1)

        if self.rank == 2:
            if self.data_format == 'channels_first':
                inputs = K.permute_dimensions(inputs, [0, 2, 3, 1])
                input_shape = K.shape(inputs)

            input_shape = [input_shape[i] for i in range(4)]
            batch_shape, dim1, dim2, channels = input_shape

            xx_ones = K.ones(K.stack([batch_shape, dim2]), dtype='int32')
            xx_ones = K.expand_dims(xx_ones, axis=-1)

            xx_range = K.tile(K.expand_dims(K.arange(0, dim1), axis=0),
                              K.stack([batch_shape, 1]))
            xx_range = K.expand_dims(xx_range, axis=1)
            xx_channels = K.batch_dot(xx_ones, xx_range, axes=[2, 1])
            xx_channels = K.expand_dims(xx_channels, axis=-1)
            xx_channels = K.permute_dimensions(xx_channels, [0, 2, 1, 3])

            yy_ones = K.ones(K.stack([batch_shape, dim1]), dtype='int32')
            yy_ones = K.expand_dims(yy_ones, axis=1)

            yy_range = K.tile(K.expand_dims(K.arange(0, dim2), axis=0),
                              K.stack([batch_shape, 1]))
            yy_range = K.expand_dims(yy_range, axis=-1)

            yy_channels = K.batch_dot(yy_range, yy_ones, axes=[2, 1])
            yy_channels = K.expand_dims(yy_channels, axis=-1)
            yy_channels = K.permute_dimensions(yy_channels, [0, 2, 1, 3])

            xx_channels = K.cast(xx_channels, K.floatx())
            xx_channels = xx_channels / K.cast(dim1 - 1, K.floatx())
            xx_channels = (xx_channels * 2) - 1.

            yy_channels = K.cast(yy_channels, K.floatx())
            yy_channels = yy_channels / K.cast(dim2 - 1, K.floatx())
            yy_channels = (yy_channels * 2) - 1.

            outputs = K.concatenate([inputs, xx_channels, yy_channels],
                                    axis=-1)

            if self.use_radius:
                rr = K.sqrt(
                    K.square(xx_channels - 0.5) + K.square(yy_channels - 0.5))
                outputs = K.concatenate([outputs, rr], axis=-1)

            if self.data_format == 'channels_first':
                outputs = K.permute_dimensions(outputs, [0, 3, 1, 2])

        if self.rank == 3:
            if self.data_format == 'channels_first':
                inputs = K.permute_dimensions(inputs, [0, 2, 3, 4, 1])
                input_shape = K.shape(inputs)

            input_shape = [input_shape[i] for i in range(5)]
            batch_shape, dim1, dim2, dim3, channels = input_shape

            xx_ones = K.ones(K.stack([batch_shape, dim3]), dtype='int32')
            xx_ones = K.expand_dims(xx_ones, axis=-1)

            xx_range = K.tile(K.expand_dims(K.arange(0, dim2), axis=0),
                              K.stack([batch_shape, 1]))
            xx_range = K.expand_dims(xx_range, axis=1)

            xx_channels = K.batch_dot(xx_ones, xx_range, axes=[2, 1])
            xx_channels = K.expand_dims(xx_channels, axis=-1)
            xx_channels = K.permute_dimensions(xx_channels, [0, 2, 1, 3])

            xx_channels = K.expand_dims(xx_channels, axis=1)
            xx_channels = K.tile(xx_channels, [1, dim1, 1, 1, 1])

            yy_ones = K.ones(K.stack([batch_shape, dim2]), dtype='int32')
            yy_ones = K.expand_dims(yy_ones, axis=1)

            yy_range = K.tile(K.expand_dims(K.arange(0, dim3), axis=0),
                              K.stack([batch_shape, 1]))
            yy_range = K.expand_dims(yy_range, axis=-1)

            yy_channels = K.batch_dot(yy_range, yy_ones, axes=[2, 1])
            yy_channels = K.expand_dims(yy_channels, axis=-1)
            yy_channels = K.permute_dimensions(yy_channels, [0, 2, 1, 3])

            yy_channels = K.expand_dims(yy_channels, axis=1)
            yy_channels = K.tile(yy_channels, [1, dim1, 1, 1, 1])

            zz_range = K.tile(K.expand_dims(K.arange(0, dim1), axis=0),
                              K.stack([batch_shape, 1]))
            zz_range = K.expand_dims(zz_range, axis=-1)
            zz_range = K.expand_dims(zz_range, axis=-1)

            zz_channels = K.tile(zz_range, [1, 1, dim2, dim3])
            zz_channels = K.expand_dims(zz_channels, axis=-1)

            xx_channels = K.cast(xx_channels, K.floatx())
            xx_channels = xx_channels / K.cast(dim2 - 1, K.floatx())
            xx_channels = xx_channels * 2 - 1.

            yy_channels = K.cast(yy_channels, K.floatx())
            yy_channels = yy_channels / K.cast(dim3 - 1, K.floatx())
            yy_channels = yy_channels * 2 - 1.

            zz_channels = K.cast(zz_channels, K.floatx())
            zz_channels = zz_channels / K.cast(dim1 - 1, K.floatx())
            zz_channels = zz_channels * 2 - 1.

            outputs = K.concatenate(
                [inputs, zz_channels, xx_channels, yy_channels], axis=-1)

            if self.data_format == 'channels_first':
                outputs = K.permute_dimensions(outputs, [0, 4, 1, 2, 3])

        return outputs
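Each added coordinate channel is a linear ramp rescaled from [0, dim - 1] to [-1, 1]; for the rank-1 branch with dim = 5 the appended channel is [-1, -0.5, 0, 0.5, 1]. A minimal reproduction of just that normalization step (standalone sketch, not the layer itself):

import tensorflow as tf

dim = 5                                                    # assumed spatial length
xx = tf.cast(tf.range(dim), tf.float32) / float(dim - 1)   # 0 .. 1
xx = xx * 2.0 - 1.0                                        # -1 .. 1
print(xx.numpy())                                          # [-1.  -0.5  0.   0.5  1. ]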
Example #18
def rg_bbox_coord_loss(true, pred):
    lambda_coord = 5
    lambda_noobj = 0.5

    true_class = true[..., 10:]  # b * 7 * 7 * 20
    true_box = true[..., :4]  # b * 7 * 7 * 4 (0:5 == 5:10)
    true_conf = K.expand_dims(true[..., 4])  # b*7*7*1

    pred_class = pred[..., 10:]  # b*7*7*20
    pred_box1 = pred[..., :4]  # b*7*7*4
    pred_box2 = pred[..., 5:9]  # b*7*7*4
    pred_conf1 = K.expand_dims(pred[..., 4])  # b*7*7*1
    pred_conf2 = K.expand_dims(pred[..., 9])  # b*7*7*1
    pred_confs = K.concatenate([pred_conf1, pred_conf2])  # b*7*7*2

    # cls IoU and select best box iou
    box1_iou = cls_iou(true_box, pred_box1)  # b*7*7*1
    box2_iou = cls_iou(true_box, pred_box2)  # b*7*7*1

    box_ious = K.concatenate([box1_iou, box2_iou], axis=3)  # b*7*7*2
    box_ious = K.expand_dims(box_ious)  # b*7*7*2*1

    best_iou = K.max(box_ious, axis=4)  # b*7*7*2
    best_p = K.max(best_iou, axis=3, keepdims=True)  # b*7*7*1

    box_p = K.cast(best_iou >= best_p, K.dtype(best_p))  # b*7*7*2

    noobj_loss = lambda_noobj * (1 - box_p * true_conf) * K.square(
        0 - pred_confs)  # b*7*7*2
    obj_loss = box_p * true_conf * K.square(1 - pred_confs)  # b*7*7*2

    # Confidence Loss
    conf_loss = K.sum(noobj_loss + obj_loss)

    # Class Loss
    class_loss = true_conf * K.square(true_class - pred_class)  # b*7*7*20
    class_loss = K.sum(class_loss)

    # Box Loss
    pred_box_xy = K.concatenate([
        K.expand_dims(pred_box1[..., :2], axis=3),
        K.expand_dims(pred_box2[..., :2], axis=3)
    ],
                                axis=3)  # b*7*7*2*2
    pred_box_wh = K.concatenate([
        K.expand_dims(pred_box1[..., 2:], axis=3),
        K.expand_dims(pred_box2[..., 2:], axis=3)
    ],
                                axis=3)  # b*7*7*2*2
    true_box_xy = K.expand_dims(true_box[..., :2], axis=3)  # b*7*7*1*2
    true_box_wh = K.expand_dims(true_box[..., 2:], axis=3)  # b*7*7*1*2
    box_p = K.expand_dims(box_p)  # b*7*7*1*1
    true_conf = K.expand_dims(true_conf)  # b*7*7*1*1

    coord_loss = lambda_coord * box_p * true_conf * K.square(
        (true_box_xy - pred_box_xy))  # b*7*7*2*2
    line_loss = lambda_coord * box_p * true_conf * K.square(
        (K.sqrt(true_box_wh) - K.sqrt(pred_box_wh)))  # b*7*7*2*2
    box_loss = K.sum(coord_loss + line_loss)

    loss = box_loss + conf_loss + class_loss

    return loss
Example #19
    def call(self, inputs):

        # import pdb; pdb.set_trace()
        # To channels first (NHWC -> NCHW)
        x = tf.transpose(inputs[0], [0, 3, 1, 2])

        # Get weight and bias modulations
        # Make sure w's shape is compatible with self.kernel
        # print('www', inputs[1])
        w = K.expand_dims(K.expand_dims(K.expand_dims(inputs[1], axis=1),
                                        axis=1),
                          axis=-1)

        # Add minibatch layer to weights
        wo = K.expand_dims(self.kernel, axis=0)

        # Modulate
        weights = wo * (w + 1)

        # print('weights 1', weights.shape, wo.shape, w.shape)

        # Demodulate
        if self.demod:
            d = K.sqrt(
                K.sum(K.square(weights), axis=[1, 2, 3], keepdims=True) + 1e-8)
            weights = weights / d
        # print('weights 2', weights.shape)

        # Changed because CPU mode does not support groups and NCHW
        # Reshape/scale input
        # x = tf.reshape(x,
        #                [1, -1, x.shape[2], x.shape[3]
        #                 ])  # Fused => reshape minibatch to convolution groups.
        # w = tf.reshape(
        #     tf.transpose(weights, [1, 2, 3, 0, 4]),
        #     [weights.shape[3], weights.shape[1], weights.shape[2], -1])

        # NCHW --> NHWC
        # before x: (1, None, 4, 4) w: (96, 3, 3, None)
        # after x:
        x = tf.transpose(x, [0, 2, 3, 1])
        # w = tf.transpose(w, [1, 2, 0, 3])
        # print(x.shape, w.shape)
        # import pdb; pdb.set_trace()
        rets = []
        for i in range(16):
            ret = tf.nn.conv2d(x[i:i + 1],
                               weights[i],
                               strides=self.strides,
                               padding="SAME",
                               data_format="NHWC")
            rets.append(ret)
        x = tf.keras.layers.Concatenate(axis=0)(rets)

        # NHWC --> NCHW
        # x = tf.transpose(x, [0, 3, 1, 2])

        # # Reshape/scale output.
        # x = tf.reshape(
        #     x, [-1, self.filters, x.shape[2], x.shape[3]
        #         ])  # Fused => reshape convolution groups back to minibatch.
        # x = tf.transpose(x, [0, 2, 3, 1])

        return x
Example #20
def alpha_prediction_loss(y_true, y_pred):
    mask = y_true[:, :, :, 1]
    diff = y_pred[:, :, :, 0] - y_true[:, :, :, 0]
    diff = diff * mask
    num_pixels = K.sum(mask)
    return K.sum(K.sqrt(K.square(diff) + epsilon_sqr)) / (num_pixels + epsilon)
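The sqrt(diff² + ε²) term is a Charbonnier (smooth-L1) penalty: it behaves like |diff| away from zero but keeps a finite gradient at zero, and dividing by the masked pixel count averages the loss over the unknown alpha region only (epsilon and epsilon_sqr are constants defined elsewhere in that project). A quick numeric illustration with an assumed ε:

import numpy as np

eps = 1e-6
diff = np.array([-0.5, 0.0, 0.2])
print(np.sqrt(diff**2 + eps**2))   # ~[0.5, 1e-06, 0.2]: roughly |diff|, but smooth at 0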
Example #21
def mse_loss(x):
    y_true, y_pred = x
    # take the mean squared difference before the sqrt; sqrt of a raw (possibly negative) difference is undefined
    return K.sqrt(K.mean(K.square(y_true - y_pred)))
Example #22
def rmse(y_true, y_pred):
    return sqrt(mean(square(y_pred - y_true)))
Example #23
def euclidean_distance(vects):
    x, y = vects
    sum_square = K.sum(K.square(x - y), axis=1, keepdims=True)
    return K.sqrt(K.maximum(sum_square, K.epsilon()))
Example #24
def l2row(self, X):
    N = kb.sqrt(kb.sum(X**2, axis=1) + self.epsilon)
    Y = kb.transpose(kb.transpose(X) / N)
    return Y
Example #25
def root_mean_squared_error(y_true, y_pred):
    return K.sqrt(K.mean(K.square(y_pred - y_true)))
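Functions with this (y_true, y_pred) signature can be passed straight to compile as a custom loss or metric. A minimal sketch assuming a generic regression model (the layer sizes are placeholders):

from tensorflow.keras import layers, models
import tensorflow.keras.backend as K

def root_mean_squared_error(y_true, y_pred):
    # same function as above, repeated so the sketch is self-contained
    return K.sqrt(K.mean(K.square(y_pred - y_true)))

model = models.Sequential([layers.Dense(1, input_shape=(8,))])
model.compile(optimizer='adam', loss='mse', metrics=[root_mean_squared_error])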
Example #26
def l3(y_true, y_pred):
    x = K.mean(K.sqrt(K.sum(K.square(y_true-y_pred), axis=-1)))
    return x
Example #27
def call(self, inputs):
    (target, wrt) = inputs
    grad = tf.gradients(target, wrt)[0]
    return K.sqrt(
        tf.math.reduce_sum(
            K.batch_flatten(K.square(grad)), axis=1, keepdims=True)) - 1
Example #28
def normalize_by_dim(x,dim=1024.):
    d=tf.convert_to_tensor(dim)
    return x/K.sqrt(d)
Example #29
def custom_loss_rmse(y_true,y_pred):
    loss=K.sqrt(K.mean(K.square(y_pred-y_true),axis=None))  #+K.sum(0*K.abs(penalty)) #can adjust the penalty weight
    return loss
Example #30
def rmse(y_true, y_pred):
    import tensorflow.keras.backend as K
    return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))
Example #31
def rmse2(true, pred):
    assert pred.shape[0] == true.shape[0]
    loss_1 = K.sqrt(K.mean(K.square(true[:, 0:1] - pred) + 1e-18))
    return loss_1 * true[:, 1:2] / K.sum(true[:, 1:2])