Example #1
def style_loss(self, conv):
    """Accumulate the style loss over a list of per-layer feature tuples."""
    s_loss_ = 0.0
    for conv_output in conv:
        # Difference of Gram matrices, normalised by 4 * (number of elements)^2.
        s_loss_ += l2_loss(
            self.Gram_matrix(conv_output[1]) -
            self.Gram_matrix(conv_output[2])) / (4.0 * tf.square(
                tf.reduce_prod(
                    tf.cast(tf.shape(conv_output[1]), tf.float32))))
    return s_loss_
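# `self.Gram_matrix` is not shown in this excerpt, and `l2_loss` is presumably
# tf.nn.l2_loss. A minimal sketch of a batched Gram-matrix computation, assuming
# feature maps of shape (batch, height, width, channels); everything below is
# an assumption, not the original implementation:
import tensorflow as tf

def Gram_matrix(feature_map):
    """Channel-wise correlation matrix of a (batch, H, W, C) feature map."""
    shape = tf.shape(feature_map)
    b, h, w, c = shape[0], shape[1], shape[2], shape[3]
    # Flatten the spatial dimensions: (batch, H*W, C).
    f = tf.reshape(feature_map, (b, h * w, c))
    # Inner products between channel activations: (batch, C, C).
    return tf.matmul(f, f, transpose_a=True)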
Example #2
import numpy as np
import tensorflow as tf
from tensorflow import nn
from tensorflow.keras import losses

# np.random.seed(0)
# y_true = tf.convert_to_tensor(np.random.randint(0, 2, (16, 1)), dtype=tf.float32)
# tf.random.set_seed(0)
# y_pred = tf.random.uniform((16, 1))
# print(losses.binary_crossentropy(y_true, y_pred, True, 0.1))
# print(_binary_crossentropy(y_true, y_pred, True, 0.1))
#
# np.random.seed(0)
# y_true = tf.convert_to_tensor(np.random.randint(0, 2, (16, 2)), dtype=tf.float32)
# tf.random.set_seed(0)
# y_pred = tf.random.uniform((16, 2))
# print(losses.binary_crossentropy(y_true, y_pred, True, 0.1))
# print(_binary_crossentropy(y_true, y_pred, True, 0.1))
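# The `_binary_crossentropy` referenced above is not included in the excerpt.
# A minimal sketch, assuming it mirrors the Keras signature (from_logits and
# label_smoothing as the third and fourth positional arguments):
def _binary_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0.0):
    y_true = tf.cast(y_true, y_pred.dtype)
    if label_smoothing > 0:
        # Smooth the hard 0/1 labels towards 0.5.
        y_true = y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing
    if from_logits:
        bce = tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true, logits=y_pred)
    else:
        eps = 1e-7
        y_pred = tf.clip_by_value(y_pred, eps, 1.0 - eps)
        bce = -(y_true * tf.math.log(y_pred) +
                (1.0 - y_true) * tf.math.log(1.0 - y_pred))
    # Keras averages over the last axis (the per-sample outputs).
    return tf.reduce_mean(bce, axis=-1)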


def _l2_loss(w):
    """L2 regularization term (matches tf.nn.l2_loss).

    :param w: weight tensor.
    :return: scalar tensor, sum(w ** 2) / 2.
    """
    return tf.reduce_sum(w**2 / 2)


# Typical usage inside a training step:
# loss_regularization = []
# loss_regularization.append(nn.l2_loss(w1))
# loss_regularization.append(nn.l2_loss(w2))
# loss = loss_ce + sum(loss_regularization) * weight_decay

# Sanity check: the custom implementation against the built-in.
x = tf.random.normal((10, 20))
print(nn.l2_loss(x))
print(_l2_loss(x))
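# Both prints show the same scalar: tf.nn.l2_loss(t) is defined as
# sum(t ** 2) / 2, which is exactly what _l2_loss computes.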
Example #3
import numpy as np
import tensorflow as tf
from tensorflow import nn
from tensorflow.keras import losses

# epochs, lr, weight_decay and the train_db dataset are assumed to be
# defined earlier in the original script.
w1 = tf.Variable(tf.random.truncated_normal((2, 20), 0, 0.1), dtype=tf.float32)
b1 = tf.Variable(tf.constant(0.01, shape=(20, )))
w2 = tf.Variable(tf.random.truncated_normal((20, 1), 0, 0.1), dtype=tf.float32)
b2 = tf.Variable(tf.constant(0.01, shape=(1, )))
params = [w1, b1, w2, b2]
for epoch in range(epochs):
    loss_all = 0.
    for x, y_true in train_db:
        with tf.GradientTape() as tape:
            # Two-layer MLP forward pass; y_pred are logits.
            h = nn.relu(x @ w1 + b1)
            y_pred = h @ w2 + b2
            loss_ce = tf.reduce_mean(
                losses.binary_crossentropy(y_true, y_pred, True))
            # L2 weight decay on the weight matrices (not the biases).
            loss_regularization = [nn.l2_loss(w1), nn.l2_loss(w2)]
            loss = loss_ce + sum(loss_regularization) * weight_decay
        grads = tape.gradient(loss, params)
        loss_all += loss.numpy()
        # Plain SGD update for every parameter.
        for param, grad in zip(params, grads):
            param.assign_sub(lr * grad)
    print("Epoch: %d | Loss: %.6f" % (epoch, loss_all / len(train_db)))

# Prediction: evaluate the trained network on a dense 2-D grid.
print("*******predict*******")
_x = np.arange(-3, 3, 0.1)
x_grid, y_grid = np.meshgrid(_x, _x)
x_test = np.stack([x_grid.ravel(), y_grid.ravel()], axis=1)  # shape: (3600, 2)
x_test = tf.cast(x_test, tf.float32)
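# The excerpt stops after preparing x_test. A minimal continuation, assuming
# the trained two-layer network is reused for inference; the matplotlib
# plotting below is illustrative, not from the original:
h = nn.relu(x_test @ w1 + b1)
probs = tf.sigmoid(h @ w2 + b2)  # logits -> probabilities
probs_grid = tf.reshape(probs, x_grid.shape)

import matplotlib.pyplot as plt
plt.contour(x_grid, y_grid, probs_grid.numpy(), levels=[0.5])  # decision boundary
plt.show()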
Example #4
def content_loss(self, conv):
    """Accumulate the content loss over a list of per-layer feature tuples."""
    c_loss_ = 0.0
    for conv_output in conv:
        # L2 distance between the first and third feature maps of each layer.
        c_loss_ += l2_loss(conv_output[0] - conv_output[2])
    return c_loss_
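# Read together with Example #1, the per-layer tuples appear to be
# (content_features, style_features, generated_features). In a style-transfer
# setup the two losses are typically combined with weighting factors; a hedged
# sketch (the method name and weights below are assumptions):
def total_loss(self, conv, content_weight=1.0, style_weight=1e-3):
    return (content_weight * self.content_loss(conv) +
            style_weight * self.style_loss(conv))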