import tensorflow as tf
from tensorflow.keras import losses


def dof_loss(y_true, y_pred):
    # Collapse the two angular axes of a (batch, H, W, U, V, C) light field
    # into a single depth-of-field image, then score it with a weighted
    # SSIM + MAE mix. lf_zero_to_one (defined elsewhere) rescales the light
    # fields into [0, 1] so that SSIM's max_val=1.0 assumption holds.
    y_true = lf_zero_to_one(y_true)
    y_pred = lf_zero_to_one(y_pred)
    dof_true = tf.reduce_mean(y_true, axis=(3, 4))
    dof_pred = tf.reduce_mean(y_pred, axis=(3, 4))
    mae = tf.reduce_mean(losses.MAE(dof_true, dof_pred), axis=(1, 2))
    ssim = tf.image.ssim(dof_true, dof_pred, max_val=1.0)
    # 0.85 * SSIM distance + 0.15 * MAE, summed over the batch.
    return tf.reduce_sum(0.85 * (1.0 - ssim) / 2.0 + 0.15 * mae)
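# A quick smoke test helps pin down the expected tensor layout. This sketch
# is illustrative only: lf_zero_to_one is not shown in the snippet above, so
# a hypothetical min-max stand-in is defined here, and the
# (batch, H, W, U, V, C) shape is inferred from the reduction axes.
def lf_zero_to_one(lf):
    # Hypothetical stand-in for the helper used above: rescale each batch
    # element into [0, 1].
    axes = list(range(1, len(lf.shape)))
    lo = tf.reduce_min(lf, axis=axes, keepdims=True)
    hi = tf.reduce_max(lf, axis=axes, keepdims=True)
    return (lf - lo) / (hi - lo + 1e-8)

lf_true = tf.random.uniform((2, 64, 64, 5, 5, 3))  # (batch, H, W, U, V, C)
lf_pred = tf.random.uniform((2, 64, 64, 5, 5, 3))
print(float(dof_loss(lf_true, lf_pred)))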
def photometric_loss(y_true, y_pred):
    # Per-view photometric loss over the full light field: MAE is computed
    # on the raw (batch, H, W, U, V, C) tensors, while SSIM needs the
    # angular axes moved ahead of the spatial ones so that each
    # sub-aperture view is scored as an (H, W, C) image.
    y_true = lf_zero_to_one(y_true)
    y_pred = lf_zero_to_one(y_pred)
    mae = tf.reduce_mean(losses.MAE(y_true, y_pred), axis=(1, 2))
    y_true = tf.transpose(y_true, [0, 3, 4, 1, 2, 5])
    y_pred = tf.transpose(y_pred, [0, 3, 4, 1, 2, 5])
    ssim = tf.image.ssim(y_pred, y_true, max_val=1.0)
    return tf.reduce_sum(0.85 * (1.0 - ssim) / 2.0 + 0.15 * mae)
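# Reusing the tensors from the sketch above; photometric_loss scores every
# sub-aperture view rather than the angular average:
print(float(photometric_loss(lf_true, lf_pred)))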
def train(model, train_db, optimizer, normed_test_data, test_labels):
    # Train on MSE, but track MAE on both splits for reporting.
    train_mae_losses = []
    test_mae_losses = []
    for epoch in range(200):
        for step, (x, y) in enumerate(train_db):
            with tf.GradientTape() as tape:
                out = model(x)
                loss = tf.reduce_mean(losses.MSE(y, out))
                mae_loss = tf.reduce_mean(losses.MAE(y, out))
            if step % 10 == 0:
                print(epoch, step, float(loss))
            grads = tape.gradient(loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))
        # Record the last batch's train MAE and the full test MAE per epoch.
        train_mae_losses.append(float(mae_loss))
        out = model(tf.constant(normed_test_data.values))
        test_mae_losses.append(tf.reduce_mean(losses.MAE(test_labels, out)))
    return train_mae_losses, test_mae_losses
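# A hedged usage sketch for train(): the loop implies train_db is a batched
# tf.data.Dataset and normed_test_data a pandas DataFrame (note the .values
# access). All data and the model below are toy stand-ins, not the original
# pipeline:
import numpy as np
import pandas as pd

normed_train_data = pd.DataFrame(np.random.randn(256, 9).astype('float32'))
train_labels = np.random.randn(256, 1).astype('float32')
normed_test_data = pd.DataFrame(np.random.randn(64, 9).astype('float32'))
test_labels = np.random.randn(64, 1).astype('float32')

train_db = tf.data.Dataset.from_tensor_slices(
    (normed_train_data.values, train_labels)).shuffle(256).batch(32)
model = tf.keras.Sequential([tf.keras.layers.Dense(64, activation='relu'),
                             tf.keras.layers.Dense(1)])
optimizer = tf.keras.optimizers.RMSprop(1e-3)
train_hist, test_hist = train(model, train_db, optimizer,
                              normed_test_data, test_labels)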
from tensorflow.keras import backend as K


def total_loss(boxes_gt, masks_gt, input_p, box_pred, mask_pred, rel_scores,
               loss):
    # Weighted sum of a box-regression loss (MSE or MAE), a mask binary
    # cross-entropy, and a predicate classification loss.
    y1 = K.flatten(boxes_gt)
    y2 = K.flatten(masks_gt)
    y1_pred = K.flatten(box_pred)
    y2_pred = K.flatten(mask_pred)
    input_p = K.expand_dims(input_p, axis=0)
    if loss == 'MSE':
        box_loss = losses.MSE(y1, y1_pred)
    else:
        box_loss = losses.MAE(y1, y1_pred)
    mask_loss = losses.BinaryCrossentropy(from_logits=True)(y2, y2_pred)
    # NOTE: cos_sim is computed but never used in the returned value.
    cos_sim = losses.CosineSimilarity()(boxes_gt, box_pred)
    loss_predicate = losses.categorical_crossentropy(
        input_p, K.reshape(rel_scores, input_p.shape))
    return K.mean(box_loss * 10 + 0.01 * mask_loss + 0.001 * loss_predicate)
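# A minimal smoke test with toy shapes (all tensors here are hypothetical;
# the real model presumably produces the box/mask/relation predictions):
boxes_gt = tf.random.uniform((8, 4))
box_pred = tf.random.uniform((8, 4))
masks_gt = tf.cast(tf.random.uniform((8, 16, 16)) > 0.5, tf.float32)
mask_pred = tf.random.uniform((8, 16, 16))  # treated as logits
input_p = tf.one_hot(tf.random.uniform((6,), maxval=10, dtype=tf.int32), 10)
rel_scores = tf.random.uniform((6, 10))
print(float(total_loss(boxes_gt, masks_gt, input_p, box_pred,
                       mask_pred, rel_scores, loss='MSE')))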
# # Test the untrained model
# example_batch = normed_train_data[:10]
# example_result = model.predict(example_batch)
# example_result
train_mae_losses = []
test_mae_losses = []
for epoch in range(200):
    for step, (x, y) in enumerate(train_db):
        with tf.GradientTape() as tape:
            out = model(x)
            # MSE: mean squared error (the training objective)
            loss = tf.reduce_mean(losses.MSE(y, out))
            # MAE: mean absolute error (tracked for reporting)
            mae_loss = tf.reduce_mean(losses.MAE(y, out))
        if step % 10 == 0:
            print(epoch, step, float(loss))
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
    train_mae_losses.append(float(mae_loss))
    # Run the test inputs through the model...
    out = model(tf.constant(normed_test_data.values))
    # ...and record the resulting test MAE
    test_mae_losses.append(tf.reduce_mean(losses.MAE(test_labels, out)))

plt.figure()
plt.xlabel('Epoch')
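# The snippet above cuts off mid-plot; a plausible completion, assuming the
# intent is to compare the recorded train and test MAE curves per epoch:
plt.ylabel('MAE')
plt.plot(train_mae_losses, label='Train MAE')
plt.plot(test_mae_losses, label='Test MAE')
plt.legend()
plt.show()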
# (The snippet begins mid-class; the header and fc1 below are reconstructed,
# with fc1's width of 64 assumed to match fc2.)
class DNN(tf.keras.Model):
    def __init__(self):
        super().__init__()
        self.fc1 = layers.Dense(64, activation='relu')
        self.fc2 = layers.Dense(64, activation='relu')
        self.fc3 = layers.Dense(1)

    def call(self, inputs, training=None, mask=None):
        x = self.fc1(inputs)
        x = self.fc2(x)
        return self.fc3(x)


model = DNN()
model.build(input_shape=(None, 9))
model.summary()
optimizer = tf.keras.optimizers.RMSprop(1e-3)
L = []
loss_meter = metrics.Mean()
for epoch in range(20):
    loss_meter.reset_states()
    for step, (x, y) in enumerate(train_db):
        with tf.GradientTape() as tape:
            out = model(x)
            loss = tf.reduce_mean(losses.MAE(y, out))
            if (step + 1) % 10 == 0:
                print(epoch, step + 1, loss.numpy())
                L.append(loss.numpy())
            loss_meter.update_state(float(loss))
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
plt.plot(L)
plt.show()
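# Aside: metrics.Mean keeps a running average across update_state() calls
# until reset, so reading loss_meter.result() at the end of an epoch gives a
# smoother signal than the every-10-steps samples collected in L. A tiny
# self-contained demonstration:
from tensorflow.keras import metrics

m = metrics.Mean()
m.update_state(2.0)
m.update_state(4.0)
print(float(m.result()))  # 3.0 -- mean of everything seen since last reset
m.reset_states()          # start a fresh accumulation (e.g. a new epoch)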
def train(self, batch_size=64, epochs=4):
    criterion = losses.mean_squared_error  # NOTE: defined but never used
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
    loops = self.index // batch_size
    df = pd.read_csv(f'{self.directory}/steering.csv')
    for e in range(epochs):
        for i in range(loops):
            # Sample a random mini-batch of perspective-warped frames and
            # their steering angles.
            B = np.random.randint(0, self.index, size=batch_size)
            X = np.zeros((batch_size, 160, 320, 3), dtype=np.float32)
            S = np.zeros((batch_size, 1))
            for b in range(batch_size):
                X[b] = Warp(mpimg.imread(f'{self.directory}/img/{B[b]}.jpg'),
                            src, target) / 256
                S[b] = df.iloc[B[b]].steering
                # Augmentation: randomly mirror the frame and negate the
                # steering angle.
                if np.random.choice([True, False]):
                    X[b] = np.flip(X[b], 1)
                    S[b] = -S[b]
            if self.net.decode:
                # First pass: update the encoder and prediction head on the
                # steering (MSE) loss.
                with tf.GradientTape() as t:
                    output, dec = self.net(X)
                    loss1 = losses.MSE(S, output)
                variables = (self.net.encoder.trainable_variables
                             + self.net.predict_conv.trainable_variables
                             + self.net.predict.trainable_variables)
                grads = t.gradient(loss1, variables)
                optimizer.apply_gradients(zip(grads, variables))
                # Second pass: update the encoder and decoder on the
                # reconstruction (MAE) loss.
                with tf.GradientTape() as t:
                    output, dec = self.net(X)
                    loss2 = losses.MAE(X, dec)
                variables = (self.net.encoder.trainable_variables
                             + self.net.decoder.trainable_variables)
                grads = t.gradient(loss2, variables)
                optimizer.apply_gradients(zip(grads, variables))
                print(f"epochs {self.net.epochs} | "
                      f"loss1 = {np.sum(loss1):.2f} | "
                      f"loss2 = {np.sum(loss2):.2f}\n")
            else:
                with tf.GradientTape() as t:
                    output = self.net(X)
                    loss = losses.MSE(S, output)
                grads = t.gradient(loss, self.net.trainable_variables)
                optimizer.apply_gradients(
                    zip(grads, self.net.trainable_variables))
                print(f"epochs {self.net.epochs} | "
                      f"loss1 = {np.sum(loss):.2f}\n")
        self.net.epochs += 1
        self.net.save_model('model_check_points')
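# Note on the losses above: losses.MSE / losses.MAE on (batch, 1) tensors
# return a per-sample vector, not a scalar, which is why the prints use
# np.sum(loss); tape.gradient of a non-scalar target implicitly sums it.
# A quick self-contained demonstration:
import tensorflow as tf
from tensorflow.keras import losses

S = tf.constant([[0.1], [0.3]])
out = tf.constant([[0.2], [0.0]])
per_sample = losses.MSE(S, out)          # shape (2,): one value per sample
print(per_sample.numpy())                # [0.01 0.09]
print(float(tf.reduce_sum(per_sample)))  # what the gradient effectively uses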
def photometric_loss_center(y_true, y_pred):
    # Same SSIM + MAE mix as above, applied directly to the central view
    # (plain image batches), so no angular reduction or transpose is needed.
    y_true = lf_zero_to_one(y_true)
    y_pred = lf_zero_to_one(y_pred)
    mae = tf.reduce_mean(losses.MAE(y_true, y_pred), axis=(1, 2))
    ssim = tf.image.ssim(y_true, y_pred, max_val=1.0)
    return tf.reduce_sum(0.85 * (1.0 - ssim) / 2.0 + 0.15 * mae)
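# Smoke test (with lf_zero_to_one stubbed as in the first sketch): the
# "center" variant applies SSIM directly, so it expects plain image batches
# of shape (batch, H, W, C) rather than 6-D light fields:
img_true = tf.random.uniform((2, 64, 64, 3))
img_pred = tf.random.uniform((2, 64, 64, 3))
print(float(photometric_loss_center(img_true, img_pred)))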