Example #1
import tensorflow as tf

# `ssd`, `ssd_prediction`, `MakeGT`, `loss`, `optimizer`, `loss_metric`,
# and NUM_CLASSES are defined elsewhere in the training script.
def train_step(batch_images, batch_labels):
    with tf.GradientTape() as tape:
        # Forward pass: collect the raw feature maps from each prediction layer.
        pred = ssd(batch_images, training=True)
        # Rearrange the feature-map outputs into per-anchor predictions
        # (NUM_CLASSES + 1 accounts for the background class).
        output = ssd_prediction(feature_maps=pred, num_classes=NUM_CLASSES + 1)
        # Build the ground-truth boxes matched to the anchors.
        gt = MakeGT(batch_labels, pred)
        gt_boxes = gt.generate_gt_boxes()
        loss_value = loss(y_true=gt_boxes, y_pred=output)
    # Backward pass: apply gradients and track the running loss.
    gradients = tape.gradient(loss_value, ssd.trainable_variables)
    optimizer.apply_gradients(grads_and_vars=zip(gradients, ssd.trainable_variables))
    loss_metric.update_state(values=loss_value)
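For context, a minimal sketch of how such a step might be driven from a training loop. The `train_dataset` iterable and `EPOCHS` constant are assumptions for illustration, not part of the original snippet.

# Hypothetical driver loop; `train_dataset` and EPOCHS are assumed,
# as are the `ssd`, `optimizer`, and `loss_metric` globals above.
for epoch in range(EPOCHS):
    loss_metric.reset_states()
    for batch_images, batch_labels in train_dataset:
        train_step(batch_images, batch_labels)
    print("epoch {}: mean loss = {:.4f}".format(epoch + 1, loss_metric.result().numpy()))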
Example #2
import tensorflow as tf

# As in Example #1, the model, loss, optimizer, and metrics are module-level.
def train_step(batch_images, batch_labels):
    with tf.GradientTape() as tape:
        # Forward pass through the model's call(); returns the raw feature
        # maps of each prediction layer, e.g. one of shape [1, 19, 19, 150].
        pred = ssd(batch_images, training=True)
        print("pred", tf.shape(pred[1]), "\n")  # debug output
        # Rearrange the feature-map predictions into shape [1, 8732, 25].
        output = ssd_prediction(feature_maps=pred, num_classes=NUM_CLASSES + 1)
        # Compute the ground truth and generate the matched gt boxes.
        gt = MakeGT(batch_labels, pred)
        gt_boxes = gt.generate_gt_boxes()
        # This variant's loss also returns the classification and regression
        # components separately so they can be tracked as individual metrics.
        loss_value, cls_loss, reg_loss = loss(y_true=gt_boxes, y_pred=output)
    gradients = tape.gradient(loss_value, ssd.trainable_variables)
    optimizer.apply_gradients(grads_and_vars=zip(gradients, ssd.trainable_variables))
    loss_metric.update_state(values=loss_value)
    cls_loss_metric.update_state(values=cls_loss)
    reg_loss_metric.update_state(values=reg_loss)
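The only substantive difference from Example #1 is that the loss callable here returns the total loss along with its classification and regression parts. Below is a minimal sketch of a loss with that return signature, assuming a softmax cross-entropy classification term and a Huber (smooth-L1) regression term over the [1, 8732, 25] layout; the class/offset split and the weighting are assumptions, not taken from the original.

class MultiBoxLoss:
    """Hypothetical loss returning (total, cls, reg); not the original implementation."""

    def __init__(self, num_classes=21, reg_weight=1.0):
        self.num_classes = num_classes  # includes the background class
        self.reg_weight = reg_weight
        self.huber = tf.keras.losses.Huber()

    def __call__(self, y_true, y_pred):
        # Assumed layout: the first num_classes channels hold class scores,
        # the remaining 4 hold box-offset regressions.
        cls_true, reg_true = y_true[..., :self.num_classes], y_true[..., self.num_classes:]
        cls_pred, reg_pred = y_pred[..., :self.num_classes], y_pred[..., self.num_classes:]
        cls_loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=cls_true, logits=cls_pred))
        reg_loss = self.huber(reg_true, reg_pred)
        total = cls_loss + self.reg_weight * reg_loss
        return total, cls_loss, reg_loss

A production SSD loss would additionally apply hard negative mining and normalize by the number of matched anchors; this sketch omits both for brevity.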