Example #1
def train(generator,
          optimizer,
          model,
          num_epoches=500,
          verbose=10,
          fname="weights"):
    def grads_fn(images_tensor, list_y_trues):
        # Forward pass and loss under the tape, then backpropagate
        with tf.GradientTape() as tape:
            logits = model(images_tensor)
            loss = loss_fn(list_y_trues, logits)
        return tape.gradient(loss, model.variables)

    min_loss_value = np.inf
    for i in range(num_epoches):
        # Each "epoch" here pulls one batch: images plus targets for the three YOLO scales
        x_batch, yolo_1, yolo_2, yolo_3 = generator[i]

        images_tensor = tf.constant(x_batch.astype(np.float32))
        list_y_trues = [
            tf.constant(yolo_1.astype(np.float32)),
            tf.constant(yolo_2.astype(np.float32)),
            tf.constant(yolo_3.astype(np.float32))
        ]

        grads = grads_fn(images_tensor, list_y_trues)
        optimizer.apply_gradients(zip(grads, model.variables))

        if i == 0 or (i + 1) % verbose == 0:
            logits = model(images_tensor)
            loss_value = loss_fn(list_y_trues, logits)
            print("{}-th loss = {}".format(i, loss_value))

            # Checkpoint whenever the monitored loss improves
            if min_loss_value > loss_value:
                print("    update weight {}".format(loss_value))
                min_loss_value = loss_value
                model.save_weights("{}.h5".format(fname))
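A rough usage sketch for this train function. The generator and model are assumed to be built elsewhere in the project, and the Adam optimizer with this learning rate is an illustrative choice, not necessarily the author's:

import tensorflow as tf

# `generator` and `model` are assumed to already exist; the generator must
# yield (x_batch, yolo_1, yolo_2, yolo_3) per index, and the model must output
# logits that loss_fn can compare against the three YOLO-scale targets.
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4)
train(generator, optimizer, model, num_epoches=500, verbose=10, fname="weights")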
Example #2
def _grad_fn(model,
             images_tensor,
             list_y_true,
             list_y_pred=None):
    """Compute the gradients and the loss for one batch."""
    with tf.GradientTape() as tape:
        list_y_pred = model(images_tensor)
        loss = loss_fn(list_y_true, list_y_pred)
        # stop_recording lets tape.gradient run while the tape context is
        # still open, without recording the gradient computation itself
        with tape.stop_recording():
            grads = tape.gradient(loss, model.trainable_variables)
    return grads, loss
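A minimal caller for _grad_fn, assuming model, images_tensor, and list_y_true have already been prepared; the Adam optimizer below is an illustrative assumption:

import tensorflow as tf

optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4)  # assumed optimizer choice
grads, loss = _grad_fn(model, images_tensor, list_y_true)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
print("loss = {}".format(loss.numpy()))  # loss is an eager tensor here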
Example #3
def _loop_validation(model, iterator):
    # one epoch
    n_steps = iterator.steps_per_epoch
    loss_value = 0
    for _ in range(n_steps):
        xs, yolo_1, yolo_2, yolo_3 = iterator.get_next()
        ys = [yolo_1, yolo_2, yolo_3]
        ys_ = model(xs)
        loss_value += loss_fn(ys, ys_)
    loss_value /= n_steps
    return loss_value
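The averaged validation loss is typically compared across epochs to decide when to checkpoint, in the same spirit as the weight saving in Example #1. A minimal sketch, assuming a hypothetical train_one_epoch helper and separate train/validation iterators exist in the surrounding project:

import numpy as np

best_val_loss = np.inf
for epoch in range(num_epochs):                       # num_epochs is an assumed setting
    train_one_epoch(model, train_iterator)            # hypothetical training helper
    val_loss = _loop_validation(model, val_iterator)  # assumed validation iterator
    if val_loss < best_val_loss:                      # keep only the best weights
        best_val_loss = val_loss
        model.save_weights("weights.h5")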
Example #4
def validation_loop(model, generator):
    loss_value = 0
    batchesCount = generator.datasetBatchesCount()
    assert batchesCount > 0
    for xs, yolo_1, yolo_2, yolo_3 in tqdm(generator.batches(), 'Validating',
                                           batchesCount):
        ys_gt = [yolo_1, yolo_2, yolo_3]
        ys_predicted = model(xs)
        loss_value += loss_fn(ys_gt, ys_predicted)
    avgLossValue = loss_value / batchesCount
    return avgLossValue
Example #5
def _loop_validation(config_parser, model, generator, epoch, writer):
    # one epoch
    n_steps = generator.steps_per_epoch
    loss_value = 0
    for i in range(n_steps):
        xs, yolo_1, yolo_2, yolo_3, true_boxes = generator.next_batch()
        ys = [yolo_1, yolo_2, yolo_3]
        logits = model(xs)
        loss_value += loss_fn(ys, logits)
    loss_value /= n_steps

    # Log validation loss
    step = (epoch + 1) * n_steps
    tf.summary.scalar("validation_loss", loss_value, step=step)

    # Log input validation image with bounding boxes
    validation_image = xs[0] * 255  # drawing utilities expect pixel values in [0, 255]
    for box in true_boxes[0]:
        x1, y1, x2, y2 = box[0], box[1], box[2], box[3]
        draw_bounding_box_on_image_array(validation_image,
                                         y1,
                                         x1,
                                         y2,
                                         x2,
                                         use_normalized_coordinates=False)
    tf.summary.image("validation_image", [validation_image / 255.], step=step)

    # Log prediction with bounding boxes
    image = xs[0] * 255
    detector = config_parser.create_detector(model)
    boxes, labels, probs = detector.detect(image, 0.8)
    visualize_boxes(image, boxes, labels, probs, config_parser.get_labels())
    tf.summary.image("validation_prediction", [image / 255.], step=step)

    writer.flush()

    return loss_value
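Because the tf.summary calls above write to the default summary writer, this function is expected to run inside a writer context. A minimal sketch of that setup, where the log directory name is only an example:

import tensorflow as tf

writer = tf.summary.create_file_writer("logs/validation")  # example log directory
with writer.as_default():
    val_loss = _loop_validation(config_parser, model, generator, epoch, writer)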
Example #6
def _grad_fn(model, images_tensor, list_y_trues):
    with tf.GradientTape() as tape:
        logits = model(images_tensor)
        loss = loss_fn(list_y_trues, logits)
    return tape.gradient(loss, model.variables)
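This is the same gradient computation used inside the training loop of Example #1. As a sketch of one common variation (not part of the original code), the step can be wrapped in tf.function so it runs as a traced graph; the optimizer below is an illustrative assumption:

import tensorflow as tf

optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4)  # assumed optimizer

@tf.function  # trace the step for graph execution; assumes consistent input shapes
def train_step(images_tensor, list_y_trues):
    grads = _grad_fn(model, images_tensor, list_y_trues)
    optimizer.apply_gradients(zip(grads, model.variables))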