Code Example #1
File: train.py  Project: Stevenzzz1996/MLLCV
def train_step(image_data, target):
    with tf.GradientTape() as tape:
        pred_result = model(image_data, training=True)
        giou_loss = conf_loss = prob_loss = 0

        # optimizing process
        for i in range(3):
            conv, pred = pred_result[i*2], pred_result[i*2+1]
            loss_items = compute_loss(pred, conv, *target[i], i)
            giou_loss += loss_items[0]
            conf_loss += loss_items[1]
            prob_loss += loss_items[2]

        total_loss = giou_loss + conf_loss + prob_loss

        gradients = tape.gradient(total_loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        tf.print("=> STEP %4d   lr: %.6f   giou_loss: %4.2f   conf_loss: %4.2f   "
                 "prob_loss: %4.2f   total_loss: %4.2f" %(global_steps, optimizer.lr.numpy(),
                                                          giou_loss, conf_loss,
                                                        prob_loss, total_loss))

        # update learning rate
        global_steps.assign_add(1)
        if global_steps < warmup_steps:
            lr = global_steps / warmup_steps * cfg.TRAIN.LR_INIT
        else:
            lr = cfg.TRAIN.LR_END + 0.5 * (cfg.TRAIN.LR_INIT - cfg.TRAIN.LR_END) * (
                (1 + tf.cos((global_steps - warmup_steps) / (total_steps - warmup_steps) * np.pi))
            )
        optimizer.lr.assign(lr.numpy())
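This and the later train_step examples update the learning rate with linear warmup followed by cosine decay toward cfg.TRAIN.LR_END. A minimal standalone sketch of that schedule (the function name and signature are illustrative, not part of the original project):

import numpy as np

def cosine_warmup_lr(step, warmup_steps, total_steps, lr_init, lr_end):
    # linear warmup from 0 to lr_init over the first warmup_steps
    if step < warmup_steps:
        return step / warmup_steps * lr_init
    # cosine decay from lr_init down to lr_end over the remaining steps
    progress = (step - warmup_steps) / (total_steps - warmup_steps)
    return lr_end + 0.5 * (lr_init - lr_end) * (1 + np.cos(progress * np.pi))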
Code Example #2
def yolo_loss(target, output):
    """计算损失,for循环计算三个采样率下的损失,注意:此处取三种采样率下的总损失而不是平均损失"""
    giou_loss=conf_loss=prob_loss=0
    for i in range(3):
        # pred.shape (8, 52, 52, 3, 25) -----  output[i].shape  (8, 52, 52, 75)
        pred = yolov3.decode(output[i], i)
        loss_items = yolov3.compute_loss(pred, output[i], *target[i], i)
        giou_loss += loss_items[0]
        conf_loss += loss_items[1]
        prob_loss += loss_items[2]
    return [giou_loss, conf_loss, prob_loss]
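The shape comment above shows yolov3.decode turning a raw (8, 52, 52, 75) conv output into an (8, 52, 52, 3, 25) prediction: 75 channels split into 3 anchors × (4 box coordinates + 1 objectness score + 20 class scores). A minimal sketch of just that reshape step, assuming TensorFlow (the real decode additionally applies the sigmoid/exp transforms to the box parameters):

import tensorflow as tf

def split_anchor_channels(conv_output, num_anchors=3, num_classes=20):
    # (batch, S, S, anchors * (5 + classes)) -> (batch, S, S, anchors, 5 + classes)
    shape = tf.shape(conv_output)
    return tf.reshape(
        conv_output,
        (shape[0], shape[1], shape[2], num_anchors, 5 + num_classes))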
Code Example #3
def train_step(image_data, target):
    with tf.GradientTape() as tape:
        pred_result = model(image_data, training=True)
        giou_loss = conf_loss = prob_loss = 0

        for i in range(3):
            """
            The output is order of conv, pred
            since output_tensors appended in the following order
                output_tensors.append(conv_tensor)
                output_tensors.append(pred_tensor)
            """
            conv, pred = pred_result[i * 2], pred_result[i * 2 + 1]
            loss_items = compute_loss(pred, conv, *target[i], i)
            giou_loss += loss_items[0]
            conf_loss += loss_items[1]
            prob_loss += loss_items[2]

        total_loss = giou_loss + conf_loss + prob_loss

        gradients = tape.gradient(total_loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        tf.print("=> STEP %4d lr: %.6f giou_loss: %4.2f conf_loss: %4.2f "\
                 "prob_loss: %4.2f total_loss: %4.2f" % (global_steps,
                                                         optimizer.lr.numpy(),
                                                         giou_loss,
                                                         conf_loss,
                                                         prob_loss,
                                                         total_loss))
        # update the learning rate
        global_steps.assign_add(1)
        if global_steps < warmup_steps:
            lr = global_steps / warmup_steps * cfg.TRAIN.LR_INIT
        else:
            lr = cfg.TRAIN.LR_END + 0.5 * (
                cfg.TRAIN.LR_INIT - cfg.TRAIN.LR_END) * ((1 + tf.cos(
                    (global_steps - warmup_steps) /
                    (total_steps - warmup_steps) * np.pi)))
        optimizer.lr.assign(lr.numpy())

        # write the summaries
        with writer.as_default():
            tf.summary.scalar("lr", optimizer.lr, step=global_steps)
            tf.summary.scalar("loss/total_loss", total_loss, step=global_steps)
            tf.summary.scalar("loss/giou_loss", giou_loss, step=global_steps)
            tf.summary.scalar("loss/conf_loss", conf_loss, step=global_steps)
            tf.summary.scalar("loss/prob_loss", prob_loss, step=global_steps)
        # force the summary writer to send any buffered data to storage
        writer.flush()
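The writer used here is a TensorBoard summary writer created once before training starts; a minimal sketch (the log directory path is illustrative):

import tensorflow as tf

writer = tf.summary.create_file_writer("./data/log")  # path is illustrative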
Code Example #4
File: train.py  Project: tusharkalecam/MLOps-YoloV3
    def __train_step(self, image_data, target, model, global_steps, writer,
                     optimizer, warmup_steps, total_steps):
        with tf.GradientTape() as tape:
            pred_result = model(image_data, training=True)
            giou_loss = conf_loss = prob_loss = 0

            for i in range(3):
                conv, pred = pred_result[i * 2], pred_result[i * 2 + 1]
                loss_items = compute_loss(pred, conv, *target[i], i)
                giou_loss += loss_items[0]
                conf_loss += loss_items[1]
                prob_loss += loss_items[2]

            total_loss = giou_loss + conf_loss + prob_loss

            gradients = tape.gradient(total_loss, model.trainable_variables)
            optimizer.apply_gradients(zip(gradients,
                                          model.trainable_variables))
            tf.print(
                "=> STEP %4d   lr: %.6f   giou_loss: %4.2f   conf_loss: %4.2f   "
                "prob_loss: %4.2f   total_loss: %4.2f" %
                (global_steps, optimizer.lr.numpy(), giou_loss, conf_loss,
                 prob_loss, total_loss))
            global_steps.assign_add(1)
            if global_steps < warmup_steps:
                lr = global_steps / warmup_steps * cfg.TRAIN.LR_INIT
            else:
                lr = cfg.TRAIN.LR_END + 0.5 * (
                    cfg.TRAIN.LR_INIT - cfg.TRAIN.LR_END) * ((1 + tf.cos(
                        (global_steps - warmup_steps) /
                        (total_steps - warmup_steps) * np.pi)))
            optimizer.lr.assign(lr.numpy())

            with writer.as_default():
                tf.summary.scalar("lr", optimizer.lr, step=global_steps)
                tf.summary.scalar("loss/total_loss",
                                  total_loss,
                                  step=global_steps)
                tf.summary.scalar("loss/giou_loss",
                                  giou_loss,
                                  step=global_steps)
                tf.summary.scalar("loss/conf_loss",
                                  conf_loss,
                                  step=global_steps)
                tf.summary.scalar("loss/prob_loss",
                                  prob_loss,
                                  step=global_steps)
            writer.flush()
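A hypothetical driver for the method above, assuming a trainset iterable of (image_data, target) batches; because __train_step is name-mangled, the call must happen inside the same class. All names besides __train_step and its parameters are assumptions:

    def train(self, trainset, model, global_steps, writer, optimizer,
              warmup_steps, total_steps, epochs):
        # illustrative training loop driving the private step function
        for epoch in range(epochs):
            for image_data, target in trainset:
                self.__train_step(image_data, target, model, global_steps,
                                  writer, optimizer, warmup_steps, total_steps)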
Code Example #5
File: plot.py  Project: Martin0Li/YOLOv3_keras
def yolo_loss(args):

    pred_result = args[:6]
    target = args[6]

    giou_loss = conf_loss = prob_loss = 0
    for i in range(3):
        conv, pred = pred_result[i * 2], pred_result[i * 2 + 1]
        target1, target2 = target[i * 2], target[i * 2 + 1]
        loss_items = compute_loss(pred, conv, target1, target2, i)
        giou_loss += loss_items[0]
        conf_loss += loss_items[1]
        prob_loss += loss_items[2]

    total_loss = giou_loss + conf_loss + prob_loss
    total_loss = tf.convert_to_tensor([total_loss])
    return total_loss
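The args layout (six conv/pred tensors followed by the targets) suggests this loss is wired into a Keras graph through a Lambda layer, which passes its whole input list to the wrapped function as a single argument. A hedged sketch, where model_body and target_input are assumptions:

from tensorflow.keras.layers import Lambda
from tensorflow.keras.models import Model

# hypothetical wiring: the Lambda hands the full list to yolo_loss as args
loss_tensor = Lambda(yolo_loss, output_shape=(1,), name="yolo_loss")(
    [*model_body.output, target_input])
train_model = Model([model_body.input, target_input], loss_tensor)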
Code Example #6
def train_step(image, target):
    # the forward pass must run inside the GradientTape
    with tf.GradientTape() as tape:
        output_layers = model(image, training=True)
        giou_loss = conf_loss = prob_loss = 0

        # optimizing process
        for i in range(3):
            conv, pred = output_layers[2 * i], output_layers[2 * i + 1]
            # passing *target[i] unpacks it into separate positional arguments;
            # a parameter declared with a single * collects them back into a tuple inside the function
            # https://blog.csdn.net/wangjvv/article/details/79703509
            loss_items = compute_loss(pred, conv, *target[i], i)
            giou_loss += loss_items[0]
            conf_loss += loss_items[1]
            prob_loss += loss_items[2]

        total_loss = giou_loss + conf_loss + prob_loss
        gradients = tape.gradient(total_loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        tf.print(
            "=> STEP %4d   lr: %.6f   giou_loss: %4.2f   conf_loss: %4.2f   "
            "prob_loss: %4.2f   total_loss: %4.2f" %
            (global_steps.numpy(), optimizer.lr.numpy(), giou_loss, conf_loss,
             prob_loss, total_loss))

        # update learning rate
        global_steps.assign_add(1)
        if global_steps < warmup_steps:
            lr = global_steps / warmup_steps * cfg.TRAIN.LR_INIT
        else:
            lr = cfg.TRAIN.LR_END + 0.5 * (
                cfg.TRAIN.LR_INIT - cfg.TRAIN.LR_END) * ((1 + tf.cos(
                    (global_steps - warmup_steps) /
                    (total_steps - warmup_steps) * np.pi)))
        # global_steps is a tensor
        optimizer.lr.assign(lr.numpy())

        # writing summary data
        with writer.as_default():
            tf.summary.scalar("lr", optimizer.lr, step=global_steps)
            tf.summary.scalar("loss/total_loss", total_loss, step=global_steps)
            tf.summary.scalar("loss/giou_loss", giou_loss, step=global_steps)
            tf.summary.scalar("loss/conf_loss", conf_loss, step=global_steps)
            tf.summary.scalar("loss/prob_loss", prob_loss, step=global_steps)
        writer.flush()
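The *target[i] unpacking flagged in the comment above works like this; a minimal illustration, using the element names given in the shape notes of Code Example #7 below (compute_loss's real parameter names may differ):

label_and_bboxes = (batch_label_sbbox, batch_sbboxes)  # i.e. target[0]
compute_loss(pred, conv, *label_and_bboxes, 0)
# equivalent to:
compute_loss(pred, conv, batch_label_sbbox, batch_sbboxes, 0)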
Code Example #7
def train_step(image_data, target):
    with tf.GradientTape() as tape:
        pred_result = model(image_data, training=True)
        '''
        print('train.py/train_step')
        pred_result is list
        print(np.array(pred_result).shape) : (6, 4)
        print(pred_result[0].shape) : (4, 52, 52, 255)   => conv
        print(pred_result[1].shape) : (4, 52, 52, 3, 85) => pred
        print(pred_result[2].shape) : (4, 26, 26, 255)   => conv
        print(pred_result[3].shape) : (4, 26, 26, 3, 85) => pred
        print(pred_result[4].shape) : (4, 13, 13, 255)   => conv
        print(pred_result[5].shape) : (4, 13, 13, 3, 85) => pred
        '''
        giou_loss = conf_loss = prob_loss = 0

        # optimizing process
        for i in range(3):  # iterate over the three scale levels (small/medium/large)
            conv, pred = pred_result[i*2], pred_result[i*2+1]
            '''
            print('train.py/train_step')
            print(conv.shape) : (4, 52, 52, 255)
                                (4, 26, 26, 255)
                                (4, 13, 13, 255)
            print(pred.shape) : (4, 52, 52, 3, 85)
                                (4, 26, 26, 3, 85)
                                (4, 13, 13, 3, 85)
            '''
            loss_items = compute_loss(pred, conv, *target[i], i)
            # loss_items => [giou_loss, conf_loss, prob_loss]

            '''
            List of (batch_smaller_target, batch_medium_target, batch_larger_target)
            print(len(target)) => 3 | select (batch_smaller_target, batch_medium_target, batch_larger_target)
            print(len(target[0])) => 2 | select (batch_label_sbbox, batch_sbboxes)

            ----- (for batch_label_sbbox)
            print(len(target[0][0])) => 4 | batch size 
            print(len(target[0][0][0])) => 52 | cell size
            print(len(target[0][0][0][0])) => 52 | cell size
            print(len(target[0][0][0][0][0])) => 3 | 3 anchors per each level
            print(len(target[0][0][0][0][0][0])) => 85 | xywh(4), conf(1), prob(80)

            ----- (for batch_sbboxes)
            print(len(target[0][1]))    => 4   (Batch size)
            print(len(target[0][1][0])) => 150 (Each image contains maximum 150 bboxes)
            print(len(target[0][1][1])) => 150
            print(len(target[0][1][2])) => 150
            print(len(target[0][1][3])) => 150
            print(len(target[0][1][0][120])) => 4 (Each bbox has xywh info)
            '''
            
            giou_loss += loss_items[0]
            conf_loss += loss_items[1]
            prob_loss += loss_items[2]

        total_loss = giou_loss + conf_loss + prob_loss

        gradients = tape.gradient(total_loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        tf.print("=> STEP %4d   lr: %.6f   giou_loss: %4.2f   conf_loss: %4.2f   "
                 "prob_loss: %4.2f   total_loss: %4.2f" %(global_steps, optimizer.lr.numpy(),
                                                          giou_loss, conf_loss,
                                                          prob_loss, total_loss))
        # update learning rate
        global_steps.assign_add(1)
        if global_steps < warmup_steps:
            lr = global_steps / warmup_steps * cfg.TRAIN.LR_INIT
        else:
            lr = cfg.TRAIN.LR_END + 0.5 * (cfg.TRAIN.LR_INIT - cfg.TRAIN.LR_END) * (
                (1 + tf.cos((global_steps - warmup_steps) / (total_steps - warmup_steps) * np.pi))
            )
        optimizer.lr.assign(lr.numpy())

        # writing summary data
        with writer.as_default():
            tf.summary.scalar("lr", optimizer.lr, step=global_steps)
            tf.summary.scalar("loss/total_loss", total_loss, step=global_steps)
            tf.summary.scalar("loss/giou_loss", giou_loss, step=global_steps)
            tf.summary.scalar("loss/conf_loss", conf_loss, step=global_steps)
            tf.summary.scalar("loss/prob_loss", prob_loss, step=global_steps)
        writer.flush()
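The 255-channel conv outputs in the shape notes above decode to 3 anchors × 85 values per cell, with 85 = 4 box coordinates + 1 objectness score + 80 class probabilities (the COCO class count); a quick check:

num_anchors, num_classes = 3, 80
assert num_anchors * (4 + 1 + num_classes) == 255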
Code Example #8
testset = Dataset('test')
sum_giou_loss = 0
sum_conf_loss = 0
sum_prob_loss = 0
sum_total_loss = 0
n = 0
for image_data, target in testset:    # for every batch
    test_result = model(image_data, training=False)
    giou_loss = 0
    conf_loss = 0
    prob_loss = 0

    # sum up loss over 3 scales
    for i in range(3):
        conv, pred = test_result[i * 2], test_result[i * 2 + 1]    # get raw output (conv) and decoded output (pred) simultaneously
        loss_items = compute_loss(pred, conv, *target[i], i)    # compare decoded output and label for loss computation
        giou_loss += loss_items[0]
        conf_loss += loss_items[1]
        prob_loss += loss_items[2]
    total_loss = giou_loss + conf_loss + prob_loss

    sum_giou_loss += giou_loss
    sum_conf_loss += conf_loss
    sum_prob_loss += prob_loss
    sum_total_loss += total_loss
    n += 1

    tf.print("val_giou_loss: %4.2f   val_conf_loss: %4.2f   "
             "val_prob_loss: %4.2f   val_total_loss: %4.2f" % (giou_loss, conf_loss, prob_loss, total_loss))

# average loss over the entire validation set
avg_giou_loss = sum_giou_loss / n
avg_conf_loss = sum_conf_loss / n
avg_prob_loss = sum_prob_loss / n
avg_total_loss = sum_total_loss / n