Code Example #1
    def build_model(self, weight=None):
        input_layer = tf.keras.layers.Input([INPUT_SIZE, INPUT_SIZE, 3])

        feature_maps = YOLO(input_layer, self.NUM_CLASS, "yolov4", False)
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            if i == 0:
                bbox_tensor = decode_train(fm, INPUT_SIZE // 8, self.NUM_CLASS,
                                           self.STRIDES, self.ANCHORS, i,
                                           self.XYSCALE)
            elif i == 1:
                bbox_tensor = decode_train(fm, INPUT_SIZE // 16,
                                           self.NUM_CLASS, self.STRIDES,
                                           self.ANCHORS, i, self.XYSCALE)
            else:
                bbox_tensor = decode_train(fm, INPUT_SIZE // 32,
                                           self.NUM_CLASS, self.STRIDES,
                                           self.ANCHORS, i, self.XYSCALE)
            bbox_tensors.append(fm)
            bbox_tensors.append(bbox_tensor)

        model = tf.keras.Model(input_layer, bbox_tensors)

        if weight is not None:
            model.load_weights(weight)

        return model
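The three decode branches above differ only in the feature-map grid size, which is the input size divided by the stride of scale i. A minimal, self-contained sketch of that relationship (assuming the standard YOLOv4 strides 8/16/32; the constant below is a placeholder, not a value from this repo):

# Illustrative only: shows why the branches pass INPUT_SIZE // 8, // 16, // 32.
INPUT_SIZE = 416  # placeholder value
for i, stride in enumerate((8, 16, 32)):
    output_size = INPUT_SIZE // stride
    print("scale {}: stride {} -> {}x{} grid".format(i, stride, output_size, output_size))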
Code Example #2
def main(_argv):
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    # if len(physical_devices) > 0:
    if physical_devices:
        # tf.config.experimental.set_memory_growth(physical_devices[0], True)
        tf.config.experimental.set_visible_devices(physical_devices[0], "GPU")

    trainset = Dataset(FLAGS, is_training=True)
    testset = Dataset(FLAGS, is_training=False)
    logdir = "./data/log"
    isfreeze = False
    steps_per_epoch = len(trainset)
    first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS  # sic: the key is misspelled in the upstream config
    second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
    global_steps = tf.Variable(1, trainable=False, dtype=tf.int64)
    warmup_steps = cfg.TRAIN.WARMUP_EPOCHS * steps_per_epoch
    total_steps = (first_stage_epochs + second_stage_epochs) * steps_per_epoch
    # train_steps = (first_stage_epochs + second_stage_epochs) * steps_per_period

    input_layer = tf.keras.layers.Input([cfg.TRAIN.INPUT_SIZE, cfg.TRAIN.INPUT_SIZE, 3])
    STRIDES, ANCHORS, NUM_CLASS, XYSCALE = utils.load_config(FLAGS)
    IOU_LOSS_THRESH = cfg.YOLO.IOU_LOSS_THRESH

    freeze_layers = utils.load_freeze_layer(FLAGS.model, FLAGS.tiny)

    feature_maps = YOLO(input_layer, NUM_CLASS, FLAGS.model, FLAGS.tiny)
    if FLAGS.tiny:
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            if i == 0:
                bbox_tensor = decode_train(fm, cfg.TRAIN.INPUT_SIZE // 16, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE)
            else:
                bbox_tensor = decode_train(fm, cfg.TRAIN.INPUT_SIZE // 32, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE)
            bbox_tensors.append(fm)
            bbox_tensors.append(bbox_tensor)
    else:
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            if i == 0:
                bbox_tensor = decode_train(fm, cfg.TRAIN.INPUT_SIZE // 8, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE)
            elif i == 1:
                bbox_tensor = decode_train(fm, cfg.TRAIN.INPUT_SIZE // 16, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE)
            else:
                bbox_tensor = decode_train(fm, cfg.TRAIN.INPUT_SIZE // 32, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE)
            bbox_tensors.append(fm)
            bbox_tensors.append(bbox_tensor)

    model = tf.keras.Model(input_layer, bbox_tensors)
    model.summary()

    if FLAGS.weights is None:
        print("Training from scratch")
    else:
        if FLAGS.weights.split(".")[-1] == "weights":
            utils.load_weights(model, FLAGS.weights, FLAGS.model, FLAGS.tiny)
        else:
            model.load_weights(FLAGS.weights)
        print('Restoring weights from: %s ... ' % FLAGS.weights)


    optimizer = tf.keras.optimizers.Adam()
    if os.path.exists(logdir): shutil.rmtree(logdir)
    writer = tf.summary.create_file_writer(logdir)

    # define training step function
    # @tf.function
    def train_step(image_data, target):
        with tf.GradientTape() as tape:
            pred_result = model(image_data, training=True)
            giou_loss = conf_loss = prob_loss = 0

            # optimizing process
            for i in range(len(freeze_layers)):
                conv, pred = pred_result[i * 2], pred_result[i * 2 + 1]
                loss_items = compute_loss(pred, conv, target[i][0], target[i][1], STRIDES=STRIDES, NUM_CLASS=NUM_CLASS, IOU_LOSS_THRESH=IOU_LOSS_THRESH, i=i)
                giou_loss += loss_items[0]
                conf_loss += loss_items[1]
                prob_loss += loss_items[2]

            total_loss = giou_loss + conf_loss + prob_loss

            gradients = tape.gradient(total_loss, model.trainable_variables)
            optimizer.apply_gradients(zip(gradients, model.trainable_variables))
            tf.print("=> STEP %4d/%4d   lr: %.6f   giou_loss: %4.2f   conf_loss: %4.2f   "
                     "prob_loss: %4.2f   total_loss: %4.2f" % (global_steps, total_steps, optimizer.lr.numpy(),
                                                               giou_loss, conf_loss,
                                                               prob_loss, total_loss))
            # update learning rate
            global_steps.assign_add(1)
            if global_steps < warmup_steps:
                lr = global_steps / warmup_steps * cfg.TRAIN.LR_INIT
            else:
                lr = cfg.TRAIN.LR_END + 0.5 * (cfg.TRAIN.LR_INIT - cfg.TRAIN.LR_END) * (
                    (1 + tf.cos((global_steps - warmup_steps) / (total_steps - warmup_steps) * np.pi))
                )
            optimizer.lr.assign(lr.numpy())

            # writing summary data
            with writer.as_default():
                tf.summary.scalar("lr", optimizer.lr, step=global_steps)
                tf.summary.scalar("loss/total_loss", total_loss, step=global_steps)
                tf.summary.scalar("loss/giou_loss", giou_loss, step=global_steps)
                tf.summary.scalar("loss/conf_loss", conf_loss, step=global_steps)
                tf.summary.scalar("loss/prob_loss", prob_loss, step=global_steps)
            writer.flush()

    def test_step(image_data, target):
        # evaluation only: no GradientTape needed, no gradients are applied
        pred_result = model(image_data, training=True)
        giou_loss = conf_loss = prob_loss = 0

        for i in range(len(freeze_layers)):
            conv, pred = pred_result[i * 2], pred_result[i * 2 + 1]
            loss_items = compute_loss(pred, conv, target[i][0], target[i][1], STRIDES=STRIDES, NUM_CLASS=NUM_CLASS, IOU_LOSS_THRESH=IOU_LOSS_THRESH, i=i)
            giou_loss += loss_items[0]
            conf_loss += loss_items[1]
            prob_loss += loss_items[2]

        total_loss = giou_loss + conf_loss + prob_loss

        tf.print("=> TEST STEP %4d   giou_loss: %4.2f   conf_loss: %4.2f   "
                 "prob_loss: %4.2f   total_loss: %4.2f" % (global_steps, giou_loss, conf_loss,
                                                           prob_loss, total_loss))

    for epoch in range(first_stage_epochs + second_stage_epochs):
        if epoch < first_stage_epochs:
            if not isfreeze:
                isfreeze = True
                for name in freeze_layers:
                    freeze = model.get_layer(name)
                    freeze_all(freeze)
        elif epoch >= first_stage_epochs:
            if isfreeze:
                isfreeze = False
                for name in freeze_layers:
                    freeze = model.get_layer(name)
                    unfreeze_all(freeze)
        for image_data, target in trainset:
            train_step(image_data, target)
        for image_data, target in testset:
            test_step(image_data, target)
        model.save_weights("./checkpoints/yolov4")
Code Example #3
def main(_argv):
    input_channel = 3
    patience = 30
    steps_in_epoch = 0
    epoch_loss = np.inf
    prev_minloss = np.inf

    trainset = Dataset(FLAGS, input_channel, is_training=True)
    # testset = Dataset(FLAGS, input_channel, is_training=False)
    logdir = "./data/log"
    isfreeze = False
    steps_per_epoch = len(trainset)
    print("steps_per_epoch:{}".format(steps_per_epoch))

    first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
    second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
    global_steps = tf.Variable(1, trainable=False, dtype=tf.int64)
    warmup_steps = cfg.TRAIN.WARMUP_EPOCHS * steps_per_epoch
    total_steps = (first_stage_epochs + second_stage_epochs) * steps_per_epoch
    # train_steps = (first_stage_epochs + second_stage_epochs) * steps_per_period

    loss_tracker = []
    losses_in_epoch = 0.
    input_layer = tf.keras.layers.Input([cfg.TRAIN.INPUT_SIZE, cfg.TRAIN.INPUT_SIZE, 3])
    STRIDES, ANCHORS, NUM_CLASS, XYSCALE = utils.load_config(FLAGS)
    IOU_LOSS_THRESH = cfg.YOLO.IOU_LOSS_THRESH

    freeze_layers = utils.load_freeze_layer(FLAGS.model, FLAGS.num_detection_layer)
    # feature_maps = YOLO(input_layer, NUM_CLASS, FLAGS.model, FLAGS.tiny)
    feature_maps = YOLOv4_more_tiny(input_layer, NUM_CLASS)

    # default: single-scale decode at stride 16 (overridden below when NUM_YOLOLAYERS is 1, 2, or 3)
    bbox_tensors = []
    for i, fm in enumerate(feature_maps):        # fm shape: (None, featw, feath, filters)
        bbox_tensor = decode_train(fm, cfg.TRAIN.INPUT_SIZE // 16, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE)
        bbox_tensors.append(fm)
        bbox_tensors.append(bbox_tensor)

    if cfg.YOLO.NUM_YOLOLAYERS == 3:  # yolov4
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            if i == 0:
                bbox_tensor = decode_train(fm, cfg.TRAIN.INPUT_SIZE // 8, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE)
            elif i == 1:
                bbox_tensor = decode_train(fm, cfg.TRAIN.INPUT_SIZE // 16, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE)
            else:
                bbox_tensor = decode_train(fm, cfg.TRAIN.INPUT_SIZE // 32, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE)
            bbox_tensors.append(fm)
            bbox_tensors.append(bbox_tensor)
    elif cfg.YOLO.NUM_YOLOLAYERS == 2:  # yolo tiny
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            if i == 0:
                bbox_tensor = decode_train(fm, cfg.TRAIN.INPUT_SIZE // 16, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE)
            else:
                bbox_tensor = decode_train(fm, cfg.TRAIN.INPUT_SIZE // 32, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE)
            bbox_tensors.append(fm)
            bbox_tensors.append(bbox_tensor)
    elif cfg.YOLO.NUM_YOLOLAYERS == 1: # custom yolo
        bbox_tensors = []
        bbox_tensor = decode_train(feature_maps[0], cfg.TRAIN.INPUT_SIZE // cfg.YOLO.STRIDES_CUSTOM[0], NUM_CLASS, STRIDES, ANCHORS, 0, XYSCALE)
        bbox_tensors.append(feature_maps[0])
        bbox_tensors.append(bbox_tensor)

    model = tf.keras.Model(input_layer, bbox_tensors)
    model.summary()


    if FLAGS.weights is None:
        print("Training from scratch")
    else:
        if FLAGS.weights.split(".")[-1] == "weights":
            utils.load_weights(model, FLAGS.weights, FLAGS.model, FLAGS.num_detection_layer)
        else:
            model.load_weights(FLAGS.weights)

        print('Restoring weights from: %s ... ' % FLAGS.weights)

    optimizer = tf.keras.optimizers.Adam()

    if os.path.exists(logdir): shutil.rmtree(logdir)
    writer = tf.summary.create_file_writer(logdir)

    # define training step function
    # @tf.function
    def train_step(image_data, target):
        with tf.GradientTape() as tape:
            pred_result = model(image_data, training=True)
            ciou_loss = conf_loss = prob_loss = 0

            # optimizing process
            for i in range(len(freeze_layers)):
                conv, pred = pred_result[i * 2], pred_result[i * 2 + 1]  # target[i][1]:(32, 150, 4)
                # print("conv shape:{} pred shape:{}".format(tf.keras.backend.int_shape(conv), tf.keras.backend.int_shape(pred)))
                # print("target[i][0]:{} target[i][1]:{}".format(np.array(target[i][0]).shape, np.array(target[i][1]).shape))
                loss_items = compute_loss(pred, conv, target[i][0], target[i][1], STRIDES=STRIDES, NUM_CLASS=NUM_CLASS, IOU_LOSS_THRESH=IOU_LOSS_THRESH, i=i)
                ciou_loss += loss_items[0]
                conf_loss += loss_items[1]
                prob_loss += loss_items[2]

            total_loss = ciou_loss + conf_loss + prob_loss

            gradients = tape.gradient(total_loss, model.trainable_variables)
            optimizer.apply_gradients(zip(gradients, model.trainable_variables))
            tf.print("=> STEP %4d/%4d   lr: %.6f   ciou_loss: %4.2f   conf_loss: %4.2f   "
                     "prob_loss: %4.2f   total_loss: %4.2f" % (global_steps, total_steps, optimizer.lr.numpy(),
                                                               ciou_loss, conf_loss,
                                                               prob_loss, total_loss))
            # update learning rate
            global_steps.assign_add(1)
            if global_steps < warmup_steps:
                lr = global_steps / warmup_steps * cfg.TRAIN.LR_INIT
            else:
                lr = cfg.TRAIN.LR_END + 0.5 * (cfg.TRAIN.LR_INIT - cfg.TRAIN.LR_END) * (
                    (1 + tf.cos((global_steps - warmup_steps) / (total_steps - warmup_steps) * np.pi))
                )
            optimizer.lr.assign(lr.numpy())

            # writing summary data
            with writer.as_default():
                tf.summary.scalar("lr", optimizer.lr, step=global_steps)
                tf.summary.scalar("loss/total_loss", total_loss, step=global_steps)
                tf.summary.scalar("loss/ciou_loss", ciou_loss, step=global_steps)
                tf.summary.scalar("loss/conf_loss", conf_loss, step=global_steps)
                tf.summary.scalar("loss/prob_loss", prob_loss, step=global_steps)
            writer.flush()
            # return total_loss

    @tf.function
    def test_step(image_data, target):
        # evaluation only: no GradientTape needed, no gradients are applied
        pred_result = model(image_data, training=True)
        ciou_loss = conf_loss = prob_loss = 0

        preds = None
        gt_infos = None
        gt_bboxes = None
        for i in range(len(freeze_layers)):
            conv, pred = pred_result[i * 2], pred_result[i * 2 + 1]
            loss_items = compute_loss(pred, conv, target[i][0], target[i][1], STRIDES=STRIDES, NUM_CLASS=NUM_CLASS, IOU_LOSS_THRESH=IOU_LOSS_THRESH, i=i)

            ciou_loss += loss_items[0]
            conf_loss += loss_items[1]
            prob_loss += loss_items[2]

            if FLAGS.show_map:
                batch_, _, _, _, ch_ = tf.keras.backend.int_shape(pred)
                if preds is None:
                    preds = tf.reshape(pred, (batch_, -1, ch_))
                else:
                    # was tf.reshape(preds, ...): reshape the new prediction, not the accumulator
                    preds = tf.concat([preds, tf.reshape(pred, (batch_, -1, ch_))], axis=1)

                if gt_infos is None:
                    gt_infos = tf.reshape(target[i][0], (batch_, -1, ch_))
                    gt_bboxes = target[i][1]
                else:
                    gt_infos = tf.concat([gt_infos, tf.reshape(target[i][0], (batch_, -1, ch_))], axis=1)
                    gt_bboxes = tf.concat([gt_bboxes, tf.reshape(target[i][1], (batch_, -1, 4))], axis=1)

        total_loss = ciou_loss + conf_loss + prob_loss

        # comma-style tf.print: %-formatting of symbolic tensors fails under @tf.function
        tf.print("=> TEST STEP", global_steps, "  ciou_loss:", ciou_loss,
                 "  conf_loss:", conf_loss, "  prob_loss:", prob_loss,
                 "  total_loss:", total_loss)
        if FLAGS.show_map:
            # compute mAP only when requested; previously `map` was unset (and shadowed
            # the builtin) whenever show_map was False
            total_map = compute_map(preds, gt_bboxes, gt_infos, NUM_CLASS)
            tf.print("=> TEST STEP", global_steps, "  map:", total_map)

    for epoch in range(first_stage_epochs + second_stage_epochs):
        start_ch = time.time()
        train_loss = 0.
        if epoch < first_stage_epochs:
            if not isfreeze:
                isfreeze = True
                for name in freeze_layers:
                    try:
                        freeze = model.get_layer(name)
                        freeze_all(freeze)
                    except ValueError:
                        pass
        elif epoch >= first_stage_epochs:
            if isfreeze:
                isfreeze = False
                for name in freeze_layers:
                    try:  # try block added so missing layer names are skipped
                        freeze = model.get_layer(name)
                        unfreeze_all(freeze)  # was freeze_all: the second stage should unfreeze
                    except ValueError:
                        pass


        for image_data, target in trainset:
            # bboxes_list = target[0][1]
            # label_data = target[0][0]
            #
            # for batch_idx, bboxes in enumerate(bboxes_list):
            #     # print("bboxes:{}".format(bboxes))
            #     class_inds = []
            #     check = np.array(image_data[batch_idx]).reshape(cfg.TRAIN.INPUT_SIZE, cfg.TRAIN.INPUT_SIZE, 3)
            #     # print(r"D:\tf_data_not\preprocessed_{}.npy".format(batch_idx))
            #     # np.save(r"D:\tf_data_not\image_{}.npy".format(batch_idx), check)
            #     # np.save(r"D:\tf_data_not\preprocessed_{}.npy".format(batch_idx), label_data[batch_idx])
            #     # np.save(r"D:\tf_data_not\bboxes_{}.npy".format(batch_idx), bboxes)
            #     label = label_data[batch_idx]
            #     label_class = label[...,5:]
            #     # print("label shape:{}".format(np.array(label).shape))
            #
            #     for line_label in label_class:
            #         if np.sum(line_label) > 0:
            #             print(line_label)
            #     class_ = np.array(label[..., 5:]).flatten()
            #     class_ = np.where(class_>0.1, class_, 0)
            #     for class_idx, class_label in enumerate(class_):
            #         if class_label > 0:
            #             class_inds.append(class_idx % cfg.YOLO.NUM_CLASSES)
            #
            #     for bbox_idx, bbox in enumerate(bboxes):
            #         half_h = bbox[3] / 2
            #         half_w = bbox[2] / 2
            #
            #         if np.sum(bbox) > 0:
            #             # print("class:{}".format(class_inds))
            #             cv2.rectangle(check, (int(bbox[0] - half_w), int(bbox[1] - half_h)),
            #                           (int(bbox[0] + half_w), int(bbox[1] + half_h)), color=(0, 255, 0), thickness=3)
            #             cv2.putText(check, text="{}".format(class_inds[bbox_idx]),
            #                         org=(int(bbox[0] + half_w)-10, int(bbox[1] + half_h)-30), thickness=2, color=(0, 255, 0),
            #                         fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.7)
            #
            #     cv2.imshow("check_aug", check)
            #     cv2.waitKey(0)
            #     cv2.destroyAllWindows()
            #     cv2.imwrite(os.path.join(r"D:\Public\JHS\SIMPLE_DATA_INPUT_CHECK", "{}.jpg".format(batch_idx)), check*255)

            train_step(image_data, target)

        # for image_data, target in testset:
        #     test_step(image_data, target)

        ### for loss graph
        """
        if steps_in_epoch >= steps_per_epoch:
            loss_tracker.append(losses_in_epoch / steps_per_epoch)

            plt.plot(loss_tracker)
            plt.xlabel('epoch')
            plt.ylabel('loss')
            # plt.show()
            plt.savefig("D:/checkpoint/loss.png")
        """

        # print("steps_in_epoch:{}, steps_per_epoch:{}".format(global_steps, steps_per_epoch))
        # print("prev_minloss:{}, epoch_loss:{}".format(prev_minloss, epoch_loss))
        # # early stopping
        # if len(losses_in_epoch) >= steps_per_epoch:
        #
        #     epoch_loss = losses_in_epoch / steps_per_epoch
        #     if prev_minloss > epoch_loss:   # save best weight
        #         prev_minloss = epoch_loss
        #
        #         if epoch > 800:  # minimum training epochs before saving the best weights
        #             model.save("D:\ckpt_best")
        #             print("{} epoch save best weights".format(epoch))
        #
        #     if len(loss_tracker) > patience:
        #         print("check loss_tracker len:{}".format(len(loss_tracker)))
        #         if loss_tracker[0] > np.min(loss_tracker[1:]):
        #             loss_tracker.pop(0)
        #         else:
        #             print("total loss didn't decreased during {} epochs. train stop".format(patience))
        #             return
        #     steps_in_epoch = 0
        #     epoch_loss = train_loss
        # else:
        #     epoch_loss += train_loss

        if (epoch + 1) % 500 == 0:
            model.save(r"D:\notf_ckpt-epoch{}".format(epoch + 1))
            print("{} epoch model saved".format(epoch + 1))
    model.save(r"D:\notf_ckpt-last")
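The commented-out block above sketches patience-based early stopping. Cleaned up, the core of that idea looks like this (a sketch reconstructed from the comments; `patience` is the same illustrative constant):

import numpy as np

patience = 30
loss_tracker = []

def should_stop(epoch_loss):
    """Return True when the oldest tracked loss has not been improved on
    within the last `patience` epochs."""
    loss_tracker.append(epoch_loss)
    if len(loss_tracker) > patience:
        if loss_tracker[0] > np.min(loss_tracker[1:]):
            loss_tracker.pop(0)  # improvement somewhere in the window: keep going
            return False
        return True  # no epoch in the window beat the oldest loss: stop
    return False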
Code Example #4
def main(_argv):
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    if len(physical_devices) > 0:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)

    trainset = Dataset('train')
    testset = Dataset('test')
    
    logdir = "./data/log"
    isfreeze = False
    steps_per_epoch = len(trainset)
    first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
    second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
    # global_steps is tracked as ckpt.step (see the Checkpoint below)
    warmup_steps = cfg.TRAIN.WARMUP_EPOCHS * steps_per_epoch
    total_steps = (first_stage_epochs + second_stage_epochs) * steps_per_epoch
    # train_steps = (first_stage_epochs + second_stage_epochs) * steps_per_period

    NUM_CLASS = len(utils.read_class_names(cfg.YOLO.CLASSES))
    STRIDES         = np.array(cfg.YOLO.STRIDES)
    IOU_LOSS_THRESH = cfg.YOLO.IOU_LOSS_THRESH
    XYSCALE         = cfg.YOLO.XYSCALE
    ANCHORS         = utils.get_anchors(cfg.YOLO.ANCHORS)

    input_layer = tf.keras.layers.Input([cfg.TRAIN.INPUT_SIZE, cfg.TRAIN.INPUT_SIZE, 3])
    
    #YOLOV4
    feature_maps = YOLOv4(input_layer, NUM_CLASS)
    bbox_tensors = []
    for i, fm in enumerate(feature_maps):
        bbox_tensor = decode_train(fm, NUM_CLASS, STRIDES, ANCHORS, i)
        bbox_tensors.append(fm)
        bbox_tensors.append(bbox_tensor)
        
    model = tf.keras.Model(input_layer, bbox_tensors)

    if FLAGS.weights == "none":
        print("Training from scratch")
    else:
        if FLAGS.weights.split(".")[-1] == "weights":
            if FLAGS.tiny:
                utils.load_weights_tiny(model, FLAGS.weights)
            else:
                if FLAGS.model == 'yolov3':
                    utils.load_weights_v3(model, FLAGS.weights)
                else:
                    utils.load_weights(model, FLAGS.weights)
        else:
            model.load_weights(FLAGS.weights)
        print('Restoring weights from: %s ... ' % FLAGS.weights)

    if os.path.exists(logdir): 
        shutil.rmtree(logdir)

    optimizer   = tf.keras.optimizers.Adam()
    writer      = tf.summary.create_file_writer(logdir)
    ckpt        = tf.train.Checkpoint(step=tf.Variable(1, trainable=False, dtype=tf.int64), optimizer=optimizer, net=model)
    manager     = tf.train.CheckpointManager(ckpt, './checkpoints', max_to_keep=10)

    def train_step(image_data, target):
        with tf.GradientTape() as tape:
            pred_result = model(image_data, training=True)
            giou_loss = conf_loss = prob_loss = 0

            # optimizing process
            for i in range(3):
                conv, pred = pred_result[i * 2], pred_result[i * 2 + 1]
                loss_items = compute_loss(pred, conv, target[i][0], target[i][1], STRIDES=STRIDES, NUM_CLASS=NUM_CLASS, IOU_LOSS_THRESH=IOU_LOSS_THRESH, i=i)
                giou_loss += loss_items[0]
                conf_loss += loss_items[1]
                prob_loss += loss_items[2]

            total_loss = giou_loss + conf_loss + prob_loss
        
            gradients = tape.gradient(total_loss, model.trainable_variables)
            optimizer.apply_gradients(zip(gradients, model.trainable_variables))
            tf.print("=> STEP %4d   lr: %.6f   giou_loss: %4.2f   conf_loss: %4.2f   "
                     "prob_loss: %4.2f   total_loss: %4.2f" % (ckpt.step, optimizer.lr.numpy(),
                                                               giou_loss, conf_loss,
                                                               prob_loss, total_loss))
            # update learning rate
            ckpt.step.assign_add(1)
            if ckpt.step < warmup_steps:
                lr = ckpt.step / warmup_steps * cfg.TRAIN.LR_INIT
            else:
                lr = cfg.TRAIN.LR_END + 0.5 * (cfg.TRAIN.LR_INIT - cfg.TRAIN.LR_END) * (
                    (1 + tf.cos((ckpt.step - warmup_steps) / (total_steps - warmup_steps) * np.pi))
                )
            optimizer.lr.assign(lr.numpy())

            # writing summary data
            with writer.as_default():
                tf.summary.scalar("lr", optimizer.lr, step=ckpt.step)
                tf.summary.scalar("loss/total_loss", total_loss, step=ckpt.step)
                tf.summary.scalar("loss/giou_loss", giou_loss, step=ckpt.step)
                tf.summary.scalar("loss/conf_loss", conf_loss, step=ckpt.step)
                tf.summary.scalar("loss/prob_loss", prob_loss, step=ckpt.step,)
            writer.flush()

    def test_step(image_data, target):
        # evaluation only: no GradientTape needed, no gradients are applied
        pred_result = model(image_data, training=True)
        giou_loss = conf_loss = prob_loss = 0

        for i in range(3):
            conv, pred = pred_result[i * 2], pred_result[i * 2 + 1]
            loss_items = compute_loss(pred, conv, target[i][0], target[i][1], STRIDES=STRIDES, NUM_CLASS=NUM_CLASS, IOU_LOSS_THRESH=IOU_LOSS_THRESH, i=i)
            giou_loss += loss_items[0]
            conf_loss += loss_items[1]
            prob_loss += loss_items[2]

        total_loss = giou_loss + conf_loss + prob_loss

        tf.print("=> TEST STEP %4d   giou_loss: %4.2f   conf_loss: %4.2f   "
                 "prob_loss: %4.2f   total_loss: %4.2f" % (ckpt.step, giou_loss, conf_loss,
                                                           prob_loss, total_loss))

    ckpt.restore(manager.latest_checkpoint)
    if manager.latest_checkpoint:
        print("Restored from {}".format(manager.latest_checkpoint))
    else:
        print("Initializing from scratch.")

    for epoch in range(first_stage_epochs + second_stage_epochs):
        ckpt.step.assign_add(1)
        print("----------- EPOCH :: ", str(epoch), " :: -----------")

        if epoch < first_stage_epochs:
            print("epoch < first_stage_epochs")
            if not isfreeze:
                isfreeze = True
                for name in ['conv2d_93', 'conv2d_101', 'conv2d_109']:
                    freeze = model.get_layer(name)
                    freeze_all(freeze)
        elif epoch >= first_stage_epochs:
            print("epoch >= first_stage_epochs")
            if isfreeze:
                isfreeze = False
                for name in ['conv2d_93', 'conv2d_101', 'conv2d_109']:
                    freeze = model.get_layer(name)
                    unfreeze_all(freeze)

        for image_data, target in trainset:
            train_step(image_data, target)
            if int(ckpt.step) % 10 == 0:
                save_path = manager.save()
                print("Saved checkpoint for step {}: {}".format(int(ckpt.step), save_path))
Code Example #5
def main(_argv):
    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:
        # Restrict TensorFlow to a single visible GPU (here index 7, the eighth device)
        try:
            tf.config.experimental.set_visible_devices(gpus[7], 'GPU')
        except RuntimeError as e:
            # Visible devices must be set before the GPUs are initialized
            print(e)
    
    trainset = Dataset(FLAGS, is_training=True)
    # print(next(iter(trainset)))
    # _train = next(iter(trainset))
    # _train[0].shape       : (3, 416, 416, 3)
    # len(_train[1])        : 3
    # len(_train[1][0])     : 2
    # _train[1][0][0].shape : (3, 26, 26, 3, 25)
    # _train[1][0][1].shape : (3, 150, 4)
    # len(_train[1][1])     : 2
    # _train[1][1][0].shape : (3, 26, 26, 3, 25)
    # _train[1][1][1].shape : (3, 150, 4)

    testset = Dataset(FLAGS, is_training=False)
    logdir = "./data/log"
    isfreeze = False
    steps_per_epoch = len(trainset)
    first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
    second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
    global_steps = tf.Variable(1, trainable=False, dtype=tf.int64)
    warmup_steps = cfg.TRAIN.WARMUP_EPOCHS * steps_per_epoch
    total_steps = (first_stage_epochs + second_stage_epochs) * steps_per_epoch
    # train_steps = (first_stage_epochs + second_stage_epochs) * steps_per_period

    input_layer = tf.keras.layers.Input([cfg.TRAIN.INPUT_SIZE, cfg.TRAIN.INPUT_SIZE, 3])
    STRIDES, ANCHORS, NUM_CLASS, XYSCALE = utils.load_config(FLAGS)
    IOU_LOSS_THRESH = cfg.YOLO.IOU_LOSS_THRESH

    freeze_layers = utils.load_freeze_layer(FLAGS.model, FLAGS.tiny)
    #yolov4 : ['conv2d_93', 'conv2d_101', 'conv2d_109']
    #yolov4_tiny: ['conv2d_17', 'conv2d_20']

    feature_maps = YOLO(input_layer, NUM_CLASS, FLAGS.model, FLAGS.tiny)
    # import core.backbone as backbone
    # route_1, route_2, conv = backbone.cspdarknet53(input_layer)
    #
    # route_1: <tf.Tensor 'Mul_355:0' shape=(None, 52, 52, 256) dtype=float32>
    # route_2: <tf.Tensor 'Mul_376:0' shape=(None, 26, 26, 512) dtype=float32>
    # conv:    <tf.Tensor 'LeakyRelu_164:0' shape=(None, 13, 13, 512) dtype=float32>
    #yolov4 : [<tf.Tensor 'conv2d_93/BiasAdd:0' shape=(None, 52, 52, 75) dtype=float32>,
    #<tf.Tensor 'conv2d_101/BiasAdd:0' shape=(None, 26, 26, 75) dtype=float32>,
    #<tf.Tensor 'conv2d_109/BiasAdd:0' shape=(None, 13, 13, 75) dtype=float32>]

    #yolov4_tiny : [<tf.Tensor 'conv2d_130/BiasAdd:0' shape=(None, 26, 26, 75) dtype=float32>,
    #<tf.Tensor 'conv2d_127/BiasAdd:0' shape=(None, 13, 13, 75) dtype=float32>]

    if FLAGS.tiny:
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            if i == 0:
                bbox_tensor = decode_train(fm, cfg.TRAIN.INPUT_SIZE // 16, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE)
            else:
                bbox_tensor = decode_train(fm, cfg.TRAIN.INPUT_SIZE // 32, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE)
            bbox_tensors.append(fm)
            bbox_tensors.append(bbox_tensor)
    else:
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            if i == 0:
                bbox_tensor = decode_train(fm, cfg.TRAIN.INPUT_SIZE // 8, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE)
            elif i == 1:
                bbox_tensor = decode_train(fm, cfg.TRAIN.INPUT_SIZE // 16, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE)
            else:
                bbox_tensor = decode_train(fm, cfg.TRAIN.INPUT_SIZE // 32, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE)
            bbox_tensors.append(fm)
            bbox_tensors.append(bbox_tensor)

    model = tf.keras.Model(input_layer, bbox_tensors)
    model.summary()
    # from contextlib import redirect_stdout
    # with open('modelsummary.txt', 'w') as f:
    #     with redirect_stdout(f):
    #         model.summary()

    if FLAGS.weights is None:
        print("Training from scratch")
    else:
        if FLAGS.weights.split(".")[-1] == "weights":
            utils.load_weights(model, FLAGS.weights, FLAGS.model, FLAGS.tiny)
        else:
            model.load_weights(FLAGS.weights)
        print('Restoring weights from: %s ... ' % FLAGS.weights)

    # wf = open(FLAGS.weights, 'rb')
    # major, minor, revision, seen, _ = np.fromfile(wf, dtype=np.int32, count=5)
    # wf.close()
    # layer_size = 110
    # output_pos = [93, 101, 109]
    # j = 0
    # for i in range(layer_size):
    #     conv_layer_name = 'conv2d_%d' %i if i > 0 else 'conv2d'
    #     bn_layer_name = 'batch_normalization_%d' %j if j > 0 else 'batch_normalization'
    #     conv_layer = model.get_layer(conv_layer_name)
    #     filters = conv_layer.filters
    #     k_size = conv_layer.kernel_size[0]
    #     in_dim = conv_layer.input_shape[-1]



    optimizer = tf.keras.optimizers.Adam()
    if os.path.exists(logdir): shutil.rmtree(logdir)
    writer = tf.summary.create_file_writer(logdir)

    # define training step function
    # @tf.function
    def train_step(image_data, target):
        with tf.GradientTape() as tape:
            pred_result = model(image_data, training=True)
            giou_loss = conf_loss = prob_loss = 0

            # optimizing process
            for i in range(len(freeze_layers)):
                conv, pred = pred_result[i * 2], pred_result[i * 2 + 1]
                # print(pred)

                loss_items = compute_loss(pred, conv, target[i][0], target[i][1], STRIDES=STRIDES, NUM_CLASS=NUM_CLASS, IOU_LOSS_THRESH=IOU_LOSS_THRESH, i=i)
                giou_loss += loss_items[0]
                conf_loss += loss_items[1]
                prob_loss += loss_items[2]

            total_loss = giou_loss + conf_loss + prob_loss

            gradients = tape.gradient(total_loss, model.trainable_variables)
            optimizer.apply_gradients(zip(gradients, model.trainable_variables))
            tf.print("=> STEP %4d/%4d   lr: %.6f   giou_loss: %4.2f   conf_loss: %4.2f   "
                     "prob_loss: %4.2f   total_loss: %4.2f" % (global_steps, total_steps, optimizer.lr.numpy(),
                                                               giou_loss, conf_loss,
                                                               prob_loss, total_loss))
            # update learning rate
            global_steps.assign_add(1)
            if global_steps < warmup_steps:
                lr = global_steps / warmup_steps * cfg.TRAIN.LR_INIT
            else:
                lr = cfg.TRAIN.LR_END + 0.5 * (cfg.TRAIN.LR_INIT - cfg.TRAIN.LR_END) * (
                    (1 + tf.cos((global_steps - warmup_steps) / (total_steps - warmup_steps) * np.pi))
                )
            optimizer.lr.assign(lr.numpy())

            # writing summary data
            with writer.as_default():
                tf.summary.scalar("lr", optimizer.lr, step=global_steps)
                tf.summary.scalar("loss/total_loss", total_loss, step=global_steps)
                tf.summary.scalar("loss/giou_loss", giou_loss, step=global_steps)
                tf.summary.scalar("loss/conf_loss", conf_loss, step=global_steps)
                tf.summary.scalar("loss/prob_loss", prob_loss, step=global_steps)
            writer.flush()

    def test_step(image_data, target):
        # evaluation only: no GradientTape needed, no gradients are applied
        pred_result = model(image_data, training=True)
        giou_loss = conf_loss = prob_loss = 0

        for i in range(len(freeze_layers)):
            conv, pred = pred_result[i * 2], pred_result[i * 2 + 1]
            loss_items = compute_loss(pred, conv, target[i][0], target[i][1], STRIDES=STRIDES, NUM_CLASS=NUM_CLASS, IOU_LOSS_THRESH=IOU_LOSS_THRESH, i=i)
            giou_loss += loss_items[0]
            conf_loss += loss_items[1]
            prob_loss += loss_items[2]

        total_loss = giou_loss + conf_loss + prob_loss

        tf.print("=> TEST STEP %4d   giou_loss: %4.2f   conf_loss: %4.2f   "
                 "prob_loss: %4.2f   total_loss: %4.2f" % (global_steps, giou_loss, conf_loss,
                                                           prob_loss, total_loss))

    for epoch in range(first_stage_epochs + second_stage_epochs):
        if epoch < first_stage_epochs:
            if not isfreeze:
                isfreeze = True
                for name in freeze_layers:
                    freeze = model.get_layer(name)
                    freeze_all(freeze)
        elif epoch >= first_stage_epochs:
            if isfreeze:
                isfreeze = False
                for name in freeze_layers:
                    freeze = model.get_layer(name)
                    unfreeze_all(freeze)
        for image_data, target in trainset:
            train_step(image_data, target)
        #for image_data, target in testset:
        #    test_step(image_data, target)
        model.save_weights("./checkpoints/yolov4")
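Every example calls freeze_all and unfreeze_all without showing them. A plausible implementation, matching how such helpers usually work in YOLOv4 TensorFlow ports (treat this as an assumption, not this repo's verified code): toggle trainable on a layer and recurse into nested models.

import tensorflow as tf

def freeze_all(model, frozen=True):
    # assumption: mirrors the common helper in YOLOv4-TF repos
    model.trainable = not frozen
    if isinstance(model, tf.keras.Model):
        for layer in model.layers:
            freeze_all(layer, frozen)

def unfreeze_all(model, frozen=False):
    freeze_all(model, frozen)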
Code Example #6
def main(_argv):
    input_channel = 3
    patience = 30
    steps_in_epoch = 0
    epoch_loss = 0
    prev_minloss = np.inf

    trainset = DatasetTF(FLAGS, input_channel, is_training=True)
    testset = DatasetTF(FLAGS, input_channel, is_training=False)

    # testset = Dataset(FLAGS, input_channel, is_training=False)
    logdir = "./data/log_tf"
    isfreeze = False
    steps_per_epoch = len(trainset)
    ## for plot graph , early stopping
    loss_tracker = []

    # print("steps_per_epoch:{}".format(steps_per_epoch))

    first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
    second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
    global_steps = tf.Variable(1, trainable=False, dtype=tf.int64)
    warmup_steps = cfg.TRAIN.WARMUP_EPOCHS * steps_per_epoch
    total_steps = (first_stage_epochs + second_stage_epochs) * steps_per_epoch
    # train_steps = (first_stage_epochs + second_stage_epochs) * steps_per_period

    input_layer = tf.keras.layers.Input(
        [cfg.TRAIN.INPUT_SIZE, cfg.TRAIN.INPUT_SIZE, 3])
    STRIDES, ANCHORS, NUM_CLASS, XYSCALE = utils.load_config(FLAGS)
    IOU_LOSS_THRESH = cfg.YOLO.IOU_LOSS_THRESH

    freeze_layers = utils.load_freeze_layer(FLAGS.model,
                                            FLAGS.num_detection_layer)
    # feature_maps = YOLO(input_layer, NUM_CLASS, FLAGS.model, FLAGS.tiny)
    feature_maps = YOLOv4_more_tiny(input_layer, NUM_CLASS)

    bbox_tensors = []
    for i, fm in enumerate(feature_maps):  # fm shape: (None, 40, 40, 21)
        bbox_tensor = decode_train(fm, cfg.TRAIN.INPUT_SIZE // 16, NUM_CLASS,
                                   STRIDES, ANCHORS, i, XYSCALE)
        bbox_tensors.append(fm)
        bbox_tensors.append(bbox_tensor)

    if cfg.YOLO.NUM_YOLOLAYERS == 3:  # yolov4
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            if i == 0:
                bbox_tensor = decode_train(fm, cfg.TRAIN.INPUT_SIZE // 8,
                                           NUM_CLASS, STRIDES, ANCHORS, i,
                                           XYSCALE)
            elif i == 1:
                bbox_tensor = decode_train(fm, cfg.TRAIN.INPUT_SIZE // 16,
                                           NUM_CLASS, STRIDES, ANCHORS, i,
                                           XYSCALE)
            else:
                bbox_tensor = decode_train(fm, cfg.TRAIN.INPUT_SIZE // 32,
                                           NUM_CLASS, STRIDES, ANCHORS, i,
                                           XYSCALE)
            bbox_tensors.append(fm)
            bbox_tensors.append(bbox_tensor)
    elif cfg.YOLO.NUM_YOLOLAYERS == 2:  # yolo tiny
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            if i == 0:
                bbox_tensor = decode_train(fm, cfg.TRAIN.INPUT_SIZE // 16,
                                           NUM_CLASS, STRIDES, ANCHORS, i,
                                           XYSCALE)
            else:
                bbox_tensor = decode_train(fm, cfg.TRAIN.INPUT_SIZE // 32,
                                           NUM_CLASS, STRIDES, ANCHORS, i,
                                           XYSCALE)
            bbox_tensors.append(fm)
            bbox_tensors.append(bbox_tensor)
    elif cfg.YOLO.NUM_YOLOLAYERS == 1:  # custom yolo
        bbox_tensors = []
        bbox_tensor = decode_train(
            feature_maps[0],
            cfg.TRAIN.INPUT_SIZE // cfg.YOLO.STRIDES_CUSTOM[0], NUM_CLASS,
            STRIDES, ANCHORS, 0, XYSCALE)
        bbox_tensors.append(feature_maps[0])
        bbox_tensors.append(bbox_tensor)

    model = tf.keras.Model(input_layer, bbox_tensors)
    model.summary()

    try:
        if FLAGS.weights.split(".")[len(FLAGS.weights.split(".")) -
                                    1] == "weights":
            utils.load_weights(model, FLAGS.weights, FLAGS.model,
                               FLAGS.num_detection_layer)
        else:
            model.load_weights(FLAGS.weights)
        print('Restoring weights from: %s ... ' % FLAGS.weights)
    except Exception:
        print("Training from scratch")
        FLAGS.weights = None

    optimizer = tf.keras.optimizers.Adam()

    if os.path.exists(logdir): shutil.rmtree(logdir)
    writer = tf.summary.create_file_writer(logdir)

    # define training step function
    @tf.function
    def train_step(image_data, target):
        with tf.GradientTape() as tape:
            pred_result = model(image_data, training=True)
            ciou_loss = conf_loss = prob_loss = 0

            # optimizing process
            for i in range(len(freeze_layers)):
                conv, pred = pred_result[i * 2], pred_result[i * 2 + 1]
                loss_items = compute_loss(
                    pred,
                    conv,
                    target[i][0],
                    target[i][1],
                    STRIDES=STRIDES,
                    NUM_CLASS=NUM_CLASS,  # label, bbox
                    IOU_LOSS_THRESH=IOU_LOSS_THRESH,
                    i=i)
                ciou_loss += loss_items[0]
                conf_loss += loss_items[1]
                prob_loss += loss_items[2]

            total_loss = ciou_loss + conf_loss + prob_loss

            gradients = tape.gradient(total_loss, model.trainable_variables)

            optimizer.apply_gradients(zip(gradients,
                                          model.trainable_variables))

            tf.print("=> STEP ", global_steps, "/", total_steps, "  lr: ",
                     optimizer.lr, "   ciou_loss: ", ciou_loss,
                     "   conf_loss: ", conf_loss, "    prob_loss: ", prob_loss,
                     "   total_loss: ", total_loss)
            # update learning rate
            global_steps.assign_add(1)
            if global_steps < warmup_steps:
                lr = global_steps / warmup_steps * cfg.TRAIN.LR_INIT
            else:
                lr = cfg.TRAIN.LR_END + 0.5 * (
                    cfg.TRAIN.LR_INIT - cfg.TRAIN.LR_END) * ((1 + tf.cos(
                        (global_steps - warmup_steps) /
                        (total_steps - warmup_steps) * np.pi)))
            optimizer.lr.assign(tf.cast(lr, tf.float32))

            # writing summary data
            with writer.as_default():
                tf.summary.scalar("lr", optimizer.lr, step=global_steps)
                tf.summary.scalar("loss/total_loss",
                                  total_loss,
                                  step=global_steps)
                tf.summary.scalar("loss/ciou_loss",
                                  ciou_loss,
                                  step=global_steps)
                tf.summary.scalar("loss/conf_loss",
                                  conf_loss,
                                  step=global_steps)
                tf.summary.scalar("loss/prob_loss",
                                  prob_loss,
                                  step=global_steps)
            writer.flush()
            return total_loss

    @tf.function
    def test_step(image_data, target):
        with tf.GradientTape() as tape:
            pred_result = model(image_data, training=True)
            ciou_loss = conf_loss = prob_loss = 0

            # optimizing process
            for i in range(len(freeze_layers)):
                conv, pred = pred_result[i * 2], pred_result[i * 2 + 1]
                loss_items = compute_loss(pred,
                                          conv,
                                          target[i][0],
                                          target[i][1],
                                          STRIDES=STRIDES,
                                          NUM_CLASS=NUM_CLASS,
                                          IOU_LOSS_THRESH=IOU_LOSS_THRESH,
                                          i=i)
                # metric_items = compute_map(pred, conv, target[i][0], target[i][1], STRIDES=STRIDES, NUM_CLASS=NUM_CLASS, i=i)
                # loss_items = compute_loss(pred, conv, target[1][0], target[1][1], STRIDES=STRIDES, NUM_CLASS=NUM_CLASS, IOU_LOSS_THRESH=IOU_LOSS_THRESH, i=i)
                ciou_loss += loss_items[0]
                conf_loss += loss_items[1]
                prob_loss += loss_items[2]

            total_loss = ciou_loss + conf_loss + prob_loss

            tf.print("==> TEST STEP: ", global_steps)
            tf.print(" ciou_loss: ", ciou_loss)
            tf.print(" conf_loss: ", conf_loss)
            tf.print(" prob_loss: ", prob_loss)
            tf.print("==> total_loss", total_loss)

            # tf.print("=> TEST STEP %4d   giou_loss: %4.2f   conf_loss: %4.2f   "
            #          "prob_loss: %4.2f   total_loss: %4.2f" % (global_steps, giou_loss, conf_loss,
            #                                                    prob_loss, total_loss))

    it = iter(trainset.get_dataset())
    it_test = iter(testset.get_dataset())
    start = time.time()
    for epoch in range(first_stage_epochs + second_stage_epochs):

        train_loss = 0.
        if epoch < first_stage_epochs:
            if not isfreeze:
                isfreeze = True
                for name in freeze_layers:
                    try:  # try block added so missing layer names are skipped
                        freeze = model.get_layer(name)
                        freeze_all(freeze)
                    except ValueError:
                        pass
        elif epoch >= first_stage_epochs:
            if isfreeze:
                isfreeze = False
                for name in freeze_layers:
                    try:  # try block added so missing layer names are skipped
                        freeze = model.get_layer(name)
                        unfreeze_all(freeze)  # was freeze_all: the second stage should unfreeze
                    except ValueError:
                        pass

        for i in range(trainset.steps_for_train):
            data = it.get_next()

            ### check input train images
            # image_data = data[0]
            # label_data = np.array(data[1])
            #
            # bboxes_list = np.array(label_data[0][1])
            # label_list = np.array(label_data[0][0])
            # for batch_idx, bboxes in enumerate(bboxes_list):
            #     class_inds = []
            #     check = np.array(image_data[batch_idx]).reshape(cfg.TRAIN.INPUT_SIZE, cfg.TRAIN.INPUT_SIZE, 3)
            #     # print(r"D:\tf_data_\preprocessed_{}.npy".format(batch_idx))
            #     # np.save(r"D:\tf_data_\image_{}.npy".format(batch_idx), check)
            #     # np.save(r"D:\tf_data_\preprocessed_{}.npy".format(batch_idx), label_list[batch_idx])
            #     # np.save(r"D:\tf_data_\bboxes_{}.npy".format(batch_idx), bboxes_list[batch_idx])
            #     label = np.array(label_list[batch_idx])
            #
            #     class_ = label[...,5:]
            #     class_ = np.array(class_).flatten()
            #     class_ = np.where(class_ > 0.1, class_, 0)
            #
            #     for class_idx, class_label in enumerate(class_):
            #         # print("class_label:{}".format(class_label))
            #         if class_label > 0:
            #             class_inds.append(class_idx % cfg.YOLO.NUM_CLASSES)
            #
            #     bboxes = np.array(bboxes)
            #     for bbox_idx, bbox in enumerate(bboxes):
            #         half_h = bbox[3] / 2
            #         half_w = bbox[2] / 2
            #
            #         if half_h + half_w > 0:
            #             cv2.rectangle(check, (int(bbox[0] - half_w), int(bbox[1] - half_h)),
            #                           (int(bbox[0] + half_w), int(bbox[1] + half_h)), color=(0, 255, 0), thickness=3)
            #             cv2.putText(check, text="{}".format(class_inds),
            #                         org=(int(bbox[0] + half_w)-10, int(bbox[1] + half_h)-30), thickness=2, color=(0, 255, 0),
            #                         fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.7)
            #
            #     cv2.imshow("check_aug", check)
            #     cv2.waitKey(0)
            #     cv2.destroyAllWindows()
            # cv2.imwrite(os.path.join(r"D:\Public\JHS\SIMPLE_DATA_INPUT_CHECK", "{}.jpg".format(batch_idx)), check*255)

            train_loss = train_step(data[0], data[1])
            # epoch_loss += train_loss

        # for i in range(testset.steps_for_train):
        #     data = it_test.get_next()
        #     test_step(data[0], data[1])

        # CHECK TEST
        # image_data = data[0]
        # label_data = data[1]
        # bboxes_list = label_data[0][1]
        #
        # for batch_idx, bboxes in enumerate(bboxes_list):
        #     check = np.array(image_data[batch_idx]).reshape(640, 640, 3)
        #     for bbox in bboxes:
        #         half_h = bbox[3] / 2
        #         half_w = bbox[2] / 2
        #         # bbox_upper_left = (bbox[0])
        #         if np.sum(bbox) > 0:
        #             cv2.rectangle(check, (bbox[0] - half_w, bbox[1] - half_h),(bbox[0] + half_w,bbox[1]+half_h), color=(0,255,0), thickness=3)
        #     cv2.imshow("check_aug TEST", check)
        #     cv2.waitKey(0)
        #     cv2.destroyAllWindows()

        # for image_data, target in testset:
        #     test_step(image_data, target)

        # for loss graph
        """
        if steps_in_epoch >= steps_per_epoch:
            loss_tracker.append(losses_in_epoch / steps_per_epoch)
            
            ## moved below
            # plt.plot(loss_tracker)
            # plt.xlabel('epoch')
            # plt.ylabel('loss')
            # # plt.show()
            # plt.savefig("D:/checkpoint/loss.png")
        """

        # early stopping
        """
        epoch_loss = epoch_loss / steps_per_epoch
        loss_tracker.append(epoch_loss)
        if prev_minloss > epoch_loss:   # save best weight
            prev_minloss = epoch_loss

            if epoch > 2000:  # minimum training epochs before saving the best weights
                model.save("D:\ckpt_best")
                print("{} epoch save best weights".format(epoch))

        if len(loss_tracker) > patience:
            print("check loss_tracker len:{}".format(len(loss_tracker)))
            if loss_tracker[0] > loss_tracker[-1]:
                loss_tracker.pop(0)
            else:
                print("total loss didn't decreased during {} epochs. train stop".format(patience))
                return
            epoch_loss = 0.
        else:
            epoch_loss += train_loss
        steps_in_epoch = 0
        """

        if (epoch + 1) % 100 == 0:
            model.save("D:\yolov4-tflite-train_tf-epoch{}".format(epoch + 1))
            print("{} epoch model saved".format(epoch + 1))
            print("consumed time: {}".format(time.time() - start))
    model.save(r"D:\yolov4-tflite-train_tf-last")
Code Example #7
def main(_argv):

    strategy = tf.distribute.MirroredStrategy()
    print('Number of devices: {}'.format(strategy.num_replicas_in_sync))

    train_set = Dataset(FLAGS, is_training=True)
    train_set = tf.data.Dataset.from_generator(train_set,
                                               output_types=(tf.float32, tf.float32, tf.float32, tf.float32, tf.float32,
                                                             tf.float32, tf.float32)).prefetch(
        strategy.num_replicas_in_sync)
    train_set = strategy.experimental_distribute_dataset(train_set)

    test_set = Dataset(FLAGS, is_training=False)

    logdir = "./data/log"
    first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
    second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
    global_steps = 1

    with strategy.scope():
        input_layer = tf.keras.layers.Input([cfg.TRAIN.INPUT_SIZE, cfg.TRAIN.INPUT_SIZE, 3])
        STRIDES, ANCHORS, NUM_CLASS, XYSCALE = utils.load_config(FLAGS)
        IOU_LOSS_THRESH = cfg.YOLO.IOU_LOSS_THRESH

        freeze_layers = utils.load_freeze_layer(FLAGS.model, FLAGS.tiny)

        feature_maps = YOLO(input_layer, NUM_CLASS, FLAGS.model, FLAGS.tiny)
        if FLAGS.tiny:
            bbox_tensors = []
            for i, fm in enumerate(feature_maps):
                if i == 0:
                    bbox_tensor = decode_train(fm, cfg.TRAIN.INPUT_SIZE // 16, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE)
                else:
                    bbox_tensor = decode_train(fm, cfg.TRAIN.INPUT_SIZE // 32, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE)
                bbox_tensors.append(fm)
                bbox_tensors.append(bbox_tensor)
        else:
            bbox_tensors = []
            for i, fm in enumerate(feature_maps):
                if i == 0:
                    bbox_tensor = decode_train(fm, cfg.TRAIN.INPUT_SIZE // 8, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE)
                elif i == 1:
                    bbox_tensor = decode_train(fm, cfg.TRAIN.INPUT_SIZE // 16, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE)
                else:
                    bbox_tensor = decode_train(fm, cfg.TRAIN.INPUT_SIZE // 32, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE)
                bbox_tensors.append(fm)
                bbox_tensors.append(bbox_tensor)

        model = tf.keras.Model(input_layer, bbox_tensors)
        model.summary()

        if FLAGS.weights is None:
            print("Training from scratch")
        else:
            if FLAGS.weights.split(".")[len(FLAGS.weights.split(".")) - 1] == "weights":
                utils.load_weights(model, FLAGS.weights, FLAGS.model, FLAGS.tiny)
            else:
                model.load_weights(FLAGS.weights)
            print('Restoring weights from: %s ... ' % FLAGS.weights)

        optimizer = tf.keras.optimizers.Adam()
        if os.path.exists(logdir): shutil.rmtree(logdir)
        writer = tf.summary.create_file_writer(logdir)

        def train_step(image_data, small_target, medium_target, large_target):
            with tf.GradientTape() as tape:
                pred_result = model(image_data, training=True)
                giou_loss = conf_loss = prob_loss = 0

                # optimizing process
                all_targets = small_target, medium_target, large_target
                for i in range(len(freeze_layers)):
                    conv, pred = pred_result[i * 2], pred_result[i * 2 + 1]
                    loss_items = compute_loss(pred, conv, all_targets[i][0], all_targets[i][1],
                                              STRIDES=STRIDES, NUM_CLASS=NUM_CLASS,
                                              IOU_LOSS_THRESH=IOU_LOSS_THRESH, i=i)
                    giou_loss += loss_items[0]
                    conf_loss += loss_items[1]
                    prob_loss += loss_items[2]

                total_loss = giou_loss + conf_loss + prob_loss

                gradients = tape.gradient(total_loss, model.trainable_variables)
                optimizer.apply_gradients(zip(gradients, model.trainable_variables))
                return total_loss

        @tf.function
        def distributed_train_step(image_data, small_target, medium_target, large_target):
            per_replica_losses = strategy.run(train_step, args=(image_data, small_target, medium_target, large_target))
            total_loss = strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_losses,
                                         axis=None)
            return total_loss

        def test_step(image_data, small_target, medium_target, large_target):
            # evaluation only: no GradientTape needed, no gradients are applied
            pred_result = model(image_data, training=True)
            giou_loss = conf_loss = prob_loss = 0

            all_targets = small_target, medium_target, large_target
            for i in range(len(freeze_layers)):
                conv, pred = pred_result[i * 2], pred_result[i * 2 + 1]
                loss_items = compute_loss(pred, conv, all_targets[i][0], all_targets[i][1], STRIDES=STRIDES,
                                          NUM_CLASS=NUM_CLASS, IOU_LOSS_THRESH=IOU_LOSS_THRESH, i=i)
                giou_loss += loss_items[0]
                conf_loss += loss_items[1]
                prob_loss += loss_items[2]

            total_loss = giou_loss + conf_loss + prob_loss

            tf.print("=> TEST STEP %4d   giou_loss: %4.2f   conf_loss: %4.2f   "
                     "prob_loss: %4.2f   total_loss: %4.2f" % (global_steps, giou_loss, conf_loss,
                                                               prob_loss, total_loss))

        for epoch in range(first_stage_epochs + second_stage_epochs):
            for image_data, label_sbbox, sbboxes, label_mbbox, mbboxes, label_lbbox, lbboxes in train_set:
                total_loss = distributed_train_step(image_data,
                                                    (label_sbbox, sbboxes),
                                                    (label_mbbox, mbboxes),
                                                    (label_lbbox, lbboxes))
                global_steps += 1
                tf.print("=> STEP %4d   lr: %.6f   total_loss: %4.2f\t" % (global_steps,
                                                                           optimizer.lr.numpy(),
                                                                           total_loss),
                         str(dt.datetime.now()))

                # writing summary data
                with writer.as_default():
                    tf.summary.scalar("lr", optimizer.lr, step=global_steps)
                    tf.summary.scalar("loss/total_loss", total_loss, step=global_steps)
                writer.flush()

            for image_data, label_sbbox, sbboxes, label_mbbox, mbboxes, label_lbbox, lbboxes in test_set:
                test_step(image_data, (label_sbbox, sbboxes),
                                      (label_mbbox, mbboxes),
                                      (label_lbbox, lbboxes))
            if (epoch + 1) % 10 == 0:
                model.save_weights(f"./checkpoints/v4/yolov4_{epoch+1}")
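The distributed variant's key move is strategy.run plus strategy.reduce to average the per-replica losses. Stripped to its essentials (a toy step function, purely illustrative):

import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()

def step_fn(x):
    return tf.reduce_mean(x)  # stand-in for the real train_step

@tf.function
def distributed_step(x):
    per_replica_losses = strategy.run(step_fn, args=(x,))
    # average the loss across replicas, as in distributed_train_step above
    return strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_losses, axis=None)

print(distributed_step(tf.ones((8, 4))))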