Example #1
if __name__ == '__main__':
    print("Train start\n")
    # GPU settings
    gpus = tf.config.list_physical_devices("GPU")
    if gpus:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
    print("Generate the Data Set\n")
    dataset = TFDataset()
    train_data, train_count = dataset.generate_datatset()

    ssd = SSD()
    print_model_summary(network=ssd)

    if load_weights_before_training:
        ssd.load_weights(filepath=save_model_dir+"epoch-{}".format(load_weights_from_epoch))
        print("Successfully load weights!")
    else:
        load_weights_from_epoch = -1

    # loss
    loss = SSDLoss()

    # optimizer
    lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(initial_learning_rate=1e-3,
                                                                 decay_steps=20000,
                                                                 decay_rate=0.96)
    optimizer = tf.optimizers.Adam(learning_rate=lr_schedule)

    # metrics
    loss_metric = tf.metrics.Mean()
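
A note on the learning-rate schedule above: with the default staircase=False, tf.keras.optimizers.schedules.ExponentialDecay decays the learning rate smoothly as the optimizer step grows. A minimal sketch of the equivalent arithmetic (the step value below is only illustrative):

# lr(step) = initial_learning_rate * decay_rate ** (step / decay_steps)
step = 40000
lr = 1e-3 * 0.96 ** (step / 20000)   # 1e-3 * 0.96**2 = 9.216e-4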
Example #2
def main():
    dataset = TFDataset()
    train_data, train_count = dataset.generate_datatset()

    model = SSD()
    print_model_summary(model)

    if load_weights_from_epoch >= 0:
        model.load_weights(filepath=save_model_dir +
                           "epoch-{}".format(load_weights_from_epoch))
        print("成功从epoch-{}加载模型权重!".format(load_weights_from_epoch))

    loss_fn = MultiBoxLoss(num_classes=NUM_CLASSES,
                           overlap_thresh=0.5,
                           neg_pos=3)

    # optimizer
    lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate=1e-3, decay_steps=20000, decay_rate=0.96)
    optimizer = tf.optimizers.Adam(learning_rate=lr_schedule)

    loss_metric = tf.metrics.Mean()
    cls_loss_metric = tf.metrics.Mean()
    reg_loss_metric = tf.metrics.Mean()

    for epoch in range(load_weights_from_epoch + 1, EPOCHS):
        start_time = time.time()
        for step, batch_data in enumerate(train_data):
            images, labels = ReadDataset().read(batch_data)

            with tf.GradientTape() as tape:
                predictions = model(images, training=True)
                loss_l, loss_c = loss_fn(y_true=labels, y_pred=predictions)
                total_loss = loss_l + loss_c
            gradients = tape.gradient(total_loss, model.trainable_variables)
            optimizer.apply_gradients(
                grads_and_vars=zip(gradients, model.trainable_variables))
            loss_metric.update_state(values=total_loss)
            cls_loss_metric.update_state(values=loss_c)
            reg_loss_metric.update_state(values=loss_l)

            time_per_step = (time.time() - start_time) / (step + 1)
            print(
                "Epoch: {}/{}, step: {}/{}, speed: {:.2f}s/step, loss: {:.10f}, "
                "cls loss: {:.10f}, reg loss: {:.10f}".format(
                    epoch, EPOCHS, step,
                    tf.math.ceil(train_count / BATCH_SIZE), time_per_step,
                    loss_metric.result(), cls_loss_metric.result(),
                    reg_loss_metric.result()))
        loss_metric.reset_states()
        cls_loss_metric.reset_states()
        reg_loss_metric.reset_states()

        if epoch % save_frequency == 0:
            model.save_weights(filepath=save_model_dir +
                               "epoch-{}".format(epoch),
                               save_format="tf")

        if test_images_during_training:
            visualize_training_results(pictures=test_images_dir_list,
                                       model=model,
                                       epoch=epoch)

    model.save_weights(filepath=save_model_dir + "epoch-{}".format(EPOCHS),
                       save_format="tf")
Example #3
    if is_object_exist:
        image_with_boxes = draw_boxes_on_image(cv2.imread(picture_dir), boxes, scores, classes)
    else:
        print("No objects were detected.")
        image_with_boxes = cv2.imread(picture_dir)
    return image_with_boxes


if __name__ == '__main__':
    # GPU settings
    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:
        # Restrict TensorFlow to use only a single GPU (GPU 4 here)
        try:
            print('start with GPU 4')
            tf.config.experimental.set_visible_devices(gpus[4], 'GPU')
            tf.config.experimental.set_memory_growth(gpus[4], True)
        except RuntimeError as e:
            # Visible devices must be set at program startup
            print(e)

    ssd_model = SSD()
    print_model_summary(network=ssd_model, shape=(None, 224, 224, 3))
    ssd_model.load_weights(filepath=save_model_dir+"mobilenet_v2_new-epoch-49.h5")

    image = test_single_picture(picture_dir=test_picture_dir, model=ssd_model)

    cv2.namedWindow("detect result", flags=cv2.WINDOW_NORMAL)
    cv2.imshow("detect result", image)
    cv2.waitKey(0)
Example #4
        # Restrict TensorFlow to use only a single GPU (GPU 4 here)
        try:
            print('start with GPU 4')
            tf.config.experimental.set_visible_devices(gpus[4], 'GPU')
            tf.config.experimental.set_memory_growth(gpus[4], True)
        except RuntimeError as e:
            # Visible devices must be set at program startup
            print(e)

    ssd = SSD()
    dataset = TFDataset()
    train_data, train_count = dataset.generate_datatset()
    print_model_summary(network=ssd, shape=(None, 224, 224, 3))

    if load_weights_before_training:
        ssd.load_weights(filepath=save_model_dir +
                         "mobilenet_v2_new-epoch-{}.h5".format(49))
        print("Successfully load weights!")
    else:
        load_weights_from_epoch = -1

    # loss
    loss = SSDLoss()

    # optimizer
    lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate=1e-3, decay_steps=20000, decay_rate=0.96)
    optimizer = tf.optimizers.Adam(learning_rate=lr_schedule)

    # metrics
    loss_metric = tf.metrics.Mean()
    cls_loss_metric = tf.metrics.Mean()
Example #5

def test_single_picture(picture_dir, model):
    image_tensor = preprocess_image(picture_dir)
    image_tensor = tf.expand_dims(image_tensor, axis=0)
    procedure = InferenceProcedure(model=model)
    is_object_exist, boxes, scores, classes = procedure.get_final_boxes(image=image_tensor)
    if is_object_exist:
        image_with_boxes = draw_boxes_on_image(cv2.imread(picture_dir), boxes, scores, classes)
    else:
        print("No objects were detected.")
        image_with_boxes = cv2.imread(picture_dir)
    return image_with_boxes


if __name__ == '__main__':
    # GPU settings
    gpus = tf.config.list_physical_devices("GPU")
    if gpus:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)

    ssd_model = SSD()
    ssd_model.load_weights(filepath=save_model_dir+"saved_model")

    image = test_single_picture(picture_dir=test_picture_dir, model=ssd_model)

    cv2.namedWindow("detect result", flags=cv2.WINDOW_NORMAL)
    cv2.imshow("detect result", image)
    cv2.waitKey(0)
Example #6
            # Visible devices must be set at program startup
            print(e)

    predicted_dir_path = '../mAP/predicted'
    ground_truth_dir_path = '../mAP/ground-truth'
    if os.path.exists(predicted_dir_path): shutil.rmtree(predicted_dir_path)
    if os.path.exists(ground_truth_dir_path): shutil.rmtree(ground_truth_dir_path)
    if os.path.exists("./dataset/detection/"): shutil.rmtree("./dataset/detection/")

    os.mkdir(predicted_dir_path)
    os.mkdir(ground_truth_dir_path)
    os.mkdir("./dataset/detection/")

    ssd_model = SSD()
    print_model_summary(network=ssd_model, shape=(None, 224, 224, 3))
    ssd_model.load_weights(filepath=save_model_dir + "vgg16-epoch-49.h5")
    dataset = TFDataset()
    test_data, train_count = dataset.generate_datatset()


    # image = test_single_picture(picture_dir=test_picture_dir, model=ssd_model)

    def test_step(batch_images, batch_labels, name):
        print('=> ground truth of %s:' % name[0])
        ground_truth_path = os.path.join(ground_truth_dir_path, str(name[0]) + '.txt')
        with open(ground_truth_path, 'w') as f:
            i = 0
            while batch_labels[0][i][4] != -1:
                class_name = str(find_class_name(batch_labels[0][i][4]))
                xmin, ymin, xmax, ymax = list(map(str, batch_labels[0][i][:4]))
                bbox_mess = ' '.join([class_name, xmin, ymin, xmax, ymax]) + '\n'
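                # Assumed continuation (the excerpt is cut off above): write the
                # ground-truth line and advance to the next label; otherwise the
                # while loop would never terminate and the file would stay empty.
                f.write(bbox_mess)
                i += 1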
Example #7
    boxes = resize_box(boxes, h, w)
    classes = tf.cast(results[:, -1], dtype=tf.int32).numpy()
    # print("检测结果:boxes: {}, classes: {}, scores: {}".format(boxes, classes, scores))

    image_with_boxes = draw_boxes_on_image(image_array, boxes, scores, classes)

    return image_with_boxes


if __name__ == '__main__':
    gpus = tf.config.list_physical_devices('GPU')
    if gpus:
        try:
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
            logical_gpus = tf.config.list_logical_devices('GPU')
            print(len(gpus), "Physical GPUs,", len(logical_gpus),
                  "Logical GPUs")
        except RuntimeError as e:
            print(e)

    ssd_model = SSD()
    # Always load the most recent weights file
    last_epoch = os.listdir(save_model_dir)[-2].split(".")[0]
    ssd_model.load_weights(filepath=save_model_dir + last_epoch)
    image = test_single_picture(picture_dir=test_picture_dir, model=ssd_model)

    cv2.namedWindow("detect result", flags=cv2.WINDOW_NORMAL)
    cv2.imshow("detect result", image)
    cv2.waitKey(0)
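
Picking the checkpoint by indexing os.listdir as above depends on the directory listing order, which is not guaranteed. If the weights were written in the TensorFlow checkpoint format (as with save_format="tf" in Example #2), a more robust alternative is tf.train.latest_checkpoint; a minimal sketch, reusing the same save_model_dir:

import tensorflow as tf

latest = tf.train.latest_checkpoint(save_model_dir)  # resolves the newest checkpoint prefix in the directory
if latest is not None:
    ssd_model.load_weights(latest)
else:
    print("No checkpoint found in {}".format(save_model_dir))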
Example #8
def test_single_picture(picture_dir, model):
    image_tensor = preprocess_image(picture_dir)
    image_tensor = tf.expand_dims(image_tensor, axis=0)
    procedure = InferenceProcedure(model=model)
    is_object_exist, boxes, scores, classes = procedure.get_final_boxes(
        image=image_tensor)
    if is_object_exist:
        image_with_boxes = draw_boxes_on_image(cv2.imread(picture_dir), boxes,
                                               scores, classes)
    else:
        print("No objects were detected.")
        image_with_boxes = cv2.imread(picture_dir)
    return image_with_boxes


if __name__ == '__main__':
    # GPU settings
    gpus = tf.config.experimental.list_physical_devices("GPU")
    if gpus:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)

    ssd_model = SSD()
    ssd_model.load_weights(filepath=save_model_dir + 'epoch-0')

    image = test_single_picture(picture_dir=test_picture_path, model=ssd_model)
    cv2.imwrite('dataset/result.png', image)
    # cv2.namedWindow("detect result", flags=cv2.WINDOW_NORMAL)
    # cv2.imshow("detect result", image)
    # cv2.waitKey(0)