コード例 #1
0
def _test_attention_classification(model: AttentionClassificationModel, batch_size: int, n_classes=11,
                                   image_size=(96, 96)):
    """Smoke-test an attention classification model's output shapes.

    Runs a random batch through the model and checks that predicted labels,
    class probabilities, and the attention map have the expected shapes.
    """
    model = try_cuda(model)
    input_batch = try_cuda(gen_random_tensor(batch_size, image_size))

    labels, probs, attention_map = model.predict_label_and_heatmap(input_batch)

    # One label index per sample; one probability per class per sample.
    assert_tensor_shape(labels, (batch_size,), "output label shape")
    assert_tensor_shape(probs, (batch_size, n_classes), "output prob shape")
    # Attention map is expected to be rank 3: (batch size, height, width).
    assert len(attention_map.shape) == 3
コード例 #2
0
def _test_segmentation(model: SegmentationModel,
                       batch_size: int,
                       n_classes=11,
                       image_size=(256, 256)):
    """Smoke-test a segmentation model's output shapes.

    Runs a random batch through the model and checks the per-pixel label
    image and probability tensor shapes.
    """
    model = try_cuda(model)
    input_batch = try_cuda(gen_random_tensor(batch_size, image_size))

    labels, probs = model.predict_index_image(input_batch)

    # Per-pixel class indices: (batch, H, W).
    assert_tensor_shape(labels, (batch_size,) + image_size,
                        "output label shape")
    # Per-pixel class probabilities: (batch, n_classes, H, W).
    assert_tensor_shape(probs, (batch_size, n_classes) + image_size,
                        "output prob shape")
コード例 #3
0
def test_efficientdet():
    """Smoke-test EfficientDetector bbox prediction.

    Each detection row must carry 6 values: (xmin, ymin, xmax, ymax,
    class, score). Uses module-level `n_classes` and `image_size`.
    """
    model: EfficientDetector = try_cuda(
        EfficientDetector(n_classes=n_classes, score_threshold=1e-2))
    result = model.predict_bboxes(gen_random_tensor(1, image_size))

    first_image_bboxes = result[0]
    assert first_image_bboxes.ndim == 2 and first_image_bboxes.shape[-1] == 6, \
        "Detection model result must contain (xmin, ymin, xmax, ymax, class, score)"
コード例 #4
0
def _test_classification(model: ClassificationModel, batch_size: int, n_classes=11, image_size=(96, 96)):
    """Smoke-test a classification model's output shapes.

    Runs a random batch through the model and checks that predicted labels
    and class probabilities have the expected shapes.
    """
    # Move the model to GPU when available, matching the sibling helpers
    # (_test_attention_classification / _test_segmentation). Without this the
    # model may stay on CPU while the test tensor below is moved to CUDA,
    # producing a device-mismatch error instead of a real test result.
    model = try_cuda(model)
    expected_prob_tensor_shape = (batch_size, n_classes)
    expected_label_tensor_shape = (batch_size,)

    test_tensor = try_cuda(gen_random_tensor(batch_size, image_size))

    labels, probs = model.predict_label(test_tensor)
    assert_tensor_shape(labels, expected_label_tensor_shape, "output label shape")
    assert_tensor_shape(probs, expected_prob_tensor_shape, "output prob shape")
コード例 #5
0
def get_logger(log_prefix: str, args, model: BaseDeepextModel):
    """Build a training logger selected by ``args.log_type``.

    :param log_prefix: Prefix used in the experiment/run name (e.g. "segmentation_demo").
    :param args: Parsed CLI args; reads ``log_type``, ``dataset``, ``image_size``.
    :param model: Model whose name is embedded in the experiment name; also
        used to log the computation graph for TensorBoard.
    :return: An MLFlowLogger or TensorBoardLogger.
    :raises RuntimeError: If ``args.log_type`` is neither "mlflow" nor "tensorboard".
    """
    # Dummy input used only to trace the model graph for TensorBoard.
    test_tensor = try_cuda(torch.randn(1, 3, args.image_size, args.image_size))
    if args.log_type == "mlflow":
        logger = MLFlowLogger(
            experiment_name=
            f"{log_prefix}_{args.dataset}_{model.generate_model_name()}")
        # Log the model
        # with mlflow.start_run():
        #     mlflow.pytorch.log_model(model, "model")
        #
        #     # convert to scripted model and log the model
        #     scripted_pytorch_model = torch.jit.script(model)
        #     mlflow.pytorch.log_model(scripted_pytorch_model, "scripted_model")
        return logger
    elif args.log_type == "tensorboard":
        logger = TensorBoardLogger(
            save_dir="tensorboard_logs",
            version="v",
            # BUG FIX: the name was hard-coded to "segmentation_demo_...",
            # ignoring log_prefix — detection runs were logged under a
            # segmentation name. Use log_prefix like the MLflow branch does.
            name=
            f"{log_prefix}_{args.dataset}_{model.generate_model_name()}")
        logger.experiment.add_graph(model, test_tensor)
        return logger
    raise RuntimeError(f"Invalid log type: {args.log_type}")
コード例 #6
0
    # Create the output directory (the guard condition is above this view).
    Path(result_dir_path).mkdir()

# Build the label-name list and name->index mapping from the label file.
label_names, label_dict = create_label_list_and_dict(label_file_path)
n_classes = len(label_names)

# Resize each image to the model input size and convert it to a tensor.
transforms = AlbumentationsClsWrapperTransform(
    A.Compose([
        A.Resize(width=width, height=height),
        ToTensorV2(),
    ]))

dataset = ImageOnlyDataset(image_dir=images_dir_path,
                           image_transform=transforms)

print("Loading model...")
# NOTE(review): the backbone is hard-coded to ResNet-18 here — confirm this
# matches the architecture the checkpoint was trained with.
model: AttentionClassificationModel = try_cuda(
    AttentionBranchNetwork(
        n_classes=n_classes,
        backbone=BackBoneKey.RESNET_18).load_from_checkpoint(checkpoint_path))
print("Model loaded")

# For each image: predict label + attention heatmap, blend the heatmap onto
# the original image, resize back, and save named by the predicted label.
for i, img_tensor in enumerate(tqdm.tqdm(dataset)):
    origin_image = normalize255(tensor_to_cv(img_tensor))
    label, prob, attention_map = model.predict_label_and_heatmap(
        to_4dim(img_tensor))
    attention_map = normalize255(tensor_to_cv(attention_map[0]))
    blend_img = model.generate_heatmap_image(origin_image, attention_map)
    result_image = cv2.resize(blend_img, dataset.current_image_size())
    cv2.imwrite(f"{result_dir_path}/{label_names[label[0]]}_{i}.jpg",
                result_image)
コード例 #7
0
    # Split one root dataset into train/test by ratio (the matching branch
    # condition is above this view).
    train_dataset, test_dataset = DatasetSplitter().split_train_test(test_ratio,
                                                                     root_dataset,
                                                                     train_transforms=train_transforms,
                                                                     test_transforms=test_transforms)
else:
    # Separate train/test image + index-image directories were supplied.
    train_dataset = IndexImageDataset.create(image_dir_path=train_images_dir,
                                             index_image_dir_path=train_annotations_dir,
                                             transforms=train_transforms)
    test_dataset = IndexImageDataset.create(image_dir_path=test_images_dir, index_image_dir_path=test_annotations_dir,
                                            transforms=test_transforms)

train_data_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_data_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True)

# TODO Model detail params
model: SegmentationModel = try_cuda(
    ShelfNet(n_classes=n_classes, out_size=(height, width)))

# Resume from a checkpoint when a path is configured.
if load_checkpoint_path and load_checkpoint_path != "":
    model = model.load_from_checkpoint(load_checkpoint_path)

# TODO Train detail params
# Metrics/Callbacks
val_every_n_epoch = 5
# Checkpoint on best validation IoU, and dump sample segmentation images
# every 5 epochs to track progress visually.
callbacks = [ModelCheckpoint(period=val_every_n_epoch, filename=f"{model.generate_model_name()}",
                             dirpath=save_checkpoint_dir_path, monitor='val_iou', verbose=True, mode="max"),
             GenerateSegmentationImageCallback(output_dir=progress_dir, per_epoch=5, model=model,
                                               dataset=test_dataset)]
logger = MLFlowLogger(experiment_name=f"segmentation_{model.generate_model_name()}")

# Training.
Trainer(max_epochs=epoch, callbacks=callbacks, gpus=-1,
コード例 #8
0
                                     annotation_dir_path=test_annotations_dir,
                                     class_index_dict=class_index_dict)

# Detection batches have variable box counts per image; the collator adjusts
# the tensors so they can be stacked into a batch.
train_data_loader = DataLoader(train_dataset,
                               batch_size=batch_size,
                               shuffle=True,
                               collate_fn=AdjustDetectionTensorCollator())
test_data_loader = DataLoader(test_dataset,
                              batch_size=batch_size,
                              shuffle=True,
                              collate_fn=AdjustDetectionTensorCollator())

# TODO Model detail params
model: DetectionModel = try_cuda(
    EfficientDetector(n_classes=n_classes,
                      lr=lr,
                      network=f"efficientdet-d0",
                      score_threshold=0.5))

# Resume from a checkpoint when a path is configured.
if load_checkpoint_path and load_checkpoint_path != "":
    model = model.load_from_checkpoint(load_checkpoint_path)

# TODO Train detail params
# Callbacks
val_every_n_epoch = 5
# Checkpoint on best validation mAP (list continues past this view).
callbacks = [
    ModelCheckpoint(period=val_every_n_epoch,
                    filename=f"{model.generate_model_name()}",
                    dirpath=save_checkpoint_dir_path,
                    monitor='val_map',
                    verbose=True,
コード例 #9
0
# Read runtime configuration from environment variables.
result_file_path = os.environ.get("RESULT_FILE_PATH")
checkpoint_path = os.environ.get("CHECKPOINT_PATH")
label_file_path = os.environ.get("LABEL_FILE_PATH")
# NOTE(review): int(None) raises TypeError if IMAGE_WIDTH/IMAGE_HEIGHT are
# unset — presumably these env vars are required; confirm.
width, height = int(os.environ.get("IMAGE_WIDTH")), int(
    os.environ.get("IMAGE_HEIGHT"))

# Build the label-name list and name->index mapping from the label file.
label_names, label_dict = create_label_list_and_dict(label_file_path)
n_classes = len(label_names)

# Resize each image to the model input size and convert it to a tensor.
transforms = AlbumentationsClsWrapperTransform(
    A.Compose([
        A.Resize(width=width, height=height),
        ToTensorV2(),
    ]))

dataset = ImageOnlyDataset(image_dir=images_dir_path,
                           image_transform=transforms)

# TODO Choose model, parameters.
print("Loading model...")
model_class = model_service.resolve_classification_model(model_name)
model: ClassificationModel = try_cuda(
    model_class.load_from_checkpoint(checkpoint_path))
print("Model loaded")

# Write one CSV row (filepath, predicted label) per image.
with open(result_file_path, "w") as file:
    file.write(f"filepath,result label\n")
    for i, img_tensor in enumerate(tqdm.tqdm(dataset)):
        label, prob = model.predict_label(to_4dim(img_tensor))
        file.write(f"{dataset.current_file_path()},{label_names[label[0]]}\n")
コード例 #10
0
    args = parser.parse_args()

    # Look up dataset metadata (label names, class count) by CLI dataset name.
    dataset_info = SEGMENTATION_DATASET_INFO.get(args.dataset)
    if dataset_info is None:
        raise ValueError(f"Invalid dataset name - {args.dataset}.  Required [{list(SEGMENTATION_DATASET_INFO.keys())}]")

    label_names = dataset_info["label_names"]
    class_index_dict = label_names_to_dict(label_names)

    # Fetch dataset.
    # "+ 1" reserves an extra class index for the background.
    train_transforms, test_transforms = build_transforms(args, dataset_info["n_classes"] + 1)
    train_dataset, test_dataset = build_dataset(args, train_transforms, test_transforms)
    train_data_loader, test_data_loader = build_data_loader(args, train_dataset, test_dataset)

    # Fetch model and load weight.
    model = try_cuda(build_model(args, dataset_info["n_classes"] + 1))  # include background class
    if args.load_checkpoint_path:
        model = model.load_from_checkpoint(args.load_checkpoint_path)

    # Training setting.
    logger = get_logger("segmentation_demo", args, model)
    # Checkpoint on best validation IoU.
    callbacks = [ModelCheckpoint(period=args.val_every_n_epoch, filename=f"{model.generate_model_name()}",
                                 dirpath=args.save_checkpoint_path, monitor='val_iou', verbose=True, mode="max")]
    if args.progress_dir:
        # Optionally dump sample segmentation images every 2 epochs.
        callbacks.append(GenerateSegmentationImageCallback(output_dir=args.progress_dir, per_epoch=2, model=model,
                                                           dataset=test_dataset))
    # Training.
    Trainer(max_epochs=args.epoch, callbacks=callbacks, gpus=-1,
            check_val_every_n_epoch=args.val_every_n_epoch, logger=logger) \
        .fit(model, train_dataloader=train_data_loader, val_dataloaders=test_data_loader)
コード例 #11
0
# Read runtime configuration from environment variables.
checkpoint_path = os.environ.get("CHECKPOINT_PATH")
label_file_path = os.environ.get("LABEL_FILE_PATH")
# NOTE(review): int(None) raises TypeError if IMAGE_WIDTH/IMAGE_HEIGHT are
# unset — presumably these env vars are required; confirm.
width, height = int(os.environ.get("IMAGE_WIDTH")), int(os.environ.get("IMAGE_HEIGHT"))

# Create the output directory on first run.
if not Path(result_dir_path).exists():
    Path(result_dir_path).mkdir()

# Build the label-name list and name->index mapping from the label file.
label_names, label_dict = create_label_list_and_dict(label_file_path)
n_classes = len(label_names)

# Resize each image to the model input size and convert it to a tensor.
transforms = AlbumentationsClsWrapperTransform(A.Compose([
    A.Resize(width=width, height=height),
    ToTensorV2(),
]))

dataset = ImageOnlyDataset(image_dir=images_dir_path, image_transform=transforms)

print("Loading model...")
model_class = model_service.resolve_detection_model(model_name)
model: DetectionModel = try_cuda(model_class.load_from_checkpoint(checkpoint_path))
print("Model loaded")

# For each image: predict boxes, draw them onto the original image,
# resize back to the source size, and save the result.
for i, img_tensor in enumerate(tqdm.tqdm(dataset)):
    origin_image = normalize255(tensor_to_cv(img_tensor))
    result_bboxes = model.predict_bboxes(to_4dim(img_tensor))[0]
    result_img = model.generate_bbox_draw_image(origin_image, bboxes=result_bboxes,
                                                model_img_size=(width, height),
                                                label_names=label_names)
    result_img = cv2.resize(result_img, dataset.current_image_size())
    cv2.imwrite(f"{result_dir_path}/result_{i}.jpg", result_img)
コード例 #12
0
        )

    label_names = dataset_info["label_names"]
    class_index_dict = label_names_to_dict(label_names)

    # Fetch dataset.
    train_transforms, test_transforms = build_transforms(
        args, class_index_dict)
    train_dataset, test_dataset = build_dataset(args, train_transforms,
                                                test_transforms)
    # Detection batches need a collator to handle variable box counts.
    train_data_loader, test_data_loader = build_data_loader(
        args, train_dataset, test_dataset, AdjustDetectionTensorCollator(),
        AdjustDetectionTensorCollator())

    # Fetch model and load weight.
    model = try_cuda(build_model(args, dataset_info["n_classes"]))
    if args.load_checkpoint_path:
        model = model.load_from_checkpoint(args.load_checkpoint_path)

    # Training setting.
    logger = get_logger("detection_demo", args, model)
    # Checkpoint on best validation mAP.
    callbacks = [
        ModelCheckpoint(period=args.val_every_n_epoch,
                        filename=f"{model.generate_model_name()}",
                        dirpath=args.save_checkpoint_path,
                        monitor='val_map',
                        verbose=True,
                        mode="max")
    ]
    if args.progress_dir:
        # (callback registration continues past this view)
        callbacks.append(
コード例 #13
0
    RealtimeDetection

# Load runtime configuration from the env file.
load_dotenv("envs/camera_prediction.env")

model_name = os.environ.get("MODEL_NAME")
checkpoint_path = os.environ.get("CHECKPOINT_PATH")
label_file_path = os.environ.get("LABEL_FILE_PATH")
# NOTE(review): int(None) raises TypeError if IMAGE_WIDTH/IMAGE_HEIGHT are
# unset — presumably these env vars are required; confirm.
width, height = int(os.environ.get("IMAGE_WIDTH")), int(
    os.environ.get("IMAGE_HEIGHT"))

# Build the label-name list and name->index mapping from the label file.
label_names, label_dict = create_label_list_and_dict(label_file_path)
n_classes = len(label_names)

print("Loading model...")
model_class = model_service.resolve_model_class_from_name(model_name)
model: BaseDeepextModel = try_cuda(
    model_class.load_from_checkpoint(checkpoint_path))
print("Model loaded")

# Dispatch to the realtime runner matching the loaded model's task type;
# each runner records its output video to "output.mp4".
if isinstance(model, SegmentationModel):
    RealtimeSegmentation(model=model,
                         img_size_for_model=(width, height)).realtime_predict(
                             video_output_path="output.mp4")
elif isinstance(model, DetectionModel):
    RealtimeDetection(model=model,
                      img_size_for_model=(width, height),
                      label_names=label_names).realtime_predict(
                          video_output_path="output.mp4")
elif isinstance(model, AttentionClassificationModel):
    # (call continues past this view)
    RealtimeAttentionClassification(model=model,
                                    img_size_for_model=(width, height),
                                    label_names=label_names).realtime_predict(
コード例 #14
0
# Read runtime configuration from environment variables.
# NOTE(review): int(None) raises TypeError if these env vars are unset —
# presumably they are required; confirm.
width, height = int(os.environ.get("IMAGE_WIDTH")), int(
    os.environ.get("IMAGE_HEIGHT"))
n_classes = int(os.environ.get("N_CLASSES"))

# Create the output directory on first run.
if not Path(result_dir_path).exists():
    Path(result_dir_path).mkdir()

# Resize each image to the model input size and convert it to a tensor.
transforms = AlbumentationsClsWrapperTransform(
    A.Compose([
        A.Resize(width=width, height=height),
        ToTensorV2(),
    ]))

dataset = ImageOnlyDataset(image_dir=images_dir_path,
                           image_transform=transforms)

# TODO Choose model, parameters.
print("Loading model...")
model_class = model_service.resolve_segmentation_model(model_name)
model: SegmentationModel = try_cuda(
    model_class.load_from_checkpoint(checkpoint_path))
print("Model loaded")

# For each image: predict the per-pixel class index image, blend it with
# the original image, resize back to the source size, and save the result.
for i, img_tensor in enumerate(tqdm.tqdm(dataset)):
    origin_image = normalize255(tensor_to_cv(img_tensor))
    pred_label, prob = model.predict_index_image(to_4dim(img_tensor))
    index_image = tensor_to_cv(pred_label[0])
    result_img = model.generate_mixed_segment_image(origin_image, index_image)
    result_img = cv2.resize(result_img, dataset.current_image_size())
    cv2.imwrite(f"{result_dir_path}/result_{i}.jpg", result_img)