Example #1
import os

import albumentations as A

from deepext_with_lightning.dataset.functions import create_label_list_and_dict
# AlbumentationsDetectionWrapperTransform also comes from deepext_with_lightning;
# its exact import path is not shown in this excerpt.

# Dataset and label paths (read from environment variables)
test_images_dir = os.environ.get("TEST_IMAGES_PATH")
test_annotations_dir = os.environ.get("TEST_ANNOTATIONS_PATH")
label_file_path = os.environ.get("LABEL_FILE_PATH")

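# Checkpoint and progress output paths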
load_checkpoint_path = os.environ.get("LOAD_CHECKPOINT_PATH")
save_checkpoint_dir_path = os.environ.get("SAVE_CHECKPOINT_DIR_PATH")
progress_dir = os.environ.get("PROGRESS_DIR_PATH")
# Model params
width, height = int(os.environ.get("IMAGE_WIDTH")), int(
    os.environ.get("IMAGE_HEIGHT"))
# Learning params
batch_size = int(os.environ.get("BATCH_SIZE"))
lr = float(os.environ.get("LR"))
epoch = int(os.environ.get("EPOCH"))

label_names, class_index_dict = create_label_list_and_dict(label_file_path)
n_classes = len(label_names)

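# Label indices to be ignored (255 is commonly used as an "ignore"/void index)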
ignore_indices = [
    255,
]

# TODO Data augmentation
train_transforms = AlbumentationsDetectionWrapperTransform([
    A.HorizontalFlip(),
    A.RandomResizedCrop(width=width, height=height, scale=(0.8, 1.)),
    A.OneOf([
        A.Blur(blur_limit=5),
        A.RandomBrightnessContrast(),
        A.RandomGamma(),
    ]),
])
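The wrapper above is presumably what keeps the bounding boxes in sync with the spatial augmentations. For reference, the same pipeline written against plain albumentations would have to declare the boxes explicitly through bbox_params; a minimal sketch under that assumption (the box format and the label field name are illustrative, not taken from this example):

import albumentations as A
from albumentations.pytorch import ToTensorV2

# Sketch only: plain-albumentations equivalent with explicit bounding-box handling.
# "pascal_voc" and "labels" are assumed values, not confirmed by the example.
raw_train_transforms = A.Compose([
    A.HorizontalFlip(),
    A.RandomResizedCrop(width=width, height=height, scale=(0.8, 1.)),
    A.OneOf([
        A.Blur(blur_limit=5),
        A.RandomBrightnessContrast(),
        A.RandomGamma(),
    ]),
    ToTensorV2(),
], bbox_params=A.BboxParams(format="pascal_voc", label_fields=["labels"]))
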
Example #2

import os
from pathlib import Path

import albumentations as A
from albumentations.pytorch import ToTensorV2
from dotenv import load_dotenv

from deepext_with_lightning.image_process.convert import try_cuda, normalize255, tensor_to_cv, to_4dim
from deepext_with_lightning.dataset.functions import create_label_list_and_dict
# AlbumentationsClsWrapperTransform, ImageOnlyDataset, AttentionClassificationModel and
# AttentionBranchNetwork also come from deepext_with_lightning; their exact import paths
# are not shown in this excerpt.

load_dotenv("envs/attention_classification.env")

images_dir_path = os.environ.get("IMAGES_DIR_PATH")
result_dir_path = os.environ.get("RESULT_DIR_PATH")
checkpoint_path = os.environ.get("CHECKPOINT_PATH")
label_file_path = os.environ.get("LABEL_FILE_PATH")
width, height = int(os.environ.get("IMAGE_WIDTH")), int(
    os.environ.get("IMAGE_HEIGHT"))

if not Path(result_dir_path).exists():
    Path(result_dir_path).mkdir()

label_names, label_dict = create_label_list_and_dict(label_file_path)
n_classes = len(label_names)

transforms = AlbumentationsClsWrapperTransform(
    A.Compose([
        A.Resize(width=width, height=height),
        ToTensorV2(),
    ]))

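# Build a dataset that yields transformed images only (no annotations); suitable for inference.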
dataset = ImageOnlyDataset(image_dir=images_dir_path,
                           image_transform=transforms)

print("Loading model...")
model: AttentionClassificationModel = try_cuda(
    AttentionBranchNetwork(
        n_classes=n_classes,
Example #3
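This example picks up after command-line parsing, so the parser itself is not shown. A minimal definition consistent with the attributes read below could look as follows; the flag names follow argparse's default dest mapping and, like the defaults and help strings, are assumptions rather than the script's actual definitions:

import argparse

# Hypothetical parser; flag names are inferred from args.model, args.load_checkpoint_path,
# args.image_size and args.label_names_path used in the excerpt below.
parser = argparse.ArgumentParser(description="Realtime prediction from a camera feed")
parser.add_argument("--model", type=str, required=True,
                    help="Model name resolved via model_service.resolve_model_class_from_name")
parser.add_argument("--load_checkpoint_path", type=str, required=True,
                    help="Checkpoint file to restore the model weights from")
parser.add_argument("--image_size", type=int, default=512,
                    help="Square input size fed to the model")
parser.add_argument("--label_names_path", type=str, default="labels.txt",
                    help="Label file passed to create_label_list_and_dict")
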
    args = parser.parse_args()

    # Fetch model and load weight.
    model_class = model_service.resolve_model_class_from_name(args.model)

    model: BaseDeepextModel = try_cuda(
        model_class.load_from_checkpoint(args.load_checkpoint_path))

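    # Dispatch to the realtime runner that matches the type of the loaded model.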
    if isinstance(model, SegmentationModel):
        RealtimeSegmentation(
            model=model,
            img_size_for_model=(args.image_size,
                                args.image_size)).realtime_predict(
                                    video_output_path="output.mp4")
    elif isinstance(model, DetectionModel):
        label_names, label_dict = create_label_list_and_dict(
            args.label_names_path)
        RealtimeDetection(model=model,
                          img_size_for_model=(args.image_size,
                                              args.image_size),
                          label_names=label_names).realtime_predict(
                              video_output_path="output.mp4")
    elif isinstance(model, AttentionClassificationModel):
        label_names, label_dict = create_label_list_and_dict(
            args.label_names_path)
        RealtimeAttentionClassification(
            model=model,
            img_size_for_model=(args.image_size, args.image_size),
            label_names=label_names).realtime_predict(
                video_output_path="output.mp4")
    elif isinstance(model, ClassificationModel):
        label_names, label_dict = create_label_list_and_dict(