Code example #1
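# Trains a non-pretrained ObjectDetector for one fast_dev_run step on a dummy dataset, then checks
# that predictions contain bounding boxes and that an unreachable detection_threshold removes them all.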
def test_predict(tmpdir, head):
    model = ObjectDetector(num_classes=2, head=head, pretrained=False)
    ds = DummyDetectionDataset((128, 128, 3), 1, 2, 10)

    input_transform = IceVisionInputTransform()

    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
    dl = model.process_train_dataset(
        ds,
        2,
        num_workers=0,
        pin_memory=False,
        input_transform=input_transform,
    )
    trainer.fit(model, dl)

    dl = model.process_predict_dataset(
        ds,
        2,
        input_transform=input_transform,
    )
    predictions = trainer.predict(model, dl, output="preds")
    assert len(predictions[0][0]["bboxes"]) > 0
    model.predict_kwargs = {"detection_threshold": 2}
    predictions = trainer.predict(model, dl, output="preds")
    assert len(predictions[0][0]["bboxes"]) == 0
Code example #2
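# Runs a forward pass of an untrained ObjectDetector on a dummy batch and checks that each
# per-image result dict exposes "boxes", "labels" and "scores".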
def test_init():
    model = ObjectDetector(num_classes=2)
    model.eval()

    batch_size = 2
    ds = DummyDetectionDataset((3, 224, 224), 1, 2, 10)
    dl = DataLoader(ds, collate_fn=collate_fn, batch_size=batch_size)
    data = next(iter(dl))
    img = data[DefaultDataKeys.INPUT]

    out = model(img)

    assert len(out) == batch_size
    assert {"boxes", "labels", "scores"} <= out[0].keys()
Code example #3
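# Smoke-tests training: fits a non-pretrained ObjectDetector for one fast_dev_run step on a dummy dataset.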
def test_training(tmpdir, model):
    model = ObjectDetector(num_classes=2,
                           model=model,
                           pretrained=False,
                           pretrained_backbone=False)
    ds = DummyDetectionDataset((3, 224, 224), 1, 2, 10)
    dl = DataLoader(ds, collate_fn=collate_fn)
    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
    trainer.fit(model, dl)
Code example #4
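# Scripts the model with TorchScript, round-trips it through save/load, and checks the detection keys on a random input.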
def test_jit(tmpdir):
    path = os.path.join(tmpdir, "test.pt")

    model = ObjectDetector(2)
    model.eval()

    model = torch.jit.script(
        model)  # torch.jit.trace doesn't work with torchvision RCNN

    torch.jit.save(model, path)
    model = torch.jit.load(path)

    out = model([torch.rand(3, 32, 32)])

    # torchvision RCNN always returns a (Losses, Detections) tuple in scripting
    out = out[1]

    assert {"boxes", "labels", "scores"} <= out[0].keys()
Code example #5
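# Finetunes an ObjectDetector on a synthetic FiftyOne dataset and predicts on two blank test images.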
def test_detection_fiftyone(tmpdir, head, backbone):

    train_dataset = _create_synth_fiftyone_dataset(tmpdir)

    data = ObjectDetectionData.from_fiftyone(train_dataset=train_dataset,
                                             batch_size=1)
    model = ObjectDetector(head=head,
                           backbone=backbone,
                           num_classes=data.num_classes)

    trainer = flash.Trainer(fast_dev_run=True, gpus=torch.cuda.device_count())

    trainer.finetune(model, data, strategy="freeze")

    test_image_one = os.fspath(tmpdir / "test_one.png")
    test_image_two = os.fspath(tmpdir / "test_two.png")

    Image.new("RGB", (512, 512)).save(test_image_one)
    Image.new("RGB", (512, 512)).save(test_image_two)

    test_images = [str(test_image_one), str(test_image_two)]
    model.predict(test_images)
Code example #6
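# Variant of the FiftyOne test that selects the detector via the 'model' argument instead of 'head'.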
def test_detection_fiftyone(tmpdir, model, backbone):

    train_dataset = _create_synth_fiftyone_dataset(tmpdir)

    data = ObjectDetectionData.from_fiftyone(train_dataset=train_dataset,
                                             batch_size=1)
    model = ObjectDetector(model=model,
                           backbone=backbone,
                           num_classes=data.num_classes)

    trainer = flash.Trainer(fast_dev_run=True)

    trainer.finetune(model, data)

    test_image_one = os.fspath(tmpdir / "test_one.png")
    test_image_two = os.fspath(tmpdir / "test_two.png")

    Image.new('RGB', (512, 512)).save(test_image_one)
    Image.new('RGB', (512, 512)).save(test_image_two)

    test_images = [str(test_image_one), str(test_image_two)]
    model.predict(test_images)
Code example #7
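# Finetunes an ObjectDetector on a synthetic COCO dataset and predicts on two blank test images through a DataModule.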
def test_detection(tmpdir, head, backbone):

    train_folder, coco_ann_path = _create_synth_coco_dataset(tmpdir)

    datamodule = ObjectDetectionData.from_coco(train_folder=train_folder, train_ann_file=coco_ann_path, batch_size=1)
    model = ObjectDetector(head=head, backbone=backbone, num_classes=datamodule.num_classes)

    trainer = flash.Trainer(fast_dev_run=True, gpus=torch.cuda.device_count())

    trainer.finetune(model, datamodule=datamodule, strategy="freeze")

    test_image_one = os.fspath(tmpdir / "test_one.png")
    test_image_two = os.fspath(tmpdir / "test_two.png")

    Image.new("RGB", (512, 512)).save(test_image_one)
    Image.new("RGB", (512, 512)).save(test_image_two)

    datamodule = ObjectDetectionData.from_files(predict_files=[str(test_image_one), str(test_image_two)], batch_size=1)
    trainer.predict(model, datamodule=datamodule)
Code example #8
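# Checks that batches built by process_predict_dataset feed straight into forward() and that each
# result dict contains "bboxes", "labels" and "scores".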
def test_init():
    model = ObjectDetector(num_classes=2)
    model.eval()

    batch_size = 2
    ds = DummyDetectionDataset((128, 128, 3), 1, 2, 10)
    dl = model.process_predict_dataset(ds, batch_size=batch_size)
    data = next(iter(dl))

    out = model.forward(data[DefaultDataKeys.INPUT])

    assert len(out) == batch_size
    assert all(isinstance(res, dict) for res in out)
    assert all("bboxes" in res for res in out)
    assert all("labels" in res for res in out)
    assert all("scores" in res for res in out)
Code example #9
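# End-to-end finetuning on COCO128 with a RetinaNet detector, finishing with a saved checkpoint.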
import flash

from flash.core.data.utils import download_data
from flash.image import ObjectDetectionData, ObjectDetector

# 1. Download the data
# Dataset Credit: https://www.kaggle.com/ultralytics/coco128
download_data(
    "https://github.com/zhiqwang/yolov5-rt-stack/releases/download/v0.3.0/coco128.zip",
    "data/")

# 2. Load the Data
datamodule = ObjectDetectionData.from_coco(
    train_folder="data/coco128/images/train2017/",
    train_ann_file="data/coco128/annotations/instances_train2017.json",
    val_split=0.3,
    batch_size=4,
    num_workers=4,
)

# 3. Build the model
model = ObjectDetector(model="retinanet", num_classes=datamodule.num_classes)

# 4. Create the trainer
trainer = flash.Trainer(max_epochs=3,
                        limit_train_batches=1,
                        limit_val_batches=1)

# 5. Finetune the model
trainer.finetune(model, datamodule=datamodule)

# 6. Save it!
trainer.save_checkpoint("object_detection_model.pt")
Code example #10
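# Finetunes an EfficientDet-D0 detector on the IceData fridge-objects dataset and produces
# FiftyOne-formatted predictions.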
datamodule = ObjectDetectionData.from_icedata(
    train_folder=data_dir,
    predict_folder=os.path.join(data_dir, "odFridgeObjects", "images"),
    val_split=0.1,
    transform_kwargs={"image_size": 128},
    parser=icedata.fridge.parser,
    batch_size=8,
)

# 2. Build the task
model = ObjectDetector(
    head="efficientdet",
    backbone="d0",
    labels=datamodule.labels,
    image_size=128,
    lr_scheduler=("multisteplr", {
        "milestones": [20]
    }),
)

# 3. Create the trainer and finetune the model
trainer = flash.Trainer(max_epochs=30)
trainer.finetune(model, datamodule=datamodule, strategy="freeze")

# 4. Set the output and get some predictions
predictions = trainer.predict(model, datamodule=datamodule,
                              output="fiftyone")  # output FiftyOne format

# 5. Visualize predictions in FiftyOne app
# Optional: pass `wait=True` to block execution until App is closed
session = visualize(predictions, wait=True)
Code example #11
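# Finetunes an EfficientDet-D0 detector on COCO128 and predicts on a few of the training images.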
import flash
from flash.core.data.utils import download_data
from flash.image import ObjectDetectionData, ObjectDetector

# 1. Create the DataModule
# Dataset Credit: https://www.kaggle.com/ultralytics/coco128
download_data(
    "https://github.com/zhiqwang/yolov5-rt-stack/releases/download/v0.3.0/coco128.zip",
    "data/")

datamodule = ObjectDetectionData.from_coco(
    train_folder="data/coco128/images/train2017/",
    train_ann_file="data/coco128/annotations/instances_train2017.json",
    val_split=0.1,
    image_size=128,
)

# 2. Build the task
model = ObjectDetector(head="efficientdet",
                       backbone="d0",
                       num_classes=datamodule.num_classes,
                       image_size=128)

# 3. Create the trainer and finetune the model
trainer = flash.Trainer(max_epochs=1)
trainer.finetune(model, datamodule=datamodule, strategy="freeze")

# 4. Detect objects in a few images!
predictions = model.predict([
    "data/coco128/images/train2017/000000000625.jpg",
    "data/coco128/images/train2017/000000000626.jpg",
    "data/coco128/images/train2017/000000000629.jpg",
])
print(predictions)

# 5. Save the model!
trainer.save_checkpoint("object_detection_model.pt")
Code example #12
File: train.py  Project: aribornstein/CocoDemo
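    # CLI training script: optionally downloads COCO128, finetunes an ObjectDetector and saves a checkpoint.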
    # NOTE: the snippet is truncated above; the ArgumentParser creation is reconstructed here,
    # and the '--download' and '--train_folder' arguments referenced below are defined in the full train.py.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--train_ann_file', type=str,
        default=os.path.join(
            os.getcwd(), "data/coco128/annotations/instances_train2017.json"))
    parser.add_argument('--max_epochs', type=int, default=1)
    parser.add_argument('--learning_rate', type=float, default=1e-3)
    parser.add_argument('--gpus', type=int, default=None)
    args = parser.parse_args()

    # 1. Download the data
    if args.download:
        # Dataset Credit: https://www.kaggle.com/ultralytics/coco128
        download_data(
            "https://github.com/zhiqwang/yolov5-rt-stack/releases/download/v0.3.0/coco128.zip",
            os.path.join(os.getcwd(), "data/"))

    # 2. Load the Data
    datamodule = ObjectDetectionData.from_coco(
        train_folder=args.train_folder,
        train_ann_file=args.train_ann_file,
        batch_size=2)

    # 3. Build the model
    model = ObjectDetector(num_classes=datamodule.num_classes)

    # 4. Create the trainer
    trainer = flash.Trainer(max_epochs=args.max_epochs, gpus=args.gpus)

    # 5. Finetune the model
    trainer.finetune(model, datamodule)

    # 6. Save it!
    trainer.save_checkpoint("object_detection_model.pt")
Code example #13
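# FiftyOne integration example: finetunes EfficientDet-D0 on the fridge-objects dataset and opens
# the predictions in the FiftyOne App via the FiftyOneDetectionLabels serializer.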
import icedata  # noqa: E402

# 1. Create the DataModule
data_dir = icedata.fridge.load_data()

datamodule = ObjectDetectionData.from_folders(
    train_folder=data_dir,
    predict_folder=data_dir,
    val_split=0.1,
    image_size=128,
    parser=icedata.fridge.parser,
)

# 2. Build the task
model = ObjectDetector(head="efficientdet",
                       backbone="d0",
                       num_classes=datamodule.num_classes,
                       image_size=128)

# 3. Create the trainer and finetune the model
trainer = flash.Trainer(max_epochs=1)
trainer.finetune(model, datamodule=datamodule, strategy="freeze")

# 4. Set the serializer and get some predictions
model.serializer = FiftyOneDetectionLabels(
    return_filepath=True)  # output FiftyOne format
predictions = trainer.predict(model, datamodule=datamodule)
predictions = list(chain.from_iterable(predictions))  # flatten batches

# 5. Visualize predictions in FiftyOne app
# Optional: pass `wait=True` to block execution until App is closed
session = visualize(predictions, wait=True)
Code example #14
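# Minimal training smoke test that builds its DataLoader via process_train_dataset.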
def test_training(tmpdir, head):
    model = ObjectDetector(num_classes=2, head=head, pretrained=False)
    ds = DummyDetectionDataset((128, 128, 3), 1, 2, 10)
    dl = model.process_train_dataset(ds, 2, 0, False, None)
    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
    trainer.fit(model, dl)
Code example #15
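# Inference-only example: loads an ObjectDetector checkpoint and predicts on a few COCO128 images.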
from flash.core.data.utils import download_data
from flash.image import ObjectDetector

# 1. Download the data
# Dataset Credit: https://www.kaggle.com/ultralytics/coco128
download_data(
    "https://github.com/zhiqwang/yolov5-rt-stack/releases/download/v0.3.0/coco128.zip",
    "data/")

# 2. Load the model from a checkpoint
model = ObjectDetector.load_from_checkpoint(
    "https://flash-weights.s3.amazonaws.com/object_detection_model.pt")

# 3. Detect the object on the images
predictions = model.predict([
    "data/coco128/images/train2017/000000000025.jpg",
    "data/coco128/images/train2017/000000000520.jpg",
    "data/coco128/images/train2017/000000000532.jpg",
])
print(predictions)
Code example #16
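# Verifies that loading a checkpoint without the 'lightning-flash[image]' extra installed raises a ModuleNotFoundError.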
def test_load_from_checkpoint_dependency_error():
    with pytest.raises(ModuleNotFoundError,
                       match=re.escape("'lightning-flash[image]'")):
        ObjectDetector.load_from_checkpoint("not_a_real_checkpoint.pt")
Code example #17
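# Finetunes an EfficientDet-D0 detector on COCO128 at image size 512, then builds a predict
# DataModule from individual image files.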
# Dataset Credit: https://www.kaggle.com/ultralytics/coco128
download_data(
    "https://github.com/zhiqwang/yolov5-rt-stack/releases/download/v0.3.0/coco128.zip",
    "data/")

datamodule = ObjectDetectionData.from_coco(
    train_folder="data/coco128/images/train2017/",
    train_ann_file="data/coco128/annotations/instances_train2017.json",
    val_split=0.1,
    transform_kwargs={"image_size": 512},
    batch_size=4,
)

# 2. Build the task
model = ObjectDetector(head="efficientdet",
                       backbone="d0",
                       num_classes=datamodule.num_classes,
                       image_size=512)

# 3. Create the trainer and finetune the model
trainer = flash.Trainer(max_epochs=1)
trainer.finetune(model, datamodule=datamodule, strategy="freeze")

# 4. Detect objects in a few images!
datamodule = ObjectDetectionData.from_files(
    predict_files=[
        "data/coco128/images/train2017/000000000625.jpg",
        "data/coco128/images/train2017/000000000626.jpg",
        "data/coco128/images/train2017/000000000629.jpg",
    ],
    transform_kwargs={"image_size": 512},
    batch_size=4,