def test_faster_rcnn_predict(mock_create, config, dataset):
    """test predict."""
    mock_create.return_value = dataset
    checkpoint_path = f"{tmp_name}/train/FasterRCNN.estimator"
    config.checkpoint_file = checkpoint_path

    pipeline_writer = MagicMock()
    tb_writer = MagicMock()
    checkpointer = EstimatorCheckpoint(
        estimator_name=config.estimator,
        log_dir=config.system.logdir,
        distributed=config.system["distributed"],
    )
    estimator = FasterRCNN(
        config=config,
        writer=tb_writer,
        device=torch.device("cpu"),
        checkpointer=checkpointer,
        kfp_writer=pipeline_writer,
    )
    # Run inference on a single-channel 256x256 random image; with no
    # trained detections expected, predict should return an empty list.
    raw_image = Image.fromarray(np.random.random((256, 256)), "L")
    tensor_image = torchvision.transforms.functional.to_tensor(raw_image)
    assert estimator.predict(tensor_image) == []
# Example #2
def test_faster_rcnn_train_all(mock_loss, mock_train_one_epoch, config,
                               dataset):
    """test train on all epochs."""
    # Patch the loss computation so every validation pass reports this value.
    loss_val = 0.1
    mock_loss.return_value = loss_val
    log_dir = os.path.join(tmp_name, "train")
    if not os.path.exists(log_dir):
        os.mkdir(log_dir)
    writer = MagicMock()

    # XXX This is just a hot fix to prevent a mysterious folder such as:
    # <MagicMock name='mock.logdir' id='140420520377936'> showed up after
    # running this test.
    writer.logdir = tmp_name

    kfp_writer = MagicMock()

    checkpointer = EstimatorCheckpoint(
        estimator_name=config.estimator,
        checkpoint_dir=log_dir,
        distributed=False,
    )

    estimator = FasterRCNN(
        config=config,
        writer=writer,
        checkpointer=checkpointer,
        kfp_writer=kfp_writer,
        logdir="/tmp",
    )
    # Re-assign the collaborators explicitly in case the constructor wrapped
    # or replaced them internally.
    estimator.writer = writer
    estimator.kfp_writer = kfp_writer
    estimator.checkpointer = checkpointer

    estimator.device = torch.device("cpu")
    # Stub out checkpoint saving so no estimator files are written.
    checkpointer.save = MagicMock()
    # The same fixture dataset serves as both the train and the val split.
    train_dataset = dataset
    val_dataset = dataset
    label_mappings = train_dataset.label_mappings
    is_distributed = False
    train_sampler = FasterRCNN.create_sampler(is_distributed=is_distributed,
                                              dataset=train_dataset,
                                              is_train=True)
    val_sampler = FasterRCNN.create_sampler(is_distributed=is_distributed,
                                            dataset=val_dataset,
                                            is_train=False)

    train_loader = dataloader_creator(config, train_dataset, train_sampler,
                                      TRAIN, is_distributed)
    val_loader = dataloader_creator(config, val_dataset, val_sampler, VAL,
                                    is_distributed)
    epoch = 0
    estimator.train_loop(
        train_dataloader=train_loader,
        label_mappings=label_mappings,
        val_dataloader=val_loader,
        train_sampler=train_sampler,
    )
    # The validation loss logged for epoch 0 must be the mocked loss value,
    # and the (patched) per-epoch training routine must run exactly once.
    writer.add_scalar.assert_called_with("val/loss", loss_val, epoch)
    mock_train_one_epoch.assert_called_once()
# Example #3
def test_faster_rcnn_log_metric_val(config):
    """test log metric val."""
    tb_writer = MagicMock()

    # XXX Hot fix: give the mock writer a real logdir so no folder named
    # after the MagicMock repr (e.g. <MagicMock name='mock.logdir' ...>)
    # gets created while the test runs.
    tb_writer.logdir = tmp_name

    pipeline_writer = MagicMock()
    ckpt = MagicMock()
    for attr in ("add_scalar", "add_scalars", "add_figure"):
        setattr(tb_writer, attr, MagicMock())
    estimator = FasterRCNN(
        config=config,
        writer=tb_writer,
        checkpointer=ckpt,
        kfp_writer=pipeline_writer,
        logdir="/tmp",
    )
    estimator.writer = tb_writer
    estimator.kfp_writer = pipeline_writer
    estimator.checkpointer = ckpt

    estimator.device = torch.device("cpu")
    estimator.log_metric_val({"1": "car", "2": "bike"}, 0)

    # With no accumulated predictions the per-class AP dict logged is empty.
    tb_writer.add_scalars.assert_called_with("val/APIOU50-per-class", {}, 0)
# Example #4
def test_faster_rcnn_save(config):
    """test save model."""
    log_dir = f"{tmp_name}/train/"
    pipeline_writer = MagicMock()
    tb_writer = MagicMock()

    # XXX Hot fix: give the mock writer a real logdir so no folder named
    # after the MagicMock repr gets created while the test runs.
    tb_writer.logdir = tmp_name

    checkpointer = EstimatorCheckpoint(
        estimator_name=config.estimator,
        checkpoint_dir=log_dir,
        distributed=False,
    )
    estimator = FasterRCNN(
        config=config,
        writer=tb_writer,
        checkpointer=checkpointer,
        kfp_writer=pipeline_writer,
        logdir="/tmp",
    )
    estimator.writer = tb_writer
    estimator.kfp_writer = pipeline_writer
    estimator.checkpointer = checkpointer
    estimator.device = torch.device("cpu")
    estimator.save(log_dir + "FasterRCNN.estimator")

    # Saving must leave at least one file whose name carries the prefix.
    saved_names = os.listdir(log_dir)
    assert any(name.startswith("FasterRCNN.estimator") for name in saved_names)
# Example #5
def test_faster_rcnn_load(config):
    """test load model."""
    ckpt_path = f"{tmp_name}/train/FasterRCNN.estimator"
    config.checkpoint_file = ckpt_path
    log_dir = f"{tmp_name}/load/"
    config.logdir = log_dir
    pipeline_writer = MagicMock()
    # Use a real SummaryWriter so loading produces an actual event file.
    tb_writer = SummaryWriter(config.logdir, write_to_disk=True)
    checkpointer = EstimatorCheckpoint(
        estimator_name=config.estimator,
        checkpoint_dir=log_dir,
        distributed=False,
    )
    estimator = FasterRCNN(
        config=config,
        writer=tb_writer,
        checkpointer=checkpointer,
        kfp_writer=pipeline_writer,
        logdir="/tmp",
    )
    estimator.writer = tb_writer
    estimator.kfp_writer = pipeline_writer
    estimator.checkpointer = checkpointer

    estimator.device = torch.device("cpu")
    estimator.load(ckpt_path)
    # The writer must have dropped a tfevents file into the load directory.
    assert os.listdir(log_dir)[0].startswith("events.out.tfevents")
# Example #6
def test_create_sampler(mock_create, config, dataset):
    """test create sampler."""
    mock_create.return_value = dataset
    sampler = FasterRCNN.create_sampler(is_distributed=False,
                                        dataset=dataset,
                                        is_train=True)
    # A non-distributed train sampler must cover every image once.
    assert len(sampler) == len(dataset.images)
# Example #7
def test_faster_rcnn_train(mock_create, mock_loss, mock_train_loop, config,
                           dataset):
    """test train."""
    mock_loss.return_value = 0.1
    mock_create.return_value = dataset

    pipeline_writer = MagicMock()
    tb_writer = MagicMock()

    # XXX Hot fix: give the mock writer a real logdir so no folder named
    # after the MagicMock repr gets created while the test runs.
    tb_writer.logdir = tmp_name

    for attr in ("add_scalar", "add_scalars", "add_figure"):
        setattr(tb_writer, attr, MagicMock())
    ckpt = MagicMock()
    estimator = FasterRCNN(
        config=config,
        writer=tb_writer,
        checkpointer=ckpt,
        kfp_writer=pipeline_writer,
        logdir="/tmp",
    )
    estimator.checkpointer = ckpt
    estimator.kfp_writer = pipeline_writer
    estimator.writer = tb_writer
    # train() should delegate to the (patched) train loop exactly once.
    estimator.train(train_data=None)
    mock_train_loop.assert_called_once()
# Example #8
def test_faster_rcnn_evaluate(mock_create, mock_loss, mock_evaluate_per_epoch,
                              config, dataset):
    """test evaluate."""
    mock_create.return_value = dataset
    mock_loss.return_value = 0.1
    config.checkpoint_file = f"{tmp_name}/train/FasterRCNN.estimator"
    tb_writer = MagicMock()

    # XXX Hot fix: give the mock writer a real logdir so no folder named
    # after the MagicMock repr gets created while the test runs.
    tb_writer.logdir = tmp_name

    pipeline_writer = MagicMock()
    ckpt = MagicMock()
    tb_writer.add_scalar = MagicMock()
    estimator = FasterRCNN(
        config=config,
        writer=tb_writer,
        checkpointer=ckpt,
        kfp_writer=pipeline_writer,
        logdir="/tmp",
    )
    estimator.writer = tb_writer
    estimator.kfp_writer = pipeline_writer
    estimator.checkpointer = ckpt
    # evaluate() should delegate to the patched per-epoch routine once.
    estimator.evaluate(None)
    mock_evaluate_per_epoch.assert_called_once()
# Example #9
def test_create_optimizer(mock_lr, mock_adm, config, dataset):
    """test create optimizer."""
    mock_lr.return_value = MagicMock()
    mock_adm.return_value = MagicMock()

    tb_writer = MagicMock()

    # XXX Hot fix: give the mock writer a real logdir so no folder named
    # after the MagicMock repr gets created while the test runs.
    tb_writer.logdir = tmp_name

    pipeline_writer = MagicMock()
    ckpt = MagicMock()
    estimator = FasterRCNN(
        config=config,
        writer=tb_writer,
        checkpointer=ckpt,
        kfp_writer=pipeline_writer,
        logdir="/tmp",
    )
    estimator.writer = tb_writer
    estimator.kfp_writer = pipeline_writer
    estimator.checkpointer = ckpt

    estimator.device = torch.device("cpu")
    trainable = [p for p in estimator.model.parameters() if p.requires_grad]
    optimizer, lr_scheduler = FasterRCNN.create_optimizer_lrs(config, trainable)

    # Both factory functions were patched, so the results must be the mocks.
    assert isinstance(optimizer, MagicMock)
    assert isinstance(lr_scheduler, MagicMock)
def test_create_sampler(mock_create, config, dataset):
    """test create sampler."""
    # NOTE(review): a function with this same name is defined earlier in the
    # module; under pytest only the later definition is collected.
    mock_create.return_value = dataset
    training_split = create_dataset(config, TRAIN)
    sampler = FasterRCNN.create_sampler(
        is_distributed=config.system.distributed,
        dataset=training_split,
        is_train=True,
    )
    # The sampler must cover every image in the fixture dataset.
    assert len(sampler) == len(dataset.images)
# Example #11
def test_faster_rcnn_train_one_epoch(config, dataset):
    """test train one epoch."""
    writer = MagicMock()

    # XXX This is just a hot fix to prevent a mysterious folder such as:
    # <MagicMock name='mock.logdir' id='140420520377936'> showed up after
    # running this test.
    writer.logdir = tmp_name

    kfp_writer = MagicMock()
    checkpointer = MagicMock()
    estimator = FasterRCNN(
        config=config,
        writer=writer,
        checkpointer=checkpointer,
        kfp_writer=kfp_writer,
        logdir="/tmp",
        no_cuda=True,
    )
    # Re-assign collaborators explicitly in case the constructor wrapped them.
    estimator.writer = writer
    estimator.kfp_writer = kfp_writer
    estimator.checkpointer = checkpointer
    estimator.device = torch.device("cpu")
    train_dataset = dataset
    is_distributed = False
    train_sampler = FasterRCNN.create_sampler(is_distributed=is_distributed,
                                              dataset=train_dataset,
                                              is_train=True)
    train_loader = dataloader_creator(config, train_dataset, train_sampler,
                                      TRAIN, is_distributed)
    # Build a real optimizer/scheduler pair from the model's trainable
    # parameters, the same way the production training path does.
    params = [p for p in estimator.model.parameters() if p.requires_grad]
    optimizer, lr_scheduler = FasterRCNN.create_optimizer_lrs(config, params)
    accumulation_steps = config.train.get("accumulation_steps", 1)
    epoch = 1
    estimator.train_one_epoch(
        optimizer=optimizer,
        data_loader=train_loader,
        epoch=epoch,
        lr_scheduler=lr_scheduler,
        accumulation_steps=accumulation_steps,
    )
    # The configured learning rate must have been logged for this epoch.
    writer.add_scalar.assert_called_with("training/lr",
                                         config.optimizer.args.get("lr"),
                                         epoch)
# Example #12
def test_dataloader_creator(mock_loader, config, dataset):
    """test create dataloader."""
    mock_loader.return_value = MagicMock()
    sampler = FasterRCNN.create_sampler(is_distributed=False,
                                        dataset=dataset,
                                        is_train=True)
    # The loader factory was patched, so the result must be the mock.
    loader = dataloader_creator(config, dataset, sampler, TRAIN, False)
    assert isinstance(loader, MagicMock)
# Example #13
def test_faster_rcnn_evaluate_per_epoch(mock_loss, config, dataset):
    """test evaluate per epoch."""
    # Patch the loss computation so validation reports a known value.
    loss_val = 0.1
    mock_loss.return_value = loss_val
    ckpt_dir = tmp_name + "/train/FasterRCNN.estimator"
    config.checkpoint_file = ckpt_dir
    writer = MagicMock()

    # XXX This is just a hot fix to prevent a mysterious folder such as:
    # <MagicMock name='mock.logdir' id='140420520377936'> showed up after
    # running this test.
    writer.logdir = tmp_name

    kfp_writer = MagicMock()
    checkpointer = MagicMock()
    writer.add_scalar = MagicMock()

    estimator = FasterRCNN(
        config=config,
        writer=writer,
        checkpointer=checkpointer,
        kfp_writer=kfp_writer,
        logdir="/tmp",
        no_cuda=True,
    )
    # Re-assign collaborators explicitly in case the constructor wrapped them.
    estimator.writer = writer
    estimator.kfp_writer = kfp_writer
    estimator.checkpointer = checkpointer

    estimator.device = torch.device("cpu")

    # Evaluate against the fixture dataset through a real sampler/loader.
    test_dataset = dataset
    label_mappings = test_dataset.label_mappings
    is_distributed = False
    test_sampler = FasterRCNN.create_sampler(is_distributed=is_distributed,
                                             dataset=test_dataset,
                                             is_train=False)
    test_loader = dataloader_creator(config, test_dataset, test_sampler, TEST,
                                     is_distributed)
    sync_metrics = config.get("synchronize_metrics", True)
    epoch = 0
    estimator.evaluate_per_epoch(
        data_loader=test_loader,
        epoch=epoch,
        label_mappings=label_mappings,
        synchronize_metrics=sync_metrics,
    )
    # The mocked loss must have been logged under "val/loss" for epoch 0.
    writer.add_scalar.assert_called_with("val/loss", loss_val, epoch)
def test_faster_rcnn_log_metric_val(mock_create, config, dataset):
    """test log metric val."""
    # NOTE(review): a function with this same name is defined earlier in the
    # module; under pytest only the later definition is collected.
    mock_create.return_value = dataset
    tb_writer = MagicMock()
    pipeline_writer = MagicMock()
    ckpt = MagicMock()
    for attr in ("add_scalar", "add_scalars", "add_figure"):
        setattr(tb_writer, attr, MagicMock())
    estimator = FasterRCNN(
        config=config,
        writer=tb_writer,
        device=torch.device("cpu"),
        checkpointer=ckpt,
        kfp_writer=pipeline_writer,
    )
    estimator.log_metric_val(dataset.label_mappings, 0)

    # With no accumulated predictions the per-class AP dict logged is empty.
    tb_writer.add_scalars.assert_called_with("val/APIOU50-per-class", {}, 0)
def test_create_optimizer(mock_create, mock_lr, mock_adm, config, dataset):
    """test create optimizer."""
    # NOTE(review): a function with this same name is defined earlier in the
    # module; under pytest only the later definition is collected.
    mock_lr.return_value = MagicMock()
    mock_adm.return_value = MagicMock()
    mock_create.return_value = dataset

    estimator = FasterRCNN(
        config=config,
        writer=MagicMock(),
        device=torch.device("cpu"),
        checkpointer=MagicMock(),
        kfp_writer=MagicMock(),
    )
    trainable = [p for p in estimator.model.parameters() if p.requires_grad]
    optimizer, scheduler = FasterRCNN.create_optimizer_lrs(config, trainable)

    # Both factory functions were patched, so the results must be the mocks.
    assert isinstance(optimizer, MagicMock)
    assert isinstance(scheduler, MagicMock)
def test_faster_rcnn_train_one_epoch(mock_create, config, dataset):
    """test train one epoch."""
    # NOTE(review): a function with this same name is defined earlier in the
    # module; under pytest only the later definition is collected.
    mock_create.return_value = dataset
    writer = MagicMock()
    kfp_writer = MagicMock()
    checkpointer = MagicMock()
    estimator = FasterRCNN(
        config=config,
        writer=writer,
        device=torch.device("cpu"),
        checkpointer=checkpointer,
        kfp_writer=kfp_writer,
    )
    train_dataset = create_dataset(config, TRAIN)
    is_distributed = config.system.distributed
    train_sampler = FasterRCNN.create_sampler(
        is_distributed=is_distributed, dataset=train_dataset, is_train=True
    )
    train_loader = dataloader_creator(
        config, train_dataset, train_sampler, TRAIN
    )
    # Build a real optimizer/scheduler pair from the model's trainable
    # parameters, the same way the production training path does.
    params = [p for p in estimator.model.parameters() if p.requires_grad]
    optimizer, lr_scheduler = FasterRCNN.create_optimizer_lrs(config, params)
    accumulation_steps = config.train.get("accumulation_steps", 1)
    epoch = 1
    estimator.train_one_epoch(
        optimizer=optimizer,
        data_loader=train_loader,
        epoch=epoch,
        lr_scheduler=lr_scheduler,
        accumulation_steps=accumulation_steps,
    )
    # The configured learning rate must have been logged for this epoch.
    writer.add_scalar.assert_called_with(
        "training/lr", config.optimizer.args.get("lr"), epoch
    )
def test_faster_rcnn_evaluate(
    mock_create, mock_loss, mock_evaluate_per_epoch, config, dataset
):
    """test evaluate."""
    # NOTE(review): a function with this same name is defined earlier in the
    # module; under pytest only the later definition is collected.
    mock_create.return_value = dataset
    mock_loss.return_value = 0.1
    config.checkpoint_file = f"{tmp_name}/train/FasterRCNN.estimator"
    tb_writer = MagicMock()
    tb_writer.add_scalar = MagicMock()
    estimator = FasterRCNN(
        config=config,
        writer=tb_writer,
        device=torch.device("cpu"),
        checkpointer=MagicMock(),
        kfp_writer=MagicMock(),
    )
    # evaluate() should delegate to the patched per-epoch routine once.
    estimator.evaluate()
    mock_evaluate_per_epoch.assert_called_once()
def test_dataloader_creator(mock_create, mock_loader, config, dataset):
    """test create dataloader."""
    # NOTE(review): a function with this same name is defined earlier in the
    # module; under pytest only the later definition is collected.
    mock_create.return_value = dataset
    mock_loader.return_value = MagicMock()
    training_split = create_dataset(config, TRAIN)
    sampler = FasterRCNN.create_sampler(
        is_distributed=config.system.distributed,
        dataset=training_split,
        is_train=True,
    )
    # The loader factory was patched, so the result must be the mock.
    loader = dataloader_creator(config, training_split, sampler, TRAIN)
    assert isinstance(loader, MagicMock)
def test_faster_rcnn_load(mock_create, config, dataset):
    """test load model."""
    # NOTE(review): a function with this same name is defined earlier in the
    # module; under pytest only the later definition is collected.
    mock_create.return_value = dataset
    ckpt_path = f"{tmp_name}/train/FasterRCNN.estimator"
    config.checkpoint_file = ckpt_path
    log_dir = f"{tmp_name}/load/"
    config.system.logdir = log_dir
    # Use a real SummaryWriter so loading produces an actual event file.
    tb_writer = SummaryWriter(config.system.logdir, write_to_disk=True)
    checkpointer = EstimatorCheckpoint(
        estimator_name=config.estimator,
        log_dir=log_dir,
        distributed=config.system["distributed"],
    )
    estimator = FasterRCNN(
        config=config,
        writer=tb_writer,
        device=torch.device("cpu"),
        checkpointer=checkpointer,
        kfp_writer=MagicMock(),
    )
    estimator.load(ckpt_path)
    # The writer must have dropped a tfevents file into the load directory.
    assert os.listdir(log_dir)[0].startswith("events.out.tfevents")
def test_faster_rcnn_evaluate_per_epoch(
    mock_create, mock_loss, config, dataset
):
    """test evaluate per epoch."""
    # NOTE(review): a function with this same name is defined earlier in the
    # module; under pytest only the later definition is collected.
    # Patch the loss computation so validation reports a known value.
    loss_val = 0.1
    mock_loss.return_value = loss_val
    mock_create.return_value = dataset
    ckpt_dir = tmp_name + "/train/FasterRCNN.estimator"
    config.checkpoint_file = ckpt_dir
    writer = MagicMock()
    kfp_writer = MagicMock()
    checkpointer = MagicMock()
    writer.add_scalar = MagicMock()
    estimator = FasterRCNN(
        config=config,
        writer=writer,
        device=torch.device("cpu"),
        checkpointer=checkpointer,
        kfp_writer=kfp_writer,
    )
    # Evaluate against the TEST split through a real sampler/loader.
    test_dataset = create_dataset(config, TEST)
    label_mappings = test_dataset.label_mappings
    is_distributed = config.system.distributed
    test_sampler = FasterRCNN.create_sampler(
        is_distributed=is_distributed, dataset=test_dataset, is_train=False
    )
    test_loader = dataloader_creator(config, test_dataset, test_sampler, TEST)
    sync_metrics = config.get("synchronize_metrics", True)
    epoch = 0
    estimator.evaluate_per_epoch(
        data_loader=test_loader,
        epoch=epoch,
        label_mappings=label_mappings,
        is_distributed=config.system.distributed,
        synchronize_metrics=sync_metrics,
    )
    # The mocked loss must have been logged under "val/loss" for epoch 0.
    writer.add_scalar.assert_called_with("val/loss", loss_val, epoch)
def test_faster_rcnn_save(mock_create, config, dataset):
    """test save model."""
    # NOTE(review): a function with this same name is defined earlier in the
    # module; under pytest only the later definition is collected.
    mock_create.return_value = dataset
    log_dir = f"{tmp_name}/test_save/"
    config.system.logdir = log_dir
    checkpointer = EstimatorCheckpoint(
        estimator_name=config.estimator,
        log_dir=log_dir,
        distributed=config.system["distributed"],
    )
    estimator = FasterRCNN(
        config=config,
        writer=MagicMock(),
        device=torch.device("cpu"),
        checkpointer=checkpointer,
        kfp_writer=MagicMock(),
    )
    estimator.save(log_dir + "FasterRCNN_test")

    # Saving must leave at least one file whose name carries the prefix.
    saved_names = os.listdir(log_dir)
    assert any(name.startswith("FasterRCNN_test") for name in saved_names)
def test_faster_rcnn_train(
    mock_create, mock_loss, mock_train_loop, config, dataset
):
    """test train."""
    # NOTE(review): a function with this same name is defined earlier in the
    # module; under pytest only the later definition is collected.
    loss_val = 0.1
    mock_loss.return_value = loss_val
    mock_create.return_value = dataset

    kfp_writer = MagicMock()
    # BUG FIX: the original read `writer = MagicMock` (missing parentheses),
    # binding the MagicMock *class* instead of an instance. The attribute
    # assignments below then mutated the class itself, leaking mock state
    # into every other test that uses MagicMock.
    writer = MagicMock()
    writer.add_scalar = MagicMock()
    writer.add_scalars = MagicMock()
    writer.add_figure = MagicMock()

    checkpointer = MagicMock()
    estimator = FasterRCNN(
        config=config,
        writer=writer,
        device=torch.device("cpu"),
        checkpointer=checkpointer,
        kfp_writer=kfp_writer,
    )
    # train() should delegate to the (patched) train loop exactly once.
    estimator.train()
    mock_train_loop.assert_called_once()
# Example #23
def test_create_dataloader(mock_loader, config, dataset):
    """test load data."""
    mock_loader.return_value = MagicMock()
    sampler = FasterRCNN.create_sampler(is_distributed=False,
                                        dataset=dataset,
                                        is_train=True)
    # The underlying loader constructor was patched, so the factory must
    # hand back the mock unchanged.
    loader = create_dataloader(
        distributed=False,
        dataset=dataset,
        batch_size=config.train.batch_size,
        sampler=sampler,
        collate_fn=FasterRCNN.collate_fn,
        train=True,
    )
    assert isinstance(loader, MagicMock)
def test_faster_rcnn_train_all(
    mock_create, mock_loss, mock_train_one_epoch, config, dataset
):
    """test train on all epochs."""
    # NOTE(review): a function with this same name is defined earlier in the
    # module; under pytest only the later definition is collected.
    # Patch the loss computation so every validation pass reports this value.
    loss_val = 0.1
    mock_create.return_value = dataset
    mock_loss.return_value = loss_val
    log_dir = tmp_name + "/train/"
    config.system.logdir = log_dir
    writer = MagicMock()
    kfp_writer = MagicMock()

    checkpointer = EstimatorCheckpoint(
        estimator_name=config.estimator,
        log_dir=log_dir,
        distributed=config.system["distributed"],
    )

    estimator = FasterRCNN(
        config=config,
        writer=writer,
        device=torch.device("cpu"),
        checkpointer=checkpointer,
        kfp_writer=kfp_writer,
    )
    train_dataset = create_dataset(config, TRAIN)
    val_dataset = create_dataset(config, VAL)
    label_mappings = train_dataset.label_mappings
    is_distributed = config.system.distributed
    train_sampler = FasterRCNN.create_sampler(
        is_distributed=is_distributed, dataset=train_dataset, is_train=True
    )
    val_sampler = FasterRCNN.create_sampler(
        is_distributed=is_distributed, dataset=val_dataset, is_train=False
    )

    train_loader = dataloader_creator(
        config, train_dataset, train_sampler, TRAIN
    )
    val_loader = dataloader_creator(config, val_dataset, val_sampler, VAL)
    epoch = 0
    estimator.train_loop(
        train_dataloader=train_loader,
        label_mappings=label_mappings,
        val_dataloader=val_loader,
        train_sampler=train_sampler,
    )
    # The validation loss logged for epoch 0 must be the mocked loss value,
    # and the (patched) per-epoch training routine must run exactly once.
    writer.add_scalar.assert_called_with("val/loss", loss_val, epoch)
    mock_train_one_epoch.assert_called_once()
# Example #25
def test_faster_rcnn_predict(config, dataset):
    """test predict."""
    # NOTE(review): a function with this same name is defined earlier in the
    # module; under pytest only the later definition is collected.
    ckpt_path = f"{tmp_name}/train/FasterRCNN.estimator"
    pipeline_writer = MagicMock()
    tb_writer = MagicMock()

    # XXX Hot fix: give the mock writer a real logdir so no folder named
    # after the MagicMock repr gets created while the test runs.
    tb_writer.logdir = tmp_name

    checkpointer = EstimatorCheckpoint(
        estimator_name=config.estimator,
        checkpoint_dir="/tmp",
        distributed=False,
    )
    estimator = FasterRCNN(
        config=config,
        writer=tb_writer,
        checkpointer=checkpointer,
        kfp_writer=pipeline_writer,
        checkpoint_file=ckpt_path,
        logdir="/tmp",
        no_cuda=True,
    )
    estimator.writer = tb_writer
    estimator.kfp_writer = pipeline_writer
    estimator.checkpointer = checkpointer

    estimator.device = torch.device("cpu")
    # Predict directly on a single-channel PIL image (no tensor conversion
    # here, unlike the earlier variant of this test).
    test_image = Image.fromarray(np.random.random((256, 256)), "L")
    assert estimator.predict(test_image) == []
# Example #26
def test_collate_fn():
    """test collate fn."""
    # collate_fn should transpose a (xs, ys) pair of tuples into per-sample
    # (x, y) pairs.
    batch = (("x0", "x1", "x2", "xn"), ("y0", "y1", "y2", "yn"))
    expected = (("x0", "y0"), ("x1", "y1"), ("x2", "y2"), ("xn", "yn"))
    assert FasterRCNN.collate_fn(batch) == expected
# Example #27
def dataset():
    """prepare dataset."""
    # Dummy 2D-detection data wired through the estimator's transform.
    return DummyDetection2D(transform=FasterRCNN.get_transform())