Example #1
    def __init__(self, model, model_save_path, num_workers=4, batch_size=128, transforms=None):
        """Configure an MNIST classification run and hand everything to the base pipeline.

        Builds an Adam optimizer, a loss-on-plateau scheduler, a cross-entropy
        loss and an accuracy metric, then wires up the MNIST train/val datasets.

        Args:
            model: the network to train; its parameters feed the optimizer.
            model_save_path: where the base pipeline persists checkpoints.
            num_workers: DataLoader worker count forwarded to the parent.
            batch_size: batch size forwarded to the parent.
            transforms: optional sample transforms; defaults to ``ToTensor()``.
        """
        adam = optim.Adam(model.parameters())

        # Bare tensor conversion is enough for MNIST when nothing is supplied.
        active_transforms = ToTensor() if transforms is None else transforms

        super().__init__(
            model=model,
            model_save_path=model_save_path,
            optimizer=adam,
            scheduler=SchedulerWrapperLossOnPlateau(adam),
            loss=nn.CrossEntropyLoss(),
            metrics_calculator=MetricsCalculatorAccuracy(),
            batch_size=batch_size,
            num_workers=num_workers,
            train_dataset=get_dataset(mode=MNISTImagesDataset.MODE_TRAIN,
                                      transforms=active_transforms),
            val_dataset=get_dataset(mode=MNISTImagesDataset.MODE_VAL,
                                    transforms=active_transforms),
            trainer_cls=TrainerClassification)
Example #2
    def __init__(self, model, model_save_path, num_workers=8, batch_size=128, transforms=None, epoch_count=100, print_frequency=10):
        """Assemble an SGD-based classification pipeline and delegate to the base class.

        Uses SGD (lr=0.1, weight_decay=5e-4) with a loss-on-plateau scheduler,
        cross-entropy loss and an accuracy metric over the project train/test
        dataset paths.

        Args:
            model: network to optimize.
            model_save_path: checkpoint destination for the parent pipeline.
            num_workers: DataLoader workers forwarded to the parent.
            batch_size: batch size forwarded to the parent.
            transforms: optional transforms; ``ToTensor()`` when omitted.
            epoch_count: number of epochs forwarded to the parent.
            print_frequency: logging cadence forwarded to the parent.
        """
        sgd = optim.SGD(model.parameters(), lr=0.1, weight_decay=5e-4)

        # Default to plain tensor conversion if the caller gave no transforms.
        active_transforms = ToTensor() if transforms is None else transforms

        super().__init__(
            model=model,
            model_save_path=model_save_path,
            optimizer=sgd,
            scheduler=SchedulerWrapperLossOnPlateau(sgd),
            loss=nn.CrossEntropyLoss(),
            metrics_calculator=MetricsCalculatorAccuracy(),
            batch_size=batch_size,
            num_workers=num_workers,
            train_dataset=get_dataset(path=TRAIN_DATASET_PATH,
                                      transforms=active_transforms,
                                      train=True),
            val_dataset=get_dataset(path=TEST_DATASET_PATH,
                                    transforms=active_transforms,
                                    train=False),
            trainer_cls=TrainerClassification,
            print_frequency=print_frequency,
            epoch_count=epoch_count)
Example #3
    def __init__(self, model, model_save_path, num_workers=8, batch_size=128, transforms=None):
        """Configure an Adam-based classification pipeline and delegate to the base class.

        Builds an Adam optimizer (lr=0.001), a loss-on-plateau scheduler,
        cross-entropy loss and an accuracy metric over the project train/test
        dataset paths.

        Args:
            model: network to optimize.
            model_save_path: checkpoint destination for the parent pipeline.
            num_workers: DataLoader workers forwarded to the parent.
            batch_size: batch size forwarded to the parent.
            transforms: optional transforms; ``ToTensor()`` when omitted.
        """
        # Dead commented-out SGD/weight_decay experiments removed; Adam at
        # lr=0.001 is the configuration actually in use.
        optimizer = optim.Adam(model.parameters(), lr=0.001)
        scheduler = SchedulerWrapperLossOnPlateau(optimizer)
        loss = nn.CrossEntropyLoss()
        metrics_calculator = MetricsCalculatorAccuracy()
        trainer_cls = TrainerClassification

        if transforms is None:
            transforms = ToTensor()

        train_dataset = get_dataset(path=TRAIN_DATASET_PATH, transforms=transforms)
        val_dataset = get_dataset(path=TEST_DATASET_PATH, transforms=transforms)

        super().__init__(
            model=model,
            model_save_path=model_save_path,
            optimizer=optimizer,
            scheduler=scheduler,
            loss=loss,
            metrics_calculator=metrics_calculator,
            batch_size=batch_size,
            num_workers=num_workers,
            train_dataset=train_dataset,
            val_dataset=val_dataset,
            trainer_cls=trainer_cls)
Example #4
    def __init__(self, model):
        """Wire a minimal training setup (Adam + plateau scheduler + accuracy)
        and delegate to the base class with fixed batch size 32 / 4 workers.

        Args:
            model: the network whose parameters are handed to the optimizer.
        """
        adam = optim.Adam(model.parameters())

        super().__init__(model=model,
                         optimizer=adam,
                         scheduler=SchedulerWrapperLossOnPlateau(adam),
                         loss=nn.CrossEntropyLoss(),
                         metrics_calculator=MetricsCalculatorAccuracy(),
                         batch_size=32,
                         num_workers=4)
Example #5
    def __init__(self,
                 model,
                 model_save_path,
                 num_workers=16,
                 batch_size=128,
                 learning_rate=0.1,
                 transforms=None,
                 use_mixup=False):
        """Build an SGD pipeline with per-group learning rates and optional mixup.

        Bias and scale parameters get a 10x smaller learning rate than the
        rest. With ``use_mixup`` the train dataset is wrapped in
        ``MixUpDatasetWrapper`` and the loss switches to ``VectorCrossEntropy``
        to accept soft (vector) targets.

        Args:
            model: network to optimize; parameters are split by name.
            model_save_path: checkpoint destination for the parent pipeline.
            num_workers: DataLoader workers forwarded to the parent.
            batch_size: batch size forwarded to the parent.
            learning_rate: base LR; bias/scale groups use a tenth of it.
            transforms: optional transforms; defaults to ToTensor + Normalize
                with CIFAR-style statistics.
            use_mixup: enable mixup augmentation on the training set.
        """
        # Split parameters by name so bias/scale tensors can train slower.
        named_params = list(model.named_parameters())
        bias_params = [tensor for name, tensor in named_params if 'bias' in name]
        scale_params = [tensor for name, tensor in named_params if 'scale' in name]
        other_params = [tensor for name, tensor in named_params
                        if 'bias' not in name and 'scale' not in name]

        optimizer = optim.SGD(
            [
                {'params': bias_params, 'lr': learning_rate / 10.},
                {'params': scale_params, 'lr': learning_rate / 10.},
                {'params': other_params},  # falls back to the base lr
            ],
            lr=learning_rate,
            momentum=0.9,
            weight_decay=5e-4)

        scheduler = SchedulerWrapperLossOnPlateau(optimizer)
        loss = nn.CrossEntropyLoss()

        if transforms is None:
            transforms = Compose([
                ToTensor(),
                Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
            ])

        train_dataset = get_dataset(path=TRAIN_DATASET_PATH,
                                    transforms=transforms,
                                    use_mixup=use_mixup)
        val_dataset = get_dataset(path=TEST_DATASET_PATH,
                                  transforms=transforms,
                                  use_mixup=use_mixup)

        if use_mixup:
            # Mixup yields soft label vectors, so swap in the vector-aware loss.
            train_dataset = MixUpDatasetWrapper(train_dataset, alpha=0.7)
            loss = VectorCrossEntropy()

        super().__init__(model=model,
                         model_save_path=model_save_path,
                         optimizer=optimizer,
                         scheduler=scheduler,
                         loss=loss,
                         metrics_calculator=MetricsCalculatorAccuracy(),
                         batch_size=batch_size,
                         num_workers=num_workers,
                         train_dataset=train_dataset,
                         val_dataset=val_dataset,
                         trainer_cls=TrainerClassification,
                         print_frequency=100)
Example #6
    def test_accuracy(self):
        """calculate() must raise PipelineError before any batches are accumulated."""
        calculator = MetricsCalculatorAccuracy(border=0.4)

        # No predictions have been fed in yet, so computing accuracy is invalid.
        with pytest.raises(PipelineError):
            calculator.calculate()