def test_metric_learning_pipeline():
    """
    Test if classification pipeline can run and compute metrics.

    In this test we check that LoaderMetricCallback works with CMCMetric
    (ICallbackLoaderMetric).
    """
    with TemporaryDirectory() as tmp_dir:
        # Training data: MNIST metric-learning dataset with a balanced batch
        # sampler (p=5 classes per batch, k=10 samples per class).
        dataset_train = datasets.MnistMLDataset(root=tmp_dir, download=True)
        sampler = data.BalanceBatchSampler(labels=dataset_train.get_labels(), p=5, k=10)
        train_loader = DataLoader(
            dataset=dataset_train,
            sampler=sampler,
            batch_size=sampler.batch_size,
        )
        # Validation data: query/gallery split for CMC evaluation
        # (gallery_fraq=0.2 → 20% of samples go to the gallery).
        dataset_val = datasets.MnistQGDataset(root=tmp_dir, transform=None, gallery_fraq=0.2)
        val_loader = DataLoader(dataset=dataset_val, batch_size=1024)

        # Model and optimizer; NUM_CLASSES is a module-level constant.
        model = DummyModel(num_features=28 * 28, num_classes=NUM_CLASSES)
        optimizer = Adam(model.parameters(), lr=0.001)

        # Triplet-margin criterion with in-batch hard triplet mining.
        sampler_inbatch = data.HardTripletsSampler(norm_required=False)
        criterion = nn.TripletMarginLossWithSampler(margin=0.5, sampler_inbatch=sampler_inbatch)

        callbacks = OrderedDict(
            {
                # CMC@1 is computed on the valid loader only.
                "cmc": dl.ControlFlowCallback(
                    LoaderMetricCallback(
                        CMCMetric(
                            topk_args=[1],
                            embeddings_key="embeddings",
                            labels_key="targets",
                            is_query_key="is_query",
                        ),
                        input_key=["embeddings", "is_query"],
                        target_key=["targets"],
                    ),
                    loaders="valid",
                ),
                # Run validation only every 2nd epoch.
                "control": dl.PeriodicLoaderCallback(
                    valid_loader_key="valid", valid_metric_key="cmc", valid=2
                ),
            }
        )
        runner = CustomRunner(input_key="features", output_key="embeddings")
        runner.train(
            model=model,
            criterion=criterion,
            optimizer=optimizer,
            callbacks=callbacks,
            loaders=OrderedDict({"train": train_loader, "valid": val_loader}),
            verbose=False,
            valid_loader="valid",
            num_epochs=4,
        )
        # "cmc01" is the metric key CMCMetric registers for topk_args=[1].
        assert "cmc01" in runner.loader_metrics
def main() -> None:
    """
    This function checks metric learning pipeline with different
    triplets samplers.
    """
    # NOTE: the cmc score should exceed 0.97 after 600 epochs; please
    # verify that manually so CI pod time is not wasted here.
    threshold = 0.9
    samplers = (
        data.AllTripletsSampler(max_output_triplets=512),
        data.HardTripletsSampler(norm_required=False),
    )
    for triplet_sampler in samplers:
        assert run_ml_pipeline(triplet_sampler) > threshold
def train_experiment(device, engine=None):
    """
    Train an MNIST embedding model with triplet loss, fit a sklearn
    RandomForest on the embeddings, and assert accuracy on the infer split.
    """
    with TemporaryDirectory() as logdir:
        from catalyst import utils

        utils.set_global_seed(RANDOM_STATE)
        # 1. train, valid and test loaders
        transforms = Compose([ToTensor(), Normalize((0.1307, ), (0.3081, ))])
        train_data = MNIST(os.getcwd(), train=True, download=True, transform=transforms)
        train_labels = train_data.targets.cpu().numpy().tolist()
        # Class-balanced batches: 10 classes x 4 samples per batch.
        train_sampler = data.BatchBalanceClassSampler(train_labels, num_classes=10, num_samples=4)
        train_loader = DataLoader(train_data, batch_sampler=train_sampler)
        # NOTE(review): "valid" and "infer" use the same MNIST test split —
        # presumably intentional for this smoke test; confirm if reused.
        valid_dataset = MNIST(root=os.getcwd(), transform=transforms, train=False, download=True)
        valid_loader = DataLoader(dataset=valid_dataset, batch_size=32)
        test_dataset = MNIST(root=os.getcwd(), transform=transforms, train=False, download=True)
        test_loader = DataLoader(dataset=test_dataset, batch_size=32)

        # 2. model and optimizer
        model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 16), nn.LeakyReLU(inplace=True))
        optimizer = Adam(model.parameters(), lr=LR)
        scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [2])

        # 3. criterion with triplets sampling
        sampler_inbatch = data.HardTripletsSampler(norm_required=False)
        criterion = nn.TripletMarginLossWithSampler(
            margin=0.5, sampler_inbatch=sampler_inbatch)

        # 4. training with catalyst Runner
        class CustomRunner(dl.SupervisedRunner):
            def handle_batch(self, batch) -> None:
                # Map raw batch to embeddings/targets for downstream callbacks.
                images, targets = batch["features"].float(
                ), batch["targets"].long()
                features = self.model(images)
                self.batch = {
                    "embeddings": features,
                    "targets": targets,
                }

        callbacks = [
            # Triplet loss is computed only on the train loader.
            dl.ControlFlowCallback(
                dl.CriterionCallback(input_key="embeddings",
                                     target_key="targets",
                                     metric_key="loss"),
                loaders="train",
            ),
            # Fit a RandomForest on train embeddings; predict on valid/infer.
            dl.SklearnModelCallback(
                feature_key="embeddings",
                target_key="targets",
                train_loader="train",
                valid_loaders=["valid", "infer"],
                model_fn=RandomForestClassifier,
                predict_method="predict_proba",
                predict_key="sklearn_predict",
                random_state=RANDOM_STATE,
                n_estimators=50,
            ),
            # Accuracy of the sklearn predictions on valid/infer only.
            dl.ControlFlowCallback(
                dl.AccuracyCallback(target_key="targets",
                                    input_key="sklearn_predict",
                                    topk_args=(1, 3)),
                loaders=["valid", "infer"],
            ),
        ]
        runner = CustomRunner(input_key="features", output_key="embeddings")
        runner.train(
            engine=engine or dl.DeviceEngine(device),
            model=model,
            criterion=criterion,
            optimizer=optimizer,
            scheduler=scheduler,
            callbacks=callbacks,
            loaders={
                "train": train_loader,
                "valid": valid_loader,
                "infer": test_loader
            },
            verbose=False,
            valid_loader="valid",
            valid_metric="accuracy",
            minimize_valid_metric=False,
            num_epochs=TRAIN_EPOCH,
            logdir=logdir,
        )
        # NOTE(review): despite the name, valid_path points at the *infer*
        # loader's CSV log — the assertion checks the infer split.
        valid_path = Path(logdir) / "logs/infer.csv"
        best_accuracy = max(
            float(row["accuracy"]) for row in read_csv(valid_path))
        assert best_accuracy > 0.8
def train_experiment(device, engine=None):
    """
    Train an embedding model with triplet loss on synthetic classification
    data, fit a sklearn RandomForest on the embeddings, and assert accuracy.
    """
    with TemporaryDirectory() as logdir:
        from catalyst import utils

        utils.set_global_seed(RANDOM_STATE)
        # 1. generate data
        num_samples, num_features, num_classes = int(1e4), int(30), 3
        X, y = make_classification(
            n_samples=num_samples,
            n_features=num_features,
            n_informative=num_features,  # all features informative
            n_repeated=0,
            n_redundant=0,
            n_classes=num_classes,
            n_clusters_per_class=1,
        )
        X, y = torch.tensor(X), torch.tensor(y)
        dataset = TensorDataset(X, y)
        loader = DataLoader(dataset, batch_size=64, num_workers=1, shuffle=True)

        # 2. model, optimizer and scheduler
        hidden_size, out_features = 20, 16
        model = nn.Sequential(nn.Linear(num_features, hidden_size), nn.ReLU(),
                              nn.Linear(hidden_size, out_features))
        optimizer = Adam(model.parameters(), lr=LR)
        scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [2])

        # 3. criterion with triplets sampling
        sampler_inbatch = data.HardTripletsSampler(norm_required=False)
        criterion = nn.TripletMarginLossWithSampler(
            margin=0.5, sampler_inbatch=sampler_inbatch)

        # 4. training with catalyst Runner
        class CustomRunner(dl.SupervisedRunner):
            def handle_batch(self, batch) -> None:
                # Map raw batch to embeddings/targets for downstream callbacks.
                features, targets = batch["features"].float(
                ), batch["targets"].long()
                embeddings = self.model(features)
                self.batch = {
                    "embeddings": embeddings,
                    "targets": targets,
                }

        callbacks = [
            # Fit a RandomForest on train embeddings; predict on valid.
            dl.SklearnModelCallback(
                feature_key="embeddings",
                target_key="targets",
                train_loader="train",
                valid_loaders="valid",
                model_fn=RandomForestClassifier,
                predict_method="predict_proba",
                predict_key="sklearn_predict",
                random_state=RANDOM_STATE,
                n_estimators=100,
            ),
            # Accuracy of the sklearn predictions on valid only.
            dl.ControlFlowCallback(
                dl.AccuracyCallback(target_key="targets",
                                    input_key="sklearn_predict",
                                    topk_args=(1, 3)),
                loaders="valid",
            ),
        ]
        runner = CustomRunner(input_key="features", output_key="embeddings")
        runner.train(
            engine=engine or dl.DeviceEngine(device),
            model=model,
            criterion=criterion,
            optimizer=optimizer,
            callbacks=callbacks,
            scheduler=scheduler,
            # NOTE(review): the same loader serves both train and valid —
            # accuracy is effectively measured on training data here.
            loaders={
                "train": loader,
                "valid": loader
            },
            verbose=False,
            valid_loader="valid",
            valid_metric="accuracy",
            minimize_valid_metric=False,
            num_epochs=TRAIN_EPOCH,
            logdir=logdir,
        )
        valid_path = Path(logdir) / "logs/valid.csv"
        best_accuracy = max(
            float(row["accuracy"]) for row in read_csv(valid_path))
        assert best_accuracy > 0.9
def train_experiment(device, engine=None):
    """
    Train an MNIST metric-learning model with triplet loss and evaluate it
    with CMC@1 on a query/gallery validation split.
    """
    with TemporaryDirectory() as logdir:
        # 1. train and valid loaders
        transforms = Compose([ToTensor(), Normalize((0.1307, ), (0.3081, ))])
        train_dataset = datasets.MnistMLDataset(root=os.getcwd(),
                                                download=True,
                                                transform=transforms)
        # Class-balanced batches: 5 classes x 10 samples, 10 batches/epoch.
        sampler = data.BatchBalanceClassSampler(
            labels=train_dataset.get_labels(), num_classes=5, num_samples=10,
            num_batches=10)
        train_loader = DataLoader(dataset=train_dataset, batch_sampler=sampler)
        # Query/gallery split (gallery_fraq=0.2 → 20% gallery) for CMC.
        valid_dataset = datasets.MnistQGDataset(root=os.getcwd(),
                                                transform=transforms,
                                                gallery_fraq=0.2)
        valid_loader = DataLoader(dataset=valid_dataset, batch_size=1024)

        # 2. model and optimizer
        model = models.MnistSimpleNet(out_features=16)
        optimizer = Adam(model.parameters(), lr=0.001)

        # 3. criterion with triplets sampling
        sampler_inbatch = data.HardTripletsSampler(norm_required=False)
        criterion = nn.TripletMarginLossWithSampler(
            margin=0.5, sampler_inbatch=sampler_inbatch)

        # 4. training with catalyst Runner
        callbacks = [
            # Triplet loss only on the train loader.
            dl.ControlFlowCallback(
                dl.CriterionCallback(input_key="embeddings",
                                     target_key="targets",
                                     metric_key="loss"),
                loaders="train",
            ),
            # CMC@1 only on the valid loader.
            dl.ControlFlowCallback(
                dl.CMCScoreCallback(
                    embeddings_key="embeddings",
                    labels_key="targets",
                    is_query_key="is_query",
                    topk_args=[1],
                ),
                loaders="valid",
            ),
            # Run validation only every 2nd epoch.
            dl.PeriodicLoaderCallback(valid_loader_key="valid",
                                      valid_metric_key="cmc01",
                                      minimize=False,
                                      valid=2),
        ]
        runner = CustomRunner(input_key="features", output_key="embeddings")
        runner.train(
            engine=engine or dl.DeviceEngine(device),
            model=model,
            criterion=criterion,
            optimizer=optimizer,
            callbacks=callbacks,
            loaders={
                "train": train_loader,
                "valid": valid_loader
            },
            verbose=False,
            logdir=logdir,
            valid_loader="valid",
            valid_metric="cmc01",
            minimize_valid_metric=False,
            num_epochs=2,
        )