Example #1
def evaluate(args):
    label_map = load_label_map(args.dataset)
    n_classes = 50
    if args.dataset == "include":
        n_classes = 263

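    # Pick the CNN-feature dataset or the raw keypoint dataset, depending on the flag.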
    if args.use_cnn:
        dataset = FeaturesDatset(
            features_dir=os.path.join(args.data_dir,
                                      f"{args.dataset}_test_features"),
            label_map=label_map,
            mode="test",
        )

    else:
        dataset = KeypointsDataset(
            keypoints_dir=os.path.join(args.data_dir,
                                       f"{args.dataset}_test_keypoints"),
            use_augs=False,
            label_map=label_map,
            mode="test",
            max_frame_len=169,
        )

    dataloader = data.DataLoader(
        dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=4,
        pin_memory=True,
    )

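    # Build the classifier; with CNN features, the input size matches the CNN output dim.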
    if args.model == "lstm":
        config = LstmConfig()
        if args.use_cnn:
            config.input_size = CnnConfig.output_dim
        model = LSTM(config=config, n_classes=n_classes)
    else:
        config = TransformerConfig(size=args.transformer_size)
        if args.use_cnn:
            config.input_size = CnnConfig.output_dim
        model = Transformer(config=config, n_classes=n_classes)

    model = model.to(device)

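    # Restore weights either via load_pretrained or from the checkpoint saved for this experiment.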
    if args.use_pretrained == "evaluate":
        model, _, _ = load_pretrained(args, n_classes, model)
        print("### Model loaded ###")

    else:
        exp_name = get_experiment_name(args)
        model_path = os.path.join(args.save_path, exp_name) + ".pth"
        ckpt = torch.load(model_path)
        model.load_state_dict(ckpt["model"])
        print("### Model loaded ###")

    test_loss, test_acc = validate(dataloader, model, device)
    print("Evaluation Results:")
    print(f"Loss: {test_loss}, Accuracy: {test_acc}")
Example #2
def create_distance_matrix(dataset):
    experiment_name = get_experiment_name(EXPERIMENT_NAME,
                                          pairwise=True,
                                          ranking=True,
                                          adaptive=ADAPTIVE,
                                          loss=LOSS)

    dataset = list(dataset)
    net = FinalResnet()

    # if pretrained_model_name is not None:
    #     checkpoint_path = get_checkpoint_path(pretrained_model_name)
    #     data = torch.load(checkpoint_path, map_location=DEVICE)
    # else:
    #     data = None

    adver_net = AdverserialNetwork()
    stats_manager = StatsManager()

    exp = AdaptiveExperiment(net,
                             adver_net,
                             stats_manager,
                             output_dir=experiment_name,
                             perform_validation_during_training=False,
                             pretrained_data=None)

    ref_img1, ref_img2 = get_two_ref_target_imgs()
    dataset.extend([ref_img1[0].unsqueeze(0)] + [ref_img2[0].unsqueeze(0)])

    num_samples = len(dataset)
    distance_matrix = np.zeros((num_samples, num_samples))

    exp.net.eval()
    exp.adv_net.eval()

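    # Pairwise forward passes fill a symmetric distance matrix over all samples.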
    with torch.no_grad():
        for i in range(num_samples - 1):
            for j in range(i + 1, num_samples):
                x1_s, x2_s = dataset[i], dataset[j]
                if isinstance(x1_s, np.ndarray) and isinstance(
                        x2_s, np.ndarray):
                    x1_s = torch.from_numpy(x1_s)
                    x2_s = torch.from_numpy(x2_s)
                x = torch.cat([x1_s, x2_s], dim=1)
                x = x.to(exp.net.device)
                f, y = exp.net.forward_adaptive(x)
                distance_matrix[i, j] = y[0][0]
                distance_matrix[j, i] = y[0][0]

    return distance_matrix, ref_img1[1], ref_img2[1]
Example #3
def evaluate(args):
    test_files = sorted(
        glob.glob(
            os.path.join(args.data_dir, f"{args.dataset}_test_keypoints",
                         "*.json")))

    test_df = load_dataframe(test_files)

    label_map = load_label_map(args.dataset)
    x_test, y_test = preprocess(test_df, args.use_augs, label_map, "test")

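    # Rebuild the experiment name to locate the saved XGBoost model on disk.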
    exp_name = get_experiment_name(args)
    config = XgbConfig()
    model = Xgboost(config=config)
    load_path = os.path.join(args.save_dir, exp_name + ".pickle.dat")
    model.load(load_path)
    print("### Model loaded ###")

    test_preds = model(x_test)
    print("Test accuracy:", accuracy_score(y_test, test_preds))
Example #4
def fit(args):
    train_files = sorted(
        glob.glob(
            os.path.join(args.data_dir, f"{args.dataset}_train_keypoints",
                         "*.json")))
    val_files = sorted(
        glob.glob(
            os.path.join(args.data_dir, f"{args.dataset}_val_keypoints",
                         "*.json")))

    train_df = load_dataframe(train_files)
    val_df = load_dataframe(val_files)

    label_map = load_label_map(args.dataset)
    x_train, y_train = preprocess(train_df, args.use_augs, label_map, "train")
    x_val, y_val = preprocess(val_df, args.use_augs, label_map, "val")

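    # Fit XGBoost on the preprocessed keypoints and checkpoint it under the experiment name.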
    config = XgbConfig()
    model = Xgboost(config=config)
    model.fit(x_train, y_train, x_val, y_val)

    exp_name = get_experiment_name(args)
    save_path = os.path.join(args.save_dir, exp_name + ".pickle.dat")
    model.save(save_path)
Example #5
import logging
import os

from data import get_train_loader, get_test_loaders

from losses import *
from continuous_losses import *

from networks import initialize_model, freeze_layers, freeze_conv_layers
from train import train, continuous_train
from metrics import initialize_metrics, evaluation, update_metrics, write_results

import utils
from utils import device

args = utils.config()
print(args)

experiment_name = utils.get_experiment_name()

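# Write all log output to ../logs/<experiment_name>.log.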
utils.make_directory("../logs")
logging.basicConfig(filename=os.path.join("../logs", f"{experiment_name}.log"),
                    level=logging.INFO,
                    format="%(asctime)s %(name)s %(levelname)s %(message)s")
logger = logging.getLogger(__name__)
utils.print_log('START with Configuration : {}'.format(args))

data_path = utils.make_directory(args.data_path)
partitions, partitions_train, partitions_tune = utils.get_partitions()

if args.dataset == "Cars3D":
    train_data = Cars3D(root=args.data_path,
                        mode="train",
Example #6
def fit(args):
    exp_name = get_experiment_name(args)
    logging_path = os.path.join(args.save_path, exp_name) + ".log"
    logging.basicConfig(filename=logging_path,
                        level=logging.INFO,
                        format="%(message)s")
    seed_everything(args.seed)
    label_map = load_label_map(args.dataset)

    if args.use_cnn:
        train_dataset = FeaturesDatset(
            features_dir=os.path.join(args.data_dir,
                                      f"{args.dataset}_train_features"),
            label_map=label_map,
            mode="train",
        )
        val_dataset = FeaturesDatset(
            features_dir=os.path.join(args.data_dir,
                                      f"{args.dataset}_val_features"),
            label_map=label_map,
            mode="val",
        )

    else:
        train_dataset = KeypointsDataset(
            keypoints_dir=os.path.join(args.data_dir,
                                       f"{args.dataset}_train_keypoints"),
            use_augs=args.use_augs,
            label_map=label_map,
            mode="train",
            max_frame_len=169,
        )
        val_dataset = KeypointsDataset(
            keypoints_dir=os.path.join(args.data_dir,
                                       f"{args.dataset}_val_keypoints"),
            use_augs=False,
            label_map=label_map,
            mode="val",
            max_frame_len=169,
        )

    train_dataloader = data.DataLoader(
        train_dataset,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=4,
        pin_memory=True,
    )
    val_dataloader = data.DataLoader(
        val_dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=4,
        pin_memory=True,
    )

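    # The "include" dataset has 263 classes; otherwise 50.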
    n_classes = 50
    if args.dataset == "include":
        n_classes = 263

    if args.model == "lstm":
        config = LstmConfig()
        if args.use_cnn:
            config.input_size = CnnConfig.output_dim
        model = LSTM(config=config, n_classes=n_classes)
    else:
        config = TransformerConfig(size=args.transformer_size)
        if args.use_cnn:
            config.input_size = CnnConfig.output_dim
        model = Transformer(config=config, n_classes=n_classes)

    model = model.to(device)
    optimizer = torch.optim.AdamW(model.parameters(),
                                  lr=args.learning_rate,
                                  weight_decay=0.01)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           mode="max",
                                                           factor=0.2)

    if args.use_pretrained == "resume_training":
        model, optimizer, scheduler = load_pretrained(args, n_classes, model,
                                                      optimizer, scheduler)

    model_path = os.path.join(args.save_path, exp_name) + ".pth"
    es = EarlyStopping(patience=15, mode="max")
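    # Train/validate each epoch; both the LR scheduler and early stopping track validation accuracy.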
    for epoch in range(args.epochs):
        print(f"Epoch: {epoch+1}/{args.epochs}")
        train_loss, train_acc = train(train_dataloader, model, optimizer,
                                      device)
        val_loss, val_acc = validate(val_dataloader, model, device)
        logging.info(
            "Epoch: {}, train loss: {}, train acc: {}, val loss: {}, val acc: {}"
            .format(epoch + 1, train_loss, train_acc, val_loss, val_acc))
        scheduler.step(val_acc)
        es(
            model_path=model_path,
            epoch_score=val_acc,
            model=model,
            optimizer=optimizer,
            scheduler=scheduler,
        )
        if es.early_stop:
            print("Early stopping")
            break

    print("### Training Complete ###")
Example #7
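    # Device selection: with multiple GPUs, the batch sizes must split evenly across them.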
    if multi_gpu:
        device = torch.device("cuda") if args.gpu == -1 else torch.device(
            f'cuda:{args.gpu}')
        assert args.train_batch_size % n_gpu == 0, f"Train batch size must divide evenly across {n_gpu} GPUs, but {args.train_batch_size} does not"
        assert args.test_batch_size % n_gpu == 0, f"Eval batch size must divide evenly across {n_gpu} GPUs, but {args.test_batch_size} does not"
    else:
        device = torch.device(f"cuda:{args.gpu}")
        torch.cuda.set_device(device)
    args.device = device
    np.random.seed(args.seed)
    torch.random.manual_seed(args.seed)
    if multi_gpu:
        torch.cuda.manual_seed_all(args.seed)

    # make Report object, stats_dict, and paths
    args.experiment_name = experiment_name = utils.get_experiment_name(
        args) if args.experiment_name is None else args.experiment_name
    if args.small_data and args.do_train:
        experiment_name += "_DEBUG"
    model_path = os.path.join(args.save_dir, f'{experiment_name}.pth')
    retriever_path = os.path.join(args.save_dir,
                                  f'{experiment_name}_retriever.pth')
    print(f"\nStarting experiment: {experiment_name}")
    # make pretrained_model_path
    pretrained_model_path = os.path.join(
        args.save_dir,
        f'{args.pretrained_model}.pth') if args.pretrained_model else None
    pretrained_retriever_path = os.path.join(
        args.save_dir, f'{args.pretrained_retriever}.pth'
    ) if args.pretrained_retriever else None
    # report + stats
    report_name = f"report_{experiment_name}.txt"
Example #8
        self.log("loss", loss)
        self.log("acc", acc)
        return loss

    def validation_step(self, batch, batch_idx):
        img, label = batch
        out = self(img)
        loss = self.criterion(out, label)
        acc = self.accuracy(out, label)
        self.log("val_loss", loss)
        self.log("val_acc", acc)
        return loss


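# Entry point: build the Lightning trainer with a Comet logger named after the experiment.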
if __name__ == "__main__":
    experiment_name = get_experiment_name(args)
    print(experiment_name)
    logger = pl.loggers.CometLogger(
        api_key=args.api_key,
        save_dir="log",
        project_name="image_classification_pytorch",
        experiment_name=experiment_name)
    args.api_key = None  # Clear the API key from args so it is not logged.
    net = Net(args)
    trainer = pl.Trainer(precision=args.precision,
                         fast_dev_run=args.dry_run,
                         gpus=args.gpus,
                         benchmark=args.benchmark,
                         logger=logger,
                         max_epochs=args.max_epochs,
                         weights_summary="full",
Example #9
def train(model, dataset, data_augmentation, epochs, batch_size, beta, M,
          initial_lr, lr_schedule, strategy, output_dir, class_loss, cov_type):

    model_conf = model

    train_set, test_set, small_set = datasets.get_dataset(dataset)

    TRAIN_BUF, TEST_BUF = datasets.dataset_size[dataset]

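    # With augmentation, build a Keras ImageDataGenerator from the dataset's YAML config;
    # otherwise use a plain tf.data pipeline.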
    if data_augmentation:
        base_dataset = dataset.split("-")[0]
        print(f"Using image generator params from {base_dataset}")
        with open(f"./datasets/image-generator-config/{base_dataset}.yml",
                  "r") as fh:
            params = yaml.safe_load(fh)
            print(params)
        train_dataset = tf.keras.preprocessing.image.ImageDataGenerator(
            **params)
        train_dataset.fit(train_set[0])

    else:
        train_dataset = tf.data.Dataset.from_tensor_slices(train_set) \
            .shuffle(TRAIN_BUF).batch(batch_size)

    test_dataset = tf.data.Dataset.from_tensor_slices(test_set) \
        .shuffle(TEST_BUF).batch(batch_size)

    print(
        f"Training with {model} on {dataset} for {epochs} epochs (lr={initial_lr}, schedule={lr_schedule})"
    )
    print(
        f"Params: batch-size={batch_size} beta={beta} M={M} lr={initial_lr} strategy={strategy}"
    )

    optimizers, strategy_name, opt_params = losses.get_optimizer(
        strategy, initial_lr, lr_schedule, dataset, batch_size)

    network_name, architecture = model.split("/")
    experiment_name = utils.get_experiment_name(
        f"{network_name}-{class_loss}-{cov_type}-{dataset}")

    print(f"Experiment name: {experiment_name}")
    artifact_dir = f"{output_dir}/{experiment_name}"
    print(f"Artifact directory: {artifact_dir}")

    train_log_dir = f"{artifact_dir}/logs/train"
    test_log_dir = f"{artifact_dir}/logs/test"

    train_summary_writer = tf.summary.create_file_writer(train_log_dir)
    test_summary_writer = tf.summary.create_file_writer(test_log_dir)

    # Instantiate model
    architecture = utils.parse_arch(architecture)

    model = nets.get_network(network_name)(architecture,
                                           datasets.input_dims[dataset],
                                           datasets.num_classes[dataset],
                                           cov_type,
                                           beta=beta,
                                           M=M)

    model.build(input_shape=(batch_size, *datasets.input_dims[dataset]))
    model.summary()

    print(f"Class loss: {class_loss}")
    model.class_loss = getattr(losses, f"compute_{class_loss}_class_loss")

    lr_labels = list(map(lambda x: f"lr_{x}", range(len(optimizers))))

    train_step = train_algo2 if strategy.split(
        "/")[0] == "algo2" else train_algo1

    print("Using trainstep: ", train_step)

    train_start_time = time.time()

    steps_per_epoch = int(np.ceil(train_set[0].shape[0] / batch_size))

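    # Main loop: one train_step pass per epoch, metrics logged to the TensorBoard writers.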
    for epoch in range(1, epochs + 1):
        start_time = time.time()

        print(f"Epoch {epoch}")

        # Feed augmented batches from the generator when enabled, else the tf.data pipeline.
        train_batches = (train_dataset.flow(train_set[0], train_set[1],
                                            batch_size=batch_size)
                         if data_augmentation else train_dataset)
        m, am = train_step(model, optimizers, train_batches,
                           train_summary_writer, M, lr_labels, strategy_name,
                           opt_params, epoch, steps_per_epoch)

        m = m.result().numpy()
        am = am.result().numpy()

        print(utils.format_metrics("Train", m, am))

        tfutils.log_metrics(train_summary_writer, metric_labels, m, epoch)
        tfutils.log_metrics(train_summary_writer, acc_labels, am, epoch)

        tfutils.log_metrics(
            train_summary_writer, lr_labels,
            map(lambda opt: opt._decayed_lr(tf.float32), optimizers), epoch)

        train_metrics = m.astype(float).tolist() + am.astype(float).tolist()
        end_time = time.time()

        test_metrics = evaluate(model, test_dataset, test_summary_writer, M,
                                epoch)

        print(f"--- Time elapse for current epoch {end_time - start_time}")

    train_end_time = time.time()
    elapsed_time = (train_end_time - train_start_time) / 60.

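    # Assemble a run summary that is written both as TensorBoard text and as summary.yml.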
    test_metrics_dict = dict(zip(metric_labels + acc_labels, test_metrics))
    summary = dict(
        dataset=dataset,
        model=model_conf,
        strategy=strategy,
        beta=beta,
        epoch=epoch,
        M=M,
        lr=initial_lr,
        lr_schedule=lr_schedule,
        metrics=dict(
            train=dict(zip(metric_labels + acc_labels, train_metrics)),
            test=test_metrics_dict,
        ),
        class_loss=class_loss,
        cov_type=cov_type,
        batch_size=batch_size,
        elapsed_time=elapsed_time,  # in minutes
        test_accuracy_L12=test_metrics_dict["accuracy_L12"],
        data_augmentation=data_augmentation)

    if model.latent_dim == 2:
        plot_helper.plot_2d_representation(
            model,
            small_set,
            title="Epoch=%d Strategy=%s  Beta=%f M=%f" %
            (epoch, strategy, beta, M),
            path=f"{artifact_dir}/latent-representation.png")

    with train_summary_writer.as_default():
        tf.summary.text("setting",
                        json.dumps(summary, sort_keys=True, indent=4),
                        step=0)

    with open(f"{artifact_dir}/summary.yml", 'w') as f:
        print(summary)
        yaml.dump(summary, f, default_flow_style=False)

    model.save_weights(f"{artifact_dir}/model")

    print(f"Training took {elapsed_time:.4f} minutes")
    print(f"Please see artifact at: {artifact_dir}")
Example #10
from baseline_train import train as train_baseline
from adaptive_train import train as train_adaptive
from config import *
from utils import get_experiment_name

if __name__ == "__main__":

    if RANK and not PAIRWISE:
        raise Exception("Invalid configuration: RANK requires PAIRWISE")

    experiment_name = get_experiment_name(EXPERIMENT_NAME, PAIRWISE, RANK,
                                          ADAPTIVE, LOSS)

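    # Dispatch to the adaptive or baseline training routine.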
    if ADAPTIVE:
        pretrained_model_name = None
        #        pretrained_model_name = 'baseline_L1'
        train_adaptive(experiment_name,
                       pretrained_model_name=pretrained_model_name)
    else:
        train_baseline(experiment_name)