Code example #1
File: active.py  Project: d3v3l0/selection-via-proxy
    # Verify there is enough training data for validation,
    #   the initial subset, and the selection rounds.
    validate_splits(train_dataset, validation, initial_subset, rounds)

    # Create the test dataset.
    test_dataset = None
    if track_test_acc:
        test_dataset = create_dataset(dataset, datasets_dir, train=False)

    # Calculate the number of classes (e.g., 2 or 5) so the model has
    #   the right dimension for its output.
    num_classes = len(train_dataset.classes)

    # Split the training dataset between training and validation.
    unlabeled_pool, dev_indices = utils.split_indices(train_dataset,
                                                      validation,
                                                      run_dir,
                                                      shuffle=shuffle)

    # Create the proxy to select which data points to label. If the
    #   selections were precomputed in another run or elsewhere, we can
    #   ignore this step.
    if precomputed_selection is None:
        # Use a partial so the appropriate model can be created without
        #   arguments.
        proxy_partial = partial(create_model_and_optimizer,
                                arch=proxy_arch,
                                num_classes=num_classes,
                                optimizer=proxy_optimizer,
                                learning_rate=proxy_learning_rates[0],
                                momentum=proxy_momentum,
                                weight_decay=proxy_weight_decay)
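
For readers unfamiliar with functools.partial, the pattern above freezes the model configuration once so that later code can build a fresh proxy model with a zero-argument call. Below is a minimal, self-contained sketch of the same idea; the create_model_and_optimizer stand-in is hypothetical and only mirrors the call shape shown above, and all hyperparameter values are illustrative.

from functools import partial

# Hypothetical stand-in for the project's create_model_and_optimizer;
# only the argument names match the snippet above.
def create_model_and_optimizer(arch, num_classes, optimizer,
                               learning_rate, momentum, weight_decay):
    model = f'{arch}(outputs={num_classes})'
    opt = f'{optimizer}(lr={learning_rate}, momentum={momentum}, wd={weight_decay})'
    return model, opt

# Freeze the configuration once ...
proxy_partial = partial(create_model_and_optimizer,
                        arch='resnet20',
                        num_classes=10,
                        optimizer='sgd',
                        learning_rate=0.1,
                        momentum=0.9,
                        weight_decay=5e-4)

# ... so each selection round can create a fresh model with no arguments.
model, opt = proxy_partial()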
Code example #2
    # Calculate the number of classes (e.g., 1000) so the model has
    #   the right dimension for its output.
    num_classes = 1_000  # type: ignore

    # Create the proxy and use it to select which data points should be
    #   used to train the final target model. If the selections were
    #   precomputed in another run or elsewhere, we can ignore this
    #   step.
    if precomputed_selection is None:
        # Create a directory for the proxy results to avoid confusion.
        proxy_run_dir = os.path.join(run_dir, 'proxy')
        os.makedirs(proxy_run_dir, exist_ok=True)

        # Split the training dataset between training and validation.
        train_indices, dev_indices = utils.split_indices(train_dataset,
                                                         validation,
                                                         proxy_run_dir,
                                                         shuffle=shuffle)
        # Create data loaders for training, validation, and testing.
        train_loader, dev_loader, test_loader = create_loaders(
            train_dataset,
            batch_size=proxy_batch_size,
            eval_batch_size=proxy_eval_batch_size,
            test_dataset=test_dataset,
            use_cuda=use_cuda,
            num_workers=num_workers,
            eval_num_workers=eval_num_workers,
            indices=(train_indices, dev_indices))

        # Create the model and optimizer for training.
        model, _proxy_optimizer = create_model_and_optimizer(
            arch=proxy_arch,
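
The excerpt above is truncated mid-call, but both examples so far lean on utils.split_indices to carve a validation set out of the training data. The helper's implementation is not shown anywhere in this listing; the sketch below is a hypothetical reconstruction inferred solely from the call sites (the run_dir argument suggests the split is recorded for reproducibility, and the file name used for that is made up).

import os
from typing import List, Tuple

import numpy as np
from torch.utils.data import Dataset

def split_indices(dataset: Dataset, validation: int, run_dir: str,
                  shuffle: bool = False) -> Tuple[List[int], List[int]]:
    """Hypothetical reconstruction inferred from the call sites above."""
    indices = np.arange(len(dataset), dtype=np.int64)
    if shuffle:
        indices = np.random.permutation(indices)
    # Hold out the last `validation` examples for the dev set.
    train_indices = indices[:len(indices) - validation]
    dev_indices = indices[len(indices) - validation:]
    # Guessed behavior: persist the held-out indices under run_dir so a
    # later run can reuse the same split. The file name is invented.
    if run_dir is not None:
        np.save(os.path.join(run_dir, 'dev_indices.npy'), dev_indices)
    return train_indices.tolist(), dev_indices.tolist()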
Code example #3
def create_loaders(
        train_dataset: Dataset,
        batch_size: int = 128,
        eval_batch_size: int = 128,
        validation: int = 0,
        run_dir: Optional[str] = None,
        dev_dataset: Optional[Dataset] = None,
        test_dataset: Optional[Dataset] = None,
        shuffle: bool = False,
        use_cuda: bool = False,
        num_workers: int = 0,
        eval_num_workers: Optional[int] = None,
        indices: Optional[Tuple[List[int], List[int]]] = None) -> Loaders:
    """
    Create data loaders for train, validation, and test.

    Parameters
    ----------
    train_dataset : Dataset
    batch_size : int, default 128
    eval_batch_size : int, default 128
    validation : int, default 0
    run_dir : str or None, default None
    dev_dataset : Dataset or None, default None
    test_dataset : Dataset or None, default None
    shuffle : bool, default False
    use_cuda : bool, default False
    num_workers : int, default 0
    eval_num_workers : int or None, default None
    indices : Tuple[List[int], List[int]] or None, default None

    Returns
    -------
    train_loader : DataLoader
    dev_loader : DataLoader or None
    test_loader : DataLoader or None
    """
    # Maybe split the training dataset between training and validation.
    dev_indices: Optional[List[int]] = None
    if indices is None:
        if validation > 0:
            train_indices, dev_indices = utils.split_indices(train_dataset,
                                                             validation,
                                                             run_dir,
                                                             shuffle=shuffle)
        else:
            train_indices = np.arange(len(train_dataset), dtype=np.int64)
    else:
        train_indices, dev_indices = indices

    # Create training data loader.
    train_sampler = SubsetRandomSampler(train_indices)
    train_loader = torch.utils.data.DataLoader(DatasetWithIndex(train_dataset),
                                               sampler=train_sampler,
                                               batch_size=batch_size,
                                               num_workers=num_workers,
                                               pin_memory=use_cuda)

    # Use the same number of workers for everything.
    if eval_num_workers is None:
        eval_num_workers = num_workers

    dev_loader = None
    # Create validation data loader.
    if dev_indices is not None and len(dev_indices) > 0:
        # Use part of the training dataset for validation.
        print('Using {} examples from training'
              ' for validation'.format(len(dev_indices)))
        dev_sampler = SubsetRandomSampler(dev_indices)
        dev_loader = torch.utils.data.DataLoader(
            DatasetWithIndex(train_dataset),
            sampler=dev_sampler,
            batch_size=eval_batch_size,
            num_workers=eval_num_workers,
            pin_memory=use_cuda)
    elif dev_dataset is not None:
        # Use a separate dataset for validation.
        dev_loader = torch.utils.data.DataLoader(DatasetWithIndex(dev_dataset),
                                                 batch_size=eval_batch_size,
                                                 num_workers=eval_num_workers,
                                                 pin_memory=use_cuda)

    test_loader = None
    # Create test data loader.
    if test_dataset is not None:
        test_loader = torch.utils.data.DataLoader(
            DatasetWithIndex(test_dataset),
            batch_size=eval_batch_size,
            shuffle=False,
            num_workers=eval_num_workers,
            pin_memory=use_cuda)

    return train_loader, dev_loader, test_loader
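
As a usage sketch, the call below wires create_loaders to a torchvision CIFAR-10 training set, holding out 5,000 examples for validation. The dataset choice and all hyperparameter values are illustrative, not taken from the project, and './run' must already exist if the split helper writes the indices there.

import torch
from torchvision import datasets, transforms

train_dataset = datasets.CIFAR10('./data', train=True, download=True,
                                 transform=transforms.ToTensor())
test_dataset = datasets.CIFAR10('./data', train=False, download=True,
                                transform=transforms.ToTensor())

use_cuda = torch.cuda.is_available()
train_loader, dev_loader, test_loader = create_loaders(
    train_dataset,
    batch_size=128,
    eval_batch_size=256,
    validation=5_000,           # carve 5,000 examples out of the train set
    run_dir='./run',            # where the split indices are recorded
    test_dataset=test_dataset,
    shuffle=True,
    use_cuda=use_cuda,
    num_workers=4)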