Code example #1
File: model_def.py Project: atalwalkar/determined-1
    def build_validation_data_loader(self) -> DataLoader:
        # Download the dataset once; the flag avoids re-downloading when the
        # training data loader has already fetched it.
        if not self.data_downloaded:
            data.download_data(self.download_directory)
            self.data_downloaded = True

        corpus = data_util.Corpus(self.download_directory)

        test_dataset = data.PTBData(
            corpus.valid,
            self.context.get_hparam("seq_len"),
            self.context.get_hparam("eval_batch_size"),
            self.context.get_hparam("bptt"),
            self.context.get_hparam("max_seq_length_delta"),
        )

        return DataLoader(
            test_dataset,
            batch_sampler=data.BatchSamp(
                test_dataset,
                self.context.get_hparam("bptt"),
                self.context.get_hparam("max_seq_length_delta"),
                valid=True,
            ),
            collate_fn=data.PadSequence(),
        )
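For orientation: this hook is one of several methods a Determined PyTorchTrial subclass implements, alongside build_training_data_loader (example #5 below). A minimal sketch of the surrounding class, assuming the standard PyTorchTrial interface; the class name and the elided method bodies are illustrative, not taken from the project above:

from determined.pytorch import DataLoader, PyTorchTrial, PyTorchTrialContext

class PTBTrial(PyTorchTrial):  # hypothetical name, for illustration
    def __init__(self, context: PyTorchTrialContext) -> None:
        self.context = context
        self.download_directory = context.get_data_config()["data_download_dir"]
        self.data_downloaded = False  # consulted by both data-loader hooks

    def build_training_data_loader(self) -> DataLoader:
        ...  # mirrors the validation hook, but over corpus.train (example #5)

    def build_validation_data_loader(self) -> DataLoader:
        ...  # the hook shown above

    def train_batch(self, batch, epoch_idx, batch_idx):
        ...  # forward/backward pass; returns a dict of training metrics

    def evaluate_batch(self, batch):
        ...  # returns a dict of validation metrics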
Code example #2
File: model_def.py Project: wbwatkinson/determined
    def __init__(self, context: PyTorchTrialContext) -> None:
        self.context = context
        self.data_config = context.get_data_config()
        self.hparams = AttrDict(context.get_hparams())

        # Create a unique download directory for each rank so they don't overwrite each
        # other when doing distributed training.
        self.download_directory = self.data_config["data_download_dir"]
        data.download_data(self.download_directory)
        corpus = data_util.Corpus(self.download_directory)
        self.corpus = corpus
        self.ntokens = len(corpus.dictionary)
        self.hidden = None

        # Store the evaluation history; the optimizer switches to ASGD once
        # validation perplexity stops improving.
        self._last_loss = None
        self._eval_history = []
        self._last_epoch = -1

        # Define the model
        genotype = self.get_genotype_from_hps()
        self.model = self.context.wrap_model(
            RNNModel(
                self.ntokens,
                self.hparams.emsize,
                self.hparams.nhid,
                self.hparams.nhidlast,
                self.hparams.dropout,
                self.hparams.dropouth,
                self.hparams.dropoutx,
                self.hparams.dropouti,
                self.hparams.dropoute,
                genotype=genotype,
            ))
        total_params = sum(x.data.nelement() for x in self.model.parameters())
        logging.info("Model total parameters: {}".format(total_params))

        # Define the optimizer
        self._optimizer = self.context.wrap_optimizer(
            HybridSGD(
                self.model.parameters(),
                self.hparams.learning_rate,
                self.hparams.weight_decay,
                lambd=0,
                t0=0,
            ))

        # Define the LR scheduler
        self.myLR = MyLR(self._optimizer, self.hparams)
        step_mode = LRScheduler.StepMode.MANUAL_STEP
        self.wrapped_LR = self.context.wrap_lr_scheduler(self.myLR,
                                                         step_mode=step_mode)
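Because the scheduler is wrapped with LRScheduler.StepMode.MANUAL_STEP, Determined does not step it automatically; the trial is expected to call step() itself during training. A sketch of how that might look inside train_batch; the loss computation is a placeholder assumption, not code from the project above:

    def train_batch(self, batch, epoch_idx: int, batch_idx: int):
        loss = ...  # placeholder: compute the language-model loss for this batch

        self.context.backward(loss)
        self.context.step_optimizer(self._optimizer)

        # MANUAL_STEP: the wrapped scheduler advances only when stepped here.
        self.myLR.step()

        return {"loss": loss}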
Code example #3
    def __init__(self, context: det.TrialContext) -> None:
        self.context = context
        self.data_config = context.get_data_config()
        self.hparams = AttrDict(context.get_hparams())

        # Create a unique download directory for each rank so they don't overwrite each other.
        self.download_directory = self.data_config["data_download_dir"]
        data.download_data(self.download_directory)
        corpus = data_util.Corpus(self.download_directory)
        self.corpus = corpus
        self.ntokens = len(corpus.dictionary)
        self.hidden = None

        # Store the evaluation history; the optimizer switches to ASGD once
        # validation perplexity stops improving.
        self._last_loss = None
        self._eval_history = []
        self._last_epoch = -1
Code example #4
File: data.py Project: determined-ai/pedl-examples
def make_data_loaders(experiment_config: Dict[str, Any],
                      hparams: Dict[str, Any]):
    """
    Required method to load the datasets.

    Returns a (train, validation) pair of PEDL DataLoaders.
    """
    corpus = data.Corpus(pedl.get_data_config().get("data_loc"))

    train_dataset = PTBData(corpus.train, pedl.get_hyperparameter("seq_len"),
                            pedl.get_hyperparameter("batch_size"))
    test_dataset = PTBData(corpus.valid, pedl.get_hyperparameter("seq_len"),
                           pedl.get_hyperparameter("eval_batch_size"))

    return (
        DataLoader(train_dataset,
                   batch_sampler=BatchSamp(train_dataset),
                   collate_fn=PadSequence()),
        DataLoader(
            test_dataset,
            batch_sampler=BatchSamp(test_dataset, valid=True),
            collate_fn=PadSequence(),
        ),
    )
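This example uses the older PEDL function-based API: the data loaders come from a module-level make_data_loaders, and configuration is read through pedl.get_data_config() and pedl.get_hyperparameter(). In the Trial-based API of examples #1, #2, and #5, the same lookups go through the trial's context instead. A rough correspondence, assuming the hyperparameter names are unchanged:

# Older PEDL API (module-level function):
seq_len = pedl.get_hyperparameter("seq_len")

# Newer Trial API (inside a trial method):
seq_len = self.context.get_hparam("seq_len")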
Code example #5
File: model_def.py Project: atalwalkar/determined-1
    def build_training_data_loader(self) -> DataLoader:
        if not self.data_downloaded:
            data.download_data(self.download_directory)
            self.data_downloaded = True

        corpus = data_util.Corpus(self.download_directory)

        # The training loader uses the per-slot batch size, so the configured
        # global batch is split evenly across slots in distributed training.
        train_dataset = data.PTBData(
            corpus.train,
            self.context.get_hparam("seq_len"),
            self.context.get_per_slot_batch_size(),
            self.context.get_hparam("bptt"),
            self.context.get_hparam("max_seq_length_delta"),
        )
        return DataLoader(
            train_dataset,
            batch_sampler=data.BatchSamp(
                train_dataset,
                self.context.get_hparam("bptt"),
                self.context.get_hparam("max_seq_length_delta"),
            ),
            collate_fn=data.PadSequence(),
        )
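Note the asymmetry with example #1: validation reads the eval_batch_size hyperparameter directly, while training asks the context for a per-slot batch size. In Determined, the per-slot value is the configured global batch size divided across the slots used by the trial, so each worker processes an equal share. A sketch of the arithmetic with illustrative numbers, not values from any project above:

# Illustrative values only; Determined derives this internally from the
# experiment configuration.
global_batch_size = 256   # hyperparameters.global_batch_size
slots_per_trial = 8       # resources.slots_per_trial

per_slot_batch_size = global_batch_size // slots_per_trial  # 256 // 8 == 32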