Example #1
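A model ``__init__`` whose only job is to hand every constructor argument up to its parent class: ``get_init_arguments()`` captures the caller's full argument list, so nothing has to be re-typed in the ``super().__init__`` call.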
    def __init__(self,
                 train: interactions_like_input = None,
                 val: interactions_like_input = None,
                 embedding_dim: int = 30,
                 dropout_p: float = 0.0,
                 sparse: bool = False,
                 lr: float = 1e-3,
                 bias_lr: Optional[Union[float, str]] = 1e-2,
                 lr_scheduler_func: Optional[Callable] = partial(
                     ReduceLROnPlateau, patience=1, verbose=True),
                 weight_decay: float = 0.0,
                 optimizer: Union[str, Callable] = 'adam',
                 bias_optimizer: Optional[Union[str, Callable]] = 'sgd',
                 loss: Union[str, Callable] = 'hinge',
                 metadata_for_loss: Optional[Dict[str, torch.tensor]] = None,
                 metadata_for_loss_weights: Optional[Dict[str, float]] = None,
                 y_range: Optional[Tuple[float, float]] = None,
                 load_model_path: Optional[str] = None,
                 map_location: Optional[str] = None):
        super().__init__(**get_init_arguments())
Example #2
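Here ``item_metadata`` is validated and converted to a float tensor before delegation, and the result of ``get_init_arguments()`` is merged with one extra, locally computed keyword argument (``item_metadata_num_cols``) in the same ``super().__init__`` call.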
    def __init__(self,
                 train: interactions_like_input = None,
                 val: interactions_like_input = None,
                 item_metadata: Union[torch.tensor, pd.DataFrame, np.ndarray] = None,
                 trained_model: MatrixFactorizationModel = None,
                 metadata_layers_dims: Optional[List[int]] = None,
                 combined_layers_dims: List[int] = [128, 64, 32],
                 freeze_embeddings: bool = True,
                 dropout_p: float = 0.0,
                 lr: float = 1e-3,
                 lr_scheduler_func: Optional[Callable] = partial(ReduceLROnPlateau,
                                                                 patience=1,
                                                                 verbose=True),
                 weight_decay: float = 0.0,
                 optimizer: Union[str, Callable] = 'adam',
                 loss: Union[str, Callable] = 'hinge',
                 metadata_for_loss: Optional[Dict[str, torch.tensor]] = None,
                 metadata_for_loss_weights: Optional[Dict[str, float]] = None,
                 # y_range: Optional[Tuple[float, float]] = None,
                 load_model_path: Optional[str] = None,
                 map_location: Optional[str] = None):
        item_metadata_num_cols = None
        if load_model_path is None:
            if trained_model is None:
                raise ValueError('Must provide ``trained_model`` for ``HybridPretrainedModel``.')

            if item_metadata is None:
                raise ValueError('Must provide item metadata for ``HybridPretrainedModel``.')
            elif isinstance(item_metadata, pd.DataFrame):
                item_metadata = torch.from_numpy(item_metadata.to_numpy())
            elif isinstance(item_metadata, np.ndarray):
                item_metadata = torch.from_numpy(item_metadata)

            item_metadata = item_metadata.float()

            item_metadata_num_cols = item_metadata.shape[1]

        super().__init__(**get_init_arguments(),
                         item_metadata_num_cols=item_metadata_num_cols)
Example #3
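The receiving side of the pattern, apparently the base class the first two examples delegate to. After cross-checking the train and val loaders, it calls ``get_init_arguments`` with ``exclude`` so that large objects (``train``, ``val``, ``item_metadata``, ``trained_model``) are kept out of ``save_hyperparameters`` and checkpointing stays fast.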
    def __init__(self,
                 train: interactions_like_input = None,
                 val: interactions_like_input = None,
                 lr: float = 1e-3,
                 lr_scheduler_func: Optional[Callable] = None,
                 weight_decay: float = 0.0,
                 optimizer: Union[str, Callable] = 'adam',
                 loss: Union[str, Callable] = 'hinge',
                 metadata_for_loss: Optional[Dict[str, torch.tensor]] = None,
                 metadata_for_loss_weights: Optional[Dict[str, float]] = None,
                 load_model_path: Optional[str] = None,
                 map_location: Optional[str] = None,
                 **kwargs):
        if isinstance(train, Interactions):
            train = InteractionsDataLoader(interactions=train, shuffle=True)
        if isinstance(val, Interactions):
            val = InteractionsDataLoader(interactions=val, shuffle=False)

        super().__init__()

        # save the datasets as instance attributes, NOT ``hparams``, so model checkpointing /
        # saving can complete faster
        self.train_loader = train
        self.val_loader = val

        # a potential issue with PyTorch Lightning is that a function cannot be saved as a
        # hyperparameter, so we sidestep this by storing it as an instance attribute instead
        # https://github.com/PyTorchLightning/pytorch-lightning/issues/2444
        self.lr_scheduler_func = lr_scheduler_func
        self.loss = loss
        self.optimizer = optimizer
        self.bias_optimizer = kwargs.get('bias_optimizer')

        if load_model_path is not None:
            # we are loading in a previously-saved model, not creating a new one
            self._load_model_init_helper(load_model_path=load_model_path,
                                         map_location=map_location,
                                         **kwargs)
        else:
            if self.train_loader is None:
                # no loaders were provided, so there is nothing to cross-check
                pass
            elif self.val_loader is not None:
                assert self.train_loader.num_users == self.val_loader.num_users, (
                    'Training and val ``num_users`` must be equal: '
                    f'{self.train_loader.num_users} != {self.val_loader.num_users}.'
                )
                assert self.train_loader.num_items == self.val_loader.num_items, (
                    'Training and val ``num_items`` must be equal: '
                    f'{self.train_loader.num_items} != {self.val_loader.num_items}.'
                )

                num_negative_samples_error = (
                    'Training and val ``num_negative_samples`` must both equal ``1`` or both'
                    f' be greater than ``1``, not: {self.train_loader.num_negative_samples} and'
                    f' {self.val_loader.num_negative_samples}, respectively.')
                if self.train_loader.num_negative_samples == 1:
                    assert self.val_loader.num_negative_samples == 1, num_negative_samples_error
                elif self.train_loader.num_negative_samples > 1:
                    assert self.val_loader.num_negative_samples > 1, num_negative_samples_error
                else:
                    raise ValueError(
                        '``self.train_loader.num_negative_samples`` must be greater than ``0``, not'
                        f' {self.train_loader.num_negative_samples}.')

            # saves all passed-in parameters
            init_args = get_init_arguments(
                exclude=['train', 'val', 'item_metadata', 'trained_model'],
                verbose=False,
            )

            self.save_hyperparameters(init_args, *kwargs.keys())

            self.hparams.num_users = self.train_loader.num_users
            self.hparams.num_items = self.train_loader.num_items
            self.hparams.n_epochs_completed_ = 0

            self._configure_loss()

            # check weight decay and sparsity
            if hasattr(self.hparams, 'sparse'):
                if self.hparams.sparse and self.hparams.weight_decay != 0:
                    warnings.warn(
                        textwrap.dedent(f'''
                            ``weight_decay`` value must be 0 when ``sparse`` is flagged, not
                            {self.hparams.weight_decay}. Setting to 0.
                            ''').replace('\n', ' ').strip())
                    self.hparams.weight_decay = 0.0

            # set up the actual model
            self._setup_model(**kwargs)
Example #4
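A test-style fixture: ``exclude`` names a parameter (``var_4``) that is not in the signature at all, and ``verbose=True`` is exercised.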
        def __init__(self, var_1, var_2=2468, **kwargs):
            super().__init__()

            self.actual = get_init_arguments(exclude=['var_4'], verbose=True)
Example #5
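The same fixture excluding one real parameter (``var_2``) alongside one nonexistent name (``var_3``).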
        def __init__(self, var_1, var_2=2468, **kwargs):
            super().__init__()

            self.actual = get_init_arguments(exclude=['var_2', 'var_3'])
Example #6
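No exclusions, with both a defaulted parameter and ``**kwargs`` in the signature.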
        def __init__(self, var_1, var_2=12345, **kwargs):
            super().__init__()

            self.actual = get_init_arguments()
Example #7
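A signature consisting solely of ``**kwargs``.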
        def __init__(self, **kwargs):
            super().__init__()

            self.actual = get_init_arguments()
Example #8
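A plain signature with no ``**kwargs``.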
        def __init__(self, var_1, var_2=54321):
            super().__init__()

            self.actual = get_init_arguments()
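
None of the examples above show ``get_init_arguments`` itself. As orientation only, here is a minimal sketch of how such a helper can be built with ``inspect``, reading the calling ``__init__``'s frame. This is an illustration consistent with the call sites above, not the library's actual implementation; in particular, the exact handling of ``exclude``, ``verbose``, and arguments captured by ``**kwargs`` is an assumption.

import inspect
from typing import Any, Dict, Iterable, Optional


def get_init_arguments(exclude: Optional[Iterable[str]] = None,
                       verbose: bool = False) -> Dict[str, Any]:
    # Illustrative sketch only; not the library's real implementation.
    # One frame up the stack is the ``__init__`` that invoked this helper.
    frame = inspect.currentframe().f_back
    args, _, varkw, frame_locals = inspect.getargvalues(frame)

    excluded = set(exclude or ())

    # named parameters, minus ``self`` and anything explicitly excluded
    init_args = {
        name: frame_locals[name]
        for name in args
        if name != 'self' and name not in excluded
    }

    # fold in anything the caller's signature captured via ``**kwargs``
    if varkw is not None:
        init_args.update({
            name: value
            for name, value in frame_locals[varkw].items()
            if name not in excluded
        })

    if verbose:
        print(f'get_init_arguments captured: {init_args}')

    return init_args

Under this sketch, Example #8 resolves as follows:

class Model:
    def __init__(self, var_1, var_2=54321):
        super().__init__()

        self.actual = get_init_arguments()

assert Model(111).actual == {'var_1': 111, 'var_2': 54321}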