def __init__(self, n_factors=10, model=None, sparse=False, n_iter=10,
             loss=None, l2=0.0, learning_rate=1e-2, optimizer_func=None,
             batch_size=None, random_state=None, use_cuda=False,
             device_id=None, logger=None, n_jobs=0, pin_memory=False,
             verbose=False, early_stopping=False, n_iter_no_change=10,
             tol=1e-4, stopping=False):
    """Initialize the FM estimator and seed the RNG.

    Parameters
    ----------
    n_factors : int
        Dimensionality of the latent factors.
    model : object, optional
        Pre-built model to wrap; if ``None`` one is created later.
    sparse : bool
        Whether to use sparse gradients/embeddings.
    n_iter : int
        Number of training epochs.
    loss : callable, optional
        Loss function; defaults to ``torch.nn.MSELoss()``.
    l2 : float
        L2 regularization strength.
    learning_rate : float
        Optimizer learning rate.
    optimizer_func : callable, optional
        Factory that builds the optimizer; a default is chosen if ``None``.
    batch_size : int, optional
        Mini-batch size (``None`` means full-batch or a later default).
    random_state : numpy.random.RandomState, optional
        Source of randomness; a fresh ``RandomState`` if ``None``.
    use_cuda : bool
        Run on GPU when True.
    device_id : int, optional
        CUDA device index; only valid together with ``use_cuda=True``.
    logger : object, optional
        Logger instance; defaults to a new ``Logger()``.
    n_jobs : int
        DataLoader worker count.
    pin_memory : bool
        Pin host memory for faster host-to-device transfer.
    verbose : bool
        Show progress output when True.
    early_stopping : bool
        Stop training when the loss stops improving.
    n_iter_no_change : int
        Epochs with no improvement tolerated before stopping.
    tol : float
        Minimum loss improvement counted as progress.
    stopping : bool
        Extra stopping flag (semantics defined by the training loop).

    Raises
    ------
    ValueError
        If ``device_id`` is given while ``use_cuda`` is False.
    """
    super(FM, self).__init__()
    # Validate the device configuration up front so a bad combination
    # fails before any state is attached to the instance.
    if device_id is not None and not use_cuda:
        raise ValueError("use_cuda flag must be true")
    self._no_improvement_count = 0
    self.n_factors = n_factors
    self.n_iter = n_iter
    self.batch_size = batch_size
    self.learning_rate = learning_rate
    self.l2 = l2
    self._model = model
    self._sparse = sparse
    self._random_state = random_state or np.random.RandomState()
    self.use_cuda = use_cuda
    self._device_id = device_id
    self._optimizer_func = optimizer_func
    self._loss_func = loss or torch.nn.MSELoss()
    self._logger = logger or Logger()
    self._n_jobs = n_jobs
    self._optimizer = None
    self._dataset = None
    self._n_items = None
    self._n_users = None
    self._pin_memory = pin_memory
    self._disable = not verbose  # tqdm-style "disable progress" flag
    self._early_stopping = early_stopping
    self._n_iter_no_change = n_iter_no_change
    self._tol = tol
    self._stopping = stopping
    # Seed both CPU and (optionally) CUDA RNGs for reproducibility.
    set_seed(self._random_state.randint(-10 ** 8, 10 ** 8),
             cuda=self.use_cuda)
def __init__(self, n_factors=10, model=None, sparse=False, n_iter=10,
             loss=None, l2=0.0, learning_rate=1e-2, optimizer_func=None,
             batch_size=None, random_state=None, use_cuda=False,
             logger=None, n_jobs=0):
    """Initialize the FM estimator and seed the RNG.

    Parameters
    ----------
    n_factors : int
        Dimensionality of the latent factors.
    model : object, optional
        Pre-built model to wrap; if ``None`` one is created later.
    sparse : bool
        Whether to use sparse gradients/embeddings.
    n_iter : int
        Number of training epochs.
    loss : callable, optional
        Loss function; defaults to ``torch.nn.MSELoss()``.
    l2 : float
        L2 regularization strength.
    learning_rate : float
        Optimizer learning rate.
    optimizer_func : callable, optional
        Factory that builds the optimizer; a default is chosen if ``None``.
    batch_size : int, optional
        Mini-batch size (``None`` means full-batch or a later default).
    random_state : numpy.random.RandomState, optional
        Source of randomness; a fresh ``RandomState`` if ``None``.
    use_cuda : bool
        Run on GPU when True.
    logger : object, optional
        Logger instance; defaults to a new ``Logger()``.
    n_jobs : int
        DataLoader worker count.
    """
    super(FM, self).__init__()
    self._n_factors = n_factors
    self._model = model
    self._n_iter = n_iter
    self._sparse = sparse
    self._batch_size = batch_size
    self._random_state = random_state or np.random.RandomState()
    self._use_cuda = use_cuda
    self._l2 = l2
    self._learning_rate = learning_rate
    self._optimizer_func = optimizer_func
    self._loss_func = loss or torch.nn.MSELoss()
    self._logger = logger or Logger()
    self._n_jobs = n_jobs
    self._optimizer = None
    self._dataset = None
    self._initialized = False  # flipped once the model/dataset are built
    # Seed both CPU and (optionally) CUDA RNGs for reproducibility.
    set_seed(self._random_state.randint(-10 ** 8, 10 ** 8),
             cuda=self._use_cuda)