def factorization_model(num_embeddings, bloom):

    if bloom:
        user_embeddings = BloomEmbedding(num_embeddings, EMBEDDING_DIM,
                                         num_hash_functions=NUM_HASH_FUNCTIONS)
        item_embeddings = BloomEmbedding(num_embeddings, EMBEDDING_DIM,
                                         num_hash_functions=NUM_HASH_FUNCTIONS)
    else:
        user_embeddings = ScaledEmbedding(num_embeddings, EMBEDDING_DIM)
        item_embeddings = ScaledEmbedding(num_embeddings, EMBEDDING_DIM)

    network = BilinearNet(num_embeddings,
                          num_embeddings,
                          user_embedding_layer=user_embeddings,
                          item_embedding_layer=item_embeddings)

    model = ImplicitFactorizationModel(loss='adaptive_hinge',
                                       n_iter=N_ITER,
                                       embedding_dim=EMBEDDING_DIM,
                                       batch_size=2048,
                                       learning_rate=1e-2,
                                       l2=1e-6,
                                       representation=network,
                                       use_cuda=CUDA)

    return model
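# A minimal usage sketch, not taken from the original source. It assumes
# these snippets come from the Spotlight library, so the imports below
# reference that package; the constant values are illustrative assumptions.
from spotlight.factorization.implicit import ImplicitFactorizationModel
from spotlight.factorization.representations import BilinearNet
from spotlight.layers import BloomEmbedding, ScaledEmbedding

EMBEDDING_DIM = 64        # assumed value
NUM_HASH_FUNCTIONS = 4    # assumed value
N_ITER = 10               # assumed value
CUDA = False              # assumed value

# Build a Bloom-compressed model and a standard-embedding baseline.
bloom_model = factorization_model(num_embeddings=1000000, bloom=True)
baseline_model = factorization_model(num_embeddings=1000000, bloom=False)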
def __init__(self, num_users, num_items, embedding_dim=32,
             user_embedding_layer=None, item_embedding_layer=None,
             sparse=False):

    super(BilinearNet, self).__init__()

    self.embedding_dim = embedding_dim

    if user_embedding_layer is not None:
        self.user_embeddings = user_embedding_layer
    else:
        self.user_embeddings = ScaledEmbedding(num_users, embedding_dim,
                                               sparse=sparse)

    if item_embedding_layer is not None:
        self.item_embeddings = item_embedding_layer
    else:
        self.item_embeddings = ScaledEmbedding(num_items, embedding_dim,
                                               sparse=sparse)

    self.user_biases = ZeroEmbedding(num_users, 1, sparse=sparse)
    self.item_biases = ZeroEmbedding(num_items, 1, sparse=sparse)
def __init__(self, num_items, embedding_dim=32,
             num_mixtures=4,
             item_embedding_layer=None, sparse=False):

    super(MixtureLSTMNet, self).__init__()

    self.embedding_dim = embedding_dim
    self.num_mixtures = num_mixtures

    if item_embedding_layer is not None:
        self.item_embeddings = item_embedding_layer
    else:
        self.item_embeddings = ScaledEmbedding(num_items, embedding_dim,
                                               padding_idx=PADDING_IDX,
                                               sparse=sparse)

    self.item_biases = ZeroEmbedding(num_items, 1, sparse=sparse,
                                     padding_idx=PADDING_IDX)

    self.lstm = nn.LSTM(batch_first=True,
                        input_size=embedding_dim,
                        hidden_size=embedding_dim)
    # Projects each LSTM hidden state to the parameters of num_mixtures
    # mixture components: two embedding_dim-sized vectors per component.
    self.projection = nn.Conv1d(embedding_dim,
                                embedding_dim * self.num_mixtures * 2,
                                kernel_size=1)
def __init__(self, num_items, embedding_dim=32,
             item_embedding_layer=None, sparse=False):

    super(LSTMNet, self).__init__()

    self.embedding_dim = embedding_dim

    if item_embedding_layer is not None:
        self.item_embeddings = item_embedding_layer
    else:
        self.item_embeddings = ScaledEmbedding(num_items, embedding_dim,
                                               padding_idx=PADDING_IDX,
                                               sparse=sparse)

    self.item_biases = ZeroEmbedding(num_items, 1, sparse=sparse,
                                     padding_idx=PADDING_IDX)

    self.lstm = nn.LSTM(batch_first=True,
                        input_size=embedding_dim,
                        hidden_size=embedding_dim)
def build_sequence_model(hyperparameters, train, random_state):

    h = hyperparameters

    # NB: the random_state argument is accepted but not used; both the seed
    # below and the model's RandomState are fixed at 42 for reproducibility.
    set_seed(42, CUDA)

    if h['compression_ratio'] < 1.0:
        item_embeddings = BloomEmbedding(train.num_items, h['embedding_dim'],
                                         compression_ratio=h['compression_ratio'],
                                         num_hash_functions=4,
                                         padding_idx=0)
    else:
        item_embeddings = ScaledEmbedding(train.num_items, h['embedding_dim'],
                                          padding_idx=0)

    network = LSTMNet(train.num_items, h['embedding_dim'],
                      item_embedding_layer=item_embeddings)

    model = ImplicitSequenceModel(loss=h['loss'],
                                  n_iter=h['n_iter'],
                                  batch_size=h['batch_size'],
                                  learning_rate=h['learning_rate'],
                                  embedding_dim=h['embedding_dim'],
                                  l2=h['l2'],
                                  representation=network,
                                  use_cuda=CUDA,
                                  random_state=np.random.RandomState(42))

    return model
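# Hypothetical invocation, for illustration: the dictionary keys mirror the
# lookups in build_sequence_model above, and `train` is assumed to be a
# Spotlight sequence interactions object. All numeric values are assumptions.
hyperparameters = {
    'compression_ratio': 0.2,     # < 1.0, so the Bloom layer is used
    'embedding_dim': 32,
    'loss': 'adaptive_hinge',
    'n_iter': 10,
    'batch_size': 256,
    'learning_rate': 1e-2,
    'l2': 1e-6,
}

model = build_sequence_model(hyperparameters, train,
                             np.random.RandomState(42))
model.fit(train, verbose=True)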
def __init__(self, num_items,
             embedding_dim=32,
             kernel_width=3,
             dilation=1,
             num_layers=1,
             nonlinearity='tanh',
             residual_connections=True,
             sparse=False,
             benchmark=True,
             item_embedding_layer=None):

    super(CNNNet, self).__init__()

    cudnn.benchmark = benchmark

    self.embedding_dim = embedding_dim
    self.kernel_width = _to_iterable(kernel_width, num_layers)
    self.dilation = _to_iterable(dilation, num_layers)

    if nonlinearity == 'tanh':
        self.nonlinearity = F.tanh
    elif nonlinearity == 'relu':
        self.nonlinearity = F.relu
    else:
        raise ValueError('Nonlinearity must be one of (tanh, relu)')

    self.residual_connections = residual_connections

    if item_embedding_layer is not None:
        self.item_embeddings = item_embedding_layer
    else:
        self.item_embeddings = ScaledEmbedding(num_items, embedding_dim,
                                               padding_idx=PADDING_IDX,
                                               sparse=sparse)

    self.item_biases = ZeroEmbedding(num_items, 1, sparse=sparse,
                                     padding_idx=PADDING_IDX)

    self.cnn_layers = [
        nn.Conv2d(embedding_dim,
                  embedding_dim,
                  (_kernel_width, 1),
                  dilation=(_dilation, 1))
        for (_kernel_width, _dilation) in zip(self.kernel_width,
                                              self.dilation)
    ]

    # Plain Python lists are invisible to PyTorch's parameter tracking, so
    # register each convolution explicitly as a submodule.
    for i, layer in enumerate(self.cnn_layers):
        self.add_module('cnn_{}'.format(i), layer)
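# A hedged construction example, not from the original source: stacking
# dilated convolutions (1, 2, 4) widens the receptive field over the
# sequence, WaveNet-style. The vocabulary size is an assumed placeholder.
network = CNNNet(num_items=2000,
                 embedding_dim=32,
                 kernel_width=3,
                 dilation=[1, 2, 4],
                 num_layers=3,
                 nonlinearity='relu')
model = ImplicitSequenceModel(representation=network)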
def sequence_model(num_embeddings, bloom):

    if bloom:
        item_embeddings = BloomEmbedding(num_embeddings, EMBEDDING_DIM,
                                         num_hash_functions=NUM_HASH_FUNCTIONS)
    else:
        item_embeddings = ScaledEmbedding(num_embeddings, EMBEDDING_DIM)

    network = LSTMNet(num_embeddings, EMBEDDING_DIM,
                      item_embedding_layer=item_embeddings)

    model = ImplicitSequenceModel(loss='adaptive_hinge',
                                  n_iter=N_ITER,
                                  batch_size=512,
                                  learning_rate=1e-3,
                                  l2=1e-2,
                                  representation=network,
                                  use_cuda=CUDA)

    return model
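# End-to-end sketch under stated assumptions: the data handling below uses
# the public Spotlight API (MovieLens download, user-based split, sequence
# conversion, MRR evaluation); the variant and sequence length are
# illustrative choices, not taken from the original source.
from spotlight.cross_validation import user_based_train_test_split
from spotlight.datasets.movielens import get_movielens_dataset
from spotlight.evaluation import sequence_mrr_score

dataset = get_movielens_dataset(variant='100K')
train, test = user_based_train_test_split(dataset)
train = train.to_sequences(max_sequence_length=200)
test = test.to_sequences(max_sequence_length=200)

model = sequence_model(num_embeddings=dataset.num_items, bloom=True)
model.fit(train, verbose=True)
print('Test MRR: {:.4f}'.format(sequence_mrr_score(model, test).mean()))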
def __init__(self, num_items, embedding_dim=32,
             item_embedding_layer=None, sparse=False):

    super(PoolNet, self).__init__()

    self.embedding_dim = embedding_dim

    if item_embedding_layer is not None:
        self.item_embeddings = item_embedding_layer
    else:
        self.item_embeddings = ScaledEmbedding(num_items, embedding_dim,
                                               padding_idx=PADDING_IDX,
                                               sparse=sparse)

    self.item_biases = ZeroEmbedding(num_items, 1, sparse=sparse,
                                     padding_idx=PADDING_IDX)