Example #1
    def _build_high_layers(self, args):
        # RS scores (computed at call time in this TF2 port):
        #   scores = tf.reduce_sum(user_embeddings * item_embeddings, axis=1)
        #   scores_normalized = tf.nn.sigmoid(scores)
        #
        # KGE: H - 1 hidden MLPs over the [batch_size, dim * 2]
        # head-relation concatenation, plus a prediction MLP that maps
        # back down to [batch_size, dim] (applied as sketched below).
        for _ in range(args.H - 1):
            self.kge_mlp.append(MLP(input_dim=args.dim * 2, output_dim=args.dim * 2))
        self.kge_pred_mlp = MLP(input_dim=args.dim * 2, output_dim=args.dim)

        # One aggregator per propagation step; the final step uses tanh.
        for i in range(self.n_iter):
            if i == self.n_iter - 1:
                aggregator = self.aggregator_class(self.batch_size, self.dim, act=tf.nn.tanh)
            else:
                aggregator = self.aggregator_class(self.batch_size, self.dim)
            self.aggregates.append(aggregator)
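A minimal call-time sketch of how these KGE layers chain together, reconstructed from the commented-out TF1 logic above (an illustration only, not the repository's exact call method; the head and relation embeddings are the lookups named in those comments):

    # [batch_size, dim * 2]: concatenate head and relation embeddings
    head_relation_concat = tf.concat([self.head_embeddings, self.relation_embeddings], axis=1)
    for kge_mlp in self.kge_mlp:  # H - 1 hidden MLPs of width dim * 2
        head_relation_concat = kge_mlp(head_relation_concat)
    # [batch_size, dim]: predicted tail embedding
    tail_pred = self.kge_pred_mlp(head_relation_concat)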
Example #2
    def __init__(self, din, dtreat, y_scaler, args):
        super().__init__(args)
        self.repnet = ConvNet(din=din, dout=args.hidden_rep,
                              C=[args.c_rep[0], args.c_rep[1]])
        self.outnet = MLP(din=3 * args.hidden_rep + dtreat,
                          dout=1, C=[args.c_out[0], args.c_out[1]])
        self.params = list(self.repnet.parameters()) + \
            list(self.outnet.parameters())
        self.optimizer = optim.Adam(
            params=self.params, lr=args.lr, weight_decay=args.wd)
        self.scheduler = StepLR(self.optimizer, step_size=50, gamma=0.1)
        self.y_scaler = y_scaler
Example #3
    def _build_low_layers(self, args):
        # TF2 port: Keras Embedding layers replace the TF1
        # tf.Variable(tf.random.truncated_normal(...)) matrices, and the
        # tf.nn.embedding_lookup calls become plain layer calls
        # (see the sketch below).
        self.user_emb_matrix = tf.keras.layers.Embedding(self.n_user, args.dim)
        self.item_emb_matrix = tf.keras.layers.Embedding(self.n_item, args.dim)
        self.entity_emb_matrix = tf.keras.layers.Embedding(self.n_entity, args.dim)
        self.relation_emb_matrix = tf.keras.layers.Embedding(self.n_relation, args.dim)

        # One user MLP, tail MLP, and cross-compress unit per low layer.
        for _ in range(args.L):
            self.user_mlp.append(MLP(input_dim=args.dim, output_dim=args.dim))
            self.tail_mlp.append(MLP(input_dim=args.dim, output_dim=args.dim))
            self.cc_unit.append(CrossCompressUnit(args.dim))
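A minimal sketch of the call-time lookups implied by the commented-out TF1 code (the index tensors are the ones named in those comments; an illustration, not the repository's exact call method):

    # Keras Embedding layers are callable on integer index tensors,
    # replacing tf.nn.embedding_lookup from the TF1 version.
    self.user_embeddings = self.user_emb_matrix(self.user_indices)        # [batch_size, dim]
    self.item_embeddings = self.item_emb_matrix(self.item_indices)        # [batch_size, dim]
    self.head_embeddings = self.entity_emb_matrix(self.head_indices)      # [batch_size, dim]
    self.relation_embeddings = self.relation_emb_matrix(self.relation_indices)
    self.tail_embeddings = self.entity_emb_matrix(self.tail_indices)      # [batch_size, dim]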
Example #4
class CFRConv(Base):
    def __init__(self, din, dtreat, y_scaler, args):
        super().__init__(args)
        self.repnet = ConvNet(din=din, dout=args.hidden_rep, C=[
                              args.c_rep[0], args.c_rep[1]])
        self.outnet = MLP(din=3 * args.hidden_rep + dtreat,
                          dout=1, C=[args.c_out[0], args.c_out[1]])
        self.params = list(self.repnet.parameters()) + \
            list(self.outnet.parameters())
        self.optimizer = optim.Adam(
            params=self.params, lr=args.lr, weight_decay=args.wd)
        self.scheduler = StepLR(self.optimizer, step_size=50, gamma=0.1)
        self.y_scaler = y_scaler

    def forward(self, x, z):
        # Inference only: take the last representation in the stack,
        # concatenate the treatment z, and map the prediction back to
        # the original outcome scale.
        with torch.no_grad():
            _, x_rep_stack = self.repnet(x)
            x_rep = x_rep_stack[2]
            y_hat = self.outnet(torch.cat((x_rep, z), 1))
            y_hat = self.y_scaler.inverse_transform(
                y_hat.detach().cpu().numpy())
        return y_hat
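The inverse_transform call assumes y_scaler follows the scikit-learn scaler API; a self-contained illustration of that round-trip (StandardScaler is an assumption here, since the snippet never shows how y_scaler is constructed):

    # Standalone illustration (assumes a scikit-learn-style scaler): targets
    # are standardized for training, so predictions must be mapped back.
    import numpy as np
    from sklearn.preprocessing import StandardScaler

    y_train = np.array([[10.0], [20.0], [30.0]])
    scaler = StandardScaler().fit(y_train)
    y_std = scaler.transform(y_train)           # zero mean, unit variance
    y_back = scaler.inverse_transform(y_std)    # back to the original scale
    assert np.allclose(y_back, y_train)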
Example #5
class DMLShared(Base):
    def __init__(self, din, dtreat, y_scaler, args):
        super().__init__(args)
        self.xnet = DMLLinear(din=3 * args.hidden_rep)
        self.repnet = ConvNet(din=din,
                              dout=args.hidden_rep,
                              C=[args.c_rep[0], args.c_rep[1]])
        self.outnet = MLP(din=3 * args.hidden_rep + dtreat,
                          dout=1,
                          C=[args.c_out[0], args.c_out[1]])
        self.params = list(self.repnet.parameters()) + \
            list(self.outnet.parameters())
        self.optimizer = optim.Adam(params=self.params,
                                    lr=args.lr,
                                    weight_decay=args.wd)
        self.scheduler = StepLR(self.optimizer, step_size=args.step, gamma=0.1)
        self.y_scaler = y_scaler

    def forward(self, x, z):
        # A single shared-representation pass feeds both heads.
        _, x_rep_stack = self.repnet(x)
        x_rep = x_rep_stack[2]
        y_hat_x = self.xnet(x_rep)
        y_hat = self.outnet(torch.cat((x_rep, z), 1))

        return y_hat_x + y_hat

    def fit(self, dataloader, x_train, M_train, Z_train, x_test, M_test,
            Z_test, target_outcome):
        losses = []
        print('                        within sample,      out of sample')
        print('           [Train MSE], [RMSE, PEHE, ATE], [RMSE, PEHE, ATE]')
        for epoch in range(self.args.epoch):
            epoch_loss = 0
            n = 0
            for (x, y, z) in dataloader:
                x = x.to(device=self.args.device)
                y = y.to(device=self.args.device)
                z = z.to(device=self.args.device)
                self.optimizer.zero_grad()
                y_hat = self.forward(x, z)
                loss = self.criterion(y_hat, y.reshape([-1, 1]))
                loss.backward()

                mse = self.mse(
                    self.y_scaler.inverse_transform(
                        y_hat.detach().cpu().numpy()),
                    self.y_scaler.inverse_transform(
                        y.reshape([-1, 1]).detach().cpu().numpy()))
                self.optimizer.step()
                epoch_loss += mse * y.shape[0]
                n += y.shape[0]

            self.scheduler.step()
            epoch_loss = epoch_loss / n
            losses.append(epoch_loss)

            if epoch % 10 == 0:
                with torch.no_grad():
                    # ATE, sqrt(pehe), cmse
                    within_pm = self.get_score(self, x_train, M_train, Z_train,
                                               target_outcome)
                    outof_pm = self.get_score(self, x_test, M_test, Z_test,
                                              target_outcome)
                print(
                    '[Epoch: %d] [%.3f], [%.3f, %.3f, %.3f], [%.3f, %.3f, %.3f] '
                    % (epoch, epoch_loss, within_pm['RMSE'], within_pm['PEHE'],
                       within_pm['ATE'], outof_pm['RMSE'], outof_pm['PEHE'],
                       outof_pm['ATE']))

        return within_pm, outof_pm, losses
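For reference, a minimal runnable sketch of the StepLR schedule these trainers share (step_size is hard-coded to 50 in the fixed-step variants above and args.step in DMLShared):

    # Standalone StepLR demo: the learning rate is multiplied by gamma each
    # time `step_size` scheduler steps have elapsed.
    import torch
    from torch.optim import Adam
    from torch.optim.lr_scheduler import StepLR

    param = torch.nn.Parameter(torch.zeros(1))
    opt = Adam([param], lr=1e-3)
    sched = StepLR(opt, step_size=50, gamma=0.1)
    for _ in range(120):
        opt.step()    # one (dummy) optimization step per epoch
        sched.step()  # decay triggers after epochs 50 and 100 here
    print(opt.param_groups[0]['lr'])  # ~1e-5: 1e-3 * 0.1 ** 2 after two decays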
Example #6
if optimizer_flag == 'RMSprop':
    optimizer = RMSprop(lr=RMSprop_list[0],
                        p=RMSprop_list[1],
                        eps=RMSprop_list[2])
elif optimizer_flag == 'AdaDelta':
    optimizer = AdaDelta(p=Adadelta_list[0], eps=Adadelta_list[1])
elif optimizer_flag == 'Adam':
    optimizer = Adam(lr=Adam_list[0],
                     p1=Adam_list[1],
                     p2=Adam_list[2],
                     eps=Adam_list[3])
elif optimizer_flag == 'RMSpropGraves':
    optimizer = RMSpropGraves(lr=RMSpropGraves_list[0],
                              p=RMSpropGraves_list[1],
                              eps=RMSpropGraves_list[2])
elif optimizer_flag == 'SMORMS3':
    optimizer = SMORMS3(lr=SMORMS3_list[0], eps=SMORMS3_list[1])

model = MLP(model_list)

# set up the optimizer
optimizer.setup(model)

# n_epoch and batchsize can be changed
train_loss_list, train_acc_list, test_loss_list, test_acc_list = learning(
    model, optimizer, n_epoch, batchsize)

if gpu:
    # convert results back to CPU arrays (asnumpy is CuPy's API, so np
    # here must be bound to the cupy module)
    train_loss_list = np.asnumpy(train_loss_list)
    train_acc_list = np.asnumpy(train_acc_list)
    test_loss_list = np.asnumpy(test_loss_list)
    test_acc_list = np.asnumpy(test_acc_list)
    import numpy as np  # rebind np to host numpy for plotting
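Since asnumpy exists only in CuPy, the script presumably aliases the array module at the top of the file; a hypothetical sketch of that import pattern (the original top-of-file imports are not shown in this snippet):

    # Hypothetical top-of-file import implied by np.asnumpy above: alias
    # cupy as np when running on GPU; the later `import numpy as np` then
    # rebinds np to host numpy for plotting.
    gpu = True
    if gpu:
        import cupy as np   # cupy.asnumpy(...) copies device arrays to host
    else:
        import numpy as np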