Ejemplo n.º 1
0
class Net(nn.Module):
    """GCMC rating model: a graph-convolutional encoder followed by a
    bilinear decoder that scores user/movie pairs per rating class."""

    def __init__(self, args, dev_id):
        super(Net, self).__init__()
        self._act = get_activation(args.model_activation)
        self.encoder = GCMCLayer(
            args.rating_vals,
            args.src_in_units,
            args.dst_in_units,
            args.gcn_agg_units,
            args.gcn_out_units,
            args.gcn_dropout,
            args.gcn_agg_accum,
            agg_act=self._act,
            share_user_item_param=args.share_param,
            device=dev_id,
        )
        # if use_one_hot_fea, user and movie feature is None
        # W can be extremely large, with mix_cpu_gpu W should be stored in CPU
        if args.mix_cpu_gpu and args.use_one_hot_fea:
            self.encoder.partial_to(dev_id)
        else:
            self.encoder.to(dev_id)

        self.decoder = BiDecoder(
            in_units=args.gcn_out_units,
            num_classes=len(args.rating_vals),
            num_basis=args.gen_r_num_basis_func,
        )
        self.decoder.to(dev_id)

    def forward(self, compact_g, frontier, ufeat, ifeat,
                possible_rating_values):
        """Encode node features on `frontier`, then decode pairwise
        rating logits on `compact_g`."""
        user_emb, movie_emb = self.encoder(frontier, ufeat, ifeat)
        return self.decoder(compact_g, user_emb, movie_emb)
Ejemplo n.º 2
0
    def __init__(self, args, dev_id):
        """Build a multi-layer GCMC encoder (one GCMCLayer per stacked
        layer) and a bilinear decoder, placing parameters on `dev_id`.

        Parameters
        ----------
        args : namespace of hyper-parameters (rating_vals, layer sizes,
            dropout, accumulation mode, layer count, ...).
        dev_id : target device for model parameters.
        """
        super(Net, self).__init__()
        self._act = get_activation(args.model_activation)
        self.encoder = nn.ModuleList()
        self.encoder.append(
            GCMCLayer(args.rating_vals,
                      args.src_in_units,
                      args.dst_in_units,
                      args.gcn_agg_units,
                      args.gcn_out_units,
                      args.gcn_dropout,
                      args.gcn_agg_accum,
                      agg_act=self._act,
                      share_user_item_param=args.share_param,
                      device=dev_id))

        self.gcn_agg_accum = args.gcn_agg_accum
        self.rating_vals = args.rating_vals
        self.device = dev_id
        self.gcn_agg_units = args.gcn_agg_units
        self.src_in_units = args.src_in_units
        self.batch_size = args.minibatch_size
        for i in range(1, args.layers):
            # 'stack' accumulation concatenates per-rating messages, so the
            # aggregation width grows by the number of rating values.
            if args.gcn_agg_accum == 'stack':
                gcn_out_units = args.gcn_out_units * len(args.rating_vals)
            else:
                gcn_out_units = args.gcn_out_units
            self.encoder.append(
                GCMCLayer(args.rating_vals,
                          args.gcn_out_units,
                          args.gcn_out_units,
                          gcn_out_units,
                          args.gcn_out_units,
                          # NOTE(review): dropout decays by 0.1 per layer and
                          # goes negative once i*0.1 > gcn_dropout — confirm
                          # GCMCLayer tolerates that.
                          args.gcn_dropout - i * 0.1,
                          args.gcn_agg_accum,
                          agg_act=self._act,
                          share_user_item_param=args.share_param,
                          ini=False,
                          device=dev_id))

        if args.mix_cpu_gpu and args.use_one_hot_fea:
            # if use_one_hot_fea, user and movie feature is None
            # W can be extremely large, with mix_cpu_gpu W should be stored in CPU
            # nn.ModuleList itself has no partial_to(); call it on each
            # GCMCLayer so only the non-huge parameters move to the device.
            for layer in self.encoder:
                layer.partial_to(dev_id)
        else:
            self.encoder.to(dev_id)

        self.decoder = BiDecoder(in_units=args.gcn_out_units,
                                 num_classes=len(args.rating_vals),
                                 num_basis=args.gen_r_num_basis_func)
        self.decoder.to(dev_id)
Ejemplo n.º 3
0
    def __init__(self, args):
        """Build a multi-layer GCMC encoder and a configurable decoder
        ("Bi" bilinear or "MLP"), placing parameters on `args.device`.

        Raises
        ------
        ValueError
            If `args.decoder` is neither "Bi" nor "MLP" (previously the
            model was silently left without a decoder).
        """
        super(Net, self).__init__()
        self._act = get_activation(args.model_activation)
        self.encoder = nn.ModuleList()
        self.encoder.append(
            GCMCLayer(args.rating_vals,
                      args.src_in_units,
                      args.dst_in_units,
                      args.gcn_agg_units,
                      args.gcn_out_units,
                      args.gcn_dropout,
                      args.gcn_agg_accum,
                      agg_act=self._act,
                      share_user_item_param=args.share_param,
                      device=args.device))
        self.gcn_agg_accum = args.gcn_agg_accum
        self.rating_vals = args.rating_vals
        self.device = args.device
        self.gcn_agg_units = args.gcn_agg_units
        self.src_in_units = args.src_in_units
        for i in range(1, args.layers):
            # 'stack' accumulation concatenates per-rating messages, so the
            # aggregation width grows by the number of rating values.
            if args.gcn_agg_accum == 'stack':
                gcn_out_units = args.gcn_out_units * len(args.rating_vals)
            else:
                gcn_out_units = args.gcn_out_units
            self.encoder.append(
                GCMCLayer(args.rating_vals,
                          args.gcn_out_units,
                          args.gcn_out_units,
                          gcn_out_units,
                          args.gcn_out_units,
                          # NOTE(review): dropout decays by 0.1 per layer and
                          # goes negative once i*0.1 > gcn_dropout — confirm
                          # GCMCLayer tolerates that.
                          args.gcn_dropout - i * 0.1,
                          args.gcn_agg_accum,
                          agg_act=self._act,
                          share_user_item_param=args.share_param,
                          ini=False,
                          device=args.device))

        if args.decoder == "Bi":
            self.decoder = BiDecoder(
                in_units=args.gcn_out_units,  #* args.layers,
                num_classes=len(args.rating_vals),
                num_basis=args.gen_r_num_basis_func)
        elif args.decoder == "MLP":
            # CE loss predicts a class per rating value; regression losses
            # predict a single scalar.
            if args.loss_func == "CE":
                num_classes = len(args.rating_vals)
            else:
                num_classes = 1
            self.decoder = MLPDecoder(in_units=args.gcn_out_units *
                                      args.layers,
                                      num_classes=num_classes,
                                      num_basis=args.gen_r_num_basis_func)
        else:
            raise ValueError(
                "Unsupported decoder: {}".format(args.decoder))
Ejemplo n.º 4
0
    def __init__(self, args, dev_id):
        """Assemble a single-layer GCMC encoder and a bilinear decoder,
        moving their parameters onto `dev_id`."""
        super(Net, self).__init__()
        self._act = get_activation(args.model_activation)
        self.encoder = GCMCLayer(
            args.rating_vals,
            args.src_in_units,
            args.dst_in_units,
            args.gcn_agg_units,
            args.gcn_out_units,
            args.gcn_dropout,
            args.gcn_agg_accum,
            agg_act=self._act,
            share_user_item_param=args.share_param,
            device=dev_id,
        )
        # if use_one_hot_fea, user and movie feature is None
        # W can be extremely large, with mix_cpu_gpu W should be stored in CPU
        if args.mix_cpu_gpu and args.use_one_hot_fea:
            self.encoder.partial_to(dev_id)
        else:
            self.encoder.to(dev_id)

        self.decoder = BiDecoder(
            in_units=args.gcn_out_units,
            num_classes=len(args.rating_vals),
            num_basis=args.gen_r_num_basis_func,
        )
        self.decoder.to(dev_id)
Ejemplo n.º 5
0
Archivo: train.py Proyecto: hacors/Drug
 def __init__(self, args):
     """Wire up the GCMC encoder and the bilinear rating decoder from
     the parsed hyper-parameters in `args`."""
     super(Net, self).__init__()
     self._act = get_activation(args.model_activation)
     encoder_inputs = (
         args.rating_vals,
         args.src_in_units,
         args.dst_in_units,
         args.gcn_agg_units,
         args.gcn_out_units,
         args.gcn_dropout,
         args.gcn_agg_accum,
     )
     self.encoder = GCMCLayer(*encoder_inputs,
                              agg_act=self._act,
                              share_user_item_param=args.share_param)
     self.decoder = BiDecoder(args.rating_vals,
                              in_units=args.gcn_out_units,
                              num_basis_functions=args.gen_r_num_basis_func)
Ejemplo n.º 6
0
 def __init__(self, args, **kwargs):
     """Build the Gluon GCMC network: an encoder over typed rating links
     and either a classification (BiDecoder) or regression
     (InnerProductLayer) rating head, chosen by
     `args.gen_r_use_classification`."""
     super(Net, self).__init__(**kwargs)
     self._act = get_activation(args.model_activation)
     with self.name_scope():
         self.encoder = GCMCLayer(src_key=args.src_key,
                                  dst_key=args.dst_key,
                                  src_in_units=args.src_in_units,
                                  dst_in_units=args.dst_in_units,
                                  agg_units=args.gcn_agg_units,
                                  out_units=args.gcn_out_units,
                                  num_links=args.nratings,
                                  dropout_rate=args.gcn_dropout,
                                  agg_accum=args.gcn_agg_accum,
                                  agg_act=args.model_activation,
                                  prefix='enc_')
         self.gen_ratings = (
             BiDecoder(in_units=args.gcn_out_units,
                       out_units=args.nratings,
                       num_basis_functions=args.gen_r_num_basis_func,
                       prefix='gen_rating')
             if args.gen_r_use_classification
             else InnerProductLayer(prefix='gen_rating'))
Ejemplo n.º 7
0
class Net(nn.Module):
    """Multi-layer GCMC rating model.

    Stacks `args.layers` GCMCLayer encoders (held in an ``nn.ModuleList``),
    averages their per-layer embeddings with decaying weights, decodes
    rating logits with a bilinear decoder, and adds a regularizer that
    penalizes similar per-rating weight matrices in the first layer.
    """

    def __init__(self, args, dev_id):
        super(Net, self).__init__()
        self._act = get_activation(args.model_activation)
        self.encoder = nn.ModuleList()
        self.encoder.append(
            GCMCLayer(args.rating_vals,
                      args.src_in_units,
                      args.dst_in_units,
                      args.gcn_agg_units,
                      args.gcn_out_units,
                      args.gcn_dropout,
                      args.gcn_agg_accum,
                      agg_act=self._act,
                      share_user_item_param=args.share_param,
                      device=dev_id))

        self.gcn_agg_accum = args.gcn_agg_accum
        self.rating_vals = args.rating_vals
        self.device = dev_id
        self.gcn_agg_units = args.gcn_agg_units
        self.src_in_units = args.src_in_units
        self.batch_size = args.minibatch_size
        # Stored so forward() does not depend on a module-global `args`.
        self.layers = args.layers
        for i in range(1, args.layers):
            # 'stack' accumulation concatenates per-rating messages, so the
            # aggregation width grows by the number of rating values.
            if args.gcn_agg_accum == 'stack':
                gcn_out_units = args.gcn_out_units * len(args.rating_vals)
            else:
                gcn_out_units = args.gcn_out_units
            self.encoder.append(
                GCMCLayer(args.rating_vals,
                          args.gcn_out_units,
                          args.gcn_out_units,
                          gcn_out_units,
                          args.gcn_out_units,
                          # NOTE(review): dropout decays by 0.1 per layer and
                          # goes negative once i*0.1 > gcn_dropout — confirm
                          # GCMCLayer tolerates that.
                          args.gcn_dropout - i * 0.1,
                          args.gcn_agg_accum,
                          agg_act=self._act,
                          share_user_item_param=args.share_param,
                          ini=False,
                          device=dev_id))

        if args.mix_cpu_gpu and args.use_one_hot_fea:
            # if use_one_hot_fea, user and movie feature is None
            # W can be extremely large, with mix_cpu_gpu W should be stored in CPU
            # nn.ModuleList itself has no partial_to(); call it on each
            # GCMCLayer so only the non-huge parameters move to the device.
            for layer in self.encoder:
                layer.partial_to(dev_id)
        else:
            self.encoder.to(dev_id)

        self.decoder = BiDecoder(in_units=args.gcn_out_units,
                                 num_classes=len(args.rating_vals),
                                 num_basis=args.gen_r_num_basis_func)
        self.decoder.to(dev_id)

    def forward(self,
                compact_g,
                frontier,
                ufeat,
                ifeat,
                possible_rating_values,
                Two_Stage=False):
        """Run the stacked encoders over `frontier` (one block per layer),
        combine the per-layer embeddings, and decode rating logits.

        Returns
        -------
        (pred_ratings, reg_loss)
            Decoder output on `compact_g` and a scalar regularizer:
            negative cosine similarity between consecutive per-rating
            weight slices of the first encoder layer.

        Note: `possible_rating_values` and `Two_Stage` are currently
        unused; they are kept for caller compatibility.
        """
        user_out = []
        movie_out = []
        for i in range(self.layers):
            user_o, movie_o = self.encoder[i](frontier[i], ufeat, ifeat)
            # Feed each layer's output into the next one.
            ufeat = user_o
            ifeat = movie_o
            user_out.append(user_o)
            movie_out.append(movie_o)
            # Sizes of the deepest (smallest) block; earlier layers are
            # truncated to these below.
            u_size = user_o.shape[0]
            m_size = movie_o.shape[0]

        # Weighted combination: layer 0 at full weight, layer i scaled by
        # 1/(i+1), all truncated to the last layer's node counts.
        for i in range(self.layers):
            if i == 0:
                user_o = user_out[i][:u_size, :]
                movie_o = movie_out[i][:m_size, :]
            else:
                user_o += user_out[i][:u_size, :] / float(i + 1)
                movie_o += movie_out[i][:m_size, :] / float(i + 1)

        pred_ratings = self.decoder(compact_g, user_o, movie_o)

        # Regularizer: discourage consecutive rating types from learning
        # similar first-layer transforms.
        reg_loss = 0.0
        W = th.matmul(
            self.encoder[0].att,
            self.encoder[0].basis.view(self.encoder[0].basis_units, -1))
        W = W.view(len(self.rating_vals), self.src_in_units, -1)
        for i, rating in enumerate(self.rating_vals):
            rating = to_etype_name(rating)
            if i != 0:
                reg_loss += -th.sum(
                    th.cosine_similarity(W[i, :, :], W[i - 1, :, :], dim=1))
        return pred_ratings, reg_loss