Code example #1
def test_all():
    dataset = MovieLens('./ml-latest-small/ratings.csv', 'train', 60, 5, .8)
    model = BERT4REC(total_items=10000,
                     emb_dims=32,
                     num_heads=1,
                     dropout_rate=.8,
                     learning_rate=1e-3)
    print(model)

    test_dataset = DataLoader(dataset, batch_size=1)
    for i, data in enumerate(test_dataset):
        x, y_label, mask = data

        y_pred = model(x)
        y_pred = y_pred.view(-1, y_pred.size(2))
        y_label = y_label.view(-1)

        x = x.view(-1)
        mask = mask.view(-1)
        print(mask, mask.shape)
        print(x, x.shape)
        print(x.shape, y_label.shape, y_pred.shape)

        loss = masked_cross_entropy(y_pred, y_label, mask)
        acc = masked_accuracy(y_pred, y_label, mask)
        recall = masked_recall_at_k(y_pred, y_label, mask, 10)
        print(loss, acc, recall)
        break
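
The three masked helpers used above are not defined in this snippet. A minimal sketch of what they might compute, assuming y_pred is (N, V) logits, y_label holds the target ids, and mask is 1 at positions that count (names and signatures inferred from the call sites, not the repository's actual code):

import torch
import torch.nn.functional as F

def masked_cross_entropy(y_pred, y_label, mask):
    # Per-position loss, averaged over masked positions only.
    mask = mask.float()
    loss = F.cross_entropy(y_pred, y_label, reduction='none')
    return (loss * mask).sum() / mask.sum().clamp(min=1)

def masked_accuracy(y_pred, y_label, mask):
    mask = mask.float()
    correct = (y_pred.argmax(dim=-1) == y_label).float()
    return (correct * mask).sum() / mask.sum().clamp(min=1)

def masked_recall_at_k(y_pred, y_label, mask, k):
    # A position scores a hit if the true id is among the top-k predictions.
    mask = mask.float()
    topk = y_pred.topk(k, dim=-1).indices
    hits = (topk == y_label.unsqueeze(-1)).any(dim=-1).float()
    return (hits * mask).sum() / mask.sum().clamp(min=1)
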
Code example #2
 def set_accuracy(self):
     """
     准确率
     """
     with tf.name_scope('accuracy'):
         self.accuracy = metrics.masked_accuracy(self.outputs, self.labels, self.labels_mask)
         self.report = [tf.argmax(self.labels, 1), tf.argmax(self.outputs, 1)]
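
metrics.masked_accuracy here matches the metric popularized by the GCN reference implementation (tkipf/gcn). For context, that definition is essentially the following (reproduced as a sketch, not this project's verbatim code):

import tensorflow as tf

def masked_accuracy(preds, labels, mask):
    """Accuracy over the nodes selected by mask."""
    correct = tf.equal(tf.argmax(preds, 1), tf.argmax(labels, 1))
    accuracy_all = tf.cast(correct, tf.float32)
    mask = tf.cast(mask, dtype=tf.float32)
    mask /= tf.reduce_mean(mask)  # rescale so the masked mean is unbiased
    accuracy_all *= mask
    return tf.reduce_mean(accuracy_all)
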
Code example #3
File: model.py Project: yajian/ml_by_numpy
 def test(self):
     # Evaluate on the held-out split (the original code reused the names
     # y_train/train_mask here even though these hold the test data).
     y_test, test_mask = self.y_test, self.test_mask
     hidden = forward_hidden(self.adj, self.features, self.weight_hidden, activation=lambda x: np.maximum(x, 0))
     outputs = forward_hidden(self.adj, hidden, self.weight_outputs)
     loss = forward_cross_entropy_loss(outputs, y_test, test_mask)
     loss += self.weight_decay * l2_loss(self.weight_hidden)
     acc = masked_accuracy(outputs, y_test, test_mask)
     return loss, acc
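
masked_accuracy is not defined in this NumPy project's snippet; a plausible NumPy equivalent of the GCN-style metric (an assumption inferred from the call site) would be:

import numpy as np

def masked_accuracy(outputs, labels, mask):
    # Row-wise argmax match, averaged over the masked rows only.
    correct = (np.argmax(outputs, axis=1) == np.argmax(labels, axis=1))
    mask = mask.astype(np.float32)
    return np.sum(correct.astype(np.float32) * mask) / np.sum(mask)
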
Code example #4
    def loss_sum(scores, lbl_in, msk_in, neg_msk, weight_decay, coefs, emb):
        loss_basic = masked_accuracy(scores, lbl_in, msk_in, neg_msk)

        # Sum the L2 penalty over each decoder variable; tf.nn.l2_loss
        # expects a single tensor, so apply it per variable and add.
        para_decode = tf.compat.v1.get_collection(
            tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, scope="deco")
        loss_basic += weight_decay * tf.add_n(
            [tf.nn.l2_loss(v) for v in para_decode])

        return loss_basic
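
Despite its name, the four-argument masked_accuracy(scores, lbl_in, msk_in, neg_msk) in this project serves as the base reconstruction loss. Its definition is not shown; one plausible sketch, treating msk_in as the positive-entry mask and neg_msk as sampled negatives (the squared-error form and the equal weighting are assumptions):

import tensorflow as tf

def masked_accuracy(scores, lbl_in, msk_in, neg_msk):
    # Hypothetical reconstruction loss: squared error over positive
    # entries plus squared error over sampled negative entries.
    labels = tf.cast(lbl_in, tf.float32)
    pos = tf.cast(msk_in, tf.float32)
    neg = tf.cast(neg_msk, tf.float32)
    err = tf.square(scores - labels)
    pos_term = tf.reduce_sum(err * pos) / tf.maximum(tf.reduce_sum(pos), 1.0)
    neg_term = tf.reduce_sum(err * neg) / tf.maximum(tf.reduce_sum(neg), 1.0)
    return pos_term + neg_term
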
Code example #5
 def loss_overall(scores, lbl_in, msk_in, neg_msk, weight_decay, emb):
     loss_basic = masked_accuracy(scores, lbl_in, msk_in, neg_msk)
     para_decode = tf.compat.v1.get_collection(
         tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, scope="deco_revised")
     loss_basic += weight_decay * tf.nn.l2_loss(para_decode[0])
     loss_basic += weight_decay * tf.nn.l2_loss(para_decode[1])
     loss_basic += weight_decay * tf.nn.l2_loss(para_decode[2])
     return loss_basic
Code example #6
 def _accuracy(self):
     if self.multilabel:
         self.accuracy = metrics.masked_accuracy_multilabel(
             self.outputs, self.placeholders['labels'],
             self.placeholders['labels_mask'])
     else:
         self.accuracy = metrics.masked_accuracy(
             self.outputs, self.placeholders['labels'],
             self.placeholders['labels_mask'])
Code example #7
def train_loop(sess, fetches, feed_dict, epoch):
    outs = sess.run(fetches, feed_dict=feed_dict)
    print("Epoch:", '%04d' % (epoch + 1), "train_loss=",
          "{:.5f}".format(outs[1]))
    preds = outs[2]
    labels = outs[3]
    labels[labels > 0] = 1
    f1_micro, f1_macro = masked_accuracy(preds, labels)
    print('f1_micro, f1_macro', f1_micro, f1_macro)
    return f1_micro, f1_macro
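
In this variant masked_accuracy(preds, labels) returns a (micro, macro) F1 pair over binarized multi-label outputs rather than an accuracy. A hedged sketch of such a helper, assuming predictions are thresholded at zero just as the labels are above:

import numpy as np
from sklearn.metrics import f1_score

def masked_accuracy(preds, labels):
    # Binarize predictions (the zero threshold is an assumption) and
    # report micro- and macro-averaged F1 over all label columns.
    preds_bin = (preds > 0).astype(np.int32)
    return (f1_score(labels, preds_bin, average='micro'),
            f1_score(labels, preds_bin, average='macro'))
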
Code example #8
File: model.py Project: yajian/ml_by_numpy
 def train(self):
     # hidden has shape (2708, 16), i.e. (batch, hidden)
     self.hidden = forward_hidden(self.adj, self.features, self.weight_hidden, activation=lambda x: np.maximum(x, 0))
     # outputs has shape (2708, 7), i.e. (batch, classes)
     self.outputs = forward_hidden(self.adj, self.hidden, self.weight_outputs)
     # loss from the forward pass
     loss = forward_cross_entropy_loss(self.outputs, self.y_train, self.train_mask)
     # regularization term
     weight_decay_loss = self.weight_decay * l2_loss(self.weight_hidden)
     loss += weight_decay_loss
     # compute the accuracy
     acc = masked_accuracy(self.outputs, self.y_train, self.train_mask)
     return loss, acc
Code example #9
 def eval_f1(outs, x, y, name):
     y_preds = outs[x]
     y_labels = outs[y]
     y_labels[y_labels > 0] = 1
     f1_micro, f1_macro = masked_accuracy(y_preds, y_labels)
     if name == 'validation':
         f1_micros_valid.append(f1_micro)
         f1_macros_valid.append(f1_macro)
         print(name, 'f1_micro, f1_macro', f1_micro, f1_macro,
               np.argmax(f1_micros_valid), np.argmax(f1_macros_valid))
     else:
         f1_micros_test.append(f1_micro)
         f1_macros_test.append(f1_macro)
         print(name, 'f1_micro, f1_macro', f1_micro, f1_macro,
               np.argmax(f1_micros_test), np.argmax(f1_macros_test))
Code example #10
 def eval_f1(outs, x, y, name):
     global f1_micro_max, f1_macro_max
     y_preds = outs[x]
     y_labels = outs[y]
     y_labels[y_labels > 0] = 1
     f1_micro, f1_macro = masked_accuracy(y_preds, y_labels)
     if name == 'validation':
         f1_micros_valid.append(f1_micro)
         f1_macros_valid.append(f1_macro)
         print(name, 'f1_micro, f1_macro', f1_micro, f1_macro,
               np.argmax(f1_micros_valid), np.argmax(f1_macros_valid))
     else:
         f1_micros_test.append(f1_micro)
         f1_macros_test.append(f1_macro)
         print(name, 'f1_micro, f1_macro', f1_micro, f1_macro,
               np.argmax(f1_micros_test), np.argmax(f1_macros_test))
         f1_micro_max = max(f1_micro_max, f1_micro)
         f1_macro_max = max(f1_macro_max, f1_macro)
         print('f1_micro_max', f1_micro_max, 'f1_macro_max',
               f1_macro_max)
         np.save(
             '{}/../exp/{}_result_{}'.format(current_folder, FLAGS.eval,
                                             FLAGS.train_ratio),
             (f1_micro_max, f1_macro_max))
Code example #11
File: models.py Project: flyingdoog/GraphSAGE
 def _accuracy(self):
     if self.categorical:
         self.accuracy = metrics.masked_accuracy(self.outputs, self.placeholders['labels'],
                 self.placeholders['labels_mask'])
Code example #12
 def _accuracy(self):
     self.accuracy = masked_accuracy(self.outputs,
                                     self.placeholders['labels'],
                                     self.placeholders['labels_mask'])
Code example #13
File: train_GCN.py Project: chinmay5/graph_exp
def accuracy(outputs, labels, is_gcn=True):
    # is_gcn is accepted but unused in this wrapper.
    return masked_accuracy(outputs, labels)
Code example #14
def train(train_arr, test_arr, mask_neg, label_neg):

    # training params
    batch_size = 1
    nb_epochs = 200
    lr = 0.005
    l2_coef = 5e-4
    weight_decay = 5e-4
    hid_units = [8]
    n_heads = [1, 1]
    residual = False
    nonlinearity = tf.nn.elu
    model = HeteGAT

    #print('Dataset: ' + dataset)
    print('----- Opt. hyperparams -----')
    print('lr: ' + str(lr))
    print('l2_coef: ' + str(l2_coef))
    print('----- Archi. hyperparams -----')
    print('nb. layers: ' + str(len(hid_units)))
    print('nb. units per layer: ' + str(hid_units))
    print('nb. attention heads: ' + str(n_heads))
    print('residual: ' + str(residual))
    print('nonlinearity: ' + str(nonlinearity))

    interaction_list, adj_list, fea_list, y_train, y_test, train_mask, test_mask, labels = load_data(
        train_arr, test_arr)

    nb_nodes = fea_list[0].shape[0]
    ft_size = fea_list[0].shape[1]

    fea_list = [fea[np.newaxis] for fea in fea_list]
    adj_list = [adj[np.newaxis] for adj in adj_list]
    interaction_list = [inter[np.newaxis] for inter in interaction_list]

    biases_list = [
        process.adj_to_bias(adj, [nb_nodes], nhood=1) for adj in adj_list
    ]

    print('build graph...')
    with tf.Graph().as_default():
        with tf.name_scope('input'):
            ftr_in_list = [
                tf.placeholder(dtype=tf.float32,
                               shape=(batch_size, nb_nodes, ft_size),
                               name='ftr_in_{}'.format(i))
                for i in range(len(fea_list))
            ]
            bias_in_list = [
                tf.placeholder(dtype=tf.float32,
                               shape=(batch_size, nb_nodes, nb_nodes),
                               name='bias_in_{}'.format(i))
                for i in range(len(biases_list))
            ]
            inter_in_list = [
                tf.placeholder(dtype=tf.float32,
                               shape=(batch_size, nb_nodes, nb_nodes),
                               name='inter_in_{}'.format(i))
                for i in range(len(interaction_list))
            ]
            lbl_in = tf.placeholder(dtype=tf.int32,
                                    shape=(237529, batch_size),
                                    name='lbl_in')
            msk_in = tf.placeholder(dtype=tf.int32,
                                    shape=(237529, batch_size),
                                    name='msk_in')
            neg_msk = tf.placeholder(dtype=tf.int32,
                                     shape=(237529, batch_size),
                                     name='neg_msk')
            attn_drop = tf.placeholder(dtype=tf.float32,
                                       shape=(),
                                       name='attn_drop')
            ffd_drop = tf.placeholder(dtype=tf.float32,
                                      shape=(),
                                      name='ffd_drop')
            is_train = tf.placeholder(dtype=tf.bool, shape=(), name='is_train')
        # forward
        final_embedding, att_val, embed_list = model.encoder(
            ftr_in_list,
            nb_nodes,
            is_train,
            attn_drop,
            ffd_drop,
            bias_mat_list=bias_in_list,
            inter_mat_list=inter_in_list,
            hid_units=hid_units,
            n_heads=n_heads,
            residual=residual,
            activation=nonlinearity)
        logits = model.decoder(final_embedding)

        # cal masked_loss
        loss = masked_accuracy(logits, lbl_in, msk_in, neg_msk)
        # Sum the L2 penalty over each decoder variable; tf.nn.l2_loss
        # expects a single tensor, so apply it per variable and add.
        para_decode = tf.compat.v1.get_collection(
            tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, scope="deco")
        loss += weight_decay * tf.add_n(
            [tf.nn.l2_loss(v) for v in para_decode])

        para_encode = tf.compat.v1.get_collection(
            tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, scope="enco_second")
        loss += weight_decay * tf.nn.l2_loss(para_encode[0])
        loss += weight_decay * tf.nn.l2_loss(para_encode[1])

        accuracy = masked_accuracy(logits, lbl_in, msk_in, neg_msk)

        # optimize
        train_op = model.training(loss, lr, l2_coef)

        #saver = tf.train.Saver()

        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())

        print("Start to train")
        with tf.Session() as sess:
            sess.run(init_op)

            train_loss_avg = 0
            train_acc_avg = 0

            #neg_mask, label_neg = generate_mask(labels, len(train_arr))
            neg_mask = mask_neg

            for epoch in range(nb_epochs):

                t = time.time()

                tr_step = 0

                tr_size = fea_list[0].shape[0]

                # ================   training    ============
                while tr_step * batch_size < tr_size:

                    fd1 = {
                        i: d[tr_step * batch_size:(tr_step + 1) * batch_size]
                        for i, d in zip(ftr_in_list, fea_list)
                    }
                    fd2 = {
                        i: d[tr_step * batch_size:(tr_step + 1) * batch_size]
                        for i, d in zip(bias_in_list, biases_list)
                    }
                    fd4 = {
                        i: d[tr_step * batch_size:(tr_step + 1) * batch_size]
                        for i, d in zip(inter_in_list, interaction_list)
                    }
                    fd3 = {
                        lbl_in: y_train,
                        msk_in: train_mask,
                        neg_msk: neg_mask,
                        is_train: True,
                        attn_drop: 0.0,
                        ffd_drop: 0.0
                    }
                    fd = fd1
                    fd.update(fd2)
                    fd.update(fd4)
                    fd.update(fd3)
                    _, loss_value_tr, acc_tr, att_val_train, embed = sess.run(
                        [train_op, loss, accuracy, att_val, embed_list],
                        feed_dict=fd)
                    train_loss_avg += loss_value_tr
                    train_acc_avg += acc_tr
                    tr_step += 1

                print(
                    'Epoch: %04d | Training: loss = %.5f, acc = %.5f, time = %.5f'
                    % ((epoch + 1), loss_value_tr, acc_tr, time.time() - t))

            print("Finish traing.")

            # ================   test    ================
            ts_size = fea_list[0].shape[0]
            ts_step = 0
            ts_loss = 0.0
            ts_acc = 0.0

            while ts_step * batch_size < ts_size:
                fd1 = {
                    i: d[ts_step * batch_size:(ts_step + 1) * batch_size]
                    for i, d in zip(ftr_in_list, fea_list)
                }
                fd2 = {
                    i: d[ts_step * batch_size:(ts_step + 1) * batch_size]
                    for i, d in zip(bias_in_list, biases_list)
                }
                fd4 = {
                    i: d[ts_step * batch_size:(ts_step + 1) * batch_size]
                    for i, d in zip(inter_in_list, interaction_list)
                }
                fd3 = {
                    lbl_in: y_test,
                    msk_in: test_mask,
                    neg_msk: neg_mask,
                    is_train: False,
                    attn_drop: 0.0,
                    ffd_drop: 0.0
                }
                fd = fd1
                fd.update(fd2)
                fd.update(fd4)
                fd.update(fd3)
                out_come, loss_value_ts, acc_ts, jhy_final_embedding, embed = sess.run(
                    [logits, loss, accuracy, final_embedding, embed_list],
                    feed_dict=fd)
                ts_loss += loss_value_ts
                ts_acc += acc_ts
                ts_step += 1

            print('Test loss:', ts_loss / ts_step, '; Test accuracy:',
                  ts_acc / ts_step)

            out_come = out_come.reshape((1373, 173))
            test_negative_samples = test_negative_sample(
                labels, len(test_arr), neg_mask.reshape((1373, 173)))
            test_labels, scores = ROC(out_come, labels, test_arr,
                                      test_negative_samples)
            return test_labels, scores
Code example #15
def train(train_arr, test_arr):
    
    # training params
    batch_size = 1
    nb_epochs = 200
    lr = 0.005  
    l2_coef = 0.0005  
    weight_decay = 5e-4
    hid_units = [8] 
    n_heads = [4, 1] 
    residual = False
    nonlinearity = tf.nn.elu
    model = GAT

    print('----- Opt. hyperparams -----')
    print('lr: ' + str(lr))
    print('l2_coef: ' + str(l2_coef))
    print('----- Archi. hyperparams -----')
    print('nb. layers: ' + str(len(hid_units)))
    print('nb. units per layer: ' + str(hid_units))
    print('nb. attention heads: ' + str(n_heads))
    print('residual: ' + str(residual))
    print('nonlinearity: ' + str(nonlinearity))
    print('model: ' + str(model))

    interaction, features, y_train, y_test, train_mask, test_mask, labels = load_data(train_arr, test_arr)
    nb_nodes = features.shape[0]  
    ft_size = features.shape[1]  

    features = features[np.newaxis]
    interaction = interaction[np.newaxis]
    biases = adj_to_bias(interaction, [nb_nodes], nhood=1) 
    
    nd = np.max(labels[:, 0]).astype(np.int32)
    nm = np.max(labels[:, 1]).astype(np.int32)
    entry_size = nd * nm
    with tf.Graph().as_default():
        with tf.name_scope('input'):
              feature_in = tf.compat.v1.placeholder(dtype=tf.float32, shape=(batch_size, nb_nodes, ft_size))
              bias_in = tf.compat.v1.placeholder(dtype=tf.float32, shape=(batch_size, nb_nodes, nb_nodes))
              lbl_in = tf.compat.v1.placeholder(dtype=tf.int32, shape=(entry_size, batch_size))
              msk_in = tf.compat.v1.placeholder(dtype=tf.int32, shape=(entry_size, batch_size))
              neg_msk = tf.compat.v1.placeholder(dtype=tf.int32, shape=(entry_size,batch_size))
              attn_drop = tf.compat.v1.placeholder(dtype=tf.float32, shape=())
              ffd_drop = tf.compat.v1.placeholder(dtype=tf.float32, shape=())
              is_train = tf.compat.v1.placeholder(dtype=tf.bool, shape=())
        
        final_embedding, coefs = model.encoder(feature_in, nb_nodes, is_train,
                                attn_drop, ffd_drop,
                                bias_mat=bias_in,
                                hid_units=hid_units, n_heads=n_heads,
                                residual=residual, activation=nonlinearity)
        scores = model.decoder(final_embedding, nd)

        loss = model.loss_sum(scores, lbl_in, msk_in, neg_msk, weight_decay, coefs, final_embedding)
    
        accuracy = masked_accuracy(scores, lbl_in, msk_in, neg_msk)
        
        train_op = model.training(loss, lr, l2_coef)

        init_op = tf.group(tf.compat.v1.global_variables_initializer(), tf.compat.v1.local_variables_initializer())

        with tf.compat.v1.Session() as sess:
          sess.run(init_op)

          train_loss_avg = 0
          train_acc_avg = 0

          for epoch in range(nb_epochs):
              
              t = time.time()
              
              ##########    train     ##############
              
              tr_step = 0
              tr_size = features.shape[0] 
              
              neg_mask, label_neg = generate_mask(labels, len(train_arr))
              
              while tr_step * batch_size < tr_size:  
                      _, loss_value_tr, acc_tr = sess.run([train_op, loss, accuracy],
                      feed_dict={
                           feature_in: features[tr_step*batch_size:(tr_step+1)*batch_size],   
                           bias_in: biases[tr_step*batch_size:(tr_step+1)*batch_size],
                           lbl_in: y_train,
                           msk_in: train_mask,
                           neg_msk: neg_mask,
                           is_train: True,
                           attn_drop: 0.1, ffd_drop: 0.1})
                      train_loss_avg += loss_value_tr
                      train_acc_avg += acc_tr
                      tr_step += 1
              print('Epoch: %04d | Training: loss = %.5f, acc = %.5f, time = %.5f' % ((epoch+1), loss_value_tr,acc_tr, time.time()-t))
          
          print("Finish traing.")
          
          ###########     test      ############
          
          ts_size = features.shape[0]
          ts_step = 0
          ts_loss = 0.0
          ts_acc = 0.0
    
          print("Start to test")
          while ts_step * batch_size < ts_size:
              out_come, emb, coef, loss_value_ts, acc_ts = sess.run([scores, final_embedding, coefs, loss, accuracy],
                      feed_dict={
                          feature_in: features[ts_step*batch_size:(ts_step+1)*batch_size],
                          bias_in: biases[ts_step*batch_size:(ts_step+1)*batch_size],
                          lbl_in: y_test,
                          msk_in: test_mask,
                          neg_msk: neg_mask,
                          is_train: False,
                          attn_drop: 0.0, ffd_drop: 0.0})
              ts_loss += loss_value_ts
              ts_acc += acc_ts
              ts_step += 1
          print('Test loss:', ts_loss/ts_step, '; Test accuracy:', ts_acc/ts_step)
              
          out_come = out_come.reshape((nd,nm))
          test_negative_samples = test_negative_sample(labels,len(test_arr),neg_mask.reshape((nd,nm)))
          test_labels, score = ROC(out_come,labels, test_arr,test_negative_samples)  
              
          return test_labels, score
Code example #16
File: train.py Project: longyahui/GCATSL
def train(train_arr, test_arr, cv, args, labels):
    batch_size = 1
    l2_coef = 0.0005
    hid_units = [8] 
    #nb_epochs = 600  
    #lr = 0.005  
    #weight_decay = 1e-4  
    #n_heads = 2
    
    nb_epochs = args.n_epoch
    lr = args.learning_rate
    weight_decay = args.weight_decay
    n_heads = args.n_head
    
    residual = False
    nonlinearity = tf.nn.elu
    model = GAT

    print('----- Opt. hyperparams -----')
    print('lr: ' + str(lr))
    print('l2_coef: ' + str(l2_coef))
    print('----- Archi. hyperparams -----')
    print('nb. layers: ' + str(len(hid_units)))
    print('nb. units per layer: ' + str(hid_units))
    print('nb. attention heads: ' + str(n_heads))
    print('residual: ' + str(residual))
    print('nonlinearity: ' + str(nonlinearity))
    print('model: ' + str(model))

    interaction_local_list, features_list, y_train, y_test, train_mask, test_mask, interaction_global_list = load_data(train_arr, test_arr, cv, args, labels)
    nb_nodes = features_list[0].shape[0]  
    ft_size = features_list[0].shape[1]  
    
    features_list = [feature[np.newaxis] for feature in features_list]  
    biases_local_list = [sparse_to_tuple(interaction) for interaction in interaction_local_list]  
    biases_global_list = [sparse_to_tuple(interaction) for interaction in interaction_global_list]
    #n = 6375
    n = args.n_node
    entry_size = n * n
    with tf.Graph().as_default():
        with tf.name_scope('input'):
              feature_in_list = [tf.placeholder(dtype=tf.float32,
                                      shape=(batch_size, nb_nodes, ft_size),
                                      name='ftr_in_{}'.format(i))
                       for i in range(len(features_list))]
              bias_in_local_list = [tf.compat.v1.sparse_placeholder(tf.float32, name='bias_local_in_{}'.format(i)) for i in range(len(biases_local_list))]
              bias_in_global_list = [tf.compat.v1.sparse_placeholder(tf.float32, name='bias_global_in_{}'.format(i)) for i in range(len(biases_global_list))]
              lbl_in = tf.compat.v1.placeholder(dtype=tf.int32, shape=(entry_size, batch_size))
              msk_in = tf.compat.v1.placeholder(dtype=tf.int32, shape=(entry_size, batch_size))
              neg_msk = tf.compat.v1.placeholder(dtype=tf.int32, shape=(entry_size,batch_size))
              attn_drop = tf.compat.v1.placeholder(dtype=tf.float32, shape=())
              ffd_drop = tf.compat.v1.placeholder(dtype=tf.float32, shape=())
              is_train = tf.compat.v1.placeholder(dtype=tf.bool, shape=())
        
        final_embedding = model.encoder(feature_in_list, nb_nodes, is_train,   
                                attn_drop, ffd_drop,
                                bias_mat_local_list = bias_in_local_list,
                                bias_mat_global_list = bias_in_global_list,
                                hid_units=hid_units, n_heads = n_heads,
                                residual=residual, activation=nonlinearity)
        
        pro_matrix = model.decoder_revised(final_embedding)    
        
        loss = model.loss_overall(pro_matrix, lbl_in, msk_in, neg_msk, weight_decay, final_embedding)
        accuracy = masked_accuracy(pro_matrix, lbl_in, msk_in, neg_msk)
        
        train_op = model.training(loss, lr, l2_coef)

        init_op = tf.group(tf.compat.v1.global_variables_initializer(), tf.compat.v1.local_variables_initializer())
        neg_mask = generate_mask(labels, len(train_arr), args.n_node)
        
        #start to train
        with tf.compat.v1.Session() as sess:
          sess.run(init_op)
          train_loss_avg = 0
          train_acc_avg = 0

          for epoch in range(nb_epochs):
              t = time.time()
              
              ##########    train     ##############
              tr_step = 0
              tr_size = features_list[0].shape[0] 
              while tr_step * batch_size < tr_size:  
                      fd1 = {i: d[tr_step * batch_size:(tr_step + 1) * batch_size]
                             for i, d in zip(feature_in_list, features_list)}       
                      fd2 = {bias_in_local_list[i]: biases_local_list[i] for i in range(len(biases_local_list))}   
                      fd3 = {bias_in_global_list[i]: biases_global_list[i] for i in range(len(biases_global_list))}   
                      fd4 = {lbl_in: y_train,   
                             msk_in: train_mask,       
                             neg_msk: neg_mask,
                             is_train: True,
                             attn_drop: args.dropout,
                             ffd_drop: args.dropout}
                      fd = fd1
                      fd.update(fd2)
                      fd.update(fd3)
                      fd.update(fd4)                  
                      _, loss_value_tr, acc_tr = sess.run([train_op, loss, accuracy], feed_dict=fd)
                     
                      train_loss_avg += loss_value_tr
                      train_acc_avg += acc_tr
                      tr_step += 1  
                     
              print('Epoch: %04d | Training: loss = %.5f, acc = %.5f, time = %.5f' % ((epoch+1), loss_value_tr,acc_tr, time.time()-t))
          
          print("Finish traing.")
          
          ###########     test      ############
          ts_size = features_list[0].shape[0]
          ts_step = 0
          ts_loss = 0.0
          ts_acc = 0.0
          print("Start to test")
          while ts_step * batch_size < ts_size:
              fd1 = {i: d[ts_step * batch_size:(ts_step + 1) * batch_size]
                     for i, d in zip(feature_in_list, features_list)}       
              fd2 = {bias_in_local_list[i]: biases_local_list[i] for i in range(len(biases_local_list))}   
              fd3 = {bias_in_global_list[i]: biases_global_list[i] for i in range(len(biases_global_list))}   
              fd4 = {lbl_in: y_test,   
                     msk_in: test_mask,       
                     neg_msk: neg_mask,
                     is_train: False,
                     attn_drop: 0.0,
                     ffd_drop: 0.0}
              fd = fd1
              fd.update(fd2)
              fd.update(fd3)
              fd.update(fd4) 
              score_matrix, loss_value_ts, acc_ts = sess.run([pro_matrix, loss, accuracy], feed_dict=fd)
              ts_loss += loss_value_ts
              ts_acc += acc_ts
              ts_step += 1
          print('Test loss:', ts_loss/ts_step, '; Test accuracy:', ts_acc/ts_step)
          
          score_matrix = score_matrix.reshape((n,n))
          test_positive_samples = test_positive_sample(labels, test_arr)
          test_negative_samples = test_negative_sample(labels,len(test_arr),neg_mask.reshape((n,n)))
          test_labels, test_scores = ROC(score_matrix,labels, test_arr,test_negative_samples) 
          test_samples = np.vstack((test_positive_samples, test_negative_samples))
          return test_labels, test_scores, test_samples
Code example #17
File: models.py Project: LiZhang-github/NFC-GCN
 def _accuracy(self):
     self.accuracy = masked_accuracy(self.outputs,
                                     self.placeholders['labels'],
                                     self.placeholders['labels_mask'])