Example No. 1
    def predict(self, data_points, load_path):
        res = []
        self.graph = tf.Graph()
        with self.graph.as_default():
            with tf.Session() as sess:

                saver = tf.train.import_meta_graph(load_path + '.meta')
                saver.restore(sess, load_path)

                # get restored input and output tensors from the imported graph
                self.fm_col_vals = self.graph.get_operation_by_name(
                    "ModelInput/features").outputs[0]
                self.training = self.graph.get_tensor_by_name(
                    "ModelInput/training_flag:0")
                self.output_prob = self.graph.get_operation_by_name(
                    "ModelOutput/truediv").outputs[0]

                ###
                print('Start Predict.')

                for batch_id, (ft, labels) in DataInput(
                        data_points, self.batch_size, self.fm_cols,
                        self.label_name):
                    feed_vals = {}
                    feed_vals[self.fm_col_vals] = ft
                    feed_vals[self.training] = False

                    predictions = sess.run(self.output_prob,
                                           feed_dict=feed_vals)
                    res.append(predictions)

        return np.concatenate(res)
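
All of the examples on this page iterate over some variant of a DataInput batching helper, whose definition is not shown. Below is a minimal sketch of an iterator matching the four-argument call used in Example No. 1 (data_points, batch_size, fm_cols, label_name); the assumption that each data point is a dict keyed by column name is mine, and the two-argument variants used in the later examples come from different repositories and may behave differently.

# Hedged sketch of a batching iterator compatible with
#   for batch_id, (ft, labels) in DataInput(points, batch_size, fm_cols, label_name)
# Assumes each data point is a dict mapping column names to values.
import numpy as np


class DataInput:
    def __init__(self, data_points, batch_size, fm_cols, label_name):
        self.data_points = data_points
        self.batch_size = batch_size
        self.fm_cols = fm_cols
        self.label_name = label_name

    def __iter__(self):
        for batch_id, start in enumerate(
                range(0, len(self.data_points), self.batch_size)):
            batch = self.data_points[start:start + self.batch_size]
            # feature matrix: one row per point, one column per feature column
            ft = np.array([[p[c] for c in self.fm_cols] for p in batch],
                          dtype=np.float32)
            labels = [p[self.label_name] for p in batch]
            yield batch_id, (ft, labels)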
Example No. 2
def evaluation(sess, model):
    ano_scores = []
    for _, batch_data in DataInput(x, test_batch_size):
        _ano_score = model.eval(sess, batch_data)
        # Extend
        ano_scores += list(_ano_score)
    ano_scores = np.array(ano_scores).reshape((-1, 1))
    # Calculate auc
    auroc = calc_auroc(y, ano_scores)
    print('Eval_auroc:{:.4f}'.format(auroc))
    prec, rec, f1 = calc_metric(y, ano_scores)
    print('Prec:{:.4f}\tRec:{:.4f}\tF1:{:.4f}\n'.format(prec, rec, f1))

    draw_prc(y, ano_scores, key='ResDEAAE_' + 'cross-e')
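
Example No. 2 calls the helpers calc_auroc and calc_metric, which are defined elsewhere in the repository. A possible sketch of both, assuming scikit-learn is available and that calc_metric thresholds the anomaly scores at a score percentile (Example No. 8 passes percentile=80); the exact thresholding convention of the original helper may differ.

# Hedged sketch of the metric helpers; not the original implementation.
import numpy as np
from sklearn.metrics import roc_auc_score, precision_recall_fscore_support


def calc_auroc(labels, scores):
    # area under the ROC curve of the anomaly scores
    return roc_auc_score(np.asarray(labels).ravel(),
                         np.asarray(scores).ravel())


def calc_metric(labels, scores, percentile=80):
    # treat scores at or above the given score percentile as anomalous
    labels = np.asarray(labels).ravel()
    scores = np.asarray(scores).ravel()
    threshold = np.percentile(scores, percentile)
    preds = (scores >= threshold).astype(int)
    prec, rec, f1, _ = precision_recall_fscore_support(
        labels, preds, average='binary')
    return prec, rec, f1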
Example No. 3
def evaluation(sess, model, ratio):
    (sub_ano, sub_ano_label), _ = _split_dataset(ano, ano_label,
                                                 mapping_ratio[ratio])
    x = np.concatenate((norm, sub_ano), axis=0)
    y = np.concatenate((norm_label, sub_ano_label), axis=0)

    ano_scores = []
    for _, batch_data in DataInput(x, test_batch_size):
        _ano_score = model.eval(sess, batch_data)
        # Extend
        ano_scores += list(_ano_score)
    ano_scores = np.array(ano_scores).reshape((-1, 1))
    # Calculate auc
    auroc = calc_auroc(y, ano_scores)
    print('Anomaly ratio:{:.4f}\tEval_auroc:{:.4f}'.format(ratio, auroc))
    prec, rec, f1 = calc_metric(y, ano_scores)
    print('Prec:{:.4f}\tRec:{:.4f}\tF1:{:.4f}\n'.format(prec, rec, f1))
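
The evaluation(sess, model, ratio) variant above mixes the normal set with a subset of the anomalies selected by _split_dataset, which is not shown. A minimal, purely hypothetical sketch, assuming the helper keeps the first fraction of the anomalous samples and returns the remainder as the second element; the real helper may shuffle or stratify differently.

# Hypothetical sketch of _split_dataset.
def _split_dataset(data, labels, keep_ratio):
    n_keep = int(len(data) * keep_ratio)
    kept = (data[:n_keep], labels[:n_keep])
    rest = (data[n_keep:], labels[n_keep:])
    return kept, rest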
def _eval(sess, model, test_data, label):
    ano_scores = []
    for _, batch_data in DataInput(test_data, test_batch_size):
        _ano_score = model.eval(sess, batch_data)
        # Extend
        ano_scores += list(_ano_score)
    ano_scores = np.array(ano_scores).reshape((-1, 1))
    # Calculate auroc
    auroc = calc_auroc(label, ano_scores)
    # Calculate metric
    prec, rec, f1 = 0., 0., 0.  # skipped for the sake of computation speed
    # prec, rec, f1 = calc_metric(label, ano_scores)

    global best_auroc
    if best_auroc < auroc:
        best_auroc = auroc
        model.save(sess, '{}/ckpt'.format(save_path))
    return auroc, prec, rec, f1
def evaluation(sess, model):
    ano_scores = []
    for _, batch_data in DataInput(x, test_batch_size):
        _ano_score = model.eval(sess, batch_data)
        # Extend
        ano_scores += list(_ano_score)
    ano_scores = np.array(ano_scores).reshape((-1, 1))

    with open('scores.pkl', 'wb') as f:
        pickle.dump((y, ano_scores), f, pickle.HIGHEST_PROTOCOL)

    # Calculate auc
    auroc = calc_auroc(y, ano_scores)
    print('Eval_auroc:{:.4f}'.format(auroc))
    prec, rec, f1 = calc_metric(y, ano_scores)
    print('Prec:{:.4f}\tRec:{:.4f}\tF1:{:.4f}\n'.format(prec, rec, f1))

    draw_prc(y, ano_scores, key='DEAAE_' + method)
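
draw_prc is called with a key argument in the examples above but never defined. A sketch of a precision-recall curve plot built with scikit-learn and matplotlib; using key as part of the output file name is an assumption.

# Hedged sketch of draw_prc; file naming and styling are assumptions.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import precision_recall_curve, average_precision_score


def draw_prc(labels, scores, key='model'):
    labels = np.asarray(labels).ravel()
    scores = np.asarray(scores).ravel()
    precision, recall, _ = precision_recall_curve(labels, scores)
    auprc = average_precision_score(labels, scores)
    plt.figure()
    plt.step(recall, precision, where='post')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.title('{} (AUPRC={:.4f})'.format(key, auprc))
    plt.savefig('{}_prc.png'.format(key))
    plt.close()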
Example No. 6
def _eval(sess, model, test_data, label):
    ano_scores = []
    for _, batch_test_data in DataInput(test_data, test_batch_size):
        _ano_score, _, _ = model.eval(sess, batch_test_data)
        # Extend
        ano_scores += list(_ano_score)
    ano_scores = np.array(ano_scores).reshape((-1, 1))

    # Highest 80% are anomalous
    prec, rec, f1 = calc_metric(label, ano_scores)
    # Calculate auprc
    _auprc = calc_auc(label, ano_scores)

    global best_f1
    if best_f1 < f1:
        best_f1 = f1

    global best_auprc
    if best_auprc < _auprc:
        best_auprc = _auprc
        model.save(sess, '{}/ckpt'.format(save_path))
    return prec, rec, f1, _auprc
    prec, rec, f1, auprc = _eval(sess, model, x_val, y_val)
    print('Prec:{:.4f}  |  Rec:{:.4f}  |  F1:{:.4f}  |  Eval_auprc:{:.4f}'.
          format(prec, rec, f1, auprc))
    sys.stdout.flush()

    # Start training
    start_time = time.time()
    for i in range(nb_epochs):
        print('==== Training epoch {} ===='.format(i))
        sys.stdout.flush()

        # shuffle for each epoch
        x_train, y_train = _shuffle(x_train, y_train)

        loss_dis_sum, loss_gen_sum, loss_enc_sum = 0., 0., 0.
        for j, batch_data in DataInput(x_train, train_batch_size):
            # Update discriminator
            if (j % d_g_iter != 0) or (j == 0):
                loss_dis, loss_gen, loss_enc = model.train(sess,
                                                           batch_data,
                                                           learning_rate,
                                                           train_d=True)

            # Update generator and encoder
            else:
                loss_dis, loss_gen, loss_enc = model.train(sess,
                                                           batch_data,
                                                           learning_rate,
                                                           train_d=False)

            loss_dis_sum += loss_dis
Example No. 8
    val_set = pickle.load(f)
    test_set = pickle.load(f)

x_test, y_test = test_set

print('test set', x_test.shape)

with tf.Session() as sess:
    model = BiWGAN(input_dim, method, weight, degree)
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    model.restore(sess, '{}/ckpt'.format(save_path))

    ano_scores = []
    for _, batch_test_data in DataInput(x_test, test_batch_size):
        _ano_score, _, _ = model.eval(sess, batch_test_data)
        # extend
        ano_scores += list(_ano_score)
    ano_scores = np.array(ano_scores).reshape((-1, 1))

    # Highest 80% are anomalous
    prec, rec, f1 = calc_metric(y_test, ano_scores, percentile=80)

    # Calculate auc
    auprc = calc_auc(y_test, ano_scores)
    print('Prec:{:.4f}  |  Rec:{:.4f}  |  F1:{:.4f}  |  AUPRC:{:.4f}'.format(
        prec, rec, f1, auprc))

    # draw prc curve
    # draw_prc(y_test, ano_scores)
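
Examples No. 6 and No. 8 treat the value returned by calc_auc as an AUPRC. A short sketch, assuming it is the average-precision score from scikit-learn rather than the original implementation.

# Hedged sketch of calc_auc as area under the precision-recall curve.
import numpy as np
from sklearn.metrics import average_precision_score


def calc_auc(labels, scores):
    return average_precision_score(np.asarray(labels).ravel(),
                                   np.asarray(scores).ravel())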
Example No. 9
    def train(self,
              train_points,
              eval_points,
              epoch_num,
              ob_step=5,
              save_path=None,
              load_path=None):  #[B,T,H]

        if load_path is None:
            self.graph = self.build_graph()

        else:
            self.graph = tf.Graph()

        with self.graph.as_default():
            with tf.Session() as sess:
                if load_path is None:
                    saver = tf.train.Saver()
                    tf.global_variables_initializer().run()
                else:
                    saver = tf.train.import_meta_graph(load_path + '.meta')
                    saver.restore(sess, load_path)

                    # get weights and ops
                    self.fm_col_vals = self.graph.get_operation_by_name(
                        "ModelInput/features").outputs[0]
                    self.labels = self.graph.get_tensor_by_name(
                        "ModelInput/labels:0")
                    self.training = self.graph.get_tensor_by_name(
                        "ModelInput/training_flag:0")

                    self.update = self.graph.get_operation_by_name(
                        "Optimizer/GradientDescent")
                    self.loss = self.graph.get_operation_by_name(
                        "Loss/Mean").outputs[0]
                    self.output_prob = self.graph.get_operation_by_name(
                        "ModelOutput/truediv").outputs[0]

                ### Training
                print('Start Training')
                step = 0
                cnt = 0
                metric_train_loss = 0
                train_preds = []
                train_labels = []
                for ep in range(epoch_num):
                    print("############## epoch %d###############" % ep)
                    random.shuffle(train_points)
                    for batch_id, (ft, labels) in DataInput(
                            train_points, self.batch_size, self.fm_cols,
                            self.label_name):

                        feed_vals = {}
                        feed_vals[self.fm_col_vals] = ft
                        feed_vals[self.labels] = np.expand_dims(
                            np.array(labels), axis=1)
                        feed_vals[self.training] = True

                        _, l, predictions = sess.run(
                            [self.update, self.loss, self.output_prob],
                            feed_dict=feed_vals)

                        metric_train_loss += l * len(labels)
                        cnt += len(labels)
                        train_preds.append(predictions)
                        train_labels.append(labels)

                        # if ob_step steps have passed, print the current training result
                        if step > 0 and step % ob_step == 0:
                            accuracy = self.accuracy(
                                np.concatenate(train_preds, axis=0),
                                np.expand_dims(np.array(
                                    list(chain(*train_labels))),
                                               axis=1))

                            print('Minibatch loss at step %d: %f' %
                                  (step, metric_train_loss / cnt))
                            print('Minibatch accuracy: %.1f%%\n' %
                                  (100 * accuracy))

                            train_preds = []
                            train_labels = []
                            metric_train_loss = 0
                            cnt = 0
                        ###

                        # when one epoch finishes, run evaluation on the dev set
                        if step == self.epoch_size - 1:
                            step = 0
                            eval_cnt = 0
                            eval_loss = 0
                            eval_preds = []
                            eval_labels = []
                            for batch_id, (ft, labels) in DataInput(
                                    eval_points, self.batch_size, self.fm_cols,
                                    self.label_name):
                                feed_vals = {}
                                feed_vals[self.fm_col_vals] = ft
                                feed_vals[self.training] = False
                                feed_vals[self.labels] = np.expand_dims(
                                    np.array(labels), axis=1)
                                l, predictions = sess.run(
                                    [self.loss, self.output_prob],
                                    feed_dict=feed_vals)
                                eval_loss += l * len(labels)
                                eval_cnt += len(labels)
                                eval_preds.append(predictions)
                                eval_labels.append(labels)

                            accuracy = self.accuracy(
                                np.concatenate(eval_preds, axis=0),
                                np.expand_dims(np.array(
                                    list(chain(*eval_labels))),
                                               axis=1))

                            print('DEV_SET loss at step %d: %f' %
                                  (step, eval_loss / eval_cnt))
                            print('DEV_SET accuracy: %.1f%%\n' %
                                  (100 * accuracy))

                        else:
                            step += 1

                # save the trained model
                if save_path is not None:
                    saver.save(sess, save_path)
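
The train method above reports mini-batch accuracy via self.accuracy, which is not shown. A minimal sketch, assuming predictions are probabilities in [0, 1] with shape [batch, 1] and labels are 0/1 values of the same shape.

# Hedged sketch of the accuracy helper used in Example No. 9.
import numpy as np


def accuracy(predictions, labels):
    # threshold probabilities at 0.5 and compare with the 0/1 labels
    preds = (np.asarray(predictions) >= 0.5).astype(np.int32)
    return np.mean(preds == np.asarray(labels).astype(np.int32))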
Example No. 10
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    print('Objective1_Value: %.4f\t Objective2_Value: %.4f\t' %
          evaluate(sess, model, train_set))
    sys.stdout.flush()
    lr = 1
    start_time = time.time()
    last_auc = 0.0

    for epoch in range(100):
        random.shuffle(train_set)
        random.shuffle(test_set)
        epoch_size = round(len(train_set) / batch_size)
        loss_sum = 0.0
        for _, uij in DataInput(train_set, batch_size):
            loss = model.train(sess, uij, lr)
            loss_sum += loss
        print('Epoch %d Train_Loss: %.4f' %
              (model.global_epoch_step.eval(), loss_sum))
        print('Epoch %d DONE\tCost time: %.2f' %
              (model.global_epoch_step.eval(), time.time() - start_time))
        print('Objective1_Value: %.4f\t Objective2_Value: %.4f\t' %
              evaluate(sess, model, train_set))
        #print('Objective1_Value: %.4f\t Objective2_Value: %.4f\t' % evaluate(sess, model, test_set))
        sys.stdout.flush()
        model.global_epoch_step_op.eval()
        if epoch % 5 == 0:
            update_target_graph('primary_dqn', 'target_dqn')

end_time = time.time()
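
Example No. 10 periodically calls update_target_graph('primary_dqn', 'target_dqn') to synchronize a target network with the primary network, but the helper itself is not shown. A minimal TF1 sketch, assuming both networks were built under variable scopes with those names and that a default session is active when the call is made.

# Hedged sketch: copy every trainable variable from one scope to another.
import tensorflow as tf


def update_target_graph(from_scope, to_scope):
    from_vars = sorted(
        tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=from_scope),
        key=lambda v: v.name)
    to_vars = sorted(
        tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=to_scope),
        key=lambda v: v.name)
    copy_ops = [t.assign(f) for f, t in zip(from_vars, to_vars)]
    tf.get_default_session().run(copy_ops)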
Example No. 11
testset2 = testset2[:len(testset2)//batch_size*batch_size]

gpu_options = tf.GPUOptions(allow_growth=True)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
    model = Model(user_count, item_count, batch_size)
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    
    print('Domain_A_Initialized_AUC: %.4f\tDomain_B_Initialized_AUC: %.4f' % compute_auc(sess, model, testset1, testset2))
    sys.stdout.flush()
    start_time = time.time()
    last_auc = 0.0
    
    for _ in range(1000):
        loss_sum = 0.0
        for uij in DataInput(trainset1, batch_size):
            loss = model.train_1(sess, uij, lr)
            loss_sum += loss
        for uij in DataInput(trainset2, batch_size):
            loss = model.train_2(sess, uij, lr)
            loss_sum += loss
        model.train_orth(sess, uij[0], lr)
        test_auc_1, test_auc_2 = compute_auc(sess, model, testset1, testset2)
        train_auc_1, train_auc_2 = compute_auc(sess, model, trainset1, trainset2)
        print('Epoch %d \tDomain A Train_AUC: %.4f\tTest_AUC: %.4F' % (model.global_epoch_step.eval(), train_auc_1, test_auc_1))
        print('Epoch %d \tDomain B Train_AUC: %.4f\tTest_AUC: %.4F' % (model.global_epoch_step.eval(), train_auc_2, test_auc_2))
        print('Epoch %d \tTrain_loss: %.4f' % (model.global_epoch_step.eval(), loss_sum))
        print('Epoch %d DONE\tCost time: %.2f' % (model.global_epoch_step.eval(), time.time()-start_time))
        sys.stdout.flush()
        model.global_epoch_step_op.eval()
        hit_1, hit_2 = compute_hr(sess, model, testset1, testset2)