Example #1
import os

import numpy as np

# Documents, UserResponse, Evaluator and Dataset are project classes assumed
# to be importable from the surrounding package; their definitions are not
# shown in this example.


class TestModel(object):
    """Base class for evaluating a ranking model against the user response
    model (UserResponse) and the pretrained evaluator."""

    def __init__(self, args):
        self.args = args

        self.doc = Documents(num_total=args.num_documents,
                             num_feature=args.feature_size,
                             num_visible=args.feature_size_vis,
                             seed=100)
        self.user = UserResponse(doc_feature_size=args.feature_size,
                                 slate_size=args.slate_size,
                                 seed=100)
        self.evaluator = Evaluator(model_path=os.path.join(
            os.path.dirname(os.path.realpath(__file__)), args.evaluator_path))
        self.test_set = Dataset(self.doc, 'test')
        self.model = self._model_init(args)

    def _model_init(self, args):
        """Build and return the ranking model; supplied by subclasses."""
        raise NotImplementedError

    def test_on_testset(self):
        """Offline evaluation on the held-out test set; supplied by subclasses."""
        raise NotImplementedError

    def test_on_user(self):
        with self.model.model_graph.as_default() as g:
            user_res_all, eval_pred_all = [], []
            while True:
                try:
                    x_data_id, x_data, y = self.test_set.read(1000)
                    # Rank the candidates and gather the document ids of the
                    # chosen slate for every example in the batch.
                    rank_id = self.model.rank(x_data)
                    batch_id = np.tile(
                        np.arange(rank_id.shape[0]).reshape((-1, 1)),
                        (1, rank_id.shape[1]))
                    x_data_id_rank = x_data_id[(batch_id, rank_id)]
                    # user: true responses to the ranked slates
                    slate = self.doc.get_feature_all(
                        x_data_id_rank.reshape((-1, )))
                    slate = slate.reshape(
                        (x_data_id_rank.shape[0], x_data_id_rank.shape[1], -1))
                    res = self.user.response(slate)
                    user_res_all.append(res)
                    # evaluator: predicted responses from the learned evaluator
                    slate_ = self.doc.get_feature_visible(
                        x_data_id_rank.reshape((-1, )))
                    slate_ = slate_.reshape(
                        (x_data_id_rank.shape[0], x_data_id_rank.shape[1], -1))
                    pred = self.evaluator.predict(slate_)
                    eval_pred_all.append(pred)
                except Exception as e:
                    # Dataset.read is expected to raise once the test set is
                    # exhausted, which ends the evaluation loop.
                    print(e)
                    break
            user_res_all = np.concatenate(user_res_all, axis=0)
            eval_pred_all = np.concatenate(eval_pred_all, axis=0)
            print('True score:', np.mean(np.sum(user_res_all, axis=1)))
            print('Eval score:', np.mean(np.sum(eval_pred_all, axis=1)))
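The TestModel class above is abstract: _model_init and test_on_testset must be
supplied by a concrete model. The sketch below is not part of the original code;
it assumes a hypothetical RandomRankModel whose rank method simply picks
slate_size candidates at random, purely to illustrate how a subclass plugs into
test_on_user.

import numpy as np
import tensorflow as tf


class RandomRankModel(object):
    """Hypothetical stand-in model that ranks candidate documents at random."""

    def __init__(self, slate_size, seed=0):
        self.slate_size = slate_size
        self.rng = np.random.RandomState(seed)
        # test_on_user() enters the model's graph, so expose one even though
        # this toy model performs no TensorFlow computation.
        self.model_graph = tf.Graph()

    def rank(self, x_data):
        # x_data: (batch, num_candidates, feature_size). Return the indices of
        # slate_size candidates per example, chosen at random here.
        scores = self.rng.rand(x_data.shape[0], x_data.shape[1])
        return np.argsort(-scores, axis=1)[:, :self.slate_size]


class RandomTestModel(TestModel):
    def _model_init(self, args):
        return RandomRankModel(slate_size=args.slate_size)

    def test_on_testset(self):
        # A real subclass would score its predictions offline here; the random
        # baseline has nothing to evaluate.
        pass

With such a subclass, RandomTestModel(args).test_on_user() ranks every batch of
the test set, queries the user response model for the true slate responses, and
prints the true and evaluator-predicted scores side by side.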
Example #3
        default=datetime.datetime.now().strftime("%Y%m%d%H%M%S"))
    parser.add_argument('--evaluator_path',
                        type=str,
                        default='model_eval',
                        help='evaluator ckpt dir')
    FLAGS, _ = parser.parse_known_args()
    return FLAGS


if __name__ == "__main__":
    args = parse_args()
    print(args)
    write_args(args, './model_params.txt')
    # dataset
    doc = Documents(num_total=args.num_documents,
                    num_feature=args.feature_size,
                    num_visible=args.feature_size_vis,
                    seed=100)
    train_set = Dataset(doc, 'train')
    val_set = Dataset(doc, 'validation')
    # ranking model and pretrained evaluator
    model_path = os.path.join(args.checkpointDir, args.algo, args.timestamp)
    model = MontCarloModel(args, model_path, args.algo)
    evaluator = Evaluator(model_path=os.path.join(
        os.path.dirname(os.path.realpath(__file__)), args.evaluator_path))
    with model.model_graph.as_default() as g:
        # Bind a TensorFlow session to the model's graph.
        sess = tf.Session(graph=g)
        model.set_sess(sess)

        path1 = os.path.join(model_path, 'train')
        path2 = os.path.join(model_path, 'test')
        if not os.path.isdir(path1):
            os.makedirs(path1)
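The script above also relies on a write_args helper that is not shown in the
snippet. A minimal sketch of such a helper, assuming it only dumps the parsed
arguments to a text file (the helper in the original project may differ):

def write_args(args, path):
    # Persist the parsed hyper-parameters so a run can be reproduced later.
    with open(path, 'w') as f:
        for name, value in sorted(vars(args).items()):
            f.write('{}: {}\n'.format(name, value))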