Example 1
import os

import numpy as np

def compute_scores(raw_data_dir=FLAGS.raw_data, data_dir=FLAGS.data_dir,
  dataset=FLAGS.dataset, save_recommendation=FLAGS.saverec,
  train_dir=FLAGS.train_dir, test=FLAGS.test):
  
  from evaluate import Evaluation as Evaluate
  evaluation = Evaluate(raw_data_dir, test=test)
 
  # R: uid -> list of top-N recommended item ids
  R = recommend(raw_data_dir, test=test)
  
  evaluation.eval_on(R)
  scores_self, scores_ex = evaluation.get_scores()
  mylog("====evaluation scores (NDCG, RECALL, PRECISION, MAP) @ 2,5,10,20,30====")
  mylog("METRIC_FORMAT (self): {}".format(scores_self))
  mylog("METRIC_FORMAT (ex  ): {}".format(scores_ex))
  if save_recommendation:
    name_inds = os.path.join(train_dir, "indices.npy")
    np.save(name_inds, R)
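Note that R here is a plain dict (uid -> list of item ids), so np.save stores it by pickling it into a 0-d object array rather than as a numeric array. A minimal, self-contained sketch of the save/load round trip (file name and values are illustrative only, not from the project):

import numpy as np

R = {"u1": [10, 42, 7], "u2": [3, 10, 99]}   # uid -> recommended item ids
np.save("indices.npy", R)                    # pickled as a 0-d object array

# loading a pickled dict needs allow_pickle=True, then .item() to unwrap it
R_loaded = np.load("indices.npy", allow_pickle=True).item()
assert R_loaded == R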
Example 2
import time

import numpy as np
import tensorflow as tf

def recommend(raw_data=FLAGS.raw_data, test=FLAGS.test, loss=FLAGS.loss,
  batch_size=FLAGS.batch_size, topN=FLAGS.top_N_items,
  device_log=FLAGS.device_log):

  with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, 
    log_device_placement=device_log)) as sess:
    mylog("reading data")

    (_, items_dev, _, _, u_attributes, i_attributes, item_ind2logit_ind, 
      logit_ind2item_ind, _, user_index, item_index) = get_data(raw_data, 
      data_dir=FLAGS.data_dir)
    
    from evaluate import Evaluation as Evaluate
    
    evaluation = Evaluate(raw_data, test=test)
    
    model = create_model(sess, u_attributes, i_attributes, item_ind2logit_ind,
      logit_ind2item_ind, loss=loss, ind_item=None)

    Uinds = evaluation.get_uinds()
    N = len(Uinds)
    mylog("N = %d" % N)
    Uinds = [p for p in Uinds if p in items_dev]
    mylog("new N = {}, (reduced from original {})".format(len(Uinds), N))
    if len(Uinds) < N:
      evaluation.set_uinds(Uinds)
    N = len(Uinds)
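    # rec[i, :] collects the top-N recommendations (as logit indices) for Uinds[i]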
    rec = np.zeros((N, topN), dtype=int)
    count = 0
    time_start = time.time()
    for idx_s in range(0, N, batch_size):
      count += 1
      if count % 100 == 0:
        mylog("idx: %d, c: %d" % (idx_s, count))
        
      idx_e = idx_s + batch_size
      if idx_e <= N:
        users = Uinds[idx_s: idx_e]
        items_input = [items_dev[u] for u in users]
        # transpose so each entry holds one item position across the whole batch
        items_input = list(map(list, zip(*items_input)))
        recs = model.step(sess, users, items_input, forward_only=True,
          recommend=True, recommend_new=FLAGS.recommend_new)
        rec[idx_s:idx_e, :] = recs
      else:
        # final partial batch: pad by repeating the first user so the batch
        # stays full-size, then keep only the rows for real users
        users = list(range(idx_s, N)) + [0] * (idx_e - N)
        users = [Uinds[t] for t in users]
        items_input = [items_dev[u] for u in users]
        items_input = list(map(list, zip(*items_input)))
        recs = model.step(sess, users, items_input, forward_only=True,
          recommend=True, recommend_new=FLAGS.recommend_new)
        idx_e = N
        rec[idx_s:idx_e, :] = recs[:(idx_e - idx_s), :]
    # rec[i, :] now holds the top-N recommended logit indices for user Uinds[i]

    time_end = time.time()
    mylog("Time used %.1f" % (time_end - time_start))

    # invert item_index (item id -> internal index) to map indices back to ids
    ind2id = {}
    for iid in item_index:
      ind = item_index[iid]
      assert ind not in ind2id
      ind2id[ind] = iid
    
    uids = evaluation.get_uids()
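    # map each row of logit indices back to raw item ids:
    # logit index -> logit_ind2item_ind -> item index -> ind2id -> item id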
    R = {}
    for i in range(N):
      uid = uids[i]
      R[uid] = [ind2id[logit_ind2item_ind[v]] for v in list(rec[i, :])]

    evaluation.eval_on(R)
    scores_self, scores_ex = evaluation.get_scores()
    mylog("====evaluation scores (NDCG, RECALL, PRECISION, MAP) @ 2,5,10,20,30====")
    mylog("METRIC_FORMAT (self): {}".format(scores_self))
    mylog("METRIC_FORMAT (ex  ): {}".format(scores_ex))

  return R
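The loop above keeps every model.step call at a fixed batch size by padding the final partial batch and discarding the padded rows afterwards. A minimal numpy-only sketch of that pattern, with a hypothetical fake_step standing in for the project's model.step:

import numpy as np

def fake_step(user_batch, top_n):
  # stand-in for model.step: one row of top_n dummy indices per user
  return np.tile(np.arange(top_n), (len(user_batch), 1)) + np.asarray(user_batch)[:, None]

uinds = list(range(10))            # 10 users
batch_size, top_n = 4, 3
n = len(uinds)
rec = np.zeros((n, top_n), dtype=int)

for idx_s in range(0, n, batch_size):
  idx_e = idx_s + batch_size
  if idx_e <= n:
    rec[idx_s:idx_e] = fake_step(uinds[idx_s:idx_e], top_n)
  else:
    # pad the tail by repeating the first user, keep only the valid rows
    padded = uinds[idx_s:n] + [uinds[0]] * (idx_e - n)
    rec[idx_s:n] = fake_step(padded, top_n)[:n - idx_s]

print(rec.shape)   # (10, 3)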