def __init__(self, valid_repos_list, pm_dir, assessments_dir):
    # Log timing/debug information to a uniquely named file.
    logging.basicConfig(
        filename="AvgUserSrdp_performance_" + str(time.time()) + ".log",
        level=logging.DEBUG)
    self.valid_repos_list = valid_repos_list
    # Number of users, and per-user count of validation repositories.
    self.NU = len(self.valid_repos_list)
    self.total_positives = np.array([len(a) for a in valid_repos_list])
    self.predicted_positives = np.zeros(self.NU)
    self.out_dir = assessments_dir
    self.fileno = 0
    self.pm_dir = pm_dir
    # Load the recommendations produced by run 0 under pm_dir.
    r_fn = Runner.run_results_fn(pm_dir, 0)
    self.pm_repos_list = Recommender.load_recommendations(r_fn)
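# A minimal construction sketch (hypothetical values; the class name
# "AvgUserSrdp" is assumed from the log filename above, and the per-user
# validation sets are assumed to be collections of repository identifiers):
#
#   valid_repos_list = [{"alice/repoA"}, {"bob/repoB", "bob/repoC"}]
#   assessor = AvgUserSrdp(valid_repos_list,
#                          pm_dir="/path/to/pm_runs",
#                          assessments_dir="/path/to/Assessments")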
argparser = ArgumentParser()
argparser.add_argument('metric',
                       help="Which metric to test among "
                            "recall, unserendipity, sdist, srdp")
argparser.add_argument('recommendations_fn',
                       help="filename of recommendations")
argparser.add_argument('G_I_ru_tm_fn',
                       help="Interest graph (repositories, users) filename")
argparser.add_argument('vrt_fn',
                       help="Validation repositories and times filename")
argparser.add_argument('Ks', help="Top Ks", type=int, nargs='+')
# Needed only if doing sdist
argparser.add_argument('--G_F_fn',
                       help="Followership graph (users, users) filename")
argparser.add_argument('--dir',
                       help="Where to store the generated assessments",
                       default=join(os.getcwd(), "Assessments"))
argparser.add_argument('--test', help="is a test?", action="store_true")
args = argparser.parse_args()

recommendations_fn = abspath(args.recommendations_fn)
recommendations = Recommender.load_recommendations(recommendations_fn)
G_I_ru_tm_fn = abspath(args.G_I_ru_tm_fn)
# --G_F_fn is optional: abspath(None) would raise, so guard against it.
G_F_fn = abspath(args.G_F_fn) if args.G_F_fn else None
dataset_dir = dirname(G_I_ru_tm_fn)
out_dir = abspath(args.dir)
if not exists(out_dir):
    os.makedirs(out_dir)

# The validation file holds a pickled list of (time, repos) pairs, one per user.
vrt_fn = abspath(args.vrt_fn)
with open(vrt_fn, "rb") as pf:
    valid_times_repos_list = cPickle.load(pf)
valid_times_list = [a[0] for a in valid_times_repos_list]
valid_repos_list = [a[1] for a in valid_times_repos_list]

if args.test:
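# Example invocation (a sketch; the script name and all paths are
# hypothetical, and --G_F_fn is only needed for the sdist metric):
#
#   python assess.py srdp recommendations.pkl G_I_ru_tm.pkl vrt.pkl 1 5 10 \
#       --G_F_fn G_F.pkl --dir ./Assessments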