def main(year, min_count=None, outfile=None):
    """ Main function for training and evaluating AAE methods on IREON data """
    if CLEAN:
        print("Loading data from", DATA_PATH)
        papers = load(DATA_PATH)
        print("Cleaning data...")
        clean(CLEAN_DATA_PATH, papers)
        print("Clean data in {}".format(CLEAN_DATA_PATH))
        return

    print("Loading data from", CLEAN_DATA_PATH)
    papers = load(CLEAN_DATA_PATH)
    print("Unpacking IREON data...")
    bags_of_papers, ids, side_info = unpack_papers(papers)
    del papers
    bags = Bags(bags_of_papers, ids, side_info)

    log("Whole dataset:", logfile=outfile)
    log(bags, logfile=outfile)
    evaluation = Evaluation(bags, year, logfile=outfile)
    evaluation.setup(min_count=min_count, min_elements=2)
    print("Loading pre-trained embedding", W2V_PATH)

    with open(outfile, 'a') as fh:
        print("~ Partial List", "~" * 42, file=fh)
    evaluation(BASELINES + RECOMMENDERS)

    with open(outfile, 'a') as fh:
        print("~ Partial List + Titles", "~" * 42, file=fh)
    evaluation(TITLE_ENHANCED)
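# Usage note: with CLEAN set, this script only preprocesses -- it loads the raw
# dump from DATA_PATH, writes the cleaned version to CLEAN_DATA_PATH, and
# returns. Training and evaluation then require a second run with CLEAN unset,
# which reads the cleaned file directly.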
def main():
    """ Evaluates the IRGAN Recommender on the PubMed or EconBiz dataset """
    # per dataset key: (path, first year of the test set, pruning min count)
    CONFIG = {
        'pub': ('/data21/lgalke/datasets/citations_pmc.tsv', 2011, 50),
        'eco': ('/data21/lgalke/datasets/econbiz62k.tsv', 2012, 1)
    }

    print("Loading pre-trained embedding", W2V_PATH)
    vectors = KeyedVectors.load_word2vec_format(W2V_PATH, binary=W2V_IS_BINARY)
    CONDITIONS = ConditionList([
        ('title', PretrainedWordEmbeddingCondition(vectors, dim=0))
    ])

    PARSER = argparse.ArgumentParser()
    PARSER.add_argument('data', type=str, choices=['pub', 'eco'])
    args = PARSER.parse_args()
    DATA = CONFIG[args.data]

    logfile = '/data22/ivagliano/test-irgan/' + args.data + '-decoder.log'
    bags = Bags.load_tabcomma_format(DATA[0])
    c_year = DATA[1]
    evaluate = Evaluation(bags, year=c_year,
                          logfile=logfile).setup(min_count=DATA[2],
                                                 min_elements=2)

    # IRGAN needs explicit user/item counts: users span both splits,
    # items are the columns of the training interaction matrix
    user_num = evaluate.train_set.size()[0] + evaluate.test_set.size()[0]
    item_num = evaluate.train_set.size()[1]
    models = [IRGANRecommender(user_num, item_num, g_epochs=1, d_epochs=1,
                               n_epochs=1, conditions=CONDITIONS)]
    evaluate(models)
def main(year, min_count=None, outfile=None, drop=1):
    """ Main function for training and evaluating AAE methods on IREON data """
    if CLEAN:
        print("Loading data from", DATA_PATH)
        papers = load(DATA_PATH)
        print("Cleaning data...")
        clean(CLEAN_DATA_PATH, papers)
        print("Clean data in {}".format(CLEAN_DATA_PATH))
        return

    print("Loading data from", CLEAN_DATA_PATH)
    papers = load(CLEAN_DATA_PATH)
    print("Unpacking IREON data...")
    # bags_of_papers, ids, side_info = unpack_papers(papers)
    bags_of_papers, ids, side_info = unpack_papers_conditions(papers)
    del papers
    bags = Bags(bags_of_papers, ids, side_info)

    if args.compute_mi:  # relies on the module-level `args`
        from aaerec.utils import compute_mutual_info
        print("[MI] Dataset: IREON (fiv)")
        print("[MI] min Count:", min_count)
        tmp = bags.build_vocab(min_count=min_count, max_features=None)
        mi = compute_mutual_info(tmp, conditions=None, include_labels=True,
                                 normalize=True)
        with open('mi.csv', 'a') as mifile:
            print('IREON', min_count, mi, sep=',', file=mifile)
        print("=" * 78)
        exit(0)

    log("Whole dataset:", logfile=outfile)
    log(bags, logfile=outfile)
    evaluation = Evaluation(bags, year, logfile=outfile)
    evaluation.setup(min_count=min_count, min_elements=2, drop=drop)

    # Use only the partial citation/label lists (no additional metadata)
    with open(outfile, 'a') as fh:
        print("~ Partial List", "~" * 42, file=fh)
    evaluation(BASELINES + RECOMMENDERS)

    # Use additional metadata (as defined in CONDITIONS) for all models
    # but SVD, which uses only titles
    with open(outfile, 'a') as fh:
        print("~ Conditioned Models", "~" * 42, file=fh)
    evaluation(CONDITIONED_MODELS)
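# The compute_mi branch above appends one comma-separated row per run
# (dataset, min_count, normalized MI) to mi.csv. A minimal sketch for reading
# those rows back, assuming only the format produced by the print() call above:
import csv

with open('mi.csv') as fh:
    for dataset, min_count, mi in csv.reader(fh):
        print("{:>8} (min_count={:>4}): MI={}".format(dataset, min_count, mi))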
def main(year, dataset, min_count=None, outfile=None, drop=1):
    """ Main function for training and evaluating AAE methods on DBLP data """
    path = DATA_PATH + ("dblp-ref/" if dataset == "dblp" else "acm.txt")
    print("Loading data from", path)
    papers = papers_from_files(path, dataset, n_jobs=4)
    print("Unpacking {} data...".format(dataset))
    bags_of_papers, ids, side_info = unpack_papers(papers)
    del papers
    bags = Bags(bags_of_papers, ids, side_info)

    if args.compute_mi:  # relies on the module-level `args`
        from aaerec.utils import compute_mutual_info
        print("[MI] Dataset:", dataset)
        print("[MI] min Count:", min_count)
        tmp = bags.build_vocab(min_count=min_count, max_features=None)
        mi = compute_mutual_info(tmp, conditions=None, include_labels=True,
                                 normalize=True)
        with open('mi.csv', 'a') as mifile:
            print(dataset, min_count, mi, sep=',', file=mifile)
        print("=" * 78)
        exit(0)

    log("Whole dataset:", logfile=outfile)
    log(bags, logfile=outfile)
    evaluation = Evaluation(bags, year, logfile=outfile)
    evaluation.setup(min_count=min_count, min_elements=2, drop=drop)

    # To evaluate the baselines and/or the recommenders without metadata,
    # uncomment:
    # with open(outfile, 'a') as fh:
    #     print("~ Partial List", "~" * 42, file=fh)
    # evaluation(BASELINES + RECOMMENDERS)
    # evaluation(RECOMMENDERS, batch_size=1000)

    with open(outfile, 'a') as fh:
        print("~ Partial List + Titles + Author + Venue", "~" * 42, file=fh)
    # To evaluate SVD with titles, use evaluation(TITLE_ENHANCED) instead
    evaluation(CONDITIONED_MODELS, batch_size=1000)
def main():
    """ Evaluates the VAE Recommender """
    # per dataset key: (path, first year of the test set, pruning min count)
    CONFIG = {
        'pub': ('/data21/lgalke/datasets/citations_pmc.tsv', 2011, 50),
        'eco': ('/data21/lgalke/datasets/econbiz62k.tsv', 2012, 1)
    }

    PARSER = argparse.ArgumentParser()
    PARSER.add_argument('data', type=str, choices=['pub', 'eco'])
    args = PARSER.parse_args()
    DATA = CONFIG[args.data]

    logfile = '/data22/ivagliano/test-vae/' + args.data + '-hyperparams-opt.log'
    bags = Bags.load_tabcomma_format(DATA[0])
    c_year = DATA[1]
    evaluate = Evaluation(bags, year=c_year,
                          logfile=logfile).setup(min_count=DATA[2],
                                                 min_elements=2)

    print("Loading pre-trained embedding", W2V_PATH)
    vectors = KeyedVectors.load_word2vec_format(W2V_PATH, binary=W2V_IS_BINARY)
    params = {
        # 'n_epochs': 10,
        'batch_size': 100,
        'optimizer': 'adam',
        # 'normalize_inputs': True,
    }
    CONDITIONS = ConditionList([('title',
                                 PretrainedWordEmbeddingCondition(vectors))])

    # Best so far: 100 hidden units, 200 epochs, bernoulli prior,
    # normalized inputs -> 0.174
    # Already explored:
    # activations = ['ReLU', 'SELU']
    # lrs = [(0.001, 0.0005), (0.001, 0.001)]
    hcs = [(100, 50), (300, 100)]
    epochs = [50, 100, 200, 500]
    # dropouts = [(.2, .2), (.1, .1), (.1, .2), (.25, .25), (.3, .3)]  # (.2, .2) is best
    # priors = ['categorical']  # gauss is best
    # normal = [True, False]
    # bernoulli was good; categorical turned out not to be better

    import itertools
    models = [
        VAERecommender(conditions=CONDITIONS, **params, n_hidden=hc[0],
                       n_code=hc[1], n_epochs=e)
        for hc, e in itertools.product(hcs, epochs)
    ]
    # models = [VAERecommender(conditions=CONDITIONS, **params)]
    evaluate(models)
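# The grid above is expanded with itertools.product: each (n_hidden, n_code)
# pair is combined with each epoch count, i.e. 2 x 4 = 8 VAE configurations.
# A standalone sketch of that expansion (prints the parameter tuples only):
import itertools

hcs = [(100, 50), (300, 100)]
epochs = [50, 100, 200, 500]
for (n_hidden, n_code), n_epochs in itertools.product(hcs, epochs):
    print("VAE config: n_hidden={}, n_code={}, n_epochs={}".format(
        n_hidden, n_code, n_epochs))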
def main(year, dataset, min_count=None, outfile=None):
    """ Main function for training and evaluating AAE methods on DBLP data """
    path = DATA_PATH + ("dblp-ref/" if dataset == "dblp" else "acm.txt")
    print("Loading data from", path)
    papers = papers_from_files(path, dataset, n_jobs=4)
    print("Unpacking {} data...".format(dataset))
    bags_of_papers, ids, side_info = unpack_papers(papers)
    del papers
    bags = Bags(bags_of_papers, ids, side_info)

    log("Whole dataset:", logfile=outfile)
    log(bags, logfile=outfile)
    evaluation = Evaluation(bags, year, logfile=outfile)
    evaluation.setup(min_count=min_count, min_elements=2)
    print("Loading pre-trained embedding", W2V_PATH)

    with open(outfile, 'a') as fh:
        print("~ Partial List", "~" * 42, file=fh)
    evaluation(BASELINES + RECOMMENDERS)

    with open(outfile, 'a') as fh:
        print("~ Partial List + Titles", "~" * 42, file=fh)
    evaluation(TITLE_ENHANCED)
PARSER = argparse.ArgumentParser()
PARSER.add_argument('dataset', type=str, help='path to dataset')
PARSER.add_argument('year', type=int, help='First year of the testing set.')
PARSER.add_argument('-m', '--min-count', type=int, help='Pruning parameter',
                    default=50)
PARSER.add_argument('-o', '--outfile', type=str, default=None)
ARGS = PARSER.parse_args()

DATASET = Bags.load_tabcomma_format(ARGS.dataset, unique=True)
EVAL = Evaluation(DATASET, ARGS.year, logfile=ARGS.outfile)
EVAL.setup(min_count=ARGS.min_count, min_elements=2)
print("Loading pre-trained embedding", W2V_PATH)
VECTORS = KeyedVectors.load_word2vec_format(W2V_PATH, binary=W2V_IS_BINARY)

BASELINES = [
    # RandomBaseline(),
    # MostPopular(),
    Countbased(),
    SVDRecommender(1000, use_title=False),
]

ae_params = {
    'n_code': 50,
    'n_epochs': 100,
    'embedding': VECTORS,
def main(year, dataset, min_count=None, outfile=None, drop=1,
         baselines=False, autoencoders=False, conditioned_autoencoders=False,
         all_metadata=True):
    """ Main function for training and evaluating AAE methods on DBLP data """
    assert baselines or autoencoders or conditioned_autoencoders, \
        "Please specify what to run"

    if all_metadata:
        # V2 - all metadata
        CONDITIONS = ConditionList([
            ('title', PretrainedWordEmbeddingCondition(VECTORS)),
            ('venue', PretrainedWordEmbeddingCondition(VECTORS)),
            ('author', CategoricalCondition(embedding_dim=32, reduce="sum",
                                            # vocab_size=0.01,
                                            sparse=False,
                                            embedding_on_gpu=True))
        ])
    else:
        # V1 - only title metadata
        CONDITIONS = ConditionList([
            ('title', PretrainedWordEmbeddingCondition(VECTORS))
        ])
    # CONDITIONS defined

    ALL_MODELS = []
    if baselines:
        # Models without metadata
        BASELINES = [
            # RandomBaseline(),
            # MostPopular(),
            Countbased(),
            SVDRecommender(1000, use_title=False)
        ]
        ALL_MODELS += BASELINES
        if not all_metadata:
            # SVD can use only titles, not generic conditions
            ALL_MODELS += [SVDRecommender(1000, use_title=True)]

    if autoencoders:
        AUTOENCODERS = [
            AAERecommender(adversarial=False, conditions=None, lr=0.001,
                           **AE_PARAMS),
            AAERecommender(adversarial=True, conditions=None, gen_lr=0.001,
                           reg_lr=0.001, **AE_PARAMS),
            VAERecommender(conditions=None, **AE_PARAMS),
            DAERecommender(conditions=None, **AE_PARAMS)
        ]
        ALL_MODELS += AUTOENCODERS

    if conditioned_autoencoders:
        # Models with metadata (used as set in CONDITIONS above)
        CONDITIONED_AUTOENCODERS = [
            AAERecommender(adversarial=False, conditions=CONDITIONS, lr=0.001,
                           **AE_PARAMS),
            AAERecommender(adversarial=True, conditions=CONDITIONS,
                           gen_lr=0.001, reg_lr=0.001, **AE_PARAMS),
            DecodingRecommender(CONDITIONS, n_epochs=100, batch_size=1000,
                                optimizer='adam', n_hidden=100, lr=0.001,
                                verbose=True),
            VAERecommender(conditions=CONDITIONS, **AE_PARAMS),
            DAERecommender(conditions=CONDITIONS, **AE_PARAMS)
        ]
        ALL_MODELS += CONDITIONED_AUTOENCODERS

    print("Finished preparing models:", *ALL_MODELS, sep='\n\t')

    path = DATA_PATH + ("dblp-ref/" if dataset == "dblp" else "acm.txt")
    print("Loading data from", path)
    papers = papers_from_files(path, dataset, n_jobs=4)
    print("Unpacking {} data...".format(dataset))
    bags_of_papers, ids, side_info = unpack_papers(papers)
    del papers
    bags = Bags(bags_of_papers, ids, side_info)

    if args.compute_mi:  # relies on the module-level `args`
        from aaerec.utils import compute_mutual_info
        print("[MI] Dataset:", dataset)
        print("[MI] min Count:", min_count)
        tmp = bags.build_vocab(min_count=min_count, max_features=None)
        mi = compute_mutual_info(tmp, conditions=None, include_labels=True,
                                 normalize=True)
        with open('mi.csv', 'a') as mifile:
            print(dataset, min_count, mi, sep=',', file=mifile)
        print("=" * 78)
        exit(0)

    log("Whole dataset:", logfile=outfile)
    log(bags, logfile=outfile)
    evaluation = Evaluation(bags, year, logfile=outfile)
    evaluation.setup(min_count=min_count, min_elements=2, drop=drop)

    with open(outfile, 'a') as fh:
        print("~ Partial List + Titles + Author + Venue", "~" * 42, file=fh)
    evaluation(ALL_MODELS, batch_size=1000)
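# Hypothetical entry point for the main() above. The actual argument parser of
# this script is not part of this excerpt, so the flag names below are
# assumptions modeled on the parser shown earlier in this section.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('dataset', type=str, choices=['dblp', 'acm'])
    parser.add_argument('year', type=int, help='First year of the test set')
    parser.add_argument('-m', '--min-count', type=int, default=50)
    parser.add_argument('-o', '--outfile', type=str, default=None)
    parser.add_argument('--drop', type=int, default=1)
    parser.add_argument('--compute-mi', action='store_true')  # read by main() via the module-level args
    parser.add_argument('--baselines', action='store_true')
    parser.add_argument('--autoencoders', action='store_true')
    parser.add_argument('--conditioned-autoencoders', action='store_true')
    parser.add_argument('--all-metadata', action='store_true')
    args = parser.parse_args()
    main(args.year, args.dataset, min_count=args.min_count,
         outfile=args.outfile, drop=args.drop, baselines=args.baselines,
         autoencoders=args.autoencoders,
         conditioned_autoencoders=args.conditioned_autoencoders,
         all_metadata=args.all_metadata)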