def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', type=str, default='aae',
                        # All possible methods should appear here
                        choices=['cm', 'svd', 'ae', 'aae', 'mlp'],
                        help="Specify the model to use [aae]")
    parser.add_argument('--epochs', type=int, default=20,
                        help="Specify the number of training epochs [20]")
    parser.add_argument('--hidden', type=int, default=200,
                        help="Number of hidden units [200]")
    parser.add_argument('--no-title', action='store_false', default=True,
                        dest='use_title',
                        help="Do not use the playlist titles")
    parser.add_argument('--max-items', type=int, default=75000,
                        help="Limit the max number of considered items")
    parser.add_argument('--vocab-size', type=int, default=50000,
                        help="Limit the max number of distinct condition words")
    parser.add_argument('-j', '--jobs', type=int, default=4,
                        help="Number of jobs for data loading [4]")
    parser.add_argument('-o', '--outfile', default="submission.csv", type=str,
                        help="Write submissions to this path")
    parser.add_argument('--use-embedding', default=False, action='store_true',
                        help="Use embedding (SGNS GoogleNews) [false]")
    parser.add_argument('--dont-aggregate', action='store_false',
                        dest='aggregate', default=True,
                        help="Do not aggregate track metadata as side info input")
    parser.add_argument('--debug', action='store_true', default=False,
                        help="Activate debug mode, run only on small sample")
    parser.add_argument('-x', '--exclude', type=argparse.FileType('r'),
                        default=None,
                        help="Path to file with slice filenames to exclude for training")
    parser.add_argument('--dev', type=str, default=None,
                        help="Path to dev set, use in combination with (-x, --exclude)")
    parser.add_argument('--no-idf', action='store_false', default=True,
                        dest='use_idf',
                        help="Do **not** use idf re-weighting")
    parser.add_argument('--lr', type=float, default=0.001,
                        help="Initial learning rate [0.001]")
    parser.add_argument('--code', type=int, default=100,
                        help="Code dimension [100]")
    args = parser.parse_args()

    # Either exclude and dev set, or no exclude and test set
    assert (args.dev is None) == (args.exclude is None)
    if args.dev is not None:
        print("Making submission for dev set:", args.dev)
        assert os.path.isfile(args.dev)

    # Dump args into submission file
    if os.path.exists(args.outfile) and \
            input("Path '{}' exists. Overwrite? [y/N] "
                  .format(args.outfile)) != 'y':
        exit(-1)

    with open(args.outfile, 'w') as out:
        print('#', args, file=out)

    if args.use_embedding:
        print("Loading embedding:", W2V_PATH)
        vectors = KeyedVectors.load_word2vec_format(W2V_PATH, binary=W2V_IS_BINARY)
    else:
        vectors = None

    # Create the model as specified by command line args
    # Count-based never uses title
    # Decoding recommender always uses title
    tfidf_params = {'max_features': args.vocab_size, 'use_idf': args.use_idf}
    model = {
        'cm': Countbased(),
        'svd': SVDRecommender(use_title=args.use_title),
        'ae': AAERecommender(use_title=args.use_title,
                             adversarial=False,
                             n_hidden=args.hidden,
                             n_code=args.code,
                             n_epochs=args.epochs,
                             embedding=vectors,
                             lr=args.lr,
                             tfidf_params=tfidf_params),
        'aae': AAERecommender(use_title=args.use_title,
                              adversarial=True,
                              n_hidden=args.hidden,
                              n_code=args.code,
                              n_epochs=args.epochs,
                              gen_lr=args.lr,
                              reg_lr=args.lr,  # same generator and regularizer lrs
                              embedding=vectors,
                              tfidf_params=tfidf_params),
        'mlp': DecodingRecommender(n_epochs=args.epochs,
                                   n_hidden=args.hidden,
                                   embedding=vectors,
                                   tfidf_params=tfidf_params)
    }[args.model]

    track_attrs = TRACK_INFO if args.aggregate else None

    if args.exclude is not None:
        # Dev set case: exclude dev set data from training
        exclude = [line.strip() for line in args.exclude]
    else:
        # Real submission case: do not exclude any training data
        exclude = None

    # = Training =
    print("Loading data from {} using {} jobs".format(DATA_PATH, args.jobs))
    playlists = playlists_from_slices(DATA_PATH, n_jobs=args.jobs,
                                      debug=args.debug, without=exclude)
    print("Unpacking playlists")
    train_set = Bags(*unpack_playlists(playlists, aggregate=track_attrs))

    print("Building vocabulary of {} most frequent items".format(args.max_items))
    vocab, __counts = train_set.build_vocab(max_features=args.max_items, apply=False)
    train_set = train_set.apply_vocab(vocab)
    print("Training set:", train_set, sep='\n')

    print("Training for {} epochs".format(args.epochs))
    try:
        model.train(train_set)
    except KeyboardInterrupt:
        print("Training interrupted by keyboard, pass.")

    # Not required anymore
    del train_set

    # = Predictions =
    if args.dev is not None:
        print("Loading and unpacking DEV set")
        data, index2playlist, side_info = unpack_playlists(load(args.dev),
                                                           aggregate=track_attrs)
    else:
        print("Loading and unpacking test set")
        data, index2playlist, side_info = unpack_playlists(load(TEST_PATH),
                                                           aggregate=track_attrs)
    test_set = Bags(data, index2playlist, side_info)

    # Apply same vocabulary as in training
    test_set = test_set.apply_vocab(vocab)
    print("Test set:", test_set, sep='\n')

    pred = model.predict(test_set)
    if sp.issparse(pred):
        pred = pred.toarray()
    else:
        pred = np.asarray(pred)

    print("Scaling and removing non-missing items")
    pred = remove_non_missing(pred, test_set.tocsr(), copy=False)

    index2trackid = {v: k for k, v in vocab.items()}
    print("Making submission:", args.outfile)
    make_submission(pred, index2playlist, index2trackid, outfile=args.outfile)
    print("Success.")
    print("Make sure to verify the submission format via", VERIFY_SCRIPT)
    'n_epochs': 50,
    'batch_size': 500,
    'n_hidden': 100,
    'normalize_inputs': True,
}

# Models without metadata
BASELINES = [
    # RandomBaseline(),
    # MostPopular(),
    Countbased(),
    SVDRecommender(1000, use_title=False),
]

RECOMMENDERS = [
    AAERecommender(adversarial=False, lr=0.001, **ae_params),
    AAERecommender(prior='gauss', gen_lr=0.001, reg_lr=0.001, **ae_params),
    VAERecommender(conditions=None, **vae_params),
    DAERecommender(conditions=None, **ae_params)
]

# Metadata to use
CONDITIONS = ConditionList([
    ('title', PretrainedWordEmbeddingCondition(VECTORS)),
    # ('author', CategoricalCondition(embedding_dim=32, reduce="sum",
    #                                 sparse=True, embedding_on_gpu=True))
])

# Models with metadata (metadata used as set in CONDITIONS above)
CONDITIONED_MODELS = [
    # TODO: SVD can use only titles, not generic conditions
    # MostPopular(),
    Countbased(),
    SVDRecommender(1000, use_title=False),
]

ae_params = {
    'n_code': 50,
    'n_epochs': 100,
    'embedding': VECTORS,
    'batch_size': 100,
    'n_hidden': 100,
    'normalize_inputs': True,
}

RECOMMENDERS = [
    AAERecommender(use_title=False, adversarial=False, lr=0.001, **ae_params),
    AAERecommender(use_title=False, prior='gauss',
                   gen_lr=0.001, reg_lr=0.001, **ae_params),
]

TITLE_ENHANCED = [
    SVDRecommender(1000, use_title=True),
    DecodingRecommender(n_epochs=100, batch_size=100, optimizer='adam',
                        n_hidden=100, embedding=VECTORS, lr=0.001,
# N_WORDS = 50000
# TFIDF_PARAMS = {'max_features': N_WORDS}

W2V_PATH = "/data21/lgalke/vectors/GoogleNews-vectors-negative300.bin.gz"
W2V_IS_BINARY = True
VECTORS = KeyedVectors.load_word2vec_format(W2V_PATH, binary=W2V_IS_BINARY)

# These need to be implemented in evaluation.py
# (a hedged sketch of MRR follows after this fragment)
METRICS = ['mrr']

MODELS = [
    # Only item sets
    Countbased(),
    SVDRecommender(1000, use_title=False),
    AAERecommender(adversarial=True, use_title=False, n_epochs=55, embedding=VECTORS),
    AAERecommender(adversarial=False, use_title=False, n_epochs=55, embedding=VECTORS),
    # Title-enhanced
    SVDRecommender(1000, use_title=True),
    AAERecommender(adversarial=True, use_title=True, n_epochs=55, embedding=VECTORS),
    AAERecommender(adversarial=False, use_title=True, n_epochs=55, embedding=VECTORS),
    DecodingRecommender(n_epochs=55, embedding=VECTORS)
    # Put more here...
]


def load(path):
    """ Loads a single slice """
    with open(path, 'r') as fhandle:
        obj = json.load(fhandle)
    return obj["playlists"]
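# --- Not part of the original listing: a hedged illustration of MRR. ---
# METRICS = ['mrr'] above refers to an implementation expected in
# evaluation.py.  The sketch below is *not* that implementation; the function
# name and signature are made up for illustration only.  MRR averages the
# reciprocal rank of the first relevant item over all evaluated lists
# (contributing 0 when no relevant item is retrieved).
import numpy as np


def mean_reciprocal_rank_example(scores, relevant):
    """scores: (n_lists, n_items) predicted scores;
    relevant: (n_lists, n_items) binary ground-truth matrix."""
    order = np.argsort(-scores, axis=1)            # best-scored items first
    rr = np.zeros(scores.shape[0])
    for i, ranking in enumerate(order):
        hits = np.nonzero(relevant[i, ranking])[0]  # ranks of relevant items
        if hits.size:
            rr[i] = 1.0 / (hits[0] + 1)             # reciprocal rank of first hit
    return rr.mean()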
from aaerec.aae import AAERecommender, DecodingRecommender

# Should work on kdsrv03
DATA_PATH = "/data21/lgalke/MPD/data/"
DEBUG_LIMIT = None

# Use only this many most frequent items
N_ITEMS = 50000
# Use all present items
# N_ITEMS = None

# These need to be implemented in evaluation.py
METRICS = ['mrr', 'map']

MODELS = [
    Countbased(),
    AAERecommender(adversarial=True, use_title=True, n_epochs=25),
    AAERecommender(adversarial=False, use_title=True, n_epochs=25),
    DecodingRecommender(n_epochs=25)
    # Put more here...
]


def load(path):
    """ Loads a single slice """
    with open(path, 'r') as fhandle:
        obj = json.load(fhandle)
    return obj["playlists"]


def playlists_from_slices(slices_dir, n_jobs=1,
def main(year, dataset, min_count=None, outfile=None, drop=1,
         baselines=False, autoencoders=False, conditioned_autoencoders=False,
         all_metadata=True):
    """ Main function for training and evaluating AAE methods on DBLP data """
    assert baselines or autoencoders or conditioned_autoencoders, \
        "Please specify what to run"

    if all_metadata:
        # V2 - all metadata
        CONDITIONS = ConditionList([
            ('title', PretrainedWordEmbeddingCondition(VECTORS)),
            ('venue', PretrainedWordEmbeddingCondition(VECTORS)),
            ('author', CategoricalCondition(embedding_dim=32, reduce="sum",
                                            # vocab_size=0.01,
                                            sparse=False,
                                            embedding_on_gpu=True))
        ])
    else:
        # V1 - only title metadata
        CONDITIONS = ConditionList([
            ('title', PretrainedWordEmbeddingCondition(VECTORS))
        ])
    #### CONDITIONS defined

    ALL_MODELS = []

    if baselines:
        # Models without metadata
        BASELINES = [
            # RandomBaseline(),
            # MostPopular(),
            Countbased(),
            SVDRecommender(1000, use_title=False)
        ]
        ALL_MODELS += BASELINES
        if not all_metadata:
            # SVD can use only titles, not generic conditions
            ALL_MODELS += [SVDRecommender(1000, use_title=True)]

    if autoencoders:
        AUTOENCODERS = [
            AAERecommender(adversarial=False, conditions=None,
                           lr=0.001, **AE_PARAMS),
            AAERecommender(adversarial=True, conditions=None,
                           gen_lr=0.001, reg_lr=0.001, **AE_PARAMS),
            VAERecommender(conditions=None, **AE_PARAMS),
            DAERecommender(conditions=None, **AE_PARAMS)
        ]
        ALL_MODELS += AUTOENCODERS

    if conditioned_autoencoders:
        # Models with metadata (metadata used as set in CONDITIONS above)
        CONDITIONED_AUTOENCODERS = [
            AAERecommender(adversarial=False, conditions=CONDITIONS,
                           lr=0.001, **AE_PARAMS),
            AAERecommender(adversarial=True, conditions=CONDITIONS,
                           gen_lr=0.001, reg_lr=0.001, **AE_PARAMS),
            DecodingRecommender(CONDITIONS, n_epochs=100, batch_size=1000,
                                optimizer='adam', n_hidden=100, lr=0.001,
                                verbose=True),
            VAERecommender(conditions=CONDITIONS, **AE_PARAMS),
            DAERecommender(conditions=CONDITIONS, **AE_PARAMS)
        ]
        ALL_MODELS += CONDITIONED_AUTOENCODERS

    print("Finished preparing models:", *ALL_MODELS, sep='\n\t')

    path = DATA_PATH + ("dblp-ref/" if dataset == "dblp" else "acm.txt")
    print("Loading data from", path)
    papers = papers_from_files(path, dataset, n_jobs=4)
    print("Unpacking {} data...".format(dataset))
    bags_of_papers, ids, side_info = unpack_papers(papers)
    del papers
    bags = Bags(bags_of_papers, ids, side_info)

    # NOTE: relies on a module-level `args` namespace (assumed to be parsed
    # by argparse when the file is run as a script)
    if args.compute_mi:
        from aaerec.utils import compute_mutual_info
        print("[MI] Dataset:", dataset)
        print("[MI] min Count:", min_count)
        tmp = bags.build_vocab(min_count=min_count, max_features=None)
        mi = compute_mutual_info(tmp, conditions=None, include_labels=True,
                                 normalize=True)
        with open('mi.csv', 'a') as mifile:
            print(dataset, min_count, mi, sep=',', file=mifile)
        print("=" * 78)
        exit(0)

    log("Whole dataset:", logfile=outfile)
    log(bags, logfile=outfile)
    evaluation = Evaluation(bags, year, logfile=outfile)
    evaluation.setup(min_count=min_count, min_elements=2, drop=drop)

    with open(outfile, 'a') as fh:
        print("~ Partial List + Titles + Author + Venue", "~" * 42, file=fh)

    evaluation(ALL_MODELS, batch_size=1000)
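# --- Not part of the original listing: a hedged sketch of possible CLI wiring. ---
# main() above takes its settings as function arguments and also reads a
# module-level `args.compute_mi`.  The flag names below are assumptions made
# only for illustration and may differ from the project's actual script;
# all_metadata is left at its default here.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('year', type=int)
    parser.add_argument('dataset', choices=['dblp', 'acm'])
    parser.add_argument('--min-count', type=int, default=None)
    parser.add_argument('--outfile', type=str, default=None)
    parser.add_argument('--drop', type=float, default=1)
    parser.add_argument('--baselines', action='store_true')
    parser.add_argument('--autoencoders', action='store_true')
    parser.add_argument('--conditioned-autoencoders', action='store_true')
    parser.add_argument('--compute-mi', action='store_true')
    args = parser.parse_args()
    main(args.year, args.dataset, min_count=args.min_count,
         outfile=args.outfile, drop=args.drop, baselines=args.baselines,
         autoencoders=args.autoencoders,
         conditioned_autoencoders=args.conditioned_autoencoders)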