# NOTE(review): whitespace-mangled fragment — the original line breaks and
# indentation were stripped, so this single physical line is not valid Python
# as written. It appears to be (a) the tail of a `train_encoder(...)` function:
# the end of a trainer-construction keyword-argument list, an `elif`/`else`
# chain dispatching on `args.method`, a `trainer.pre_train(tr_eps, val_eps,
# test_eps)` call whose 4-tuple result is stored into `results[trial][0..3]`
# (presumably inside a per-trial loop — TODO confirm), CSV export via
# `np.savetxt`, and elapsed-time reporting; followed by (b) the script entry
# point (`if __name__ == "__main__":` parsing args and calling
# `train_encoder(args)`). Because the enclosing function and loop begin before
# this chunk, the missing indentation structure cannot be reconstructed here
# without guessing — restore this file from version control instead of
# hand-repairing it.
test_labels=test_labels, wandb="wandb", trial=str(trial), ) elif args.method == "sub-enc-lstm": print("Change method to sub-lstm") else: assert False, "method {} has no trainer".format(args.method) ( results[trial][0], results[trial][1], results[trial][2], results[trial][3], ) = trainer.pre_train(tr_eps, val_eps, test_eps) np_results = results.numpy() tresult_csv = os.path.join(args.path, "test_results" + sID + ".csv") np.savetxt(tresult_csv, np_results, delimiter=",") elapsed = time.time() - start_time print("total time = ", elapsed) if __name__ == "__main__": parser = get_argparser() args = parser.parse_args() tags = ["pretraining-only"] config = {} config.update(vars(args)) train_encoder(args)
# -*- coding: utf-8 -*-
# Script preamble: parse CLI args, download/bootstrap Spark, create the Spark
# session, and ship ridge_regression.py to the executors.
#
# NOTE(review): in the mangled original, this whole chunk was collapsed onto a
# single physical line beginning with `# -*- coding: utf-8 -*-`, which turned
# ALL of this code into one comment (i.e. it never executed). The line
# structure below is restored from the unambiguous statement boundaries.

import os

import sklearn.metrics
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.regression import LinearRegression
from pyspark.storagelevel import StorageLevel

## CUSTOM IMPORT
import conf
from src import american_community_survey as amc
from src import utils
from src import download_spark

## START
# Initiate the parser
args = utils.get_argparser().parse_args()

utils.printNowToFile("starting:")

# Fetch a Spark distribution into the working directory before building the
# session (presumably a no-op if already present — TODO confirm in
# download_spark).
utils.printNowToFile("downloading spark")
download_spark.download(os.getcwd())

###############################################################
# Build the Spark session: use an explicit master host/port when both are
# given on the command line, otherwise fall back to the default configuration.
if args.host and args.port:
    spark = conf.load_conf(args.host, args.port)
else:
    spark = conf.load_conf_default()

# Distribute ridge_regression.py to every executor so it can be imported on
# the workers as well as here on the driver.
spark.sparkContext.addPyFile('ridge_regression.py')
import ridge_regression as rr