def runMain():
    if utils.inputMatch([]):
        utils.print_header('expe')
        utils.print_option("step1", "launch cluster, and cut image")
        utils.print_option("step2", "deploy image on hdfs")
        utils.print_option("step3", "get status of image")
        utils.print_option("step4", "produce L2")
        utils.print_option("step5", "view produced image")
        utils.print_option("view", "view distributed")
        utils.print_option("clean", "clear hdfs")
        utils.print_option("full", "run all")
        utils.print_option("run-perou", "run perou")
        utils.print_option("view-perou", "view perou")
        utils.print_option("full-chine", "cluster -> china")
        utils.print_option("run-chine", "run china")
        utils.print_option("deep-restore", "restore deep models")
        utils.print_option("deep-sync", "sync deep models")
        utils.print_option("deep-download", "download deep models")
        utils.print_header('')

    if utils.inputMatch(['deep-restore']):
        restoreDeep()
    if utils.inputMatch(['deep-sync']):
        syncDeep()
    if utils.inputMatch(['deep-download']):
        downloadDeep()
    if utils.inputMatch(['deep-retrosync']):
        retrosyncDeep()
    if utils.inputMatch(['deep-tar']):
        uploadDeep()
    if utils.inputMatch(['run-perou']):
        runBigPerou()
    if utils.inputMatch(['view-perou']):
        viewPerou()
    if utils.inputMatch(['view']):
        viewProductionDistributed()
    if utils.inputMatch(['clean']):
        cleanHdfs()
    if utils.inputMatch(['full']):
        full()
    if utils.inputMatch(['full-chine']):
        fullChine()
    # NOTE: 'run-chine' appears in the menu above but has no handler here.
    if utils.inputMatch(['step1']):
        main.setupFull()
        additionPython()
        cutBrisbane()
    if utils.inputMatch(['step1b']):
        cutBrisbane()
    if utils.inputMatch(['step2']):
        storeBrisbaneOnHdfs()
    if utils.inputMatch(['step3']):
        getFileStatus()
    if utils.inputMatch(['step4']):
        runProduction()
    if utils.inputMatch(['step5']):
        viewProduction()
    if utils.inputMatch(['kill']):
        cluster.instanceKillAll()
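# Example invocations of the menu above, assuming this module is the script's
# entry point (the 'expe.py' file name is illustrative, not from the repo):
#   python expe.py            -> no arguments: print the option list
#   python expe.py step1      -> launch the cluster and cut the image
#   python expe.py full       -> run all steps end to end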
utils.print_option('cluster', 'cluster launch, kill, ssh etc', hassub=True)
utils.print_option('commondata', 'tools for handling the common data disk', hassub=True)
utils.print_option('hadoop', 'generates hadoop files and uploads them', hassub=True)
utils.print_option('spark', 'generates spark files and uploads them', hassub=True)
utils.print_option('package', 'interact with package distribution', hassub=True)
# utils.print_option('test')
# utils.print_option('interact')
# utils.print_option('rsync')
utils.print_header("")

if utils.inputMatch(['full']):
    setupFull()
if utils.inputMatch(['kill']):
    cluster.instanceKillAll()
# NOTE: 'commondata' appears in the menu above but has no handler here.
if utils.inputMatch(['cluster'], doPartial=True):
    # Drop the matched token so the sub-command parser sees its own argv.
    sys.argv = sys.argv[1:]
    cluster.main()
if utils.inputMatch(['hadoop'], doPartial=True):
    sys.argv = sys.argv[1:]
    hadoop.main()
if utils.inputMatch(['spark'], doPartial=True):
    sys.argv = sys.argv[1:]
    spark.main()
if utils.inputMatch(['package'], doPartial=True):
    sys.argv = sys.argv[1:]
    package.main()
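# Both dispatchers lean on a small `utils` module that is not shown in this
# section. Below is a minimal sketch of what those helpers could look like,
# assuming sys.argv-based matching; names and signatures are inferred from the
# call sites above, not taken from the real utils module.
import sys

def inputMatch(expected, doPartial=False):
    # Match the command-line arguments against an expected token list.
    # An empty list matches a bare invocation (used to print the menu);
    # doPartial=True matches a prefix, so sub-commands can carry extra args.
    given = sys.argv[1:]
    if doPartial:
        return given[:len(expected)] == expected
    return given == expected

def print_header(title):
    print('---- %s ----' % title if title else '-' * 20)

def print_option(name, description='', hassub=False):
    # hassub marks options that delegate to their own sub-command parser.
    label = name + (' ...' if hassub else '')
    print('  %-18s %s' % (label, description))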