def get_json_from_cloud(date):
    """Download ``<date>.json`` from cloud storage and return its parsed contents.

    Args:
        date: Date string used as both the remote file stem and the local
            filename (e.g. '1021' -> '1021.json').

    Returns:
        The deserialized JSON payload of the downloaded file.
    """
    filename = date + '.json'
    # d.main fetches the named file into the current working directory.
    d.main(is_download_file_function=True,  # was bool(True): redundant conversion
           download_drive_service_name=filename,
           download_file_path=os.getcwd() + '/')
    with open(filename, 'r', encoding='utf-8') as f:
        dataset = json.load(f)
    return dataset
def get_fine_dataset_date():
    """Download ``fine_dataset.json`` from cloud storage and return the date list.

    Returns:
        A list of date strings (e.g. ['1021', '1022', ...]) parsed from the
        downloaded JSON file.
    """
    filename = 'fine_dataset.json'
    # d.main fetches the named file into the current working directory.
    d.main(is_download_file_function=True,  # was bool(True): redundant conversion
           download_drive_service_name=filename,
           download_file_path=os.getcwd() + '/')
    with open(filename, 'r', encoding='utf-8') as f:
        date_list = json.load(f)
    return date_list
def main(args):
    """Main running script.

    Builds the directory layout declared in the config file, then runs each
    pipeline stage (clean, download, train, predict, qaqc, plot) whose flag
    was set on the command line.

    Args:
        args: Parsed CLI namespace with a ``config`` path and boolean flags
            ``clean``, ``download``, ``train``, ``predict``, ``qaqc``, ``plot``.
    """
    # Get the config file
    config = util.get_config(args.config)
    root_dir = config['ROOT_DIR']

    # fill out initial folders
    _ensure_dir('{}/metadata'.format(root_dir), 'metadata')
    _ensure_dir('{}'.format(config['OBS_ROOT']), 'OBS')
    _ensure_dir('{}'.format(config['ESTIMATORS_ROOT']), 'ESTIMATORS')
    _ensure_dir('{}'.format(config['PREDICTIONS_ROOT']), 'PREDICTIONS')
    _ensure_dir('{}'.format(config['QAQC_ROOT']), 'QAQC')
    _ensure_dir('{}'.format(config['PLOT_ROOT']), 'PLOT')

    # --- clean database ---  (was mislabeled '--- download data ---')
    if args.clean:
        clean.main(config)
    else:
        print('skipping database cleaning')

    # --- download data ---
    if args.download:
        download.main(config)
    else:
        print('skipping download of new data')

    # --- train models ---
    if args.train:
        train.main(config)
    else:
        print('skip training')

    # --- make predictions ---
    if args.predict:
        predict.main(config)
    else:
        # BUG FIX: this message previously read 'skipping download of new
        # data' (copy-paste from the download branch).
        print('skipping predictions')

    # --- run qaqc checks ---
    if args.qaqc:
        qaqc.main(config)
    else:
        print('skipping qaqc')

    # --- plot ---
    if args.plot:
        plot.main(config)
    else:
        print('skipping plots')


def _ensure_dir(path, label):
    """Create *path* if it does not exist and announce the creation.

    Mirrors the original repeated isdir/mkdir/print pattern exactly.
    """
    if not os.path.isdir(path):
        os.mkdir(path)
        print('created {} dir'.format(label))
def test_valid_download_additional_files(self, valid_credentials_path, monkeypatch):
    """Downloading an apk that ships extra (.obb) files succeeds end to end."""
    # Build the argv the tool would normally receive on the command line.
    cmd = '"{0}" -b -c "{1}"'.format(APK_WITH_OBB, valid_credentials_path)
    parsed = download.get_cmd_args(cmd.split())
    # Replace the parser so main() picks up the pre-built arguments.
    monkeypatch.setattr(download, 'get_cmd_args', lambda: parsed)
    # Success is implicit: if this runs without errors the apk and its
    # additional files land in the Downloads folder next to download.py.
    download.main()
def test_download_wrong_credentials(self, download_folder_path, wrong_credentials_path, monkeypatch):
    """Invalid credentials must make the tool abort with exit code 1."""
    # Parse a command line that points at credentials known to be invalid.
    cmd = '"{0}" -c "{1}"'.format(VALID_PACKAGE_NAME, wrong_credentials_path)
    parsed = download.get_cmd_args(cmd.split())
    monkeypatch.setattr(download, "get_cmd_args", lambda: parsed)
    with pytest.raises(SystemExit) as err:
        download.main()
    assert err.value.code == 1
def test_valid_download_default_location(self, valid_credentials_path, monkeypatch):
    """A plain download with valid credentials completes without error."""
    # Pre-parse the command line and feed it to main() via the mocked parser.
    cmd = '"{0}" -c "{1}"'.format(VALID_PACKAGE_NAME, valid_credentials_path)
    parsed = download.get_cmd_args(cmd.split())
    monkeypatch.setattr(download, "get_cmd_args", lambda: parsed)
    # Success is implicit: if this runs without errors the apk is saved in
    # the Downloads folder created next to download.py.
    download.main()
def test_valid_download_split_apk(self, valid_credentials_path, monkeypatch):
    """Downloading a package that uses split apks succeeds end to end."""
    # Pre-parse the command line and feed it to main() via the mocked parser.
    cmd = '"{0}" -c "{1}"'.format(APK_WITH_SPLIT_APK, valid_credentials_path)
    parsed = download.get_cmd_args(cmd.split())
    monkeypatch.setattr(download, "get_cmd_args", lambda: parsed)
    # Success is implicit: the apk and its split apk(s) end up in the
    # Downloads folder created next to download.py.
    download.main()
def Download(Keyword):
    """Download up to 50 videos for *Keyword*, filter them, and reply with JSON."""
    amount = '50'
    print("start download :", Keyword, "...")
    download.main(service, Keyword, amount)
    # Post-process the downloaded videos in a separate interpreter.
    subprocess.call(['python', 'video_filter.py', Keyword, amount])
    # Hand a CORS-friendly JSON completion flag back to the caller.
    resp = Response(json.dumps({"finish": True}))
    for header, value in (('Access-Control-Allow-Origin', '*'),
                          ('Content-Type', 'application/json')):
        resp.headers[header] = value
    return resp
def test_download_app_details_error(self, valid_credentials_path, monkeypatch):
    """A failed app-details lookup must make the tool exit with code 1."""
    # Feed main() a pre-parsed, otherwise valid command line.
    cmd = '"{0}" -c "{1}"'.format(VALID_PACKAGE_NAME, valid_credentials_path)
    parsed = download.get_cmd_args(cmd.split())
    monkeypatch.setattr(download, "get_cmd_args", lambda: parsed)
    # Force the details lookup to fail so the error path is exercised.
    monkeypatch.setattr(Playstore, "app_details", lambda self, package: None)
    with pytest.raises(SystemExit) as err:
        download.main()
    assert err.value.code == 1
def test_download(self):
    """End-to-end check: the it-ebooks download CLI saves the PDF and returns 0."""
    link = 'http://filepi.com/i/RSpHA1T'
    book_id = '1529159300'
    title = 'Expert Oracle and Java Security'
    argv = 'program --download-from-itebooks {} {} {}'.format(
        link, book_id, title).split()
    with patch.object(sys, 'argv', argv):
        ret = download.main(self.wf)
    expected_pdf = os.path.join(
        os.path.expanduser(itebooks.default_download_folder),
        'Expert Oracle and Java Security.pdf')
    self.assertEqual(ret, 0)
    self.assertTrue(os.path.exists(expected_pdf))
    # Clean up the artifact; ignore the error if it was never created.
    try:
        os.remove(expected_pdf)
    except OSError:
        pass
def test_valid_download_specific_location(self, download_folder_path, valid_credentials_path, monkeypatch):
    """The -o flag must place the downloaded apk exactly at the given path."""
    target_apk = (
        f"{os.path.join(download_folder_path, VALID_PACKAGE_NAME)}.apk")
    # Pre-parse a command line that pins the output location with -o.
    parsed = download.get_cmd_args(
        f'"{VALID_PACKAGE_NAME}" '
        f'-c "{valid_credentials_path}" '
        f'-o "{target_apk}"'.split())
    monkeypatch.setattr(download, "get_cmd_args", lambda: parsed)
    download.main()
    # The apk must exist exactly where -o pointed.
    assert os.path.isfile(target_apk) is True
def test_valid_download_specific_location(self, download_folder_path, valid_credentials_path, monkeypatch):
    """The -o flag must place the downloaded apk exactly at the given path."""
    target_apk = "{0}.apk".format(
        os.path.join(download_folder_path, VALID_PACKAGE_NAME))
    # Build and pre-parse a command line that pins the output path via -o.
    cmd = '"{0}" -c "{1}" -o "{2}"'.format(
        VALID_PACKAGE_NAME, valid_credentials_path, target_apk)
    parsed = download.get_cmd_args(cmd.split())
    monkeypatch.setattr(download, "get_cmd_args", lambda: parsed)
    download.main()
    # The apk must land exactly where -o pointed.
    assert os.path.isfile(target_apk) is True
def interface_download(self):
    """Run the downloader with values from the UI, echoing output to the console pane."""
    self.top_module.console_clear()
    # Redirect the downloader's printing into the GUI console widget.
    download.f_print = self.print_to_console
    # Assemble the argument object the CLI entry point expects.
    args = Arg()
    args.semester = self.lineEdit_semester.text()
    args.i = self.lineEdit_input.text()
    args.download = True
    # Persist the chosen values as the defaults for the next session.
    self.settings.setValue('DefaultSemester', args.semester)
    self.settings.setValue('DefaultConfigFile', args.i)
    # run
    try:
        download.main(args)
    except Exception as exc:
        # BUG FIX: the failure reason was silently swallowed before; keep the
        # original user-facing message ("执行错误!" = "execution error!") but
        # also surface the exception so the problem can be diagnosed.
        print("执行错误!")
        print(exc)
def test_download_error(self, download_folder_path, valid_credentials_path, monkeypatch):
    """A failed Playstore download must make the tool exit with code 1."""
    target_apk = "{0}.apk".format(
        os.path.join(download_folder_path, "error", VALID_PACKAGE_NAME))
    # Pre-parse a command line pointing the output at the target path.
    cmd = '"{0}" -c "{1}" -o "{2}"'.format(
        VALID_PACKAGE_NAME, valid_credentials_path, target_apk)
    parsed = download.get_cmd_args(cmd.split())
    monkeypatch.setattr(download, "get_cmd_args", lambda: parsed)
    # Make the Playstore report a failed download so main() must bail out.
    monkeypatch.setattr(Playstore, "download",
                        lambda self, package, path, download_obb: False)
    with pytest.raises(SystemExit) as err:
        download.main()
    assert err.value.code == 1
def Analyze(Keyword):
    """Full pipeline: download videos for *Keyword*, then run every analysis stage."""
    amount = '50'
    print("start download :", Keyword, "...")
    download.main(service, Keyword, amount)
    subprocess.call(['python', 'video_filter.py', Keyword, amount])
    # Each analysis stage announces itself, then runs in its own interpreter.
    for banner, script in (
            ("start comment analyze :", 'comment_analyze.py'),
            ("start caption analyze :", 'caption_analyze.py')):
        print(banner, Keyword, "...")
        subprocess.call(['python', script, Keyword, amount])
    print("start write into json file : ....")
    subprocess.call(['python', 'read_analyze_result.py', Keyword, amount])
    # Report completion as CORS-friendly JSON.
    resp = Response(json.dumps({"finish": True}))
    resp.headers['Access-Control-Allow-Origin'] = '*'
    resp.headers['Content-Type'] = 'application/json'
    return resp
def main(): today = datetime.datetime.now().strftime('%Y-%m-%d') log("***********************" + str(today) + "******************************") log("writemc:" + str(today) + " start at " + str(time.asctime())) download.main() log("donwnload:" + str(today) + " end at " + str(time.asctime())) #print " end at "+str(time.asctime()) process.main() log("process:" + str(today) + " end at " + str(time.asctime())) print "process:" + str(today) + " end at " + str(time.asctime()) processcl.main() log("processcl:" + str(today) + " end at " + str(time.asctime())) print "processcl:" + str(today) + " end at " + str(time.asctime()) processdh.main() log("processdh:" + str(today) + " end at " + str(time.asctime())) print "processdh:" + str(today) + " end at " + str(time.asctime()) importmemdh.main() log("importmemdh:" + str(today) + " end at " + str(time.asctime())) print "importmemdh:" + str(today) + " end at " + str(time.asctime()) processrep.main() log("processrep:" + str(today) + " end at " + str(time.asctime())) print "processrep:" + str(today) + " end at " + str(time.asctime()) shangxiaxian_jisuan.main() log("shangxiaxian_jisuan" + str(today) + " end at " + str(time.asctime())) print "shangxiaxian_jisuan" + str(today) + " end at " + str(time.asctime()) #importmem.main("select braid from branch where braid = '02058'") #log("impormem:"+str(today)+" end at "+str(time.asctime())) log("writemc:" + str(today) + " end at " + str(time.asctime()))
def main(): today = datetime.datetime.now().strftime("%Y-%m-%d") log("***********************" + str(today) + "******************************") log("writemc:" + str(today) + " start at " + str(time.asctime())) download.main() log("donwnload:" + str(today) + " end at " + str(time.asctime())) # print " end at "+str(time.asctime()) process.main() log("process:" + str(today) + " end at " + str(time.asctime())) print "process:" + str(today) + " end at " + str(time.asctime()) processcl.main() log("processcl:" + str(today) + " end at " + str(time.asctime())) print "processcl:" + str(today) + " end at " + str(time.asctime()) processdh.main() log("processdh:" + str(today) + " end at " + str(time.asctime())) print "processdh:" + str(today) + " end at " + str(time.asctime()) importmemdh.main() log("importmemdh:" + str(today) + " end at " + str(time.asctime())) print "importmemdh:" + str(today) + " end at " + str(time.asctime()) processrep.main() log("processrep:" + str(today) + " end at " + str(time.asctime())) print "processrep:" + str(today) + " end at " + str(time.asctime()) shangxiaxian_jisuan.main() log("shangxiaxian_jisuan" + str(today) + " end at " + str(time.asctime())) print "shangxiaxian_jisuan" + str(today) + " end at " + str(time.asctime()) # importmem.main("select braid from branch where braid = '02058'") # log("impormem:"+str(today)+" end at "+str(time.asctime())) log("writemc:" + str(today) + " end at " + str(time.asctime()))
def test_download_error(self, download_folder_path, valid_credentials_path, monkeypatch):
    """A failed Playstore download must make the tool exit with code 1."""
    target_apk = (
        f"{os.path.join(download_folder_path, 'error', VALID_PACKAGE_NAME)}.apk"
    )
    # Pre-parse a command line that pins the output location via -o.
    parsed = download.get_cmd_args(
        f'"{VALID_PACKAGE_NAME}" '
        f'-c "{valid_credentials_path}" '
        f'-o "{target_apk}"'.split())
    monkeypatch.setattr(download, "get_cmd_args", lambda: parsed)
    # Make the Playstore report a failed download so main() has to exit(1).
    monkeypatch.setattr(
        Playstore,
        "download",
        lambda self, package, path, download_obb, download_split_apks: False,
    )
    with pytest.raises(SystemExit) as err:
        download.main()
    assert err.value.code == 1
def download(args=None):
    """Console script for qi_irida_utils."""
    # NOTE(review): defining this function as `download` shadows any
    # module-level `import download`, so the call below resolves to this very
    # function object and would raise AttributeError at runtime. Presumably
    # the module should be imported under an alias (or this function renamed);
    # confirm against the package layout before fixing.
    download.main()
    return 0
import download

# Guard the invocation so merely importing this module does not trigger a
# download as a side effect; behavior when run as a script is unchanged.
if __name__ == "__main__":
    download.main()
def main():
    """Fetch the source data, then build the aggregates on top of it."""
    for step in (download.main, aggregate.main):
        step()
# Set Parameter classes = parameter.classes num_classes = parameter.num_classes # Creaete Save Dir database_path_current = parameter.database_path if not os.path.exists(database_path_current): os.mkdir(database_path_current) # Copy Parameter File shutil.copyfile("./parameter.py", database_path_current + "/parameter.py") # Download Image for i_class in range(num_classes): if not os.path.exists(database_path + "/" + classes[i_class]): download.main(classes[i_class]) # 4-fold cross validation (1:3 x 4) for i_cross_num in range(1, 1 + parameter.cross_num): print(i_cross_num) database_path_current_cross = database_path_current + \ "/cross" + str(i_cross_num) if not os.path.exists(database_path_current_cross): os.mkdir(database_path_current_cross) if not os.path.exists(database_path_current_cross + "/animal_.npy"): print("===== Generate Data =====") generate_data.main(i_cross_num) animal_cnn.main(i_cross_num)
    help="filename to save the file under")
# (The add_argument(...) call closed above begins outside this excerpt.)
arguments = parser.parse_args()
# Work with the parsed options as a plain dict.
args = vars(arguments)
VERBOSE = args.get("verbose", False)
log = Logger("workflow.log", "workflow.py", True, True)
utils.log_header(log, DESCRIPTION, VERBOSE)
try:
    log.debug("Debug mode activated.", VERBOSE)
    log.debug("Args: ", VERBOSE)
    for i in args:
        log.debug(i + ": " + str(args[i]), VERBOSE)
    # The --test flag skips every stage with side effects (download,
    # convert, transfer); face recognition runs regardless of verbosity.
    if not args.get("test", False):
        download.main(args, logger=log)
    if args.get("facerec", None) != None and not args.get("test", False):
        if "all" in args.get("facerec", []):
            # pass all videos from utils.getVideos() to facerec
            facerec.main({"files": utils.getVideos()})
        else:
            # pass all videos from args to facerec
            facerec.main({"files": args.get("facerec", [])})
    if not args.get("test", False):
        convert.main(args, log)
    if not args.get("test", False):
        transfer.main(args, log)
    log.success("Workflow routine finished!", not args.get("silent", False))
except KeyboardInterrupt:
    # Ctrl-C is an expected way to stop the workflow; exit gracefully.
    log.context = "workflow.py"
    log.warning("exiting...", True)
def setup():
    """Prepare the dataset: download the raw files, then process them."""
    for stage in (download.main, process.main):
        stage()
import download

# Guard the invocation so merely importing this module does not trigger a
# download as a side effect; `secure=True` is forwarded unchanged.
if __name__ == "__main__":
    download.main(secure=True)
import download
import sys
import os

# Inject the target directory as a CLI argument before handing control over
# (presumably download.main() reads its destination from sys.argv — confirm
# in download.py).
sys.argv.append(
    '/private/var/mobile/Library/Mobile Documents/com~apple~CloudDocs/Documents/wuecampus'
)
download.main()
import setup_checks
import download
import sys

if __name__ == "__main__":
    # The optional first CLI argument narrows the run; default is everything.
    name_filter = str(sys.argv[1]) if len(sys.argv) >= 2 else "all"
    # Run main function
    download.main(name_filter, True)
import download
import metadata
import mystem
from bs4 import BeautifulSoup as soup
import tree
import crowler
import create_csv

# Prepare the output folder tree and collect the URLs to process.
tree.make_folders()
list_of_urls = crowler.main(2)
create_csv.create_csv()
for url in list_of_urls:
    # Non-zero from download.main signals the page was fetched
    # ("Страница ... скачана" = "page ... downloaded").
    if download.main(url) != 0:
        print("Страница {0} скачана".format(url))
        meta_data = metadata.main(url)
        print("Метаданные собраны")
        date = meta_data["created"]
        date = date.split(".")
        month = str(int(
            date[1]))  # strip the leading zero from months like May ("05" -> "5")
        year = date[2]
        # The page number is the URL's final path component without extension.
        num_page = url.split('/')[-1].split(".")[0]
        file_path = "izvestiaur\\plain\\{0}\\{1}\\{2}.txt".format(
            year, month, num_page)
        meta_data["path"] = file_path
        create_csv.append_to_csv(meta_data)
        # Re-read the page HTML saved by the download step.
        # NOTE(review): presumably download.main wrote text.html — confirm.
        with open("text.html", "r", encoding="utf-8") as f:
            text = f.read()
        soup_html = soup(text, "lxml")
        download.add_meta(url, meta_data)
        with open("plain_text.txt", encoding="utf-8") as f:
# (excerpt truncated here: the body of this with-block continues beyond this chunk)
#!/usr/bin/env python # * coding: utf8 * ''' DownloadTool.py An Esri Toolbox Tool wrapper for download.py. input parameters: 0 - ids:String a semi-colon delimited list of sampling event ids output parameters: 1 - zipfile:File the url to the .zip file for download ''' from arcpy import GetParameterAsText, SetParameterAsText from download import main SetParameterAsText(2, main(GetParameterAsText(0), GetParameterAsText(1)))