def download_process(self, section, url, download_path, target):
    url = STORAGE_URL + url
    # "Starting <section> skin download."
    self.progress_text['text'] = section + ' 스킨 다운로드를 시작합니다.'
    time.sleep(1)
    response = requests.get(url, stream=True)
    total_size = int(response.headers.get('content-length', 0))
    if not total_size:
        # "<section> skin download failed."
        self.progress_text['text'] = section + ' 스킨 다운로드에 실패하였습니다.'
        return
    block_size = 1024
    t = tqdm.tqdm(total=total_size, unit='iB', unit_scale=True)
    with open(download_path, 'wb') as f:
        for data in response.iter_content(block_size):
            # Mirror the tqdm progress line in the UI label.
            self.progress_text['text'] = str(t)
            t.update(len(data))
            f.write(data)
    t.close()
    response.close()
    # "Extracting archive."
    self.progress_text['text'] = '압축 해제중입니다.'
    common.unzip(download_path, target)
    os.remove(download_path)
    # "<section> skin installation complete."
    self.progress_text['text'] = section + ' 스킨 설치가 완료되었습니다.'
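# common.unzip(download_path, target) above is an extract-to-directory helper
# whose implementation is not shown here. A minimal sketch, assuming it simply
# wraps the standard-library zipfile module:
import os
import zipfile

def unzip(archive, target):
    # Hypothetical sketch of common.unzip(archive, target): extract every
    # member of the zip archive into the target directory.
    os.makedirs(target, exist_ok=True)
    with zipfile.ZipFile(archive) as zf:
        zf.extractall(target)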
def run_fires(site, region):
    url = site + '/jobs/archive'
    try:
        p = getPage(url)
    except Exception as e:
        logging.error("Can't load {}".format(url))
        logging.error(e)
        return None
    a = p.findAll('a')
    # Keep only anchors that actually carry an href ending in .zip.
    zips = [x.get('href') for x in a
            if x.get('href') and x.get('href').endswith('.zip')]
    # The fire name is the part of the file name before the first underscore.
    fires = sorted(set([x[x.rindex('/') + 1:x.index('_')] for x in zips]))
    times = {}
    recent = {}
    simtimes = {}
    dates = []
    totaltime = 0
    dir_download = common.ensure_dir(os.path.join(DIR, region))
    dir_ext = common.ensure_dir(os.path.join(EXT_DIR, region))
    logging.debug("Checking {} fires".format(len(fires)))
    for f in fires:
        # Timestamps sit between the last underscore and the extension.
        times[f] = [
            datetime.datetime.strptime(x[x.rindex('_') + 1:x.rindex('.')],
                                       '%Y%m%d%H%M%S%f')
            for x in zips if x[x.rindex('/') + 1:x.index('_')] == f
        ]
        recent[f] = {
            'time': max(times[f]),
            'url': [
                x for x in zips
                if x[x.rindex('/') + 1:x.index('_')] == f
                and datetime.datetime.strptime(
                    x[x.rindex('_') + 1:x.rindex('.')],
                    '%Y%m%d%H%M%S%f') == max(times[f])
            ][0],
        }
        logging.debug('{}: {}'.format(f, recent[f]['time']))
        z = common.save_http(dir_download, site + recent[f]['url'],
                             ignore_existing=True)
        cur_dir = os.path.join(dir_ext, os.path.basename(z)[:-4])
        common.unzip(z, cur_dir)
        fgmj = os.path.join(cur_dir, 'job.fgmj')
        if os.path.exists(fgmj):
            try:
                t0 = timeit.default_timer()
                log_name = firestarr.do_run(fgmj)
                t1 = timeit.default_timer()
                if log_name is not None:
                    simtimes[f] = t1 - t0
                    totaltime = totaltime + simtimes[f]
                    logging.info("Took {}s to run {}".format(simtimes[f], f))
                    d = os.path.basename(os.path.dirname(log_name))[:8]
                    if d not in dates:
                        dates.append(d)
            except Exception as e:
                logging.error(e)
    return simtimes, totaltime, dates
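# getPage and common.save_http are helpers defined elsewhere; p.findAll('a')
# suggests getPage returns a BeautifulSoup document. A minimal sketch under
# that assumption:
import requests
from bs4 import BeautifulSoup

def getPage(url):
    # Hypothetical sketch: fetch the page and parse it into a soup tree,
    # raising on HTTP errors so the caller's try/except logs the failure.
    r = requests.get(url)
    r.raise_for_status()
    return BeautifulSoup(r.content, 'html.parser')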
def process():
    DLDir = os.path.join(originalDir, "data", "CVEXML")
    try:
        os.makedirs(DLDir)
    except OSError:
        pass
    urlBase = "https://static.nvd.nist.gov/feeds/xml/cve/"
    os.chdir(DLDir)
    # Download, extract, and delete one feed per year (2002 through this year).
    for year in range(2002, datetime.datetime.now().year + 1):
        fileName = "nvdcve-2.0-" + str(year) + ".xml.zip"
        url = urlBase + fileName
        common.download_url(url, fileName)
        common.unzip(fileName)
        os.remove(fileName)
def get_data_set(dirname, start_index=None, end_index=None):
    fnames, inputs, codes = common.unzip(
        list(common.read_data_for_lstm_ctc(dirname, start_index, end_index)))
    inputs = inputs.swapaxes(1, 2)
    targets = [np.asarray(i) for i in codes]
    sparse_targets = sparse_tuple_from(targets)
    seq_len = np.ones(inputs.shape[0]) * common.OUTPUT_SHAPE[1]
    return fnames, inputs, sparse_targets, seq_len
def process():
    DLDir = os.path.join(originalDir, "data", "CVEXML")
    try:
        os.makedirs(DLDir)
    except OSError:
        pass
    # NVD's XML vulnerability feeds have been deprecated; use JSON instead.
    # https://nvd.nist.gov/feeds/json/cve/1.1/nvdcve-1.1-2002.json.zip
    urlBase = "https://nvd.nist.gov/feeds/json/cve/1.1/"
    os.chdir(DLDir)
    for year in range(2002, datetime.datetime.now().year + 1):
        fileName = "nvdcve-1.1-{0}.json.zip".format(year)
        url = urlBase + fileName
        common.download_url(url, fileName)
        common.unzip(fileName)
        os.remove(fileName)
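# Both versions of process() lean on common.download_url plus a one-argument
# common.unzip(fileName) that extracts in place. Neither is shown; a minimal
# sketch, assuming requests and the standard zipfile module:
import zipfile

import requests

def download_url(url, fileName):
    # Hypothetical sketch of common.download_url: stream the response to disk.
    with requests.get(url, stream=True) as r:
        r.raise_for_status()
        with open(fileName, 'wb') as f:
            for chunk in r.iter_content(chunk_size=8192):
                f.write(chunk)

def unzip(fileName):
    # Hypothetical sketch of the one-argument common.unzip: extract into the
    # current working directory (process() has already chdir'd into DLDir).
    with zipfile.ZipFile(fileName) as zf:
        zf.extractall()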
def get_data_set(dirname, start_index=None, end_index=None):
    # b: list of (img, code) tuples, e.g. len 200.
    b = list(common.read_data_for_lstm_ctc(dirname, start_index, end_index))
    # inputs.shape: (64, 60, 3000); codes.shape: (64,) -- 64 images.
    inputs, codes = common.unzip(b)
    inputs = inputs.swapaxes(1, 2)  # inputs.shape: (64, 3000, 60)
    targets = [np.asarray(i) for i in codes]  # to list, len 64
    sparse_targets = sparse_tuple_from(targets)  # (indices, values, shape)
    # array([256, ..., 256]), shape: (64,)
    seq_len = np.ones(inputs.shape[0]) * common.OUTPUT_SHAPE[1]
    # We don't have a validation dataset :(
    return inputs, sparse_targets, seq_len
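# sparse_tuple_from builds the (indices, values, shape) triple used to feed a
# tf.SparseTensor to TensorFlow's CTC loss. A minimal sketch of the usual
# implementation, assuming variable-length NumPy label sequences:
import numpy as np

def sparse_tuple_from(sequences, dtype=np.int32):
    # Sketch: one (row, col) index and one value per label element.
    indices = []
    values = []
    for n, seq in enumerate(sequences):
        indices.extend(zip([n] * len(seq), range(len(seq))))
        values.extend(seq)
    indices = np.asarray(indices, dtype=np.int64)
    values = np.asarray(values, dtype=dtype)
    # Dense shape: (batch size, length of the longest sequence).
    shape = np.asarray([len(sequences), indices.max(0)[1] + 1], dtype=np.int64)
    return indices, values, shape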
def process():
    # First download the modified CVE data feed from NVD.
    fileName = "nvdcve-1.1-modified.json.zip"
    url = "https://nvd.nist.gov/feeds/json/cve/1.1/" + fileName
    common.download_url(url, fileName)
    common.unzip(fileName)
    os.remove(fileName)
    # Load the pickled CVE data.
    print("Reading pickled data...", end=" ")
    cveDict = pickle.load(
        open(os.path.join(originalDir, "data", "cvedata.pkl"), "rb"))
    print("[DONE]")
    # Merge the freshly downloaded entries into the existing dictionary.
    subDict = common.parse_xml(fileName.replace(".zip", ""))
    cveDict.update(subDict)
    os.remove(fileName.replace(".zip", ""))
    print("Dumping updated pickle...", end=" ")
    pickle.dump(cveDict,
                open(os.path.join(originalDir, "data", "cvedata.pkl"), "wb"))
    print("[DONE]")
def get_data_set(dirname, start_index=None, end_index=None):
    inputs, codes = common.unzip(
        list(common.read_data_for_lstm_ctc(dirname, start_index, end_index)))
    inputs = inputs.swapaxes(1, 2)
    targets = [np.asarray(i) for i in codes]
    # Creating sparse representation to feed the placeholder
    sparse_targets = sparse_tuple_from(targets)
    seq_len = np.ones(inputs.shape[0]) * common.OUTPUT_SHAPE[1]
    # We don't have a validation dataset :(
    return inputs, sparse_targets, seq_len
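# In these get_data_set variants, common.unzip is not the archive extractor
# above: it transposes a list of (image, code) tuples into parallel arrays,
# since the result supports .swapaxes. A minimal sketch under that assumption:
import numpy as np

def unzip(pairs):
    # Hypothetical sketch of the tuple-transposing common.unzip: turn
    # [(img0, code0), (img1, code1), ...] into (imgs array, codes array).
    return tuple(np.asarray(column) for column in zip(*pairs))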
common.download_file(
    'https://raw.githubusercontent.com/nothings/stb/master/stb_image.h',
    'stb/stb/stb_image.h')
common.download_file(
    'https://raw.githubusercontent.com/nothings/stb/master/stb_image_resize.h',
    'stb/stb/stb_image_resize.h')
common.download_file(
    'https://raw.githubusercontent.com/nothings/stb/master/stb_image_write.h',
    'stb/stb/stb_image_write.h')
common.download_file(
    'https://raw.githubusercontent.com/nothings/stb/master/stb_perlin.h',
    'stb/stb/stb_perlin.h')

# Boost
common.download_file(
    'https://netcologne.dl.sourceforge.net/project/boost/boost/1.67.0/boost_1_67_0.zip',
    'boost_1_67_0.zip')
common.unzip('boost_1_67_0.zip', 'boost_1_67_0')
common.start_in_folder('bootstrap.bat',
                       where='boost_1_67_0/boost_1_67_0/',
                       if_not_exists='project-config.jam')
common.start_in_folder(
    'b2.exe',
    args=[
        '--build-type=complete',
        # '--with-system'
    ],
    where='boost_1_67_0/boost_1_67_0/',
    if_not_exists='stage')
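# common.start_in_folder runs a build step inside a directory and, judging by
# the if_not_exists argument, skips it when its output already exists. A
# minimal sketch under that reading:
import os
import subprocess

def start_in_folder(cmd, args=None, where='.', if_not_exists=None):
    # Hypothetical sketch: skip the step if the marker path already exists,
    # otherwise run the command with the given working directory (Windows
    # resolves bare commands like 'bootstrap.bat' against that directory).
    if if_not_exists and os.path.exists(os.path.join(where, if_not_exists)):
        return
    subprocess.check_call([cmd] + (args or []), cwd=where)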
def run_case(case_name, process_name):
    """
    Called when the operation type received from the front-end task is "run";
    performs the actual execution.
    @param case_name: name of the test case
    @param process_name: name of the process
    @return: the result of running the case
    """
    result = {
        "status": RUN_SUCCESS,
        "casename": case_name,
        "processName": process_name,
        "path": "",
        "msg": ""
    }
    try:
        # Log in to the asset database.
        login_status = asset.login()
        if not login_status:
            err_msg = "Failed to login asset base."
            result["status"] = RUN_ERROR
            result["msg"] = err_msg
            case_log.error(result)
            return result
        else:
            if '\.Main' in case_name:
                case_name = case_name.split('\.Main')[0]
            # Append the default extension unless the name already carries a
            # supported asset file type.
            if "." in case_name:
                if case_name.split(".")[-1] in support_asset_file_type:
                    asset_name = case_name
                else:
                    asset_name = ".".join([case_name, default_asset_file_type])
            else:
                asset_name = ".".join([case_name, default_asset_file_type])
            save_path = os.path.join(ZIP_PATH, asset_name)
            case_log.debug("Get asset name is: %s" % asset_name)
            case_log.debug("Asset package save path is: %s" % save_path)
            # 1. Fetch the asset information for this case from the asset store.
            get_status, get_info = asset.get_asset_info(asset_name=asset_name)
            if not get_status:
                # Lookup failed: return RUN_ERROR plus the error message.
                err_msg = "Failed to get asset [%s] information: %s" % (
                    asset_name, get_info)
                result["status"] = RUN_ERROR
                result["msg"] = err_msg
                case_log.error(result)
                return result
            else:
                case_log.info("Get asset [%s] infos: %s" % (asset_name, get_info))
                # 2. Save the fetched package locally and check the result.
                get_rst = asset.get_asset_package(asset_info=get_info,
                                                  save_path=save_path)
                if not get_rst:
                    # Download failed: return RUN_ERROR plus the error message.
                    err_msg = "Failed to get asset [%s] package." % asset_name
                    result["status"] = RUN_ERROR
                    result["msg"] = err_msg
                    case_log.error(result)
                    return result
                else:
                    case_log.info("Get case package success.")
                    # 3. Extract the zip; unzip() returns True on success or an
                    # error message on failure.
                    case_log.debug("Start to Extract case package......")
                    unzip_result = unzip(save_path, ZIP_PATH)
                    if unzip_result is not True:
                        result["status"] = RUN_ERROR
                        result["msg"] = unzip_result
                        case_log.error(result)
                        return result
                    # 4. Run the case.
                    case_log.debug("Start to run case......")
                    test_runner_plugin.OnRun(case_name, ZIP_PATH)
                    time.sleep(10)
                    # 5. Parse the "output.xml" report generated by the run.
                    case_log.debug("Start to parser case report output.xml file......")
                    status, report_info = return_xml_status(
                        report_path=os.path.join(RESULT_PATH, "output.xml"),
                        case_name=case_name)
                    if not status:
                        # Report parsing failed: return immediately.
                        result["status"] = RUN_ERROR
                        result["msg"] = report_info
                        case_log.error(result)
                        return result
                    else:
                        case_log.debug("Get report info is: %s" % report_info)
                        # 6. Upload the result to ElasticSearch.
                        case_log.debug("Start to save report to ElasticSearch......")
                        es = ElasticSearch(index_name="showhtml",
                                           index_type="report",
                                           ip=ES_HOST)
                        es.create_index()
                        save_es_time = es.index_data(RESULT_PATH)
                        case_log.info(
                            "Save case [%s] report to ElasticSearch at time: %s"
                            % (asset_name.split(".")[0], save_es_time))
                        # 7. Copy the result to the designated report directory
                        # and verify the copy.
                        case_log.debug("Start to copy report file to local path......")
                        status, report_msg = copy_report_to_local(
                            case_name=case_name, process_name=process_name)
                        if not status:
                            result["status"] = RUN_ERROR
                            result["msg"] = report_msg
                            case_log.error(result)
                            return result
                        else:
                            local_path = report_msg
                            case_log.info("Save case [%s] report to local path: %s"
                                          % (case_name, local_path))
                            # 8. Save the run report to Qloud.
                            case_log.debug("Start to copy report file to qloud path......")
                            qloud_path = send_report_to_qloud(
                                process_name=process_name,
                                case_name=case_name,
                                local_path=local_path)
                            result["path"] = qloud_path
                            return result
    except Exception as e:
        err_msg = "Failed to execute [run] case: %s" % e
        result["status"] = RUN_ERROR
        result["msg"] = err_msg
        case_log.error(result)
        return result
def get(url, name, match):
    print('Downloading {}'.format(url))
    # if not os.path.exists(os.path.join(DOWNLOAD_DIR, os.path.basename(url))):
    file = download(url, DOWNLOAD_DIR)
    print('Extracting {}'.format(name))
    unzip(file, os.path.join(EXTRACTED_DIR, name), match)
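# A hypothetical call, assuming DOWNLOAD_DIR and EXTRACTED_DIR are configured
# and that unzip's third argument filters which archive members to extract:
get('https://example.com/data/archive.zip', 'archive', '*.csv')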