def solve(self):
    last_valid_state = self.states.copy()
    lowest_temp = 0.1
    num_states = self.numCities
    last_print = time.time()
    while self.temperature > lowest_temp:
        for _ in range(self.numStates ** 2):
            city = random.randint(0, self.numCities - 1)
            tour = random.randint(0, self.numCities - 1)
            prob_on = self.__prob_on(city, tour, self.temperature)
            prob_flip = prob_on if self.states[city, tour] == 0 else 1 - prob_on
            if np.random.binomial(1, prob_flip):
                self.states[city, tour] = 1 - self.states[city, tour]
                if utils.isPathValid(self.states):
                    last_valid_state = self.states.copy()
        # cooling...
        self.temperature *= 0.99
        if time.time() - last_print >= 1:
            last_print = time.time()
            print('Temp %s Current Distance %d' % (
                self.temperature,
                utils.path_distance(self.distMat,
                                    utils.path(last_valid_state))))
    # by this point last_valid_state holds the result of the simulated annealing
    return utils.path(last_valid_state)
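# ``utils.path`` above decodes the 0/1 city-by-position ``states`` matrix
# into a tour. The helper itself is not shown in this collection; a sketch
# consistent with how the matrix is indexed (rows = cities, columns = tour
# positions) might be -- the real implementation may differ:
import numpy as np

def path(states):
    # for each tour position (column), pick the city whose unit is on
    return list(np.argmax(states, axis=0))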
def create_model_folder(self, root: str, subfolder: str = None):
    if subfolder is None:
        # note: '%s' (epoch seconds) is a POSIX-only strftime extension
        timestamp = datetime.now().strftime('%s')
        folder = path(root, timestamp)
    else:
        folder = path(root, subfolder)
    template = 'weights_{epoch:03d}_{val_loss:2.4f}.hdf5'
    history_path = join(folder, 'history.csv')
    weights_path = join(folder, template)
    parameters_path = join(folder, 'parameters.json')
    if exists(folder):
        print('Model folder already exists. It will be deleted.', end=' ')
        while True:
            print('Proceed? [y/n]')
            response = input().lower().strip()
            if response in ('y', 'n'):
                if response == 'n':
                    return False
                else:
                    shutil.rmtree(folder)
                break
    os.makedirs(folder, exist_ok=True)
    self.subfolder = folder
    self.history_path = history_path
    self.weights_path = weights_path
    self.parameters_path = parameters_path
    return True
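# Possible downstream use of the prepared paths (an illustrative sketch:
# the Keras imports and the ``trainer``/``model``/``x``/``y`` names are
# assumptions, not part of the source):
from keras.callbacks import ModelCheckpoint, CSVLogger

if trainer.create_model_folder('runs'):
    model.fit(x, y, validation_split=0.2, callbacks=[
        ModelCheckpoint(trainer.weights_path, save_best_only=True),
        CSVLogger(trainer.history_path),
    ])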
def CVE_monitor():
    """Parse the historical (stock) data offline; update incrementally online."""
    json_list = []
    # Read the stock-parsing switch from the config file to build the list
    # of JSON files awaiting parsing and insertion.
    conf = configparser.ConfigParser()
    conf.read('../conf/info.conf')
    stock = conf.get('CVE_Feeds', 'stock_control')
    if stock == 'True':
        zips = glob.glob('../data/json/nvdcve-1.1-*.json.zip')
        for z in zips:
            with zipfile.ZipFile(z) as zf:
                print("[UNZIP]:%s" % z)
                zf.extractall(path=path('../data/json'))
        jsons_stock = glob.glob('../data/json/nvdcve-1.1-*.json')
        # keep only the yearly feeds; skip the 'modified' and 'recent' ones
        jsons_stock = [
            i for i in jsons_stock
            if 'modified' not in i and 'recent' not in i
        ]
        json_list.extend(jsons_stock)
        conf.set('CVE_Feeds', 'stock_control', str(False))
        with open('../conf/info.conf', 'w') as configfile:
            conf.write(configfile)
    else:
        print('[!] WARNING: stock_control is False, skipping stock data!')
    # Download the latest 'modified' feed.
    cve_data_feeds = path('../data/nvd', time_delta() + 'nvd_data_feeds.html')
    if not os.path.exists(cve_data_feeds):
        r = requests.get(url)
        with codecs.open(cve_data_feeds, 'wb') as f:
            f.write(r.content)
    if os.path.exists(cve_data_feeds):
        with codecs.open(cve_data_feeds, 'rb') as f:
            modified_zip, modified_link = CVE_modified(f)
        json_download(modified_zip, modified_link)
        json_list.extend([path('../data/json', 'nvdcve-1.1-modified.json')])
    # Parse stock and incremental CVE data with the same pipeline.
    ret = None
    for j in json_list:
        print(j)
        ret = json2sql(j, dir_name='../data/nvd.db')
    return ret
def dynac(f, out, anf=None, compiler_args=()):
    """
    Run compiler on file, ``f``, write results to ``out``.

    Raises ``DynaCompilerError`` on failure.
    """
    f = path(f)
    if not f.exists():
        raise DynaCompilerError("File '%s' does not exist." % f)
    cmd = ['%s/dist/build/dyna/dyna' % dynahome, '-B', 'python', '-o', out, f]
    if anf is not None:
        cmd += ['--dump-anf=' + anf]
    cmd += compiler_args
    p = Popen(cmd, stdout=PIPE, stderr=PIPE)
    stdout, stderr = p.communicate()
    if p.returncode:
        assert not stdout.strip(), [stdout, stderr]
        stderr = hide_ugly_filename(
            stderr, lambda m: '\n  %s\n' % span_to_src(m.group(0)))
        raise DynaCompilerError(stderr, f)
def for_student(fac_no, en_no, name):
    r_no = fac_no[5:8]
    path = os.path.join(utils.path(), 'iaj', 'Store')
    file_name = r_no + ' - ' + name + ' (' + fac_no + ')' + '.html'
    os.chdir(path)
    if os.path.isfile(file_name):
        print('File Exists... Skipping\n\t', file_name)
    else:
        try:
            form_data = {'FN': fac_no, 'EN': en_no, 'submit': 'submit'}
            response = requests.post(url, data=form_data)
            soup = BeautifulSoup(response.text)
            with open(file_name, 'w+') as ou:
                print(soup.prettify(), file=ou)
            if 'CPI' in response.text:
                print('Saved result of', name)
            elif 'This Result has not been declared yet!' in response.text:
                print('No result')
            elif 'Faculty_No or En_No is incorrect!' in response.text:
                print('Wrong Faculty or Enrolment No.')
            else:
                print('Wrong input data or no result...')
        except requests.exceptions.ConnectionError:
            print('No Connection')
def main(self, filename, astype=float, delim=r'\s+'):
    filename = path(filename)
    if not filename.exists():
        print('file `%s` does not exist.' % filename)
        return
    interp = self.interp
    fn = '%s/2' % self.name
    if interp.agg_name[fn] is None:
        interp.new_fn(fn, ':=')

    def term(a, v):
        interp.emit(interp.build(fn, *a), v,
                    ruleix=None, variables=None, delete=False)

    with open(filename) as f:
        for i, line in enumerate(f):
            line = line.rstrip()
            line = re.split(delim, line)
            for j, v in enumerate(line):
                term((i, j), astype(v))
def filter_bed_files(bed_files, bim_files, params):
    """filters SNPs in all bed files

    Parameters
    ----------
    bed_files : list of strings
        identifiers for the bed and fam files, without extension
    bim_files : list of strings
        names of the bim files, with extension
    params : Parameters
        Parameters object

    Returns
    -------
    new_bed_ids : list of strings
        the file ids for the filtered bed files
    """
    logging.info("starting bed filtering")
    new_bed_ids = []
    for i, bed_file in enumerate(bed_files):
        filtered_bed_name = utils.path(params.twd, "filtered_bed%s" % i)
        filter_single_bed(bed_file, bim_files[i], filtered_bed_name,
                          params.retained_snp, params.plink)
        new_bed_ids.append(filtered_bed_name)
    logging.info("finished bed filtering")
    return new_bed_ids
def for_student(fac_no, en_no, name):
    r_no = fac_no[5:8]
    path = os.path.join(utils.path(), 'iaj', 'Store')
    file_name = r_no + ' - ' + name + ' (' + fac_no + ')' + '.html'
    os.chdir(path)
    if os.path.isfile(file_name):
        print('File Exists... Skipping\n\t', file_name)
    else:
        try:
            response = requests.get(
                'http://ctengg.amu.ac.in/web/table_resultnew.php?fac='
                + fac_no + '&en=' + en_no + '&prog=btech')
            soup = BeautifulSoup(response.text)
            with open(file_name, 'w+') as ou:
                print(soup.prettify(), file=ou)
            if 'CPI' in response.text:
                print('Saved result of', name)
            elif 'This Result has not been declared yet!' in response.text:
                print('No result')
            elif 'Faculty_No or En_No is incorrect!' in response.text:
                print('Wrong Faculty or Enrolment No.')
            else:
                print('Wrong input data or no result...')
        except requests.exceptions.ConnectionError:
            print('No Connection')
def main():
    ratings = spark.read.parquet('output/review.parquet')
    ratings = ratings.select(ratings['user_id'], ratings['business_id'],
                             ratings['stars'])
    train, test = ratings.randomSplit([0.8, 0.2])
    t0 = time()
    als = ALS(maxIter=5, regParam=0.01, userCol="user_id",
              itemCol="business_id", ratingCol="stars",
              coldStartStrategy="drop")
    ratings_model = als.fit(train)
    time_len = time() - t0
    print("New model trained in {} seconds".format(time_len))
    predictions = ratings_model.transform(test)
    evaluator = RegressionEvaluator(metricName="rmse", labelCol="stars",
                                    predictionCol="prediction")
    rmse = evaluator.evaluate(predictions)
    with open('rmse.txt', 'w') as f:
        f.write(str(rmse))
    print("Root-mean-square error = " + str(rmse))
    # After testing, retrain on all the data and save that model.
    ratings_model = als.fit(ratings)
    ratings_model.save(utils.path('ratings_model'))
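# Reloading the persisted recommender later (an illustrative companion
# snippet; it assumes the same ``utils.path`` helper the save call uses):
from pyspark.ml.recommendation import ALSModel

ratings_model = ALSModel.load(utils.path('ratings_model'))
top10 = ratings_model.recommendForAllUsers(10)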
def load_bpf_program(path, cflags=None):
    cflags = cflags or []  # avoid a shared mutable default argument
    with open(path, 'r') as f:
        text = f.read()
    # rewrite local #include "..." directives to absolute paths
    for match in re.findall(r'(#include\s*"(.*)")', text):
        real_header_path = os.path.abspath(utils.path(match[1]))
        text = text.replace(match[0],
                            ''.join(['#include "', real_header_path, '"']))
    return BPF(text=text, cflags=cflags)
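# Typical BCC usage of the loader above (illustrative; the source file and
# probe function names here are hypothetical):
b = load_bpf_program('probes/open_trace.c', cflags=['-DDEBUG'])
b.attach_kprobe(event='do_sys_open', fn_name='trace_entry')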
def main(self, filename):
    filename = path(filename)
    if not filename.exists():
        print('file `%s` does not exist.' % filename)
        return
    with open(filename, 'rb') as f:  # binary mode for pickled data
        interp = cPickle.load(f)
    return interp
def parse_error(error_file):
    s = open(error_file).read()
    lines = s.split('\n')
    lines = [l.strip() for l in lines if l.strip().startswith('File ')]
    lines = lines[-2:]
    filenames = [l.split(',')[0].split(' ')[-1].strip('"') for l in lines]
    pkgs = [str(path(f).namebase) for f in filenames]
    return pkgs
def universe_versions(dir='simple', universe=('foo', 'goo', 'hoo')):
    pkgs = path(dir).abspath()
    res = {}
    for pkg in universe:
        l = list(versions(dir + '/' + pkg))
        res[pkg] = l
    return res
def influence(feature, year, month=0):
    """Generate the plot and save it under `figures`."""
    figure, ax1 = plt.subplots()
    ax2 = ax1.twinx()
    ax1.set_xlabel("temps (jours)")
    for dtype, name, axis, color in zip(('conso', 'pertes'),
                                        (feature, 'perte'),
                                        (ax1, ax2), "br"):
        dataframe = pd.read_csv(path(f"data/{dtype}/{dtype}_{year}.csv"))
        if month:
            dataframe = dataframe.loc[dataframe['mois'] == month]
        data = dataframe.groupby('jour').sum()[name]
        axis.plot(data, f"{color}-")
        axis.set_ylabel(name, color=color)
        axis.tick_params(axis='y', labelcolor=color)
    figure.tight_layout()
    plt.title(f"{feature} {year}/{month}")
    plt.savefig(path(f"figures/{feature}_{year}_{month}.png"))
def branch_names(dir='simple', universe=('foo', 'goo', 'hoo')):
    _dir = path(dir)
    res = {}
    for pkg in universe:
        p = _dir / pkg
        repo = git.Repo(p)
        res[pkg] = repo.active_branch.name
    return res
def json_download(fname, url, retry=3, ret=False):
    """Download a CVE JSON feed zip and unpack it."""
    while retry > 0:
        retry -= 1  # count the attempt even if the download raises
        try:
            print("[DOWNLOAD]:%s to %s %s" % (fname, url, retry))
            r = requests.get(url, stream=True)
            with open(path('../data/json', fname), 'wb') as f:
                shutil.copyfileobj(r.raw, f)
            with zipfile.ZipFile(path('../data/json', fname)) as zf:
                print("[UNZIP]:%s" % fname)
                retry = 0  # success: stop retrying
                zf.extractall(path=path('../data/json'))
            ret = True
        except Exception as e:
            ret = False
            print("[DOWNLOAD ERROR]:%s error:%s" % (url, repr(e)))
    return ret
def dynac(self, filename):
    """
    Compile a file full of dyna code.

    Note: this routine does not pass along parser_state.
    """
    filename = path(filename)
    self.files.append(filename)
    out = self.tmp / filename.read_hexhash('sha1') + '.plan.py'
    #out = filename + '.plan.py'
    self.files.append(out)
    dynac(filename, out)
    return out
def draw_pie(table='nvd_cve',
             column='CVE_Items_impact_baseMetricV2_severity',
             time=2014,
             top=10):
    """Draw a pie chart and save it."""
    od_pec = query(table=table, column=column, time=time, top=top)
    labels = list(od_pec.keys())
    values = list(od_pec.values())
    explode = [0.1 for _ in range(0, len(labels))]
    explode[-1] = 0
    plt.rcParams['font.sans-serif'] = ['SimHei']  # render Chinese glyphs correctly
    plt.rcParams['font.family'] = 'sans-serif'
    plt.rcParams['axes.unicode_minus'] = False  # render minus signs on the axes
    plt.pie(
        values,             # the data to plot
        explode=explode,    # pull selected wedges out of the pie
        labels=labels,      # label each wedge, like a legend
        labeldistance=1.2,  # distance of the wedge labels from the center
        pctdistance=0.6,    # distance of the percentage labels from the center
        startangle=90,      # initial rotation of the pie
        shadow=True,        # drop shadow under the pie
        autopct='%3.2f%%')
    plt.axis('equal')
    plt.title('漏洞占比')
    if not os.path.exists(path('data/img')):
        os.mkdir(path('data/img'))
    plt.savefig(
        path('data/img',
             '{time}_{table}_{column}_{top}.png'.format(
                 time=time, table=table, column=column, top=top)))
    plt.show()
def merge(params):
    """merges all input files into a single file

    The basic approach here is to
    1) construct the reference seq table
    2) transform vcf files to plink format
    3) rename all snp names in all plink files
    4) filter plink files to only keep desired snp
    5) merge the files using plink
    6) restore snpids

    Parameters
    ----------
    params : Parameters
        a Parameters object with all the options, see the -h output of the
        argparser
    """
    if socket.gethostname() == "spudhead":
        return merge_grid(params)
    # 1
    construct_reference_sequence(params)
    # 2
    transformed_vcf_bed = transform_vcf_to_bed(params.vcf, params.twd,
                                               params.plink)
    # 3
    tmp_bim_files = refseq.rename_snpids_from_data(transformed_vcf_bed)
    tmp_bim_files += refseq.rename_snpids_from_data(params.bed, params.twd)
    # 4
    filtered_beds = filter_bed_files(bed_files=transformed_vcf_bed + params.bed,
                                     bim_files=tmp_bim_files, params=params)
    # 5
    merge_beds(filtered_beds, params)
    # 6
    if params.keep_snp_id != 'false':
        print("QWEQW", params.keep_snp_id)  # leftover debug output
        refseq.restore_snpids(params.out,
                              utils.path(params.twd, "original_ids.txt"))
def compare(reality, prediction, year, name=None):
    """Compare reality and prediction, save with the given file name
    (path and extension built automatically).
    """
    plt.figure()
    real, pred = [d.reshape(-1, 24).sum(1) for d in (reality, prediction)]
    plt.plot(real, 'g-', label='pertes observées')
    plt.plot(pred, 'r-', label='pertes prédites')
    plt.title("Pertes électriques sur l'année " + str(year))
    plt.xlabel("Jour")
    plt.ylabel("Pertes électriques journalières en MWh")
    plt.legend(loc="upper right")
    if name:
        plt.savefig(path(f"figures/{name}.png"))
    plt.show()
def write_all_plink_files(reference_alleles, params):
    """writes all the files needed to interact with plink

    Parameters
    ----------
    reference_alleles : refseq
        reference sequence data to be written
    params : Parameters
        Parameters object with file names
    """
    write_exclusion_file(reference_alleles, params.dropped_snp)
    write_inclusion_file(reference_alleles, params.retained_snp)
    write_plink_reference_file(reference_alleles, "dump.txt")
    write_original_id_file(reference_alleles,
                           utils.path(params.twd, "original_ids.txt"))
def compare_nd(reality, prediction, day, n, name=None):
    """Compare reality and prediction, save with the given file name
    (path and extension built automatically).
    """
    plt.figure()
    real = reality[day:day + n]
    pred = prediction[day:day + n]
    plt.plot(real, 'g-', label='pertes observées')
    plt.plot(pred, 'r-', label='pertes prédites')
    plt.title("Pertes électriques du jour " + str(day) + " au jour "
              + str(day + n))
    plt.xlabel("Heure")
    plt.ylabel("Pertes électriques horaires en MWh")
    plt.legend(loc="upper right")
    if name:
        plt.savefig(path(f"figures/{name}.png"))
    plt.show()
def __init__(self, points, findShortest=True, prob=1, run_idx=1):
    '''
    initialize search

    args:
        points: N x 2 ndarray of 2D points
        findShortest: boolean, find shortest or longest path
    '''
    self.points = points.astype(np.float32)
    self.dmatrix = cdist(points, points)
    self.N = points.shape[0]
    self.findShortest = findShortest
    self.best_path = path(np.random.permutation(self.N), self.dmatrix,
                          findShortest=self.findShortest)
    self.best_path_length = self.best_path.d
    self.best_fitness = self.best_path.f
    self.log_dir = './saved/'
    self.prob = prob
    self.run_idx = run_idx
def __init__(self, points, findShortest=True, prob=1, run_idx=1):
    '''
    basic hill climbing algorithm - mutate current best path

    args:
        points: N x 2 ndarray of 2D points
        findShortest: boolean, find shortest or longest path
    '''
    self.points = points
    self.dmatrix = cdist(points, points)
    self.N = points.shape[0]
    self.findShortest = findShortest
    self.best_path = path(np.random.permutation(self.N), self.dmatrix,
                          findShortest=findShortest)
    self.best_path_length = self.best_path.d
    self.best_fitness = self.best_path.f
    self.log_dir = './saved/'
    self.prob = prob
    self.run_idx = run_idx
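# The ``path`` class these TSP snippets construct is not shown in this
# collection; a minimal sketch consistent with its usage (``.order``,
# ``.d`` for tour length, ``.f`` for fitness, negated when searching for
# the shortest tour) could look like this -- the real code may differ:
import numpy as np

class path:
    def __init__(self, order, dmatrix, findShortest=True):
        self.order = np.asarray(order)
        # tour length, including the closing edge back to the start
        self.d = float(dmatrix[self.order, np.roll(self.order, -1)].sum())
        # fitness is maximized: negate the length when a short tour is wanted
        self.f = -self.d if findShortest else self.d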
def mitre_expdb_exp():
    """Extract CVE exploit labels from cve.mitre.org to supplement the
    'Exploit' resource tags in nvd.nist.

    Update strategy: full refresh, returning the complete data set.

    :return cve_exp:
        :type: dict
        :value: {'cve-id': 'edb-id'}
    """
    hfile = path('../data/nvd', time_delta() + 'source-EXPLOIT-DB.html')
    if not os.path.exists(hfile):
        r = requests.get(
            'https://cve.mitre.org/data/refs/refmap/source-EXPLOIT-DB.html')
        html = r.content
        with codecs.open(hfile, 'wb') as f:
            f.write(html)
    cve_exp = dict()
    if os.path.exists(hfile):
        with codecs.open(hfile, 'rb') as f:
            soup = BeautifulSoup(f, 'html.parser')
        for tr in soup.find_all('tr'):
            exp_db = ''
            cve = ''
            for td in tr.find_all('td'):
                t = str(td)
                if re.search(r'EXPLOIT-DB:(\d+)', t):
                    r = re.search(r'EXPLOIT-DB:(\d+)', t)
                    exp_db = r.group(1)
                elif re.search(r'(CVE-[\d]+-[\d]+)', t):
                    cve = re.findall(r'(CVE-[\d]+-[\d]+)', t)
                else:
                    continue
            if exp_db and cve:
                if isinstance(cve, list):
                    for c in cve:
                        cve_exp[c] = exp_db
                else:
                    cve_exp[cve] = exp_db
    return 'cve.mitre.org', cve_exp
def rename_snpids_from_data(tmp_bed_files, twd=""):
    """renames snpids to chrom_pos_a1_a2"""
    new_tmp_files = []
    for fname in tmp_bed_files:
        new_name = os.path.basename(fname)
        if twd == "":
            out_path = "%s_tmp.bim" % new_name
        else:
            out_path = utils.path(twd, "%s_tmp.bim" % new_name)
        bim = BimFile(fname)
        data = bim.load_variants()
        fix_chromosome_ids(data)
        data['snpid'] = create_data_id(data)
        BimFile.write_file(out_path, data)
        new_tmp_files.append(out_path)
    return new_tmp_files
def populate(file_name, reset=0):
    path = os.path.join(utils.path(), 'iaj', 'Store')
    if not os.path.exists(path):
        os.makedirs(path)
    cd(1)
    if os.path.isfile('students.db') and not file_changed(file_name) and not reset:
        with open('students.db', 'r+') as fi:
            s = fi.read()
        students = ast.literal_eval(s)
        try:
            if students[1]['name'] in s:
                return students
            else:
                print('Bad dictionary file...\nRetrying reading from Excel file')
                return get_students(file_name)
        except KeyError:
            print('Bad dictionary file...\nRetrying reading from Excel file')
            return get_students(file_name)
    else:
        return get_students(file_name)
def test():
    pkgs = path('pkgs').abspath()
    sklearn = ('scikit-learn', 'scikit-learn')
    openalea = ('openalea', 'openalea')
    if not (pkgs / sklearn[-1]).isdir():
        pkgs.cd()
        clone(*sklearn)
        (pkgs / '..').cd()
    if not (pkgs / openalea[-1]).isdir():
        pkgs.cd()
        clone(*openalea)
        (pkgs / '..').cd()
    _tags = tags('pkgs/scikit-learn')
    print(_tags)
    _versions = list(versions('pkgs/scikit-learn'))
    return _tags, _versions
def transform_vcf_to_bed(vcf_files, twd, plink="plink"):
    """transforms the input vcf files to bed files for merging

    As plink merges at most one vcf file, we convert the vcf files to bed
    first. The new files are written to tmp_bed_files.

    Parameters
    ----------
    vcf_files : list of strings
        names of vcf files
    twd : string
        temporary working directory
    plink : string
        name of plink exe

    Returns
    -------
    tmp_bed_files : list of strings
        bed file ids of the new bed files
    """
    logging.info("starting vcf to bed transformation")
    tmp_bed_files = []
    for i, vcf_file in enumerate(vcf_files):
        tmpname = utils.path(twd, "tmp_vcf%s" % i)
        vcf.to_bed(vcf_file, bed_name=tmpname, plink=plink)
        tmp_bed_files.append(tmpname)
    logging.info("finished vcf to bed transformation")
    return tmp_bed_files
def main(self, filename):
    filename = path(filename)
    if not filename.exists():
        print('file `%s` does not exist.' % filename)
        return
    interp = self.interp
    name = self.name

    def obj(*a):
        fn = '%s/%s' % (name, len(a))
        if interp.agg_name[fn] is None:
            interp.new_fn(fn, '=')
        return interp.build(fn, *a)

    contents = open(filename).read()
    for i, x in enumerate(parse_sexpr(contents)):
        interp.emit(obj(i), todyna(x), ruleix=None, variables=None,
                    delete=False)
def CUB(self, ratio, total_ratio=1.0):
    pickle_path = utils.base_path() + "/data/datasets.pkl"
    if os.path.isfile(pickle_path):
        print("Using pickled data!")
        datasets = pickle.load(open(pickle_path, 'rb'))
        img_path = datasets["train"].imgs[0][0]
        if utils.base_path() not in img_path:
            # the pickle was built on another machine: re-root the image paths
            import re
            for phase in datasets:
                for i in range(len(datasets[phase])):
                    path, o = datasets[phase].imgs[i]
                    path = re.sub(r'^.*(data/images)', r'\1', path)
                    datasets[phase].imgs[i] = (utils.path(path), o)
        return datasets
    train_id, test_id = self.split(ratio, total_ratio)
    splits = {'train': train_id, 'test': test_id}
    datasets = {
        split: CUB_Dataset(self.path, splits[split])
        for split in ('train', 'test')
    }
    pickle.dump(datasets, open(pickle_path, 'wb'))
    print("Data loaded from disk and has been pickled!")
    return datasets
def run(self, niters, save=True):
    '''
    run a random search

    args:
        niters: # of iterations
    '''
    from utils import path
    start = time.time()
    self.fitness_hist = np.zeros(niters)
    for t in range(niters):
        # generate a new random ordering
        new_order = np.random.permutation(self.N)
        new_path = path(new_order, self.dmatrix,
                        findShortest=self.findShortest)
        # compare to the current best path and replace it if better
        if new_path.f > self.best_fitness:
            self.best_path = new_path
            self.best_fitness = new_path.f
            self.best_path_length = new_path.d
            #self.best_path.calc_length()
        # log the current best
        self.fitness_hist[t] = self.best_fitness
        if (t + 1) % 2000 == 0:
            print("\rRandom Search run {}: {}/{} evaluations complete"
                  " - best length = {}".format(self.run_idx, t + 1, niters,
                                               self.best_path_length),
                  end="")
            sys.stdout.flush()
    end = time.time()
    print('\nTotal time: {} min'.format((end - start) / 60.0))
    if save:
        s = 'short' if self.findShortest else 'long'
        # a name other than ``path`` avoids shadowing the class imported above
        prefix = self.log_dir + 'RS_{}evals_TSP{}_{}_run{}_'.format(
            niters, self.prob, s, self.run_idx)
        np.savetxt(prefix + 'fitness_hist.csv', self.fitness_hist,
                   delimiter=',')
        np.savetxt(prefix + 'best_path.csv', self.best_path.order,
                   delimiter=',')
        print('\nSaved.')
import tensorflow as tf
import numpy as np
import _pickle as pickle
from resnet import Resnet18
import utils
import os

PKL_PATH = utils.path('models/resnet-18.pkl')
INIT_CHECKPOINT_DIR = utils.path('models/init')

model_weights_temp = pickle.load(open(PKL_PATH, "rb"))

# Transpose conv and fc weights
model_weights = {}
for k, v in model_weights_temp.items():
    if len(v.shape) == 4:
        model_weights[k] = np.transpose(v, (2, 3, 1, 0))
    elif len(v.shape) == 2:
        model_weights[k] = np.transpose(v)
    else:
        model_weights[k] = v

# Build ResNet-18 models and save parameters
# Build models
print("Build ResNet-18 models")
model = Resnet18(mode="train", batch_size=32)
with model.graph.as_default():
    model.preload()
    with tf.Session(graph=model.graph) as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
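# Why the transposes above: PyTorch stores conv kernels as
# (out_ch, in_ch, kH, kW) and fc weights as (out, in), while TensorFlow
# expects (kH, kW, in_ch, out_ch) and (in, out). A quick illustrative
# shape check:
w = np.zeros((64, 3, 7, 7))  # PyTorch-style conv1 kernel
assert np.transpose(w, (2, 3, 1, 0)).shape == (7, 7, 3, 64)  # TF HWIO layout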
def merge_beds(bed_files, params, merge_file="merge.txt"):
    """merges all the bed files

    Sets up a plink command that merges all input bed files. In order to do
    so, we have to create a merge file listing all the bed files.

    Parameters
    ----------
    bed_files : list of strings
        all the bed files to merge
    params : Parameters
        parameters object
    merge_file : string
        name of the plink merge file to be created in the temporary working
        directory
    """
    logging.info("starting merging")
    final_filter = False
    if params.output_type != 'bed':
        final_filter = True
    elif params.check_reference:
        final_filter = True

    # 1. merge
    merge_file = utils.path(params.twd, merge_file)
    with open(merge_file, 'w') as handle:
        for bed_file in bed_files[1:]:
            handle.write("%s\n" % bed_file)
    flags = dict()
    flags['merge-list'] = merge_file
    flags['bfile'] = bed_files[0]
    flags['merge-mode'] = 1
    if final_filter or params.set_missing_to_reference:
        flags['out'] = utils.path(params.twd, "tmp_out")
    else:
        flags['out'] = params.out
    utils.run_plink(params.plink, flags)

    # 2. set missing to reference
    if params.set_missing_to_reference:
        flags = dict()
        flags['bfile'] = utils.path(params.twd, "tmp_out")
        if params.check_reference:
            flags['a2-allele'] = params.ref_allele
        if params.set_missing_to_reference:
            flags['fill-missing-a2'] = ''
        flags['out'] = utils.path(params.twd, "tmp_impute")
        flags['make-bed'] = ''
        utils.run_plink(params.plink, flags)

    # 3. set reference allele / final filter
    if final_filter:
        logging.info(" applying final filter ")
        filter_flags = dict()
        filter_flags['bfile'] = flags['out']
        if params.output_type == 'bed':
            filter_flags['make-bed'] = ''
        if params.output_type == 'vcf':
            filter_flags['recode'] = 'vcf-iid'
        if params.output_type == 'ped':
            filter_flags['recode'] = ''
        filter_flags['out'] = params.out
        if params.check_reference:
            filter_flags['a2-allele'] = params.ref_allele
        utils.run_plink(params.plink, filter_flags)
    logging.info("finished merging")
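# ``utils.run_plink`` is not included in this collection; a minimal sketch
# consistent with the flags-dict calls above (an assumption about the real
# helper, which may differ) would turn each key/value into a --flag pair:
import subprocess

def run_plink(plink, flags):
    cmd = [plink]
    for key, value in flags.items():
        cmd.append('--%s' % key)
        if value != '':  # empty string means a bare flag like --make-bed
            cmd.append(str(value))
    subprocess.check_call(cmd)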
def render(self, template_path, data):
    from google.appengine.ext.webapp import template
    if 'auth' not in data:
        data.update(dict(auth=utils.authdetails()))
    self.respond(template.render(utils.path(template_path), data))
def main(dynafile, browser=True):
    dynafile = path(dynafile)
    d = dynafile + '.d'
    d.mkdir_p()
    with open(d / 'index.html', 'w') as html:
        print(HEADING, file=html)
        print('<div id="dyna-source">', file=html)
        print('  <pre>', file=html)
        with open(dynafile) as f:
            original_code = f.read()
        pretty_code, offset = format_code(original_code)
        for lineno, line in enumerate(pretty_code.split('\n'), start=0):
            print('<a onclick="selectline(%s)">%s </a>'
                  % (lineno + offset, line), file=html)
        print('  </pre>', file=html)
        print('</div>', file=html)
        print('<div id="circuit-pane" style=""></div>', file=html)
        print('<div id="dopamine-pane" style=""></div>', file=html)
        print('<div id="update-handler-pane" style=""></div>', file=html)

        from dynac import dynac
        dynac(dynafile, out=d / 'plan', anf=d / 'anf',
              compiler_args=['--dump-dopini=' + d / 'dopini',
                             '--dump-dopupd=' + d / 'dopupd'])

        print('<div style="display:none;">', file=html)
        with open(d / 'anf') as f:
            rules = [circuit(x) for x in read_anf(f.read())]

        # output a map from source lines to rule index
        print('<script type="text/javascript" language="javascript">'
              'source_to_ruleix = {', file=html)
        for r in rules:
            [(_filename, bl, el)] = re.findall(
                r'(.*):(\d+):\d+-\1:(\d+):\d+', r.source_lines)
            for line in range(int(bl) - 1, int(el)):
                # these lines go to this rule
                print('  %s: %s,' % (line, r.ruleix), file=html)
        print('}; </script>', file=html)

        for g in rules:
            sty = graph_styles(g)
            svg = g.render(dynafile + '.d/rule-%s' % g.ruleix, sty)
            print('<div class="circuit-%s">%s</div>' % (g.ruleix, svg),
                  file=html)

        # find "update plans" -- every term (edge) in a rule must have code
        # to handle an update to its value.

        # -------------
        # Dopamine code
        with open(d + '/dopupd') as f:
            code = f.read()
        print('<h2>Update plans</h2>', file=html)
        for (ruleix, x, block) in re.findall(
                r';; .*? ruleix=(\d+) (.*)\n([\w\W]+?)(?=;;)', code):
            print('<div class="dopamine-%s"><h3>Update %s</h3><pre>%s</pre></div>'
                  % (ruleix, x, block.strip()), file=html)
        with open(d + '/dopini') as f:
            code = f.read()
        print('<h2>Initialization plans</h2>', file=html)
        for (ruleix, block) in re.findall(
                r';; .*? ruleix=(\d+) .*\n([\w\W]+?)(?=;;)', code):
            print('<div class="dopamine-%s"><h3>Initializer</h3><pre>%s</pre></div>'
                  % (ruleix, block.strip()), file=html)

        # ----------------
        # Python code
        with open(d + '/plan') as f:
            code = f.read()
        print('<h2>Update code</h2>', file=html)
        for block in re.split(r'\n\s*\n', code):
            x = re.findall(r'RuleIx: (\d+)\n', block)
            if not x:
                continue
            [ruleix] = x
            lexer = get_lexer_by_name("python", stripall=True)
            formatter = HtmlFormatter(linenos=False)
            print('<div class="handler-%s"><pre>%s</pre></div>'
                  % (ruleix, highlight(block, lexer, formatter)), file=html)
        print('</pre>', file=html)
        print('</div>', file=html)
    if browser:
        webbrowser.open(html.name)
def cd(a=0):
    if a == 0:
        path = os.path.join(utils.path(), 'Input')
    elif a == 1:
        path = os.path.join(utils.path(), 'iaj')
    os.chdir(path)
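# ``utils.path()`` in these snippets acts as an app-root helper that also
# joins any extra parts; a plausible definition (an assumption, not the
# project's actual code) is:
import os

BASE_DIR = os.path.dirname(os.path.abspath(__file__))

def path(*parts):
    # with no arguments this returns the base directory itself
    return os.path.join(BASE_DIR, *parts)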
order to run the 3rd option. If this option is not run, then no data can be
written in the Excel file.

Third option reads your CPI and SPI from the updated database and saves the
information as an Excel file in the Output/ folder.

Note: The app creates required files and databases in the iaj/ folder for
proper functioning. Please don't delete those files.

Let's start:
"""

os.chdir(utils.path())
url = 'http://ctengg.amu.ac.in/result_btech.php'

# Let's Go!
print(welcome)
def time_hml(table='nvd_cve',
             column='CVE_Items_impact_baseMetricV2_severity',
             time='2020'):
    """Line chart of high/medium/low-severity CVE counts over the years."""
    od_high = {}
    od_mid = {}
    od_low = {}
    od_all = {}
    # ``time`` is a string: a 2-char value walks every year since 1999,
    # a 4-char year breaks that year down by month
    if len(time) == 2:
        year = int(time_op(delta=0, format="%Y")) - 1999 + 1
        for i in range(year):
            time = 1999 + i
            od = query_timeline(table='nvd_cve', column=column, time=time)
            od_high[str(time)] = od['HIGH']
            od_mid[str(time)] = od['MEDIUM']
            od_low[str(time)] = od['LOW']
            od_all[str(time)] = od['HIGH'] + od['MEDIUM'] + od['LOW']
    elif len(time) == 4:
        if time < time_op(delta=0, format="%Y"):
            month = 12
        else:
            month = time_op(delta=0, format="%Y%m")[4:]
        for i in range(1, int(month) + 1):
            if i < 10:
                i = '0' + str(i)
            time1 = time + '-' + str(i)
            od = query_timeline(table='nvd_cve', column=column, time=time1)
            if 'HIGH' not in od.keys():
                od['HIGH'] = 0
            if 'MEDIUM' not in od.keys():
                od['MEDIUM'] = 0
            if 'LOW' not in od.keys():
                od['LOW'] = 0
            od_high[str(time1)] = od['HIGH']
            od_mid[str(time1)] = od['MEDIUM']
            od_low[str(time1)] = od['LOW']
    else:
        return
    print(od_all, od_high, od_mid, od_low)
    plt.rcParams['font.sans-serif'] = ['SimHei']  # render Chinese glyphs correctly
    plt.rcParams['font.family'] = 'sans-serif'
    plt.rcParams['axes.unicode_minus'] = False  # render minus signs on the axes
    plt.plot(list(od_all.keys()), list(od_all.values()), label='ALL')
    plt.plot(list(od_high.keys()), list(od_high.values()), label='HIGH')
    plt.plot(list(od_mid.keys()), list(od_mid.values()), label='MEDIUM')
    plt.plot(list(od_low.keys()), list(od_low.values()), label='LOW')
    plt.xticks(rotation=45)
    plt.yticks(rotation=30)
    plt.legend()
    plt.xlabel('Year')
    plt.ylabel('Type')
    plt.title('漏洞趋势')
    if not os.path.exists(path('data/img')):
        os.mkdir(path('data/img'))
    plt.savefig(
        path('data/img',
             '{time}_{table}_{column}.png'.format(time=time, table=table,
                                                  column=column)))
    plt.show()
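# Example invocation (note that after the fix above ``time`` must be a
# string: a 4-char year plots that year month by month, a 2-char value
# walks every year since 1999):
time_hml(table='nvd_cve', time='2014')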
import os

from utils import path

dotdynadir = path('~/.dyna').expand()
if not dotdynadir.exists():
    dotdynadir.mkdir()

dynahome = path(os.getenv('DYNAHOME', '.'))
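# The ``path`` objects in the dyna snippets look like the old ``path.py``
# library, where paths subclass ``str`` and support ``/`` joining; a rough
# stand-in covering the methods used here (an assumption, not the project's
# real dependency) is:
import os

class path(str):
    def __truediv__(self, other):
        return path(os.path.join(self, other))
    __div__ = __truediv__  # Python 2 spelling

    def expand(self):
        return path(os.path.expandvars(os.path.expanduser(self)))

    def exists(self):
        return os.path.exists(self)

    def mkdir(self):
        os.mkdir(self)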
import sys
import copy
import time
from threading import Thread, Semaphore
from s2sphere import LatLng
from datetime import datetime

import utils
import consts

sys.path.append(utils.path(__file__, '../../modules/PokemonGoMap'))

import pogom.search
import pogom.utils
from pogom.models import Pokemon


class Searcher():
    def __init__(self, location, handler, step):
        self.location = location
        self.step = step
        self.handler = handler

    def search(self):
        args = copy.copy(pogom.utils.get_args())
        args.location = self.location.to_string()
        args.step_limit = self.step
        num_steps = args.step_limit
        total_steps = (3 * (num_steps ** 2)) - (3 * num_steps) + 1
        position = (self.location.latitude, self.location.longitude, 0)
def test_path(self):
    racer = WikiRacer("test_start_url", "test_end_url")
    racer.child_parent_urls = {'url2': 'url1',
                               'url3': 'url1',
                               'url4': 'url2',
                               'url5': 'url2',
                               'url6': 'url3',
                               'url1': 'test_start_url',
                               'test_end_url': 'url6'}
    self.assertEqual(utils.path(racer, 'test_end_url'),
                     ['test_start_url', 'url1', 'url3', 'url6',
                      'test_end_url'])
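# A ``utils.path`` consistent with this test (a sketch inferred from the
# expected value, not the project's actual code; ``racer.start_url`` is an
# assumed attribute name) walks child_parent_urls from the end URL back to
# the start and reverses the chain:
def path(racer, end_url):
    chain = [end_url]
    while chain[-1] != racer.start_url:
        chain.append(racer.child_parent_urls[chain[-1]])
    return list(reversed(chain))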
def run(self, population_size=100, n_gens=1000, p_cross=0.7, p_mut=0.5,
        roulette=False):
    '''
    run the algorithm for n_gens evaluations

    args:
        population_size: size of population
        n_gens: number of evaluations to run
        p_cross: the crossover probability
        p_mut: the mutation probability
    '''
    population = []
    # initialize each chromosome as a random permutation of the points
    # (by index)
    for _ in range(population_size):
        population.append(path(np.random.permutation(self.N), self.dmatrix,
                               findShortest=self.findShortest))
    self.fitness_hist = []
    self.best_path = None
    gen = 0
    self.best_fit = max([x.f for x in population])
    self.best_path_length = (-1.0 * self.best_fit if self.findShortest
                             else self.best_fit)
    fitness_convergence = []
    total_start = time.time()
    gen_start = time.time()
    while gen < n_gens:
        # population fitness
        fitness_raw = np.asarray([x.f for x in population])
        # record population convergence
        cnum = -12.56 if self.prob == 1 else -30.0
        fitness_convergence.append(
            sum(fitness_raw > cnum) / float(population_size))
        # normalize fitness values to use as a valid probability dist
        fitness_norm = fitness_raw / np.sum(fitness_raw)
        gen_best_idx = np.argmax(fitness_raw)
        gen_best_fit = fitness_raw[gen_best_idx]
        gen_best_path = population[gen_best_idx]
        gen_best_length = (-1.0 * gen_best_fit if self.findShortest
                           else gen_best_fit)
        gen_mean_fit = np.mean(fitness_raw)
        if gen_best_fit > self.best_fit:
            self.best_path = gen_best_path
            self.best_path_length = gen_best_length
            self.best_fit = gen_best_fit
        # repeat until population_size offspring have been created
        new_population = [gen_best_path]  # elitism
        while len(new_population) < population_size:
            if roulette:
                # roulette wheel selection:
                parent1_idx, parent2_idx = np.random.choice(
                    np.arange(population_size), size=2, replace=True,
                    p=fitness_norm)
                parent1 = population[parent1_idx]
                parent2 = population[parent2_idx]
            else:
                # tournament selection:
                k = 24
                tourn1 = np.random.choice(np.arange(population_size),
                                          size=k, replace=False)
                tourn1 = [population[i] for i in tourn1]
                parent1 = max(tourn1, key=lambda p: p.f)
                tourn2 = np.random.choice(np.arange(population_size),
                                          size=k, replace=False)
                tourn2 = [population[i] for i in tourn2]
                parent2 = max(tourn2, key=lambda p: p.f)
            # crossover parents with probability p_cross
            do_cross = random.random()
            if do_cross < p_cross:
                child1o, child2o = cross_over(parent1.order, parent2.order)
            else:
                child1o, child2o = (parent1.order, parent2.order)
            # mutate offspring
            r1, r2 = np.random.rand(2)
            if r1 < p_mut:
                child1o = mutate(child1o)
            if r2 < p_mut:
                child2o = mutate(child2o)
            # add offspring to new population
            child1 = path(child1o, self.dmatrix,
                          findShortest=self.findShortest)
            child2 = path(child2o, self.dmatrix,  # was child1o: evaluate the second child
                          findShortest=self.findShortest)
            gen += 2
            self.fitness_hist += [gen_best_fit, gen_best_fit]
            best_p = parent2 if parent2.f > parent1.f else parent1
            best_c = child1 if child1.f > child2.f else child2
            new_population.append(best_c)
            new_population.append(best_p)
            # break off
            if len(new_population) > population_size:
                new_population = new_population[:population_size]
        if gen % 100 == 0:
            gen_end = time.time()
            print('\rGA run {} evaluation {:d}/{:d}: best fitness = {:.4f},'
                  ' mean fitness = {:.9f},'.format(
                      self.run_idx, gen, n_gens, gen_best_fit, gen_mean_fit)
                  + ' overall best path = {:.3f}; {:.3f} secs/gen'.format(
                      self.best_path_length, (gen_end - gen_start) / 100.0),
                  end="")
            sys.stdout.flush()
            gen_start = time.time()
        population = new_population
    total_end = time.time()
    print('\nSaving results...')
    s = 'short' if self.findShortest else 'long'
    prefix = 'saved/GA2_{}evals_TSP{}_{}_run{}'
    np.savetxt(prefix.format(n_gens, self.prob, s, self.run_idx)
               + '_fitness_hist.csv',
               np.asarray(self.fitness_hist), delimiter=',')
    np.savetxt(prefix.format(n_gens, self.prob, s, self.run_idx)
               + '_best_path.csv',
               np.asarray(self.best_path.order), delimiter=',')
    np.savetxt(prefix.format(n_gens, self.prob, s, self.run_idx)
               + '_convergence.csv',
               np.asarray(fitness_convergence), delimiter=',')
    print('Done. \nTotal training time: {:.3f} min'.format(
        (total_end - total_start) / 60.0))
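# ``mutate`` and ``cross_over`` are defined elsewhere; for permutation
# encodings a common choice (an assumption about this code, not a quote
# from it) is a two-point swap mutation:
def mutate(order):
    order = order.copy()
    i, j = np.random.choice(len(order), size=2, replace=False)
    order[i], order[j] = order[j], order[i]
    return order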
""" Simple test for liquid VM foo: healthy_head goo: master hoo : master """ from utils import sh, path import itertools import task3 import git d = task3.universe_versions() universe = d.keys() curdir = path(".").abspath() pkg_dir = path("simple") branch_names = task3.branch_names() cmd = "python simple/test.py" error_file = "error.txt" def checkout(pkg, commit): d = pkg_dir / pkg d.chdir() commit = str(commit)