def check_unused():
    """Load the unused-IP list from the work directory and run a check on it."""
    with open(work_dir + 'unused.txt') as handle:
        addresses = [entry.strip() for entry in handle]
    ch.check(ch.concat_ips(addresses))
def do_total_ext(out_file):
    """Merge every CSV under constant.total_file_path into one dict.

    Args:
        out_file: unused here; kept for interface compatibility.

    Returns:
        dict with 'header' (shared CSV header) and 'data' (all rows).
    """
    total_dict = {'data': []}
    seen_keys = set()  # O(1) duplicate detection instead of a list scan
    for name in os.listdir(constant.total_file_path):
        if name.find('.csv') == -1:
            continue
        data = parse_system_file.parse_system_file_ext(constant.total_file_path + name)
        if 'header' not in total_dict:
            total_dict['header'] = data['header']
        # Every file must share the same header as the first one.
        check.check(
            utils.to_string(total_dict['header']) != utils.to_string(data['header']),
            "Header missmatch in {}".format(name))
        for info in data['data']:
            key = info['卡片编号']
            check.check(key in seen_keys, "{} {} 卡片编号 重复".format(name, key))
            seen_keys.add(key)
            total_dict['data'].append(info)
    # Fix: the original printed the scrapped (报废) count in the in-use (在用)
    # slot and vice versa; the counts now match their labels.
    scrapped = sum(1 for row in total_dict['data'] if row['使用状态'] == '报废')
    in_use = len(total_dict['data']) - scrapped
    print("do_total_ext {}, 总共: {}, 在用: {}, 报废: {}".format(
        constant.total_file_path, len(total_dict['data']), in_use, scrapped))
    return total_dict
def main(self):
    """Show the banner and main menu, then dispatch the chosen action.

    Fix: the Exit entry was labelled '3' but the handler below only accepts
    'e' (matching the sibling menu elsewhere in the project), so Exit was
    unreachable from the displayed menu. The label now matches the handler.
    """
    print(red + """ dBBBBBb dBBBBBb dBBBBP`Bb .BP dBP dBP dBBBBBb dBBBP dBBBBBb dBBBBBb dBBBP dBBBBBb dB' dBP dBP.BP .BP dBP dBP BB dB' dBP dBBBP' dBBBBK dBP.BP dBBK dBP dBBBBK' dBBP dBP BB dBBBP' dBBP dBBBBK dBP dBP BB dBP.BP dB' dBP dBP BB dBP dBP BB dBP dBP dBP BB dBP dBP dB' dBBBBP dB' dBP dBP dBP dB' dBBBBP dBBBBBBB dBP dBBBBP dBP dB' v1.1""")
    print(blue + 'by Xenex\n')
    m = get('Main Menu\n' +
            red + '[' + blue + '1' + red + '] - ' + white + 'Scrape\n' +
            red + '[' + blue + '2' + red + '] - ' + white + 'Check\n' +
            red + '[' + blue + 'e' + red + '] - ' + white + 'Exit\n')
    if m == '1':
        os.system('cls')
        scrape.scrape()
    elif m == '2':
        os.system('cls')
        check.check()
    elif m == 'e':
        os.system('cls')
        sys.exit(1)
    else:
        os.system('cls')
        error('Input not recognised. Please retype and try again.')
        self.main()
def run_module_all(module, inputs, reportfile):
    """Run `module` against every input, mirroring results to reportfile.

    Returns the number of inputs solved correctly.
    """
    correct = 0
    for idx, (path, data_text) in enumerate(sorted(inputs), 1):
        base = os.path.basename(path)
        print(f"test {idx:02d}:", base)
        print(f"test {idx:02d}:", base, file=reportfile)
        try:
            soln_text = run_module_once(module, path)
            # TODO: Catch different types of validation issues
            check(data_text, soln_text, verbose=False)
            # TODO: Print some kind of report, and total, for the student
            correct += 1
            outcome = "solved!"
        except TimesUp:
            outcome = "TIMEOUT"
        except NonZeroExit as exc:
            outcome = f"ERRORED {exc}"
        except Exception as exc:
            outcome = f"INVALID {exc}"
        print("↪", outcome)
        print("↪", outcome, file=reportfile)
    ratio = f"{correct} / {len(inputs)} ({correct / len(inputs):.2%})"
    print(module, "summary:", ratio)
    print("summary:", ratio, file=reportfile)
    return correct
def balance(graph): '''parame: graph, a DAG its.__class__ == nx.DiGraph return: r, removed edges set so makr the input graph a b-structure ''' # 只处理整数形式的图,每一个整数对应的节点可以在后面查到 # 输入进来的图应该是连通的,如果存在非连通图,minimum_edge_cut就会产生问题 assert nx.is_directed_acyclic_graph(graph),\ "The target graph you want to banlance is not a DAG" r = [] # removed set if check(graph): return r #非B-Stucture时,一直循环下去 # BUGY: 如果cs为空呢,那么不可能有两个图返回来,这时候怎么办 print "\nCutting Graph" cs, g1, g2 = cut(graph) r = balance(g1) + balance(g2) + cs csl = [] for eachEdge in cs: under_check_graph = graph.copy() under_check_graph.remove_edges_from(r) under_check_graph.add_edges_from(csl) under_check_graph.add_edge(eachEdge[0], eachEdge[1]) if check(under_check_graph): print "Edge: %s added back" % str(eachEdge) csl.append(eachEdge) graph.add_edge(eachEdge[0], eachEdge[1]) for eachEdge in csl: r.remove(eachEdge) print "Removed Edge Set: %s" % str(r) return r
def main():
    """Run a study: compute Floyd.GPU over chosen graphs, persist each
    result under ./results/<name>/, then verify with Check.check.

    On any failure (or if the project already exists) offers to delete the
    project directory.
    """
    try:
        os.mkdir("./results")
    except OSError:
        pass  # results root already exists
    name = input("Study Name: ")
    try:
        os.mkdir("./results/{}".format(name))
        Gs, higher = Data.choose(LIMIT)
        results = []
        for n in range(len(Gs)):
            results.append(Floyd.GPU(Gs[n], higher))
            # Context manager guarantees the file is closed even on error.
            with open("./results/{}/{}.txt".format(name, n), "w") as out:
                out.write("Project {} #{} -- Size: {}\n\n".format(
                    name, n, min(LIMIT, len(results[n][0]))))
                out.write(str(results[n][0]).replace("], ", "],\n"))
        Check.check(results)
    except Exception:
        print("An error ocurred or project already exists")
        if (int(
                input(
                    "Would you like to delete the project with the name '{}'? |0|1|: "
                    .format(name)))):
            # shutil.rmtree removes the directory even when it already holds
            # result files; os.rmdir only works on empty directories.
            import shutil
            shutil.rmtree("./results/{}".format(name))
    return
def balance(graph): '''parame: graph, a DAG its.__class__ == nx.DiGraph return: r, removed edges set so makr the input graph a b-structure ''' # 只处理整数形式的图,每一个整数对应的节点可以在后面查到 # 输入进来的图应该是连通的,如果存在非连通图,minimum_edge_cut就会产生问题 assert nx.is_directed_acyclic_graph(graph),\ "The target graph you want to banlance is not a DAG" r = [] # removed set if check(graph): return r #非B-Stucture时,一直循环下去 # BUGY: 如果cs为空呢,那么不可能有两个图返回来,这时候怎么办 print "\nCutting Graph" cs, g1, g2 = cut(graph) r = balance(g1) + balance(g2) + cs csl = [] for eachEdge in cs: under_check_graph = graph.copy() under_check_graph.remove_edges_from(r) under_check_graph.add_edges_from(csl) under_check_graph.add_edge(eachEdge[0],eachEdge[1]) if check(under_check_graph): print "Edge: %s added back" % str(eachEdge) csl.append(eachEdge) graph.add_edge(eachEdge[0],eachEdge[1]) for eachEdge in csl: r.remove(eachEdge) print "Removed Edge Set: %s" % str(r) return r
def main(self):
    """Display the banner and main menu, then dispatch the user's choice."""
    print(red + ''' _____ _ __ _ | __ \\ | |/ / | | | |__) | __ _____ ___ _| ' / ___| | _____ _ __ | ___/ '__/ _ \\ \\/ / | | | < / _ \\ |/ / _ \\ '__| | | | | | (_) > <| |_| | . \\ __/ < __/ | |_| |_| \\___/_/\\_\\\\__, |_|\\_\\___|_|\\_\\___|_| __/ | |___/ \n''')
    print(blue + 'by Nexolyte\n')
    choice = get('Main Menu\n' +
                 red + '[' + blue + '1' + red + '] - ' + white + 'Scrape\n' +
                 red + '[' + blue + '2' + red + '] - ' + white + 'Check\n' +
                 red + '[' + blue + 'e' + red + '] - ' + white + 'Exit\n')
    # Every branch clears the screen first, so do it once up front.
    os.system('cls')
    if choice == '1':
        scrape.scrape()
    elif choice == '2':
        check.check()
    elif choice == 'e':
        sys.exit(1)
    else:
        error('Input not recognised. Please retype and try again.')
        self.main()
def process():
    """Run the daily self-check, skipping it on weekends (weekday 5/6)."""
    print('## 자동 자가진단 process 시작')
    day_name = return_korean_weekday(weekday)
    print('오늘의 요일: ' + day_name)
    if weekday in (5, 6):
        print('오늘은 ' + day_name + '요일입니다. 자가진단을 생략합니다.')
    else:
        check()
def restart(a):
    """Run a tic-tac-toe game loop on a fresh board.

    Args:
        a: which player moves first (1 for p1/'X', otherwise p2/'O').

    NOTE(review): relies on module-level names score1, score2, score_tie,
    playagain, p1, p2 and on check.check to detect game end -- confirm
    against the rest of the module. The original source was corrupted
    (stray diff '+' markers, a missing ':', board['71'], unquoted
    os.system(cls), and a duplicated copy of the whole loop); this is the
    reconstructed single loop.
    """
    total_moves = 0
    board = {str(cell): '' for cell in range(1, 10)}

    def _draw():
        # Repaint the 3x3 grid.
        os.system("cls")
        print(board['1'] + '|' + board['2'] + '|' + board['3'])
        print('-+-+-')
        print(board['4'] + '|' + board['5'] + '|' + board['6'])
        print('-+-+-')
        print(board['7'] + '|' + board['8'] + '|' + board['9'])
        print('-+-+-')

    while True:
        _draw()
        check.check(total_moves, score1, score2, score_tie, playagain, board, p1, p2)
        while True:
            if a == 1:
                move = input(f"its {p1}'s chance..\n where do you want to place your move:")
                if move.upper() in board and board[move.upper()] == "":
                    board[move.upper()] = 'X'
                    a = 2
                    break
                print("invaild inpt \n enter again. ")
            else:
                move = input(f"its {p2}'s chance..\n where do you want to place your move:")
                if move.upper() in board and board[move.upper()] == "":
                    board[move.upper()] = 'O'
                    a = 1
                    break
                print("invaild input")
        total_moves += 1
def run_exercise(tmpdir):
    """Run the student and teacher code.

    `tmpdir` is the path to a directory containing "exercise.py" and
    "check.py", which will be imported to execute them.

    Returns a dictionary `results`. results['stdout'] is the stdout of the
    exercise. results['checks'] is a list of dicts::

        [ {'status':'OK', 'expect':'You should have a variable named a'),
          {'status':'FAIL', 'expect':'a should equal 17', 'did':'Your a equals 43'}, ]

    `status` is one of 'OK', 'FAIL', 'EXCEPTION', or 'ERROR'. 'OK' means
    the expectation was met, 'FAIL' means it wasn't met, 'EXCEPTION' means
    an exception was encountered in the student's code, and 'ERROR' means
    an exception was encountered in the teacher's code. `expect` is the
    text of the `expect` call, what was expected. `did` is a message about
    what actually happened. `traceback` is a list of tuples, the traceback,
    if any, in the form produced by `traceback.extract_tb`.
    """
    results = {'stdout': '', 'checks': []}
    # Capture stdout, pin sys.path, and isolate module state so the
    # student's import has no lasting side effects.
    with patchattr(sys, 'stdout', StringIO()) as stdout:
        with patchattr(sys, 'path', ['.'] + sys.path):
            with isolated_modules():
                c = Checker()
                try:
                    import exercise
                except SystemExit:
                    # The user code called sys.exit(), it's ok.
                    pass
                except Exception:
                    c.add_result('EXCEPTION', exc=sys.exc_info(), skip_frames=1)
                else:
                    # Student code imported cleanly: run the teacher checks.
                    try:
                        t = Trial(module=exercise, stdout=stdout.getvalue())
                        import check
                        try:
                            check.check(t, c)
                        except c.Done:
                            pass
                    except Exception:
                        # Something went wrong in the checking code.
                        c.add_result('ERROR', exc=sys.exc_info())
                finally:
                    # Collect output while the captured stdout is still live.
                    results['stdout'] = stdout.getvalue()
                    results['checks'] = c.results
    return results
def main():
    """Solve A2 x = b2, then report the determinant and inverse of A2."""
    from constants import A2, b2
    x, U = solve(A2, b2)
    print("Roots: {}".format(x))
    check(A2, x, b2)
    print()
    print("Determinant: {}".format(determinant(U)))
    print()
    print("Inverse matrix:")
    gk.print_matrix_precise(inverse(A2, U))
def main():
    """Solve A1 x = b1, then report the determinant and inverse of A1."""
    from constants import A1, b1
    x = solve(A1, b1)
    print("Roots: {}".format(x))
    check(A1, x, b1)
    print()
    print("Determinant: {}".format(determinant(A1)))
    print()
    print("Inverse matrix:")
    print_matrix_precise(inverse(A1))
def create_boss():
    """ Create the level2 BOSS enemy (smart bullets) """
    if config.B != "":
        # Boss already exists: run its collision check and occasionally shoot.
        try:
            check.check(config.B.x_pos, config.B.y_pos, "Boss")
            if randint(0, 5) == 1:
                config.B.shoot(config.M.x_pos)
        except config.MarioAbove:
            config.STAGE = "won"
    else:
        config.B = obstacle.Boss(common.R6, common.MIDS_R)
def main(options): """ Main function for the build command. Inputs: options[argparse options]: Complete options from argparse, see MooseDocs/main.py """ # Make sure "large_media" exists in MOOSE _init_large_media() # Create translator translator, _ = common.load_config(options.config) if options.destination: translator.update(destination=mooseutils.eval_path(options.destination)) translator.init() # Replace "home" with local server if options.serve: home = 'http://127.0.0.1:{}'.format(options.port) translator.renderer.update(home=home) elif options.home: translator.renderer.update(home=options.home) # Dump page tree if options.dump: print translator.root # Clean when --files is NOT used or when --clean is used with --files. if ((options.files == []) or (options.files != [] and options.clean)) \ and os.path.exists(translator['destination']): log = logging.getLogger('MooseDocs.build') log.info("Cleaning destination %s", translator['destination']) shutil.rmtree(translator['destination']) # Perform check if options.check: check(translator) # Perform build if options.files: for filename in options.files: node = translator.root.findall(filename)[0] node.build() else: translator.execute(options.num_threads) if options.serve: watcher = MooseDocsWatcher(translator, options) server = livereload.Server(watcher=watcher) server.serve(root=translator['destination'], host=options.host, port=options.port)
def test(self):
    """check() must report whether the exact element appears in the array."""
    cases = [
        (([66, 101], 66), True),
        (([80, 117, 115, 104, 45, 85, 112, 115], 45), True),
        ((['t', 'e', 's', 't'], 'e'), True),
        ((['what', 'a', 'great', 'kata'], 'kat'), False),
        (([66, 'codewars', 11, 'alex loves pushups'], 'alex loves pushups'), True),
        ((['come', 'on', 110, '2500', 10, '!', 7, 15], 'Come'), False),
        ((['when\'s', 'the', 'next', 'Katathon?', 9, 7], 'Katathon?'), True),
        (([8, 7, 5, 'bored', 'of', 'writing', 'tests', 115], 45), False),
        ((['anyone', 'want', 'to', 'hire', 'me?'], 'me?'), True),
    ]
    for (seq, item), expected in cases:
        self.assertEqual(check(seq, item), expected)
def main():
    """LU-solve A2 x = b2 (forward then backward substitution), then verify."""
    from constants import A2, b2
    n = len(A2)
    L, U = lu(A2)
    # Forward substitution: solve L y = b2.
    Lb = gk.append_column(L, b2)
    y = [gk.backward_left(Lb, row) for row in xrange(n)]
    # Backward substitution: solve U x = y.
    Uy = gk.append_column(U, [[e] for e in y])
    x = [gk.backward_right(Uy, row) for row in xrange(n - 1, -1, -1)]
    x.reverse()
    check(A2, x, b2)
def after_obs_bok (yr, mn, dy, run=None, path=None) : ''' A shell calls ls, check, collect, and footprint. Only for xao Standard path is /home/primefocus/data/bss/yyyymmdd Args: yr: year mn: month dy: day run: run name, default is yyyymm path: optional, root path of the day ''' # arguments default if run is None or run == "" : run = "{y:0>4d}{m:0>2d}".format(y=yr, m=mn, d=dy) if path is None or path == "" : path = "/home/primefocus/data/bss/{y:0>4d}{m:0>2d}{d:0>2d}".format(y=yr, m=mn, d=dy) # data of night class psite : pass site = psite() site.tz = -7 mjd18 = common.sky.mjd_of_night(yr, mn, dy, site) tel = "bok" # files of the day filelist = "{tel}/obsed/{run}/files.J{day:04d}.lst".format(tel=tel, run=run, day=mjd18) checkfile = "{tel}/obsed/{run}/check.J{day:04d}.lst".format(tel=tel, run=run, day=mjd18) obsedfile = "{tel}/obsed/{run}/obsed.J{day:04d}.lst".format(tel=tel, run=run, day=mjd18) repfile = "{tel}/obsed/footprint/Rep_J{mjd}.txt".format(tel=tel, mjd=mjd18) equfile = "{tel}/obsed/footprint/Equ_J{mjd}.png".format(tel=tel, mjd=mjd18) galfile = "{tel}/obsed/footprint/Gal_J{mjd}.png".format(tel=tel, mjd=mjd18) os.mkdir("{tel}/obsed/{run}".format(tel=tel, run=run, day=mjd18)) # 1. call ls os.system("ls {path}/d*.fits > {filelist}".format(path=path, filelist=filelist)) # 2. call check check(tel, yr, mn, dy, run) # 3. call collect collect(tel, yr, mn, dy, run) # 4. call footprint footprint(tel, reportfile=repfile, equfile=equfile, galfile=galfile, run=run, day=mjd18) # info print ("Send following files to SAGE Survey group:" "\n\t{filelist}" "\n\t{checkfile}" "\n\t{obsedfile}" "\n\t{equfile}" "\n\t{galfile}" "\n\t{repfile}".format( filelist=filelist, checkfile=checkfile, obsedfile=obsedfile, equfile=equfile, galfile=galfile, repfile=repfile ))
def main():
    """Find the newest snapshot, diff it via check.check, and mail the diff.

    Fix: the config and diff files were opened without being closed on the
    exception path; both are now handled with context managers.
    """
    configs_path = os.path.join(os.path.dirname(__file__), CONFIGS_NAME)
    with open(configs_path) as configs_file:
        configs = json.load(configs_file)
    data_dir = os.path.join(os.path.dirname(__file__), configs['data dir'])
    file_names = os.listdir(data_dir)
    time_format = '%Y-%m-%d %H.%M'
    # Collect snapshot timestamps (skip diff files), newest first.
    times = [
        time.strptime(name.replace('.json', ''), time_format)
        for name in file_names
        if name.endswith('.json') and not name.startswith('diff')
    ]
    times = sorted(times, reverse=True)
    file_name = time.strftime(time_format, times[0]) + '.json'
    diff_file_path = check.check(configs_path, os.path.join(data_dir, file_name))
    if diff_file_path:
        # A diff was produced: mail its contents.
        with open(diff_file_path) as diff_file:
            email.send_email(configs['mail_host'], configs['mail_user'],
                             configs['mail_pwd'], configs['to_addrs'],
                             'iOS App Store Update', diff_file.read())
def do_check(self, line):
    """Fetch messages for the stored credentials and print ID/From/Title
    for each JSON entry (Python 2)."""
    data = check.check(self.username, self.password)
    if len(data) == 1:
        # Single entry: a non-JSON payload is treated as "no messages".
        for x in data:
            try:
                d = json.loads(x)
                print """ ID: {0} From: {1} Title: {2} """.format(d['id'], d['from'], d['title'])
            except:
                print "You have no messages"
    else:
        # Multiple entries: silently skip any that are not valid JSON.
        for x in data:
            try:
                chec = json.loads(x)
            except:
                continue
            else:
                print """ ID: {2} From: {0} Title: {1} """.format(chec['from'], chec['title'], chec['id'])
def login():
    """Prompt for credentials until check.check accepts them."""
    while True:
        userinfo = main()
        if check.check(userinfo['Gn'], userinfo['Gp'],
                       userinfo['Un'], userinfo['Up']):
            return
def create_all(entrez, force=False):
    """Build template and stub wiki text for a gene entry.

    Returns None for an invalid entrez id; otherwise a dict with the
    candidate titles, their existence check, and any generated text.
    """
    results = {'titles': {}, 'checked': {}, 'template': '', 'stub': ''}
    try:
        jdocs = mgi.get_json_documents(entrez)
    except ValueError:
        # invalid entrez
        return None
    genejson = jdocs['gene_json']
    t = {
        'name': genejson['name'].capitalize(),
        'symbol': genejson['symbol'],
    }
    t['altsym'] = t['symbol'] + ' (gene)'
    t['templatename'] = 'Template:PBB/' + entrez
    checked = check([t[key] for key in t])
    # Regenerate the template when it is missing (or forced).
    if force or checked[t['templatename']] == 'missing':
        results['template'] = str(mgi.parse_json(**jdocs))
    # Create a stub only when no candidate title already exists (or forced).
    if force or not (checked[t['name']] or checked[t['symbol']] or checked[t['altsym']]):
        results['stub'] = article.create_stub(entrez)
    results['titles'] = t
    results['checked'] = checked
    return results
def sign_in():
    """Interactive sign-in; returns the authenticated role, or None.

    NOTE(review): the password-prompt/validation lines were corrupted in
    the original source (redacted as "******"); they are reconstructed
    here from the surrounding logic -- confirm check.check's signature.
    """
    username = input("enter username(enter new to sign up or q to quit): ")
    if username == 'new':
        sign_up()
    elif username == 'q':
        sys.exit()
    pa = input("enter password: ")
    role = check.check(username, pa)
    if role:
        print("Welcome," + role)
        print('\n')
        return role
    else:
        print("Invalid login!")
        print('\n')
        flag = input(
            "do you want to sign up? (enter y for sign up, q for log out, n for return to menu): "
        )
        if flag == 'y' or flag == 'Y':
            sign_up()
        elif flag == 'n' or flag == 'N':
            return
        elif flag == 'q' or flag == 'Q':
            sys.exit()
def main():
    """Log every target in, obtain a form token, then submit all responses
    in parallel.

    Fix: removed the block of commented-out hardcoded tokens -- dead code
    that also leaked form identifiers into the source.
    """
    # Make sure every target has a pickled cookie jar on disk.
    for info in targets:
        cookie_file_path = f"cookies/{info[0]}.p"
        if not os.path.isfile(cookie_file_path):
            dump_cookies(info[0])
    if len(sys.argv) == 2 and sys.argv[1] == "pre_login":
        print("Done with logins")
        return
    setup_check()
    # Poll until a form token becomes available.
    while True:
        success, token = check()
        if success:
            break
        time.sleep(0.25)
    start = time.time()
    with concurrent.futures.ThreadPoolExecutor() as executor:
        for info in targets:
            executor.submit(analyze, token, info)
    end = time.time()
    print(f"submitted {len(targets)} responses in {end - start} seconds")
def phase_1(alpha_in):
    """Phase 1 generalization loop over the seed input's regex.

    Active learning of regular right-hand side from Bastani et al.
    Each generalization step selects a single bracketed substring T[alpha]
    and generates candidates based on decompositions of alpha, i.e. an
    expression of alpha as alpha = a_1, a_2, ..a_k. Each iteration of the
    while loop corresponds to one generalization step. The code below
    follows Algorithm 1, page 3 in the paper.
    """
    # Seed input alpha_in is annotated
    rep(alpha_in)
    curr_reg = String([alpha_in], 1)
    done = False
    while not done:
        started = False
        # get_candidates supplies candidates; it is equivalent to the
        # function "ConstructCandidates()" in the paper.
        for regex in get_candidates(curr_reg):
            started = True
            if regex == NON_GENERALIZABLE:
                # No more generalizations are possible. Phase 1 is done.
                done = True
                break
            regex = linearize_rep(regex)
            exprs = list(to_strings(regex))
            ay = copy.deepcopy(regex)
            ay = linearize_rep(ay)
            var = str(get_dict(ay))
            # Skip regexes already accepted in an earlier step.
            if var in valid_regexes:
                continue
            # Memoize check outcomes keyed by the regex's string form.
            if str(regex) in regex_map:
                all_true = regex_map[str(regex)]
            else:
                all_true = all(check.check(expr, regex) for expr in exprs)
                regex_map[str(regex)] = all_true
            if all_true:
                # we found the candidate for the next generalization step
                ayy = copy.deepcopy(regex)
                var = str(get_dict(ayy))
                valid_regexes.add(var)
                curr_reg = regex
                break
        if not started:
            break
    # Before executing the Character Generalization Phase, we break strings in
    # regex into separate chars, that is, given a String-regex, we break it into
    # a Seq-regex that contains String-regexes each containing a single char.
    # This way we can systematically generalize each char/terminal/sigma_i
    # separately.
    atomized_reg = atomize(curr_reg)
    final_reg = character_generalization_phase(atomized_reg)
    return linearize_alt(final_reg)
def gen(self):
    """Generate self.gen_need exercises and answers into text files.

    Fix: the two output files were never closed on an exception path;
    both are now managed with a single with-statement (they stay open for
    the whole loop, matching the original, since check.check re-reads them).
    """
    with open('./exercise.txt', 'a+') as f, open('./answer.txt', 'a+') as f2:
        # Truncate both files so each run starts from a clean slate.
        f.seek(0)
        f2.seek(0)
        f.truncate()
        f2.truncate()
        count = 0
        while True:
            try:
                elist, answer = self.gen_combine()
            except Exception:
                # Temporary workaround: skip zero-divisor and negative cases.
                continue
            # check.check returns True when the new exercise is no duplicate.
            if check.check(elist, e_file='./exercise.txt', a_file='./answer.txt') == True:
                f.write("题目" + str(count + 1) + ": " + ' '.join(elist) + ' =\n')
                if re.search('/', answer):
                    d, n = answer.split('/')
                    if int(d) > int(n):
                        # Convert an improper fraction to mixed-number form.
                        answer = self.__to_fraction(answer)
                f2.write("答案" + str(count + 1) + ": " + answer + '\n')
                count += 1
                if count == self.gen_need:
                    break
def main():
    """Entry point: validate the backend and generator, parse the model,
    then either run the checker or the generator."""
    backend_name = f"backend.{ARGUMENTS['backend']}"
    if not backend_exists(backend_name):
        print(f"ERROR: Specified backend `{ARGUMENTS['backend']}` does not exists")
        exit(1)
    generator_name = f"{backend_name}.generators.{ARGUMENTS['format']}_generator"
    if not generator_exists(generator_name):
        print(f"ERROR: Specified generator `{generator_name}` does not exists")
        exit(1)
    generator_module = importlib.import_module(generator_name)
    # The generator contract requires a `generate` function.
    if not hasattr(generator_module, 'generate'):
        print(
            f'Module {generator_name} does not have required `generate` function')
        exit(1)
    # Generator is OK, we can continue processing the program.
    metamodel = metamodel_from_file('model.tx')
    from textx.exceptions import TextXSyntaxError
    try:
        model = metamodel.model_from_file(ARGUMENTS['filename'])
    except TextXSyntaxError as err:
        print(f'ERROR: line {err.line}, column {err.col}: {err.message}')
        exit(1)
    output = ModelParser(model).parse_model()
    # Strip a trailing extension the user may have added despite the help text.
    stem, dot, _ = ARGUMENTS['output'].rpartition('.')
    if dot:
        ARGUMENTS['output'] = stem
    if ARGUMENTS['check']:
        from check import check
        check(output, ARGUMENTS['check'])
        print('Everything OK')
    else:
        generator_module.generate(output, ARGUMENTS['output'])
def phase_1(alpha_in):
    """Active learning of a regular right-hand side (Bastani et al.).

    The idea: consider a single alternative as a string with each token a
    character, then apply regular-expression synthesis to find abstraction
    candidates. Each generalization step selects a single bracketed
    substring T[alpha] and generates candidates based on decompositions of
    alpha, i.e. an expression of alpha as alpha = a_1, a_2, ..a_k.

    Returns the first candidate regex whose sample strings all pass
    check.check, or None if none does.

    Fix: removed the statements after `return None` (regex_map dump and
    flush) -- they were unreachable dead code.
    """
    # Seed input alpha_in is annotated.
    rep(alpha_in)
    for regex in gen_rep(alpha_in):
        all_true = False
        for expr in to_strings(regex):
            # Cache check outcomes per regex in regex_map.
            if regex_map.get(regex, False):
                v = check.check(expr, regex)
                regex_map[regex] = v
                if not v:
                    # this regex failed
                    all_true = False
                    break  # one sample of regex failed. Exit
            elif regex not in regex_map:
                v = check.check(expr, regex)
                regex_map[regex] = v
                if not v:
                    # this regex failed.
                    all_true = False
                    break  # one sample of regex failed. Exit
            all_true = True
        if all_true:
            # get the first regex that covers all samples.
            return regex
    # This should never happen: at least the original input should succeed.
    return None
def do_check(self, line): addr = db.data.find("data", "all")[0]['addr'] check_ = check.check(addr) if not check_: print "You have no messages." else: for x in check_: print x
def main(options): """ Main function for the build command. Inputs: options[argparse options]: Complete options from argparse, see MooseDocs/main.py """ # Make sure "large_media" exists in MOOSE _init_large_media() # Create translator translator, _ = common.load_config(options.config) translator.init(options.destination) # Replace "home" with local server if options.serve: home = 'http://127.0.0.1:{}'.format(options.port) translator.renderer.update(home=home) elif options.home: translator.renderer.update(home=options.home) # Dump page tree if options.dump: print translator.root # Clean if options.clean: shutil.rmtree(options.destination) # Perform check if options.check: check(translator) # Perform build if options.files: for filename in options.files: node = translator.root.findall(filename)[0] node.build() else: translator.execute(options.num_threads) if options.serve: watcher = MooseDocsWatcher(translator, options) server = livereload.Server(watcher=watcher) server.serve(root=options.destination, port=options.port)
def main(options): """ Main function for the build command. Inputs: options[argparse options]: Complete options from argparse, see MooseDocs/main.py """ # Make sure "large_media" exists in MOOSE _init_large_media() # Create translator translator, config = common.load_config(options.config) translator.init(options.destination) # Replace "home" with local server if options.serve: home = 'http://127.0.0.1:{}'.format(options.port) translator.renderer.update(home=home) elif options.home: translator.renderer.update(home=options.home) # Dump page tree if options.dump: print translator.root # Clean if options.clean: shutil.rmtree(options.destination) # Perform check if options.check: check(translator, config) # Perform build if options.files: for filename in options.files: node = translator.root.findall(filename)[0] node.build() else: translator.execute(options.num_threads) if options.serve: watcher = MooseDocsWatcher(translator, options) server = livereload.Server(watcher=watcher) server.serve(root=options.destination, port=options.port)
def mail_body(self):
    """Assemble the inspection-report mail payload.

    'content_html' accepts plain text or HTML fragments; an optional
    'attachments' key (list of file paths) may be added later.
    """
    return {
        'subject': '巡视报告',
        'content_html': [check()],
    }
def consider_merging(a, b, key, cfg, start):
    """Propose merging a and b at `key` in cfg; accept the merged grammar
    only if config.P3Check fuzzed samples all pass check.check."""
    candidate = gen_new_grammar(a, b, key, cfg)
    fuzzer = fuzz.LimitFuzzer(candidate)
    for _ in range(config.P3Check):
        if not check.check(fuzzer.fuzz(start)):
            return None
    return candidate
def save(path, store):
    """Split an image into 32x32 tiles, dedupe them against the master
    sheet, and write an .rna index mapping tiles to master indices.

    Fixes: file handles are now closed via with-statements, the builtin
    `file` is no longer shadowed, `not h in` became `h not in`, and the
    dead commented-out `c = -1` line was removed.
    """
    reused_px = 0
    new_px = 0
    name = path[0:path.find('.')]
    img = cv2.imread('Images\\' + path)
    master = cv2.imread("Saves\\master.png")
    if master is None:
        master = []
    master = list(master)
    with open("Saves\\max_ind.txt", 'r') as handle:
        max_ind = int(handle.read())
    replace = ""
    for i in range(0, len(img) - 32, 32):
        for j in range(0, len(img[0]) - 32, 32):
            # Extract the 32x32 tile with top-left corner (i, j).
            tile = [row[j:j + 32] for row in img[i:i + 32]]
            c = check.check(tile, master, store)
            if c == -1:
                # Unseen tile: append to master and index it by content hash.
                h = hash(str(tile))
                if h not in store:
                    store[h] = []
                store[h].append(max_ind)
                master.extend(tile)
                replace += str(max_ind) + " "
                max_ind += 1
                new_px += 32 * 32
            else:
                reused_px += 32 * 32
                replace += str(c) + " "
        replace += "\n"
    cv2.imwrite("Saves\\master.png", np.array(master),
                [cv2.IMWRITE_PNG_COMPRESSION, 3])
    with open("New Images\\" + name + ".rna", 'w') as handle:
        handle.write(replace)
    with open("Saves\\max_ind.txt", 'w') as handle:
        handle.write(str(max_ind))
    print(name, "SAVED", reused_px, "USED", new_px)
def post(self):
    """Echo the escaped 'content' field together with check.num and
    check.check results as a small HTML page."""
    response = cgi.escape(self.request.get('content'))
    out = self.response.out
    out.write('<html><body><p>You wrote ')
    out.write(check.num(response))
    out.write(': ')
    out.write(response)
    out.write('.</p><p>')
    out.write(check.check(response))
    out.write('</p></body></html>')
def initlize():
    """Verify root privileges and environment health, then set up sys.path.

    Exits with status 100 on any failure (Python 2).
    """
    # Must run as root.
    if os.getuid() != 0:
        print "current user is not root , please change it .........."
        sys.exit(100)
    # Environment sanity check.
    if not check():
        print "....something wrong happens , please rapair it"
        sys.exit(100)
    init_syspath()
def __test_case(g):
    '''Test with a Specific graph
    '''
    print "Balancing Graph"
    r = balance(g)
    # Removing the returned edge set must leave a balanced graph.
    g.remove_edges_from(r)
    assert check(g) ,"Graph is not balanced"
    print "Find a R, and result B-structure"
    print " removed : %s" % r
    print " left edges: %s" % g.edges()
def handle_result(request):
    """Render the check-result page for the `url` query parameter.

    Fix: a request without `url` previously crashed with AttributeError on
    `.encode` (HTTP 500); it now returns a 400 like the length check does.
    """
    url = request.GET.get('url')
    if url is None:
        return web.Response(status=400, body='url parameter is required',
                            content_type='text/plain; charset=utf-8')
    # Enforce the limit on the UTF-8 byte length, not the character count.
    bytes_url = url.encode('utf-8')
    if len(bytes_url) > 250:
        return web.Response(status=400, body='url parameter is too long',
                            content_type='text/plain; charset=utf-8')
    c = yield from check(url)
    template = env.get_template('result.html')
    return web.Response(body=template.render(c=c).encode('utf-8'),
                        content_type='text/html; charset=utf-8')
def test_module_alias_cp19656():
    """Regression test for CodePlex 19656: `from check import check` must
    bind the function even though it shadows the module name."""
    stuff_mod = path_combine(testpath.public_testdir, "stuff.py")
    check_mod = path_combine(testpath.public_testdir, "check.py")
    try:
        # Write two throwaway modules into the test directory.
        write_to_file(stuff_mod, "Keys = 3")
        write_to_file(check_mod, "def check(module):\n return module.Keys")
        import stuff
        from check import check
        AreEqual(check(stuff), 3)
    finally:
        # 'nt' is the low-level os module on this (IronPython/Windows) runtime.
        import nt
        nt.unlink(stuff_mod)
        nt.unlink(check_mod)
def main():
    """Entry point: run the startup check, log, then serve the TCP server
    until interrupted (Python 2; used for data handling, logging, and
    command-line debugging)."""
    # Abort unless the environment check passes.
    if not check():
        sys.exit(100)
    write_logger('info','CSServer Check OK')
    write_logger('info','CSServer starts')
    server = ThreadedTCPServer(addr,Servers)
    try:
        server.serve_forever()
    except KeyboardInterrupt,e:  # Python 2 except syntax
        write_logger('debug', 'You cancel it!!!!!')
def possibilityTab(board):
    """Build a 9x9 table of candidate digit strings for each empty ('0')
    cell of the board, as judged by check()."""
    cTab = [[[] for _ in range(9)] for _ in range(9)]
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == '0':
                for digit in '123456789':
                    if check(board, digit, j, i) == 1:
                        cTab[i][j].append(digit)
    return cTab
def check_name(config, client):
    """Refresh the OAuth2 access token for the configured account and run
    check.check on it; returns a per-account status string ('.' when the
    count is zero)."""
    client_id, client_secret = client
    user = config['user'].encode('ascii')
    refresh_token = config['refresh_token'].encode('ascii')
    token_response = oauth2.RefreshToken(client_id, client_secret, refresh_token)
    auth_string = oauth2.GenerateOAuth2String(
        user, token_response['access_token'], base64_encode=False)
    count, print_mail = check.check(auth_string)
    if count > 0:
        return '\n' + config['user'] + '\n' + print_mail
    return '.'
def main():
    """Check each file named on the command line; returns 1 if any check
    produced output, else 0."""
    exit_status = 0
    for raw_name in sys.argv[1:]:
        filename = os.path.abspath(raw_name)
        findings = check.check(
            configuration_filename=get_configuration_base_name(filename),
            command=get_command(filename),
            filename=filename,
            verbose_file=sys.stderr)
        for line in findings:
            sys.stderr.write(line)
            exit_status = 1
    return exit_status
def test_module_alias_cp19656(self):
    """Regression test for CodePlex 19656: `from check import check` must
    bind the function even though it shadows the module name."""
    # Preserve sys.path so the append below can be undone in finally.
    old_path = [x for x in sys.path]
    sys.path.append(self.test_dir)
    stuff_mod = os.path.join(self.test_dir, "stuff.py")
    check_mod = os.path.join(self.test_dir, "check.py")
    try:
        # Write two throwaway modules into the test directory.
        self.write_to_file(stuff_mod, "Keys = 3")
        self.write_to_file(check_mod, "def check(module):\n return module.Keys")
        import stuff
        from check import check
        self.assertEqual(check(stuff), 3)
    finally:
        os.unlink(stuff_mod)
        os.unlink(check_mod)
        sys.path = old_path
def setValues(cTab, tab, x, y, ex, ey):
    """Recursively fill empty ('0') cells of tab using candidates from cTab.

    NOTE(review): newTab aliases tab (no copy is made), so the input grid
    is mutated in place; cTab's candidate lists are drained and then
    restored from `box` -- confirm both effects are intended.
    """
    newTab = []
    newTab = tab
    coX=0
    coY=0
    for i in range(y, ey):
        for j in range(x, ex):
            if newTab[i][j] == '0':
                box=[]
                # Try candidates in random order, remembering each drawn
                # value in box so cTab[i][j] can be restored afterwards.
                for k in range(len(cTab[i][j])):
                    char = random.choice(cTab[i][j])
                    cTab[i][j].remove(char)
                    box.append(char)
                    if check(tab,char,j,i) == 1:
                        newTab[i]=replaceChar(newTab[i], j, char)
                        coX=j
                        coY=i
                # If the cell got filled, recurse from the last placement.
                if newTab[i][j] != '0':
                    setValues(cTab, newTab, coX, coY, j, i)
                cTab[i][j]=box
    return newTab
if __name__ == '__main__':
    print 'Warning: make sure that caffe is on the python path!'
    # Parse key=value style command-line arguments.
    for arg in sys.argv:
        if "classifier-dir=" in arg:
            classifier_dir = os.path.abspath(arg.split('=')[-1])
        elif "symlink-dir=" in arg:
            symlink_dir = os.path.abspath(arg.split('=')[-1])
        elif "data-info=" in arg:
            data_info = os.path.abspath(arg.split('=')[-1])
    redbox = False
    if '--redbox' in sys.argv:
        redbox = True
    # Sanity check: the symlinked test files must match data_info.
    if check.check(symlink_dir, data_info) != [0,0] and not redbox:
        print 'ERROR: mismatch between test files in data_dir and data_info'
        sys.exit()
    if redbox:
        flag_val = create_redbox_data_info_etc(symlink_dir, data_info)
    PRETRAINED = get_pretrained_model(classifier_dir)
    # Reuse cached predictions when present (and the user agrees).
    already_pred = oj(data_info, PRETRAINED.split('/')[-1]+'_pred.npy')
    if os.path.isfile(already_pred) and raw_input('found %s; use? ([Y]/N) '%(already_pred)) != 'N':
        d = (np.load(already_pred)).item()
    else:
        d = classify_data(classifier_dir, symlink_dir, data_info, PRETRAINED, redbox)
    if redbox:
        d = arrange_preds_with_flag_val(d, flag_val)
def check(self):
    """Clear the current panel contents and show the 'check' screen."""
    # Drop (and destroy) every widget currently held by the main sizer.
    self.mainSizer.Clear(deleteWindows=True)
    # Build the check window and route its timer events to its close handler.
    widget = check.check( self )
    self.checkW = widget
    self.Bind( wx.EVT_TIMER, widget.zamknij, self.stoper3,self.id3 )
book.append(text) from jinja2 import Environment env = Environment() import macros env.globals.update(vars(macros)) t = env.from_string('\n\n'.join(book)) book = t.render() with open(".temp.md", 'w') as f: f.write(book) pandoc = pandoc.bake('.temp.md', f='markdown', smart=True, toc=True, standalone=True, chapters=True) pandoc(output="build/book.pdf", template="src/template") pandoc(output="build/book.html", t="html5") pandoc(output="build/book.odt") pandoc(output="build/book.md", t="markdown_github") os.unlink('.temp.md') bads = check('\n\n'.join(book)) if bads: print "The following should be addressed:" for bad_type, bad in bads: print bad_type, bad print bad_type.title(), ":", bad
def check(net, level):
    """Run the 'check' module over net when a positive check level is set.

    Note: this function deliberately shadows the module of the same name,
    hence the local import.
    """
    if level <= 0:
        return
    import check
    check.check(net, level, netchecks = netchecks)
# Grading driver for lab "ex2" (R): sources the student's ex2.R via rpy2 and
# scores rmMultipleBlanks() against two expected outputs (3 points each).
from rpy2.robjects import r as R
from check import start_log, check, run, save_grades

lab = "ex2"
language = "r"
possible = 6  # total points available for this lab
record = True # turn on when ready to record grades
g = globals()

# Open the grading log; returns the starting score (0 here).
score = start_log(lab, 0, possible, g, record, language)

# Source the student's R file.  Errors are deliberately swallowed so grading
# continues - a missing or broken file simply fails the checks below.
r_source = R['source']
try:
    m = r_source(lab+'.R')
except:
    pass

# Test 1: single string - surrounding blanks removed.
run("R('testInput = c(\" hello \")')", g)
run("R('correctOutput = c(\"hello\")')", g)
run("R('functionOutput = rmMultipleBlanks(testInput)')", g)
score += check("R('all.equal(correctOutput, functionOutput)')[0]", True, 3, g)

# Test 2: vector of strings with embedded newlines/tabs.
run("R('testInput = c(\" hello, world \", \"\\n \\tStat 133 \")')", g)
run("R('correctOutput = c(\"hello, world\", \"Stat 133\")')", g)
run("R('functionOutput = rmMultipleBlanks(testInput)')", g)
score += check("R('all.equal(correctOutput, functionOutput)')[0]", True, 3, g)

save_grades(lab, score, possible, record)
# Multi-class perceptron training on MNIST (Python 2).  The t10k files are the
# evaluation set; `amount`, `label` and `images` (the training set) are defined
# earlier in the file, outside this chunk - TODO(review): confirm.
q_label, q_amount = ImageReader.readLabels('../../data/mnist/t10k-labels.idx1-ubyte')
q_images, q_amount, row, col = ImageReader.readImages('../../data/mnist/t10k-images.idx3-ubyte')
it = int(sys.argv[1])  # number of training epochs, from the command line
# One weight column per digit class; +1 presumably for a bias term.
teta = zeros((row*col + 1, 10), dtype = float64)
best_it = -1
min_error = 1.
# Recreate an empty "out" directory for per-epoch weight dumps.
if os.path.exists("out"):
    for i in os.listdir("out"):
        fn = "out/" + i
        os.remove(fn)
    os.removedirs("out")
os.makedirs("out")
for h in range(it):
    for i in range(amount):
        dig = label[i]
        # Reinforce the true class when its score is not positive...
        if dot(teta[:, dig].T, images[:, i]) <= 0:
            teta[:, dig] = teta[:, dig] + images[:, i]
        # ...and penalize every other class that scores positive.
        for d in range(10):
            if (d != dig) and (dot(teta[:, d].T, images[:, i]) > 0):
                teta[:, d] = teta[:, d] - images[:, i]
    print "At iteration " + str(h)
    # Evaluate on the held-out t10k set and track the best epoch.
    curr_error = check.check(q_images, q_label, q_amount, row, col, teta)
    if curr_error < min_error:
        min_error = curr_error
        best_it = h
    fname = "out/teta_" + str(h);
    fprintMat(fname, teta, row*col + 1, 10)
print "Best iteration is " + str(best_it)
print "Minimal error is " + str(min_error)
print "Finished"
def main(options):
    """
    Main function for the build command.

    Inputs:
        options[argparse options]: Complete options from argparse, see MooseDocs/main.py
    """
    # Make sure "large_media" exists in MOOSE
    _init_large_media()

    # Setup executioner
    kwargs = dict()
    if options.executioner:
        kwargs['Executioner'] = {'type':options.executioner}

    # Create translator
    translator, _ = common.load_config(options.config, **kwargs)
    if options.destination:
        translator.update(destination=mooseutils.eval_path(options.destination))
    if options.profile:
        translator.executioner.update(profile=True)
    translator.init()

    # Disable slow extensions for --fast
    if options.fast:
        options.disable.append('appsyntax')
        options.disable.append('navigation')

    # Disable extensions based on command line arguments
    if options.disable:
        for ext in translator.extensions:
            if ext.name in options.disable: #pylint: disable=protected-access
                ext.setActive(False)

    # Replace "home" with local server
    if options.serve:
        for ext in translator.extensions:
            if 'home' in ext:
                ext.update(home='http://127.0.0.1:{}'.format(options.port), set_initial=True)

    # Dump page tree (Python 2 print statement) and stop without building.
    if options.dump:
        for page in translator.content:
            print '{}: {}'.format(page.local, page.source)
        sys.exit()

    # Clean when --files is NOT used or when --clean is used with --files.
    if ((options.files == []) or (options.files != [] and options.clean)) \
       and os.path.exists(translator['destination']):
        log = logging.getLogger('MooseDocs.build')
        log.info("Cleaning destination %s", translator['destination'])
        shutil.rmtree(translator['destination'])

    # Perform check
    if options.check:
        check(translator)

    # Perform build: either only the requested pages or the full content set.
    if options.files:
        nodes = []
        for filename in options.files:
            nodes += translator.findPages(filename)
        translator.execute(options.num_threads, nodes)
    else:
        translator.execute(options.num_threads)

    # Optionally serve the built site with live reload.
    if options.serve:
        watcher = MooseDocsWatcher(translator, options)
        server = livereload.Server(watcher=watcher)
        server.serve(root=translator['destination'], host=options.host, port=options.port)

    return 0
__author__='vxst'

# BUGFIX: `import time.sleep as sleep` (and likewise `import check.check as
# check`) is invalid - `import a.b as c` requires a.b to be a *module*, so
# both lines raised ImportError at startup.  Use `from ... import ...`.
from time import sleep
from check import check

# Run check() forever, once every two hours, with an increasing tick counter.
tick = 0
while True:
    check(tick)
    tick += 1
    sleep(3600 * 2)
__author__ = 'vxst'

import check

# Run the check routine for stages 0 through 7.
for stage in range(8):
    check.check(stage)
}
# NOTE(review): the brace above closes a dict literal whose opening lies
# before this chunk - do not remove it.

# DBpedia / Wikipedia extraction configuration (Polish dump, 2011-08-02).
sparql_endpoint = 'http://localhost:8890/sparql/'
articles_url = 'http://dumps.wikimedia.org/plwiki/20110802/plwiki-20110802-pages-articles.xml.bz2'
lang = 'pl'
# The English DBpedia has no language prefix; every other language does.
data_source = "http://dbpedia.org" if lang == 'en' else 'http://%s.dbpedia.org' % lang
# Project root: the parent of the directory containing this file.
main_path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..')) + '/'
#Relative paths
data_path = main_path + 'data/'
ext_path = main_path + 'ext/'
raw_articles_path = data_path + '%s/articles' % lang
wikidump_path = data_path + '%s/wiki' % lang
cache_path = main_path + 'cache/'
entities_path = cache_path + '%s/entities.pkl' % lang
synonyms_path = cache_path + '%s/synonyms.pkl' % lang
# Article cache location depends on which parser (if any) is enabled.
# `use_parser` and `parser_type` are defined outside this chunk - TODO(review).
articles_cache_path = \
    cache_path + '%s/articles_spejd/%%s' % lang if use_parser and parser_type == 'spejd' \
    else cache_path + '%s/dep_articles/%%s' % lang if use_parser and parser_type == 'dependency' \
    else cache_path + '%s/articles/%%s' % lang
candidates_cache_path = cache_path + '%s/candidates/%%s' % lang
models_cache_path = cache_path + '%s/models/%%s' % lang
results_path = main_path + 'results/%s/' % lang
tests_path = main_path + 'tests/%s/' % lang
spejd_path = ext_path + 'spejd-1.3.6'
maltparser_path = ext_path + 'maltparser-1.7.1/'

if __name__ == '__main__':
    # Running this module directly triggers the configuration self-check.
    import check
    check.check()
def runProbSolver(p_, solver_str_or_instance=None, *args, **kwargs):
    """Core OpenOpt driver: prepare problem p_, dispatch it to the chosen
    solver and post-process the outcome into an OpenOptResult.

    NOTE(review): this chunk arrived with its line structure collapsed; the
    indentation below is a reconstruction - verify against the original file.
    """
    #p = copy.deepcopy(p_, memo=None, _nil=[])
    p = p_
    if len(args) != 0: p.err('unexpected args for p.solve()')
    # A prob instance may only be solved once.
    if hasattr(p, 'was_involved'):
        p.err("""You can't run same prob instance for twice. Please reassign prob struct. You can avoid it via using FuncDesigner oosystem.""")
    else:
        p.was_involved = True

    if solver_str_or_instance is None:
        if hasattr(p, 'solver'): solver_str_or_instance = p.solver
        elif 'solver' in kwargs.keys(): solver_str_or_instance = kwargs['solver']

    # "probType:solverName" strings route through the converter machinery.
    if type(solver_str_or_instance) is str and ':' in solver_str_or_instance:
        isConverter = True
        probTypeToConvert, solverName = solver_str_or_instance.split(':', 1)
        p.solver = getSolverFromStringName(p, solverName)
        solver_params = {}
        #return converter(solverName, *args, **kwargs)
    else:
        isConverter = False
        if solver_str_or_instance is None:
            p.err('you should provide name of solver')
        elif type(solver_str_or_instance) is str:
            p.solver = getSolverFromStringName(p, solver_str_or_instance)
        else: # solver_str_or_instance is oosolver
            if not solver_str_or_instance.isInstalled:
                p.err(''' solver %s seems to be uninstalled yet, check http://openopt.org/%s for install instructions''' % (solver_str_or_instance.__name__, p.probType))
            p.solver = solver_str_or_instance
            for key, value in solver_str_or_instance.fieldsForProbInstance.items():
                setattr(p, key, value)
    p.isConverterInvolved = isConverter

    # Silence numpy warnings for the duration of the solve; restored in finally.
    old_err = seterr(all= 'ignore')

    if 'debug' in kwargs.keys():
        p.debug = kwargs['debug']

    # Attributes present on both prob and solver are handed to the solver.
    probAttributes = set(p.__dict__)
    solverAttributes = set(p.solver.__dict__)
    intersection = list(probAttributes.intersection(solverAttributes))
    if len(intersection) != 0:
        if p.debug:
            p.warn(''' attribute %s is present in both solver and prob (probably you assigned solver parameter in prob constructor), the attribute will be assigned to solver''' % intersection[0])
        for elem in intersection:
            setattr(p.solver, elem, getattr(p, elem))

    solver = p.solver.__solver__

    # Distribute remaining kwargs: solver params first, then prob attributes.
    for key, value in kwargs.items():
        if hasattr(p.solver, key):
            if isConverter:
                solver_params[key] = value
            else:
                setattr(p.solver, key, value)
        elif hasattr(p, key):
            setattr(p, key, value)
        else:
            p.warn('incorrect parameter for prob.solve(): "' + str(key) + '" - will be ignored (this one has been not found in neither prob nor ' + p.solver.__name__ + ' solver parameters)')

    if p.probType == 'EIG' and 'goal' in kwargs:
        p.err('for EIG parameter "goal" should be used only in class instance definition, not in "solve" method')

    # Per-iteration bookkeeping containers.
    p.iterValues = EmptyClass()
    p.iterCPUTime = []
    p.iterTime = []
    p.iterValues.x = [] # iter points
    p.iterValues.f = [] # iter ObjFunc Values
    p.iterValues.r = [] # iter MaxResidual
    p.iterValues.rt = [] # iter MaxResidual Type: 'c', 'h', 'lb' etc
    p.iterValues.ri = [] # iter MaxResidual Index
    p.solutions = [] # list of solutions, may contain several elements for interalg and mb other solvers
    if p._baseClassName == 'NonLin': p.iterValues.nNaNs = [] # number of constraints equal to numpy.nan
    if p.goal in ['max','maximum']: p.invertObjFunc = True

    #TODO: remove it!
    p.advanced = EmptyClass()

    p.istop = 0
    p.iter = 0
    p.graphics.nPointsPlotted = 0
    p.finalIterFcnFinished = False
    #for fn in p.nEvals.keys(): p.nEvals[fn] = 0 # NB! f num is used in LP/QP/MILP/etc stop criteria check

    p.msg = ''
    if not type(p.callback) in (list, tuple): p.callback = [p.callback]
    if hasattr(p, 'xlabel'): p.graphics.xlabel = p.xlabel
    if p.graphics.xlabel == 'nf': p.iterValues.nf = [] # iter ObjFunc evaluation number

    # Time the preparation phase and report it when noticeable.
    T = time()
    C = clock()
    p._Prepare()
    T = time() - T
    C = clock() - C
    if T > 1 or C > 1: p.disp('Initialization: Time = %0.1f CPUTime = %0.1f' % (T, C))

    # Sanity: min* limits must not exceed max* limits.
    for fn in ['FunEvals', 'Iter', 'Time', 'CPUTime']:
        if hasattr(p,'min'+fn) and hasattr(p,'max'+fn) and getattr(p,'max'+fn) < getattr(p,'min'+fn):
            p.warn('min' + fn + ' (' + str(getattr(p,'min'+fn)) +') exceeds ' + 'max' + fn + '(' + str(getattr(p,'max'+fn)) +'), setting latter to former')
            setattr(p,'max'+fn, getattr(p,'min'+fn))
    for fn in ['maxFunEvals', 'maxIter']:
        setattr(p, fn, int(getattr(p, fn)))# to prevent warnings from numbers like 1e7

    # Normalize the start point and box/linear-constraint vectors to float arrays.
    if hasattr(p, 'x0'):
        try:
            p.x0 = atleast_1d(asfarray(p.x0).copy())
        except NotImplementedError:
            p.x0 = asfarray(p.x0.tolist())
    for fn in ['lb', 'ub', 'b', 'beq']:
        if hasattr(p, fn):
            fv = getattr(p, fn)
            if fv is not None:# and fv != []:
                if str(type(fv)) == "<class 'map'>":
                    p.err("Python3 incompatibility with previous versions: you can't use 'map' here, use rendered value instead")
                setattr(p, fn, asfarray(fv).flatten())
            else:
                setattr(p, fn, asfarray([]))

    # Replace infinite bounds with implicit finite ones when the solver needs them.
    if p.solver._requiresFiniteBoxBounds:
        ind1, ind2 = isinf(p.lb), isinf(p.ub)
        if isscalar(p.implicitBounds):
            p.implicitBounds = (-p.implicitBounds, p.implicitBounds) # may be from lp2nlp converter, thus omit nlp init code
        p.lb[ind1] = p.implicitBounds[0] if asarray(p.implicitBounds[0]).size == 1 else p.implicitBounds[0][ind1]
        p.ub[ind2] = p.implicitBounds[1] if asarray(p.implicitBounds[1]).size == 1 else p.implicitBounds[0][ind2]

    # if p.lb.size == 0:
    #     p.lb = -inf * ones(p.n)
    # if p.ub.size == 0:
    #     p.ub = inf * ones(p.n)

    p.stopdict = {}

    for s in ['b','beq']:
        if hasattr(p, s):
            setattr(p, 'n'+s, len(getattr(p, s)))

    #if p.probType not in ['LP', 'QP', 'MILP', 'LLSP']:
    p.objFunc(p.x0)

    p.isUC = p._isUnconstrained()

    # The feasibility column is dropped/renamed in text output depending on solver.
    isIterPointAlwaysFeasible = p.solver.__isIterPointAlwaysFeasible__ if type(p.solver.__isIterPointAlwaysFeasible__) == bool \
        else p.solver.__isIterPointAlwaysFeasible__(p)
    if isIterPointAlwaysFeasible:
        #assert p.data4TextOutput[-1] == 'log10(maxResidual)'
        if p.data4TextOutput[-1] == 'log10(maxResidual)':
            p.data4TextOutput = p.data4TextOutput[:-1]
        # else:
        #     p.err('bug in runProbSolver.py')
    elif p.useScaledResidualOutput:
        p.data4TextOutput[-1] = 'log10(MaxResidual/ConTol)'
    if p.showFeas and p.data4TextOutput[-1] != 'isFeasible': p.data4TextOutput.append('isFeasible')
    if p.maxSolutions != 1:
        p._nObtainedSolutions = 0
        p.data4TextOutput.append('nSolutions')

    # Solvers without their own iterfcn must not use step-size stop criteria.
    if not p.solver.iterfcnConnected:
        if SMALL_DELTA_X in p.kernelIterFuncs:
            p.kernelIterFuncs.pop(SMALL_DELTA_X)
        if SMALL_DELTA_F in p.kernelIterFuncs:
            p.kernelIterFuncs.pop(SMALL_DELTA_F)

    # Densify sparse / matrix-typed linear constraints for solvers that need it.
    if not p.solver._canHandleScipySparse:
        if hasattr(p.A, 'toarray'):
            p.A = p.A.toarray()
        if hasattr(p.Aeq, 'toarray'):
            p.Aeq = p.Aeq.toarray()
    if isinstance(p.A, ndarray) and type(p.A) != ndarray: # numpy matrix
        p.A = p.A.A
    if isinstance(p.Aeq, ndarray) and type(p.Aeq) != ndarray: # numpy matrix
        p.Aeq = p.Aeq.A

    if hasattr(p, 'optVars'):
        p.err('"optVars" is deprecated, use "freeVars" instead ("optVars" is not appropriate for some prob types, e.g. systems of (non)linear equations)')

    # p.xf = nan * ones([p.n, 1])
    # p.ff = nan
    #todo : add scaling, etc
    p.primalConTol = p.contol
    if not p.solver.__name__.startswith('interalg'):
        p.contol *= ConTolMultiplier

    p.timeStart = time()
    p.cpuTimeStart = clock()

    # TODO: move it into solver parameters
    if p.probType not in ('MINLP', 'IP'):
        p.plotOnlyCurrentMinimum = p.__isNoMoreThanBoxBounded__()

    ############################
    # Start solving problem:

    if p.iprint >= 0:
        p.disp('\n' + '-'*25 + ' OpenOpt %s ' % version + '-'*25)
        pt = p.probType if p.probType != 'NLSP' else 'SNLE'
        s = 'solver: ' + p.solver.__name__ + ' problem: ' + p.name + ' type: %s' % pt
        if p.showGoal: s += ' goal: ' + p.goal
        p.disp(s)

    p.extras = {}

    try:
        if isConverter:
            pass
            # TODO: will R be somewhere used?
            #R = converter(solverName, **solver_params)
        else:
            # Final consistency check, then hand control to the solver.
            nErr = check(p)
            if nErr: p.err("prob check results: " +str(nErr) + "ERRORS!")#however, I guess this line will be never reached.
            if p.probType not in ('IP', 'EIG'): p.iterfcn(p.x0)
            # Rename the OS process while solving, when setproctitle is available.
            if hasSetproctitleModule:
                try:
                    originalName = setproctitle.getproctitle()
                    if originalName.startswith('OpenOpt-'):
                        originalName = None
                    else:
                        s = 'OpenOpt-' + p.solver.__name__
                        # if p.name != 'unnamed': s += '-' + p.name
                        setproctitle.setproctitle(s)
                except:
                    originalName = None
            else:
                p.pWarn(''' please install setproctitle module (it's available via easy_install and Linux soft channels like apt-get)''')

            solver(p)

            if hasSetproctitleModule and originalName is not None:
                setproctitle.setproctitle(originalName)
    # except killThread:
    #     if p.plot:
    #         print 'exiting pylab'
    #         import pylab
    #         if hasattr(p, 'figure'):
    #             print 'closing figure'
    #             #p.figure.canvas.draw_drawable = lambda: None
    #             pylab.ioff()
    #             pylab.close()
    #             #pylab.draw()
    #             #pylab.close()
    #             print 'pylab exited'
    #     return None
    except isSolved:
        # p.fk = p.f(p.xk)
        # p.xf = p.xk
        # p.ff = p.objFuncMultiple2Single(p.fk)
        if p.istop == 0: p.istop = 1000
    finally:
        seterr(**old_err)

    ############################
    p.contol = p.primalConTol

    # Solving finished: reconcile best point, feasibility and final values.
    if hasattr(p, '_bestPoint') and not any(isnan(p._bestPoint.x)) and p.probType != 'ODE':
        p.iterfcn(p._bestPoint)
    if p.probType != 'EIG':
        if not hasattr(p, 'xf') and not hasattr(p, 'xk'): p.xf = p.xk = ones(p.n)*nan
        if hasattr(p, 'xf') and (not hasattr(p, 'xk') or array_equal(p.xk, p.x0)): p.xk = p.xf
        if not hasattr(p, 'xf') or all(isnan(p.xf)): p.xf = p.xk
        if p.xf is nan: p.xf = p.xk = ones(p.n)*nan
        if p.isFeas(p.xf) and (not p.probType=='MINLP' or p.discreteConstraintsAreSatisfied(p.xf)):
            p.isFeasible = True
        else:
            p.isFeasible = False
    else:
        p.isFeasible = True # check it!

    p.isFinished = True # After the feasibility check above!

    if p.probType == 'MOP':
        p.isFeasible = True
    elif p.probType == 'IP':
        p.isFeasible = p.rk < p.ftol
    else:
        p.ff = p.fk = p.objFunc(p.xk)
        # walkaround for PyPy:
        if type(p.ff) == ndarray and p.ff.size == 1:
            p.ff = p.fk = asscalar(p.ff)

    if not hasattr(p, 'ff') or any(p.ff==nan):
        # Temporarily mute iterfcn while copying fk into ff.
        p.iterfcn, tmp_iterfcn = lambda *args: None, p.iterfcn
        p.ff = p.fk
        p.iterfcn = tmp_iterfcn

    if p.invertObjFunc: p.fk, p.ff = -p.fk, -p.ff
    if asfarray(p.ff).size > 1: p.ff = p.objFuncMultiple2Single(p.fk)
    #p.ff = p.objFuncMultiple2Single(p.ff)

    #if not hasattr(p, 'xf'): p.xf = p.xk
    if type(p.xf) in (list, tuple) or isscalar(p.xf): p.xf = asarray(p.xf)
    p.xf = p.xf.flatten()
    p.rf = p.getMaxResidual(p.xf) if not p.probType == 'IP' else p.rk

    # Encode infeasibility into a negative stop code.
    if not p.isFeasible and p.istop > 0:
        p.istop = -100-p.istop/1000.0
    if p.istop == 0 and p.iter >= p.maxIter:
        p.istop, p.msg = IS_MAX_ITER_REACHED, 'Max Iter has been reached'
    p.stopcase = stopcase(p)

    p.xk, p.rk = p.xf, p.rf
    if p.invertObjFunc:
        p.fk = -p.ff
        p.iterfcn(p.xf, -p.ff, p.rf)
    else:
        p.fk = p.ff
        p.iterfcn(p.xf, p.ff, p.rf)

    p.__finalize__()
    if not p.storeIterPoints: delattr(p.iterValues, 'x')

    r = OpenOptResult(p)

    #TODO: add scaling handling!!!!!!!
    # for fn in ('df', 'dc', 'dh', 'd2f', 'd2c', 'd2h'):
    #     if hasattr(p, '_' + fn): setattr(r, fn, getattr(p, '_'+fn))

    p.invertObjFunc = False

    if p.isFDmodel:
        p.x0 = p._x0

    finalTextOutput(p, r)
    if not hasattr(p, 'isManagerUsed') or p.isManagerUsed == False:
        finalShow(p)
    return r
def backup():
    """Perform an incremental ("diff") backup.

    Reads /home/user/backup.conf for a 'logfile:' entry and a 'diff:' list of
    destination directories, asks check.check() for the files that need backing
    up, rotates through the destinations, copies the files and records md5
    sums.  All progress/errors go to the log via writemessage().
    """
    config = '/home/user/backup.conf'

    # --- Log file setup ---
    raw_log = parsefile("^logfile:.*", config)
    if raw_log == None:
        print('There is an error with logfile! Check your backup.conf. Using /var/log/backup.log as default')
        # BUGFIX: the fallback used to be the *relative* path
        # 'var/log/backup.log' and was then pushed through split(':')[1],
        # which raised IndexError (no colon).  Use the absolute path directly.
        log = '/var/log/backup.log'
    else:
        # BUGFIX: extract the path from "logfile:<path>" *before* the
        # existence check below; the old code touched the literal string
        # "logfile:<path>" as a filename.
        log = raw_log.split(':', 1)[1]
    if os.path.exists(log) == False:
        os.system('touch %s' % log)

    # --- Determine the files needing backup ---
    filelist = check.check()
    if filelist == None:
        message = 'Diff backup failed due to previous errors.'
        return writemessage(log, message)
    print(len(filelist))
    if len(filelist) == 0:
        message = 'There is no new or modified files for diff backup'
        return writemessage(log, message)

    # If everything is correct, start backup.
    writemessage(log, 'Preparing for diff backup... ')

    # --- Destination paths from config: "diff:/path1 /path2 ..." ---
    bpaths = parsefile("^diff:.*", config)
    if bpaths == None:
        message = 'There is error in your backup.conf with diff:'
        return writemessage(log, message)
    bpaths = bpaths.split(":", 1)[1].split()

    # Destinations that already hold a previous diff backup.
    temp_bpaths = [bpath for bpath in bpaths if os.path.exists(bpath + '/diff')]

    lastpath = ''
    newpath = ''
    if len(temp_bpaths) == 0:
        # First run: use the first configured destination.
        newpath = bpaths[0]
    else:
        # The most recently created existing dir is the last backup point.
        datelist = [os.path.getctime(temp) for temp in temp_bpaths]
        datelist.sort(reverse=True)
        last_ctime = datelist[0]
        for temp in temp_bpaths:
            if os.path.getctime(temp) == last_ctime:
                lastpath = temp
                break

        # Compare md5 sums of candidate files against the last backup.
        md5 = findmd5(lastpath)
        hasnew = False
        with open(md5, 'r') as readfile:
            lines = readfile.read()
        for file in filelist:
            p = subprocess.Popen(['md5sum', file], stdout=subprocess.PIPE,
                                 stderr=open(log, 'a'))
            # BUGFIX: Popen stdout is bytes; decode before comparing/logging.
            out = p.stdout.read().decode()
            # BUGFIX: was re.search(out, lines) - a bytes pattern with
            # unescaped regex metacharacters; a plain substring test is the
            # intended semantics.
            if out not in lines:
                hasnew = True
                filename_only = out.split()[1]
                writemessage(log, 'A new file from last diff backup has been found: ' + filename_only)
        if hasnew == False:
            message = 'There is no differences between this and last diff backup'
            return writemessage(log, message)
        elif bpaths.index(lastpath) == len(bpaths) - 1:
            # Wrap around to the first destination.
            newpath = bpaths[0]
        else:
            newpath = bpaths[bpaths.index(lastpath) + 1]

    writemessage(log, 'Path to backup: ' + newpath)

    # --- Prepare destination dir and today's md5 manifest ---
    today = newpath + "/diff/md5-" + str(date.today())
    if os.path.exists(newpath + '/diff') == False:
        writemessage(log, 'Make a new dir for diff backup. Path: ' + newpath + '/diff')
        subprocess.call(["mkdir", "-p", newpath + '/diff'], stderr=open(log, 'a'))
    else:
        writemessage(log, 'Remove old data from backup dir...')
        subprocess.call(["rm", '-rf', newpath], stderr=open(log, 'a'))
        subprocess.call(["mkdir", "-p", newpath + '/diff'], stderr=open(log, 'a'))
    os.system("touch %s" % today)
    # NOTE: parenthesized so the concatenation happens before %-formatting.
    os.system("touch %s" % (newpath + '/diff/files.backup'))

    # --- Copy files and record their md5 sums ---
    writemessage(log, 'Starting diff backup...')
    for file in filelist:
        p = subprocess.Popen(['cp', '-av', file, newpath + '/diff'],
                             stderr=open(log, 'a'), stdout=subprocess.PIPE)
        out = p.stdout.read().decode()  # BUGFIX: decode before writing to text files
        print(out)
        with open(log, 'a') as bckfile:
            bckfile.write(out)
        with open(newpath + '/diff/files.backup', 'a') as bckfile:
            bckfile.write(out)
        if os.path.isfile(file) == True:
            subprocess.call(['md5sum', file], stdout=open(today, 'a'),
                            stderr=open(log, 'a'))
        else:
            # Directories: hash every contained file.
            for d, dirs, files in os.walk(file):
                for f in files:
                    path = os.path.join(d, f)
                    subprocess.call(['md5sum', path], stdout=open(today, 'a'),
                                    stderr=open(log, 'a'))
    message = 'Diff backup completed'
    writemessage(log, message)