def get_url(self):
    if self.env == 2:
        path = (CERT_QUERYPLAN_PARAMS_CONFIG_PATH if self.opt == 'queryplan'
                else CERT_PHRASELIST_PARAMS_CONFIG_PATH)
        url = CERT_URL
    else:
        path = (DEV_QUERYPLAN_PARAMS_CONFIG_PATH if self.opt == 'queryplan'
                else DEV_PHRASELIST_PARAMS_CONFIG_PATH)
        url = DEV_URL
    param = '&'.join(read_file(os.path.join(main.root_path, path)).split('\r\n'))
    return '%s?%s' % (url, param)

def get_headers(self):
    headers = {}
    if self.env == 2:
        path = (CERT_QUERYPLAN_HEADER_CONFIG_PATH if self.opt == 'queryplan'
                else CERT_PHRASELIST_HEADER_CONFIG_PATH)
    else:
        path = (DEV_QUERYPLAN_HEADER_CONFIG_PATH if self.opt == 'queryplan'
                else DEV_PHRASELIST_HEADER_CONFIG_PATH)
    for line in read_file(os.path.join(main.root_path, path)).split('\r\n'):
        # Split on the first '=' only; header values may themselves contain '='.
        key, _, value = line.partition('=')
        headers[key] = value
    return headers

def plot_vehicle_status():
    fig, axs = plt.subplots(3, figsize=(8, 4))
    # fig.set_size_inches(18.5, 10.5)
    plt.subplots_adjust(hspace=0.5, top=0.95, bottom=0.05)
    ax = axs[1]
    ax2 = axs[0]
    # ax = fig.gca()
    # ax2 = ax.twiny()
    ax.set_yticks([])
    ax2.set_yticks([])
    ax.set_xlim([0, 140])
    ax2.set_xlim([0, 7500])
    ax.set_xlabel("Speed (KM/H)")
    ax2.set_xlabel("RPM")
    # plt.show()
    cur_spd_rect = ax.barh(0.05, 1, color='cyan', height=0.25, edgecolor='none')
    # set_spd_rect = ax.barh(0.25, 1, color='b', height=0.05, edgecolor='none')
    rpm_rect = ax2.barh(0.6, 1, color='tomato', height=0.25, edgecolor='none')
    axamp = axs[2]
    samp = Slider(axamp, 'Set Spd', 0.0, 140.0, valinit=0)
    gear_label = plt.text(70, 3.3, " ", ha='center', va='center',
                          size='x-large', weight='bold')
    # Poll the data files and redraw until the window is closed.
    while True:
        set_spd = int(samp.val)
        write_file("set_speed.txt", set_spd)
        cur_spd_rect.patches[0].set_width(read_file('cur_speed.txt', float))
        rpm_rect.patches[0].set_width(read_file('rpm.txt', float))
        gear_label.set_text("Gear %d" % read_file('gear.txt', int))
        plt.pause(0.2)

def get_params(self):
    params = {}
    if self.env == 2:
        path = (CERT_QUERYPLAN_PARAMS_CONFIG_PATH if self.opt == 'queryplan'
                else CERT_PHRASELIST_PARAMS_CONFIG_PATH)
    else:
        path = (DEV_QUERYPLAN_PARAMS_CONFIG_PATH if self.opt == 'queryplan'
                else DEV_PHRASELIST_PARAMS_CONFIG_PATH)
    for line in read_file(os.path.join(main.root_path, path)).split('\r\n'):
        name, value = line.split('=')[:2]
        params[name] = value
    return params

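# get_url(), get_headers() and get_params() above all repeat the same
# env/opt dispatch. A minimal consolidation sketch; the helper name is
# hypothetical and not part of the original module, but it uses only the
# CERT_*/DEV_* constants already referenced above.
def _select_config_path(env, opt, cert_qp, cert_pl, dev_qp, dev_pl):
    """Return the cert or dev config path for the given env and opt."""
    if env == 2:
        return cert_qp if opt == 'queryplan' else cert_pl
    return dev_qp if opt == 'queryplan' else dev_pl

# Usage sketch inside get_params():
#   path = _select_config_path(self.env, self.opt,
#                              CERT_QUERYPLAN_PARAMS_CONFIG_PATH,
#                              CERT_PHRASELIST_PARAMS_CONFIG_PATH,
#                              DEV_QUERYPLAN_PARAMS_CONFIG_PATH,
#                              DEV_PHRASELIST_PARAMS_CONFIG_PATH)
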
def create_dedupe_views():
    deduped_lanes_view_v1 = common.read_file(f'{scriptsPath}/deduped_lanes_v1.sql')
    deduped_lanes_view_v2 = common.read_file(f'{scriptsPath}/deduped_lanes_v2.sql')
    with pyodbc.connect(common.connectionString) as conn:
        with conn.cursor() as cursor:
            logger.info('create deduped views')
            cursor.execute(deduped_lanes_view_v1)
            cursor.execute(deduped_lanes_view_v2)

def create_expansion_views():
    expanded_lanes_view_v1 = common.read_file(f'{scriptsPath}/expanded_lanes_v1.sql')
    expanded_lanes_view_v2 = common.read_file(f'{scriptsPath}/expanded_lanes_v2.sql')
    with pyodbc.connect(common.connectionString) as conn:
        with conn.cursor() as cursor:
            logger.info('create expansion views')
            cursor.execute(expanded_lanes_view_v1)
            cursor.execute(expanded_lanes_view_v2)

def test_zzz_sqlite(self):
    src = (read_file(test_file('third_party/sqlite/sqlite3.c')) +
           read_file(test_file('sqlite/speedtest1.c')))
    self.do_benchmark('sqlite', src, 'TOTAL...',
                      native_args=['-ldl', '-pthread'],
                      shared_args=['-I' + test_file('third_party', 'sqlite')],
                      # not minimal because of files
                      emcc_args=['-s', 'FILESYSTEM', '-s', 'MINIMAL_RUNTIME=0'],
                      force_c=True)

def create_tables():
    tear_down = common.read_file(f'{scriptsPath}/tear_down.sql')
    create_tables = common.read_file(f'{scriptsPath}/create_tables.sql')
    create_materialized_views = common.read_file(f'{scriptsPath}/create_materialized_views.sql')
    with pyodbc.connect(common.connectionString) as conn:
        with conn.cursor() as cursor:
            logger.info('tear down')
            cursor.execute(tear_down)
            logger.info('create tables')
            cursor.execute(create_tables)
            logger.info('create materialized views')
            cursor.execute(create_materialized_views)

def load_test_case(test_case):
    load_tables_template = common.read_file(f'{scriptsPath}/load_tables_template.sql')
    load_materialized_views = common.read_file(f'{scriptsPath}/load_materialized_views.sql')
    load_tables_sql = load_tables_template.replace('<<test_case>>', test_case)
    with pyodbc.connect(common.connectionString) as conn:
        with conn.cursor() as cursor:
            logger.info('load standard tables')
            cursor.execute(load_tables_sql)
            logger.info('load materialized views')
            cursor.execute(load_materialized_views)

def part_2():
    I = read_file('day12')
    x, y = 0, 0
    wx, wy = 10, 1
    direc = 0
    for i in I:
        action = i[0]
        val = int(i[1:])
        if action == 'F':
            x += val * wx
            y += val * wy
        elif action == 'N':
            wy += val
        elif action == 'S':
            wy -= val
        elif action == 'W':
            wx -= val
        elif action == 'E':
            wx += val
        elif action == 'L':
            direc = val
        elif action == 'R':
            direc = 360 - val
        # Apply any turn as repeated 90-degree counter-clockwise rotations
        # of the waypoint.
        while direc != 0:
            wx, wy = -wy, wx
            direc -= 90
    print(abs(x) + abs(y))

def status(request):
    response = HttpResponse()
    html = ''
    # Add refresh
    if not os.path.exists('/mnt/fd/fb_done'):
        html = '<head><meta http-equiv=refresh content=30 ></head>'
    # Add body tags
    html += '<body>'
    # Read status message from file
    html += common.read_file('status.txt')
    # Replace newlines with breaks for HTML rendering
    html = html.replace('\n', '<br/>')
    html += '</body>'
    # Send response for webpage
    response.write(html)
    return response

def part_2():
    I = read_file('day8')
    for i in range(len(I)):
        l = I.copy()
        # Flip exactly one nop<->jmp per candidate program (elif, so a
        # flipped nop is not immediately flipped back).
        if l[i][:3] == 'nop':
            l[i] = 'jmp' + l[i][3:]
        elif l[i][:3] == 'jmp':
            l[i] = 'nop' + l[i][3:]
        looped = False
        pc = 0
        acc = 0
        visited = set()
        while pc < len(l):
            if pc in visited:
                looped = True
                break
            instr = l[pc][:3]
            val = int(l[pc][4:])
            visited.add(pc)
            if instr == 'acc':
                acc += val
                pc += 1
            elif instr == 'jmp':
                pc += val
            else:
                pc += 1
        if not looped:
            print(acc)

def query(args):
    try:
        appid = common.read_lineconf(common.read_file('wolframalpha-api-key'))[0]
    except (IOError, IndexError):
        raise IOError('No appid to WolframAlpha™ was found.')
    m = re.search(r'^(.*?)( \| (.*?))?$', args)
    expr = m.group(1)
    xml = common.read_url('http://api.wolframalpha.com/v2/query?appid={0}&input='.format(appid), expr)
    output_type = 'approx' if not m.group(3) else m.group(3)
    root = ET.fromstring(xml)
    didyoumeans = root.find('didyoumeans')
    if didyoumeans is not None:
        return 'Did you mean: \'{0}\'?'.format(didyoumeans.find('didyoumean').text)
    title = {'approx': 'Decimal approximation', 'exact': 'Exact result'}
    for pod in root:
        if pod.tag == 'pod':
            if pod.attrib['title'] in ('Result', 'Value', title[output_type]):
                return pod.find('subpod').find('plaintext').text
    return 'WolframAlpha™ doesn\'t have the answer.'

def lastfm(args):
    try:
        key = common.read_lineconf(common.read_file("lastfm-api-key"))[0]
        if not (len(key) == 32 and re.search(r'^[0-9a-f]+$', key)):
            raise IOError
    except (IOError, IndexError):
        raise IOError('Ingen nyckel för last.fm-API:et kunde hittas. Skapa filen '
                      'lastfm-api-key med enbart nyckeln i sig i botens arbetskatalog.')
    try:
        content = common.read_url(
            "http://ws.audioscrobbler.com/2.0/"
            "?method=user.getrecenttracks&limit=1&api_key={0}&user={1}".format(
                key, common.quote(args)))
    except HTTPError:
        return "Kunde inte hitta en last.fm-användare med namnet {}.".format(args)
    dom = xml.dom.minidom.parseString(content)
    latesttrack = dom.getElementsByTagName('track')[0]
    artist = latesttrack.getElementsByTagName('artist')[0].childNodes[0].data
    title = latesttrack.getElementsByTagName('name')[0].childNodes[0].data
    if latesttrack.hasAttribute("nowplaying"):
        playstatus = "spelar just nu"
    else:
        playstatus = "spelade senast"
    return "{0} {1} {2} ({3}) -- History: http://www.last.fm/user/{0}/tracks".format(
        args, playstatus, title, artist)

def check_cert(fb):
    # Get object id from file
    obj_id = common.read_file('fb_req_obj_id.txt')
    # Check Facebook for certificate
    results = fb.fql.query(
        'SELECT id, data FROM SecureGridNet.ipopdata '
        'WHERE _id IN (SELECT obj_id FROM SecureGridNet.certificate '
        'WHERE gid = ' + obj_id + ')')
    # Get result from Facebook database
    id = ''
    data = ''
    for result in results:
        id = result['id']
        data = result['data']
    # Return if certificate is not found
    if data == '':
        return -1
    # Check that the certificate is for the current user
    if id != obj_id:
        return -1
    # Decode data to cert
    cert = common.fb_decode(data)
    # Write certificate to file
    common.write_file('/etc/racoon/certs/host-cert.pem', cert)
    return 0

def part_1():
    I = read_file('day1')
    I = [int(i) for i in I]
    offsets = [2020 - i for i in I]
    combined = I + offsets
    duplicates = list(set([i for i in combined if combined.count(i) > 1]))
    print(duplicates[0] * duplicates[1])

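# A hedged alternative sketch: the duplicate trick in part_1() misfires if
# 1010 appears in the input or if a value repeats; a set lookup avoids both.
# Assumes the same read_file() helper returning one number per line.
def part_1_set():
    seen = set()
    for n in (int(x) for x in read_file('day1')):
        if 2020 - n in seen:
            print(n * (2020 - n))
            return
        seen.add(n)
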
def main(): """ Builds the dictionaries and the overcomplete word embeddings Usage: python preprocessing.py -input <original_embs_file> -output <overcomp_file> -factor <factor_overcomplete> -bin <binary_file> <original_embs_file>: the original word embeddings is used to learn denoising <overcomp_file>: the file name of overcomplete word embeddings <factor_overcomplete>: a factor of overcomplete embeddings length (=factor * length of original word embeddings) <binary_file>: 1 for binary; 0 for text """ parser = argparse.ArgumentParser() parser.add_argument('-input', type=str) parser.add_argument('-output', type=str) parser.add_argument('-factor', type=int, default=10) parser.add_argument('-bin', type=int, default=1) args = parser.parse_args() vocab, vecs = read_file(args.input, binary=args.bin) dict_comp = learn_dict(vecs.T, factor=1) dict_overcomp = learn_dict(vecs.T, factor=args.factor) dim_over = args.factor * len(vecs[0]) vecs_overcomp = overcomplete_embs(vecs.T, dim_over) np.save(args.input + '.dict_comp', dict_comp) np.save(args.input + '.dict_overcomp', dict_overcomp) save_file(args.output, vocab, vecs_overcomp.T, binary=args.bin) print 'Preprocessing done!'
def part_2():
    I = read_file('day5')
    seats = sorted([seat_id(l) for l in I])
    for i in range(seats[0], seats[-1]):
        if i - 1 in seats and i + 1 in seats and i not in seats:
            print(i)
            break

def test_zzz_box2d(self):
    src = read_file(test_file('benchmark', 'test_box2d_benchmark.cpp'))

    def lib_builder(name, native, env_init):
        return self.get_library(os.path.join('third_party', 'box2d'), ['box2d.a'],
                                configure=None, native=native,
                                cache_name_extra=name, env_init=env_init)

    self.do_benchmark('box2d', src, 'frame averages',
                      shared_args=['-I' + test_file('third_party', 'box2d')],
                      lib_builder=lib_builder)

def process_file(filepath, keywords_n=3, sentences_n=2, training=False, encoding='utf-8'):
    with open(filepath, encoding=encoding) as file:
        source = file.read()
    sentences = common.read_file(filepath, encoding=encoding)
    words = common.get_words(sentences)
    if not training:
        print("Generating summary")
        generated_summary = text_rank_sum.generate_summary(
            sentences, top_n_words=keywords_n, top_n_sentences=sentences_n)
    print("Finding keywords")
    tfidf_keywords = tf_idf.find_tfidf_keywords(sentences, top_n=keywords_n)
    if not training:
        obj = {
            'source': source,
            'words': words,
            'keywords_heuristic': generated_summary['keywords'],
            'keywords_tfidf': tfidf_keywords,
            'summary': generated_summary['summary'],
            'train': False,
        }
    else:
        obj = {
            'words': words,
            'source': source,
            'train': True,
            'keywords_tfidf': tfidf_keywords,
        }
    common.save_to_db(obj)
    return obj

def test_life(self):
    src = read_file(test_file('life.c'))
    self.do_benchmark('life', src, '''--------------------------------''',
                      shared_args=['-std=c99'], force_c=True)

def test_zzz_lzma(self):
    src = read_file(test_file('benchmark', 'test_lzma_benchmark.c'))

    def lib_builder(name, native, env_init):
        return self.get_library(os.path.join('third_party', 'lzma'), ['lzma.a'],
                                configure=None, native=native,
                                cache_name_extra=name, env_init=env_init)

    self.do_benchmark('lzma', src, 'ok.',
                      shared_args=['-I' + test_file('third_party', 'lzma')],
                      lib_builder=lib_builder)

def truncate_tables():
    truncate_tables_sql = common.read_file(f'{scriptsPath}/truncate_standard_tables.sql')
    with pyodbc.connect(common.connectionString) as conn:
        with conn.cursor() as cursor:
            cursor.execute(truncate_tables_sql)

def collect_html(args):
    """
    Collect html data locally, so you don't have to keep hitting the url.

    :param args: args for collect html
    :type args: Namespace
    """
    url_list = args.url_list
    output_dir = args.output_dir
    print(url_list)
    # do some checks
    try:
        assert os.path.exists(url_list), 'url_list must exist'
        assert os.path.exists(output_dir), 'output_dir must exist'
    except AssertionError as err:
        logger.error('Failed check: {}'.format(err))
        return
    urls = common.read_file(url_list)
    for url in urls:
        logger.debug(url)
        html = spy_tools.collect_html(url)
        out = url.split('/')
        output = os.path.join(output_dir, out[-1] + '.html')
        common.write_file(html, output)

def main():
    # cmdline_args = sys.argv[1:]
    # if len(cmdline_args) == 0:
    #     print("usage python convolutionalNN.py sample|total")
    #     exit(-1)
    # dataset = cmdline_args[0]
    # if dataset == "sample":
    #     time1 = time.time()
    #     training_questions = pickle.load(open("data/train_40k_qn_pairs.p", "rb"))
    #     time2 = time.time()
    # elif dataset == "total":
    #     time1 = time.time()
    #     training_questions = pickle.load(open("data/train_qn_pairs.p", "rb"))
    #     time2 = time.time()
    # else:
    #     print("usage python convolutionalNN.py sample|total")
    #     exit(-1)
    # print("Loaded Pickle : %f min" % ((time2 - time1) / 60))
    # google_model = common.load_model("google")
    # time3 = time.time()
    # print("Loaded model: %f min" % ((time3 - time2) / 60))
    data = common.read_file('data/train_cleaned_features.csv')
    y_train = data[:, 1]
    x_train = get_embedded_sentence(None, None)
    # time4 = time.time()
    # print("Obtained Embeddings: %f min" % ((time4 - time3) / 60))
    cnn_model = build_model()
    cnn_model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
                  validation_split=0.3)
    cnn_model.save('cnn_model.h5')

def GetAllActions(dataDir, keyIdx):
    # TODO: read in all the *.ms files to build all_actions.ms
    allActions = {}
    allActionFileName = os.path.join(dataDir, ALL_ACTION_FILE_NAME)
    allActionsData = read_file(allActionFileName, CODING)
    allActionsDataList = allActionsData.split(u'\n')
    for anActionData in allActionsDataList:
        fields = anActionData.split(u',')
        id = fields[ACTION_ID_IDX].strip()
        if id == u"事务名":  # skip the header row ("transaction name")
            continue
        if id == u"":
            continue
        allActions[id] = fields
    # Sort by the requested key column, descending.
    actionsList = sorted(allActions.items(), key=lambda item: item[1][keyIdx],
                         reverse=True)
    # Table title: "transaction response overview"
    allActionsStr = GetCSVTable(u"事务响应概述", actionsList, allActionsFileds,
                                allActionsTbDesc)
    return actionsList, allActionsStr

def check_cert_req(fb, gid):
    # Get signed certificate ids
    ids_file = common.read_file('signed_cert_ids.txt')
    # Create array of signed ids
    signed_ids = ids_file.split('\n')
    print('Signed IDs: %s' % signed_ids)
    # Get list of request ids from Facebook
    results = fb.fql.query(
        'SELECT obj_id FROM SecureGridNet.request '
        'WHERE usr_id IN (SELECT uid FROM group_member '
        'WHERE gid = ' + str(gid) + ')')
    # Search for unsigned requests
    unsigned_req_ids = []
    for result in results:
        if str(result['obj_id']) not in signed_ids:
            # add to unsigned list
            unsigned_req_ids.append(result['obj_id'])
    print('Unsigned IDs: %s' % unsigned_req_ids)
    return unsigned_req_ids

def proc_time(args):
    """
    Plot process info over time.

    :param args: processes over time
    :type args: Namespace
    """
    session_info = args.session_info
    # do some checks
    try:
        assert os.path.exists(session_info), 'session_info must exist'
    except AssertionError as err:
        logger.error('Failed check: {}'.format(err))
        return
    sess_info = common.read_file(session_info)
    a = []
    # proc
    for s in sess_info:
        s = s.split(':')
        s = s[2].split(' ')
        del s[0]
        a.append(float(s[0]))
    common.plot_proc_info(a, len(a))

def system_load(args):
    """
    Plot system load.

    :param args: system load args
    :type args: Namespace
    """
    session_info = args.session_info
    # do some checks
    try:
        assert os.path.exists(session_info), 'session_info must exist'
    except AssertionError as err:
        logger.error('Failed check: {}'.format(err))
        return
    sess_info = common.read_file(session_info)
    a = []  # 5 min avg
    b = []  # 10 min avg
    c = []  # 15 min avg
    for s in sess_info:
        s = s.split(':')
        s = s[2].split(' ')
        del s[0]
        a.append(float(s[0]))
        b.append(float(s[1]))
        c.append(float(s[2]))
    common.plot_sess_info(a, b, c, len(a))

def part_1():
    I = read_file('day13')
    ts = int(I[0])
    buses = [int(x) for x in I[1].split(',') if x != 'x']
    times = [b - (ts % b) for b in buses]
    val, i = min((v, i) for (i, v) in enumerate(times))
    print(buses[i] * val)

def test_sockets_echo_bigdata(self):
    sockets_include = '-I' + test_file('sockets')

    # generate a large string literal to use as our message
    message = ''
    for i in range(256 * 256 * 2):
        message += chr(ord('a') + (i % 26))

    # re-write the client test with this literal (it's too big to pass via command line)
    src = read_file(test_file('sockets/test_sockets_echo_client.c'))
    create_file('test_sockets_echo_bigdata.c',
                src.replace('#define MESSAGE "pingtothepong"',
                            '#define MESSAGE "%s"' % message))

    harnesses = [
        (CompiledServerHarness(test_file('sockets/test_sockets_echo_server.c'),
                               ['-DTEST_DGRAM=0'], 49172), 0),
        (CompiledServerHarness(test_file('sockets/test_sockets_echo_server.c'),
                               ['-DTEST_DGRAM=1'], 49173), 1)
    ]
    if not common.EMTEST_LACKS_NATIVE_CLANG:
        harnesses += [(WebsockifyServerHarness(
            test_file('sockets/test_sockets_echo_server.c'), [], 49171), 0)]

    for harness, datagram in harnesses:
        with harness:
            self.btest_exit('test_sockets_echo_bigdata.c', args=[
                sockets_include,
                '-DSOCKK=%d' % harness.listen_port,
                '-DTEST_DGRAM=%d' % datagram
            ])

def __init__(self, file_name, astergui, parent=None, **kwargs):
    """
    Create editor.

    Arguments:
        file_name (str): File path.
        astergui (AsterGui): AsterGui instance.
        parent (Optional[QWidget]): Parent widget. Defaults to *None*.
        **kwargs: Keyword arguments.
    """
    super(TextFileEditor, self).__init__(parent=parent,
                                         name=translate("AsterStudy", "Edit file"),
                                         astergui=astergui, **kwargs)
    self.file_name = file_name
    self.prev_state = _text2unicode(read_file(file_name))

    title = translate("AsterStudy", "Edit file") + " '{}'"
    self.setWindowTitle(title.format(get_base_name(file_name)))
    self.setPixmap(load_pixmap("as_pic_edit_file.png"))

    self.editor = Q.QTextEdit(self)
    self.editor.setLineWrapMode(Q.QTextEdit.NoWrap)
    self.setLayout(Q.QVBoxLayout())
    self.layout().setContentsMargins(0, 0, 0, 0)
    self.layout().addWidget(self.editor)
    self.editor.setPlainText(self.prev_state)
    self.editor.textChanged.connect(self.updateButtonStatus)

def main():
    template_data = common.read_file(TEST_HTML_TEMPLATE_FILE)
    template = Template(template_data)
    for directory in DIRECTORIES_WITH_TESTS:
        for js_file_path in common.get_files_with_suffix(directory, "_test.js"):
            _gen_test_html(js_file_path, template)

def part_2():
    I = read_file('day3')
    s1 = slope(I, 1, 1)
    s2 = slope(I, 1, 3)
    s3 = slope(I, 1, 5)
    s4 = slope(I, 1, 7)
    s5 = slope(I, 2, 1)
    print(s1 * s2 * s3 * s4 * s5)

def main(): """ Noise filtering from word embeddings Usage: python filter_noise_embs.py -input <original_embs_file> -output <denoising_embs_file> -bin <binary_file> -over <over_complete_embs_file> -iter <iteration> -bsize <batch_size> <original_embs_file>: the original word embeddings is used to learn denoising <denoising_embs_file>: the output name file of word denoising embeddings <over_complete_embs_file>: the overcomple word embeddings is used to learn overcomplete word denoising embeddings <binary_file>: 1 for binary; 0 for text """ parser = argparse.ArgumentParser() parser.add_argument('-input', type=str) parser.add_argument('-output', type=str) parser.add_argument('-over', action='store', default=False, dest='file_over') parser.add_argument('-iter', type=int) parser.add_argument('-bsize', type=int) parser.add_argument('-bin', type=int, default=1) args = parser.parse_args() vocab, vecs_in = read_file(args.input, binary=args.bin) if args.file_over is False: vecs_dict = np.load(args.input + '.dict_comp.npy') Q, S = initialize_parameters(vecs_dict) model = DeEmbs(vecs_in=vecs_in, batch_size=args.bsize, epochs=args.iter, Q=Q, S=S) else: vecs_dict = np.load(args.input + '.dict_overcomp.npy') Q, S = initialize_parameters(vecs_dict) vc, vecs_over = read_file(args.file_over, binary=args.bin) assert vocab == vc model = DeEmbs(vecs_in=vecs_in, vecs_over=vecs_over, batch_size=args.bsize, epochs=args.iter, Q=Q, S=S) vecs_out = model.fit() save_file(args.output, vocab, vecs_out, binary=args.bin)
def comment(environ, start_response):
    """
    Add a new comment on a user.

    :param environ:
    :param start_response:
    :return:
    """
    start_response('200 OK', [('Content-Type', 'text/html')])
    return [read_file(os.path.join("static", "html", "comment.html"))]

def index(environ, start_response):
    """
    Home page.

    :param environ:
    :param start_response:
    :return:
    """
    start_response('200 OK', [('Content-Type', 'text/html')])
    return [read_file(os.path.join("static", "html", "index.html"))]

def store_ca_cert(fb, gid):
    # Get the CA certificate from the file system
    cert = common.read_file('demoCA/cacert.pem')
    # Encode certificate for Facebook database
    data = common.fb_encode(cert)
    # Store on Facebook
    common.fb_put_data(fb, gid, data, 'certificate')
    return 0

def make_cert_request(fb):
    # Create the necessary request files
    create_req_files()
    # Get certificate request
    cert_req = common.read_file('/etc/racoon/certs/newreq.pem')
    # Store request on Facebook
    fb_store_req(fb, cert_req)
    return 0

def test_transform_features():
    train_filename = 'tests/train_users_2.csv'
    info_dict = common.read_info_str(info_str)
    data = common.read_file(train_filename, info_dict)
    common.transform_features(info_dict, data)
    # print(result, file=open('test1', 'w'))
    expected_output = pd.read_csv(train_filename + '.expected', index_col='id', sep='\t')
    data.to_csv(train_filename + '.actual', sep='\t')
    # We could compare the actual and expected files on disk, but then we would
    # have to deal with a few technical details; especially annoying is that
    # float numbers may differ in the last decimal digit. assert_frame_equal
    # automatically recognizes that 0.3 and 0.29999999999 are the same, so it
    # is better to use pdt.assert_* than to compare the files.
    pdt.assert_frame_equal(data, expected_output)

def sign_cert_req(req):
    # Write req to file
    fname = 'tmp_cert_req.pem'
    fname_cert = 'newcert.pem'
    common.write_file(fname, req)
    # Sign the certificate
    os.system('openssl ca -batch -policy policy_anything -key secret '
              '-in ' + fname + ' -out ' + fname_cert)
    # Read certificate
    signed_cert = common.read_file(fname_cert)
    return signed_cert

def get_output(msg='', myname='', sender='', channel='', command_prefix='.'):
    msg = msg.strip()
    cmdsplit_re = re.compile(r'\s+->\s+')
    importline_re = re.compile(r'(\w|\.)+?(,\s+(\w|\.)+?)*\s*$')
    # helpsplit_re = re.compile(r'\s+\?>\s+')
    lines = common.read_lineconf(common.read_file('commands'))
    cmdlines = [x for x in lines if cmdsplit_re.search(x) is not None]
    importlines = [x for x in lines if importline_re.match(x) is not None]
    imports = get_command_imports(importlines)

    format_answer = lambda t, arg: t.format(
        message=msg, myname=myname, channel=channel, sender=sender,
        arg=arg, qarg=quote(arg), c='[' + command_prefix + ']')
    exec_answer = lambda code, arg: eval(code, imports, {
        'arg': arg, 'qarg': quote(arg), 'message': msg,
        'sender': sender, 'channel': channel, 'myname': myname})
    # Fancy regex magic: turn a raw command template into a matchable pattern.
    format_cmd = lambda t: format_answer(t, r'(?P<arg>.+?)') + '$'

    for line in cmdlines:
        try:
            rawcmd, rawanswer = cmdsplit_re.split(line)
        except ValueError:
            # TODO: some good error handling here
            continue
        cmd = format_cmd(rawcmd)
        cmd_match = re.match(cmd, msg)
        if cmd_match:
            if 'arg' in cmd_match.groupdict():
                arg = cmd_match.group('arg')
            else:
                arg = ''
            if rawanswer.startswith('#py:'):
                answer = exec_answer(rawanswer[4:], arg)
            else:
                answer = format_answer(rawanswer, arg)
            return answer.split('[\\n]')
    return None

def get_ans():
    cont = read_file('59_cipher1.txt')
    secret = map(int, cont.split(','))
    space_ascii = ord(' ')
    # XOR every byte with ' ': positions that held a space reveal a key byte,
    # so the most frequent results are the key-byte candidates.
    space_xor_secret = secret[:]
    for i in xrange(len(space_xor_secret)):
        space_xor_secret[i] = secret[i] ^ space_ascii
    for ascii in set(space_xor_secret):
        find_counts = space_xor_secret.count(ascii)
        if find_counts < 50:
            continue
        print ascii, find_counts
    # Ascii  Count
    # 100    77
    # 103    70
    # 111    86
    # key = [111, 111, 111] => message prefix " Tcm ", so we can guess key[1] = 111.
    # key = [103, 111, 103] => message prefix "?Tke "; "Tke" is probably "The",
    #   and the 'e' sits at position 3, 3 % 3 = 0, so key[0] = 103 is probably right.
    # key = [103, 111, 100] => message prefix "?The "
    key_map = [103, 111, 100]
    test = []
    for i in xrange(len(secret)):
        test += [chr(secret[i] ^ key_map[i % 3])]
    print ''.join(test)
    return sum(ord(ch) for ch in test)

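# A hedged sketch of a fully automatic key search for the same cipher,
# instead of the manual frequency guessing above. `crack_key` is a
# hypothetical helper, not the original author's method; `secret` is the
# list of ints read as in get_ans(), and scoring by space count is a
# common heuristic for English plaintext.
from itertools import product

def crack_key(secret):
    best_score, best_key = 0, None
    for key in product(range(ord('a'), ord('z') + 1), repeat=3):
        # Count spaces in the trial decryption; English text is space-heavy.
        score = sum(1 for i, c in enumerate(secret) if c ^ key[i % 3] == ord(' '))
        if score > best_score:
            best_score, best_key = score, key
    return best_key
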
def get_ans():
    width = 80
    m = []
    for line in read_file('81_matrix.txt').splitlines():
        m += [map(int, line.split(','))]
    # dp[i][j]: minimal path sum from (0, 0) to (i, j) moving only right/down.
    dp = []
    for i in xrange(width):
        dp += [[0] * width]
    dp[0][0] = m[0][0]
    for i in xrange(1, width):
        dp[0][i] = dp[0][i-1] + m[0][i]
        dp[i][0] = dp[i-1][0] + m[i][0]
    for i in xrange(1, width):
        for j in xrange(1, width):
            dp[i][j] = min(dp[i-1][j], dp[i][j-1]) + m[i][j]
    return dp[width - 1][width - 1]

def get_command_help(msg, sendernick, myname, command_prefix, plugins):
    msgparts = re.match(r'[{}]help(\s+(?P<nudgenick>@\S+)?\s*(?P<args>.+)*)?'.format(command_prefix), msg)
    if msgparts.group('args') is None:
        return ['{}: Hej Och Välkommen Till {}, En Vänlig Och Hjälpsam IRC (InterNet Relay Chat) "Bot"!'.format(sendernick, myname.title()),
                '{0}: Skriv \'{1}list commands\' eller \'{1}list plugins\' för en lista på kommandon/plugins'.format(sendernick, command_prefix),
                '{}: Skriv \'{}help <kommando>\' för hjälp med <kommando>.'.format(sendernick, command_prefix)]
    pluginname = msgparts.group('args')
    if msgparts.group('nudgenick'):
        nudgenick = msgparts.group('nudgenick')[1:]
    else:
        nudgenick = sendernick
    if pluginname in plugins:
        plugin = load_plugin(pluginname)
        try:
            info = plugin.help()
            return ['{}: {}: {}'.format(nudgenick, pluginname, info['description']),
                    '{}: Användning: {}{} {}'.format(nudgenick, command_prefix, pluginname, info['argument'])]
        except NotImplementedError:
            return 'nån idiot har glömt att lägga in hjälptext i {}{}'.format(command_prefix, pluginname)
    else:
        helpsplit_re = re.compile(r'\s+\?>\s+?')
        lines = common.read_lineconf(common.read_file('commands'))
        helplines = [x for x in lines if helpsplit_re.search(x) is not None]
        for h in helplines:
            try:
                cmd, args, desc = helpsplit_re.split(h)
            except ValueError:
                continue
            if pluginname == cmd.format(c=''):
                return ['{}: {}: {}'.format(nudgenick, pluginname, desc.format(myname=myname)),
                        '{}: Användning: {} {}'.format(nudgenick, cmd.format(c=command_prefix), args).strip()]
        return '{}: finns inget sånt kommando'.format(sendernick)

def configure_ipop(gid):
    # Create files based on user input
    dhcpdata = common.read_file('configs/dhcpdata.conf')
    dhcpdata = dhcpdata.replace('UFGrid00', gid)
    # Files are saved to floppy to maintain compatibility with the appliance
    common.write_file('/mnt/fd/dhcpdata.conf', dhcpdata)
    common.write_file('/mnt/fd/ipop_ns', gid)
    common.write_file('/usr/local/ipop/var/ipop_ns', gid)
    # Create ipop config file
    create_ipop_config('/usr/local/ipop/var/ipop.config', gid)
    # Define the type of server created by user
    os.system('cp type /mnt/fd/type')
    # Update the node config file
    os.system('cp /mnt/fd/node.config /usr/local/ipop/var/node.config')
    # Add racoon to ipop restart
    os.system('echo \'/etc/init.d/racoon restart\' >> /etc/init.d/ipop.sh')
    return 0

def get_ans():
    keylogers = map(int, read_file('79_keylog.txt').splitlines())
    return analyze_keylogers(keylogers)

from common import read_file
from math import log

# Compare a^b values by b * log(a) instead of computing the huge powers.
exps = read_file('base_exp.txt')
maxl = 0
maxi = 0
i = 0
for numbers in exps:
    i += 1
    (base, exp) = map(int, numbers.split(','))
    l = exp * log(base)
    print base, exp, l
    if l > maxl:
        maxl = l
        maxi = i
print maxi

def process_feed_file(filename):
    # Lookup keys.
    brown_id_map = get_brown_id_map()
    in_data = read_file(filename)
    error_count = 0
    out = {}
    for n, row in enumerate(in_data):
        brown_id = row.get('Brown_ID')
        last = row.get('Preferred_Last_Name')
        first = row.get('Preferred_First_Name')
        middle = row.get('Preferred_Middle_Name')
        # Remove trailing periods.
        if middle:
            middle = middle.rstrip('.')
        title = row.get('Current_Person_Title').rstrip('\n').rstrip('\r')
        email = row.get('Employee_Email')
        short_id = row.get('AUTH_ID')
        # Make a string for logging errors.
        estring = "Row %s. %s" % (
            str(n + 1),
            " ".join([f for f in [short_id, first, last, title, email] if f is not None])
        )
        # Stop processing if too many errors are found.
        if error_count > ERROR_MAX:
            logging.error("Too many errors. Exiting.")
            raise Exception("Too many missing auth ids to proceed. Verify faculty file.")
        if short_id is None:
            fail = False
            try:
                # Try to fetch short_id from LDAP
                short_id = brown_id_map[brown_id]['short_id']
            except (IndexError, KeyError):
                fail = True
            if (short_id is None) or (fail is True):
                logging.warning("Unable to look up faculty by key. %s." % estring)
                error_count += 1
                continue
        if first is None:
            logging.error("Missing first name. %s." % estring)
            continue
        if last is None:
            logging.error("Missing last name. %s" % estring)
            continue
        label = "{0}, {1}".format(last, first)
        uri = make_uri(short_id)
        d = {
            'brown_id': brown_id,
            'uri': uri,
            'label': label,
            'first': first,
            'last': last,
            'middle': middle,
            'title': title,
            'email': email,
            'shortId': short_id,
            # Add other FIS data that might be needed later here.
            'workdayID': row.get('Workday_ID'),
        }
        out[short_id] = d
        if (DEBUG is True) and (n > 15):
            break
    return out

from common import read_file
from math import sqrt

inpt = read_file("triangles.txt")

def length(x1, y1, x2, y2):
    return sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)

def square(a, b, c):
    # Heron's formula for the area of a triangle with side lengths a, b, c.
    s = (a + b + c) / 2
    return sqrt(s * (s - a) * (s - b) * (s - c))

def contains_origin(x1, y1, x2, y2, x3, y3):
    # The origin lies inside the triangle iff the three sub-triangles formed
    # with the origin have areas summing to the area of the whole triangle.
    o21 = length(x1, y1, 0, 0)
    o22 = length(x2, y2, 0, 0)
    o23 = length(x3, y3, 0, 0)
    _122 = length(x1, y1, x2, y2)
    _223 = length(x2, y2, x3, y3)
    _123 = length(x1, y1, x3, y3)
    x = square(o21, o22, _122) + square(o21, o23, _123) + square(o22, o23, _223)
    y = square(_122, _223, _123)
    return int(abs(x - y)) == 0

k = 0

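# The script breaks off after `k = 0`; a hedged sketch of the missing driver
# loop, assuming each line of triangles.txt holds six comma-separated vertex
# coordinates:
for line in inpt:
    (x1, y1, x2, y2, x3, y3) = map(int, line.split(','))
    if contains_origin(x1, y1, x2, y2, x3, y3):
        k += 1
print k
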
        return True
    elif r.status == 302 and location and location.find('/404') >= 0:
        return False
    else:
        raise Exception('unexpected response from server - that could be interesting')

NAROD_DB = 'DB/narod'

total = 0
passed = 0
failed = 0
assert os.path.exists(NAROD_DB)
for file in os.listdir(NAROD_DB):
    if common.check_hex_digest(file):
        album = json.loads(common.read_file(os.path.join(NAROD_DB, file)))
        url = album['url']
        ok = check_url(album['url'])
        sys.stderr.write('.')
        if total % 10 == 0:
            sys.stderr.write('.%s.' % total)
        if ok:
            passed += 1
        else:
            failed += 1
        # ok_str = 'ok' if ok else '404'
        # print '%-4s %s %s ...' % (ok_str, album['album_hash'], url[0:40])
        sys.stderr.write('.%s.' % album['album_hash'])
        sleep(1)
        total += 1
print ''

""" candidates = [[range(1,10) for j in range(GRID_SIZE)] for i in range(GRID_SIZE)] for i in range(GRID_SIZE): for j in range(GRID_SIZE): candidate = grid[i][j] if candidate != 0: candidates[i][j] = [] remove_candidate(candidates, i, j, candidate) number_of_set = sum([sum(map(lambda x: 0 if x == 0 else 1, x)) for x in grid]) return solve_grid_q(candidates, grid, GRID_SIZE * GRID_SIZE - number_of_set) inputData = read_file("sudoku.txt") result = 0 for i in range(len(inputData)): line = inputData[i] if line[0] == 'G': grid = [] for gridline in inputData[i + 1: i + 10]: grid.append(map(int, gridline)) solution = solve_sudoku(grid) top = solution[0] result += top[0] * 100 + top[1] * 10 + top[2] i += 10 print result
    digits_to_chars = {str(i): [] for i in range(10)}
    for i in range(len(word)):
        digits_to_chars[digits[i]].append(word[i])
    return max(map(len, digits_to_chars.values())) == 1

def find_max_square(anagrams, squares):
    m = 0
    for words in anagrams.values():
        for i in range(len(words)):
            for j in range(i + 1, len(words)):
                word1 = words[i]
                word2 = words[j]
                l = len(word1)
                word_squares = squares[l]
                for n in word_squares:
                    x = number_anagram(word1, word2, n)
                    if x in word_squares and chars_differ(word1, n):
                        m = max(m, max(n, x))
    return m

words = read_file("words.txt")
anagrams = build_anagrams(words)
squares = build_squares(anagrams)
print find_max_square(anagrams, squares)