def test_build_docs(testdirectory):
    """Build the cpp_coffee docs with sphinx-build and record the output.

    Verifies that the Sphinx build log and the generated wurfapi JSON
    match the stored recordings; mismatches are written to a scratch
    directory for inspection.

    :param testdirectory: pytest fixture providing a scratch directory.
    """
    cpp_coffee = testdirectory.copy_dir(directory="test/data/cpp_coffee")

    # Make it a git repo - we will try to fetch the project root from git
    cpp_coffee.run(["git", "init"])
    cpp_coffee.run(["git", "add", "."])
    cpp_coffee.run(
        [
            "git",
            "-c",
            "user.name=John",
            "-c",
            "[email protected]",
            "commit",
            "-m",
            "oki",
        ]
    )

    docs = cpp_coffee.join("docs")
    # Result object was previously bound to an unused local `r`; dropped.
    docs.run("sphinx-build --no-color -w log.txt -vvv -b html . _build")

    log_file = os.path.join(docs.path(), "log.txt")

    # The log file should have zero size - i.e. no warnings or errors.
    # As you can see we are not quite there :)
    with open(log_file, "r") as log:
        log_data = log.read()

    mismatch_path = testdirectory.mkdir("mismatch")

    recorder = record.Record(
        filename="log.txt",
        recording_path="test/data/log_recordings",
        mismatch_path=mismatch_path.path(),
    )
    recorder.record(data=log_data)

    # Find and track changes to the wurfapi json file. This is the final API
    # output produced after parsing the sources and running our various steps
    # to transform the output.
    wurfapi_json_file = os.path.join(
        docs.path(), "_build", ".doctrees", "wurfapi_api.json"
    )

    recorder = record.Record(
        filename="build_coffee_wurfapi.json",
        recording_path="test/data/",
        mismatch_path=mismatch_path.path(),
    )

    with open(wurfapi_json_file, "r") as wurfapi_json:
        data = json.load(wurfapi_json)

    recorder.record(data=data)
def test_coffee(testdirectory, caplog):
    """Parse the generated coffee Doxygen XML and record the resulting API."""
    caplog.set_level(logging.DEBUG)

    src_dirs, xml_dir = generate_coffee_xml(testdirectory)

    logger = logging.getLogger(name='test_coffee')

    # Patch fix Doxygen bug reported here:
    # https://bit.ly/2BWPllZ
    patches = [{
        'selector': 'project::v1_0_0::coffee::machine::impl',
        'key': 'access',
        'value': 'private'
    }]

    parser = wurfapi.doxygen_parser.DoxygenParser(
        doxygen_path=xml_dir,
        project_paths=src_dirs,
        patch_api=patches,
        log=logger)

    api = parser.parse_index()

    scratch = testdirectory.mkdir('mismatch')
    recorder = record.Record(
        filename='coffee.json',
        recording_path='test/data/parser_recordings',
        mismatch_path=scratch.path())
    recorder.record(data=api)
def test_parser_input_templates(testdirectory, caplog):
    """Run the Doxygen parser on templates.hpp and record the API output."""
    caplog.set_level(logging.DEBUG)

    src_dir, xml_dir = generate_xml(
        testdirectory, source_file="test/data/parser_input/templates.hpp")

    logger = logging.getLogger(name="test_parser_templates")

    mapper = wurfapi.location_mapper.LocationMapper(
        project_root=src_dir, include_paths=[], log=logger)

    parser = wurfapi.doxygen_parser.DoxygenParser(
        doxygen_path=xml_dir, location_mapper=mapper, patch_api=[], log=logger)

    api = parser.parse_index()

    scratch = testdirectory.mkdir("mismatch")
    recorder = record.Record(
        filename="parser_input_templates.json",
        recording_path="test/data/parser_recordings",
        mismatch_path=scratch.path(),
    )
    recorder.record(data=api)
def test_template_render_namespace(testdirectory):
    """Render the builtin namespace synopsis for a minimal fake API."""
    renderer = wurfdocs.template_render.TemplateRender(user_path=None)

    # Smallest API dict the namespace template needs.
    fake_api = {
        "test::ok": {
            "briefdescription": "",
            "name": "ok",
            "location": {"file": "ok"},
        }
    }

    output = renderer.render(
        selector='test::ok', api=fake_api, filename='namespace_synopsis.rst')

    scratch = testdirectory.mkdir('mismatch')
    recorder = record.Record(
        filename='builtin_namespace_synopsis.rst',
        recording_path='test/data/template_recordings',
        mismatch_path=scratch.path())
    recorder.record(data=output)
def test_template_finder_builtin(testdirectory):
    """Render the builtin class synopsis for the coffee API and record it."""
    renderer = wurfapi.template_render.TemplateRender(user_path=None)
    api = generate_coffee_api(testdirectory=testdirectory)

    # Persist the API for manual inspection of the rendered output.
    testdirectory.write_text(
        filename="api.json",
        data=json.dumps(api, indent=4, sort_keys=True),
        encoding="utf-8",
    )

    output = renderer.render(
        selector="project::v1_0_0::coffee::machine",
        api=api,
        filename="class_synopsis.rst",
    )

    testdirectory.write_text(filename="out.rst", data=output, encoding="utf-8")

    scratch = testdirectory.mkdir("mismatch")
    recorder = record.Record(
        filename="builtin_class_synopsis.rst",
        recording_path="test/data/template_recordings",
        mismatch_path=scratch.path(),
    )
    recorder.record(data=output)
def test_coffee(testdirectory, caplog):
    """Parse the coffee project's Doxygen XML and record the resulting API."""
    caplog.set_level(logging.DEBUG)

    coffee_dir, src_dirs, xml_dir = generate_coffee_xml(testdirectory)

    logger = logging.getLogger(name="test_coffee")

    mapper = wurfapi.location_mapper.LocationMapper(
        project_root=coffee_dir, include_paths=[], log=logger)

    # Patch fix Doxygen bug reported here:
    # https://bit.ly/2BWPllZ
    patches = [{
        "selector": "project::v1_0_0::coffee::machine::impl",
        "key": "access",
        "value": "private",
    }]

    parser = wurfapi.doxygen_parser.DoxygenParser(
        doxygen_path=xml_dir,
        location_mapper=mapper,
        patch_api=patches,
        log=logger,
    )

    api = parser.parse_index()

    scratch = testdirectory.mkdir("mismatch")
    recorder = record.Record(
        filename="coffee.json",
        recording_path="test/data/parser_recordings",
        mismatch_path=scratch.path(),
    )
    recorder.record(data=api)
def __init__(self):
    """Wire up the URL routes and the record backend for the application."""
    routes = [
        (r"/", WriteRecordHandler),
        (r"/writeRecord", WriteRecordHandler),
        (r"/readRecord", ReadRecordHandler),
    ]
    self.record_mod = record.Record(options.db_addr, options.db_port)
    tornado.web.Application.__init__(self, routes, debug=False)
def edit_entry():
    """Prompt for a date, category and description and store the edited entry.

    Re-prompts (recursively) when the entered date is not a valid
    YYYY-MM-DD date, then returns to the welcome menu.
    """
    # Validate the date by letting datetime reject it (EAFP). Moving the
    # split inside the try also handles input without two dashes, which
    # previously raised an uncaught ValueError.
    input_date = input("Enter the date in format 'YYYY-MM-DD': ")
    try:
        year, month, day = input_date.split('-')
        datetime.datetime(int(year), int(month), int(day))
        is_valid_date = True
    except ValueError:
        is_valid_date = False

    if is_valid_date:
        print("\nInput date is VALID ...\n")
        input_category = input("Enter new category: ")
        input_description = input("Enter new description: ")
        edited_entry = record.Record(input_date, input_category,
                                     input_description)
        db.Insert(edited_entry)
    else:
        print("\nInput date is NOT VALID... \n")
        edit_entry()
    # NOTE(review): assumed to run after either branch — the collapsed
    # formatting of the original made this indentation ambiguous; confirm.
    welcome()
def test_parser_input_function(testdirectory, caplog):
    """Run the Doxygen parser on function.hpp and record the API output."""
    caplog.set_level(logging.DEBUG)

    src_dir, xml_dir = generate_xml(
        testdirectory, source_file='test/data/parser_input/function.hpp')

    logger = logging.getLogger(name='test_parser_input_function')

    mapper = wurfapi.location_mapper.LocationMapper(
        project_root=src_dir, include_paths=[], log=logger)

    parser = wurfapi.doxygen_parser.DoxygenParser(
        doxygen_path=xml_dir, location_mapper=mapper, patch_api=[],
        log=logger)

    api = parser.parse_index()

    scratch = testdirectory.mkdir('mismatch')
    recorder = record.Record(
        filename='parser_input_function.json',
        recording_path='test/data/parser_recordings',
        mismatch_path=scratch.path())
    recorder.record(data=api)
def process_file(filename):
    """Parse one product JSON file into a Record, or None if uncategorized.

    :param filename: name of a ``.txt`` JSON file inside ``data_directory``;
        the matching image is assumed to be the same name with ``.jpg``.
    :return: a ``record.Record`` with tags, or ``None`` when no category
        could be derived from the raw category name.
    """
    with open(os.path.join(data_directory, filename), 'r') as f:
        json_data = json.load(f)

    # Color is optional — fall back to the empty string when the
    # 'colors' list is missing, empty or malformed. (Previously a bare
    # except; narrowed to the lookup errors that can actually occur here.)
    try:
        color = json_data['colors'][0]['name']
    except (KeyError, IndexError, TypeError):
        color = ''

    image_filename = data_directory + filename.replace('.txt', '.jpg')

    # NOTE: color is appended without a separator — matches the original
    # behavior relied on by the recorded data.
    description = json_data['description'] + color
    description = BeautifulSoup(description, "lxml").get_text()

    title = json_data['name']
    title = BeautifulSoup(title, "lxml").get_text()

    raw_category = json_data["categories"][0]["fullName"]
    product_url = json_data['clickUrl']

    # Removed an unused `str.maketrans` table and dead commented-out code.
    category = get_category(simplify_text(raw_category))
    if category is None:
        return

    pos = positive_tags(category, simplify_text(description))
    neg = negative_tags(pos)

    return record.Record(image_filename, title, description, product_url,
                         raw_category, category, pos, neg)
def getting_fields() -> "rec.Record | None":
    """Collect the record fields from user input, one prompt at a time.

    Prompts for first name, last name, phone and birthday. Aborts and
    returns ``None`` as soon as any prompt reports an error; a skipped
    birthday (``ms.SKIP``) is tolerated.

    (The previous annotation ``rec.Record or None`` evaluated to just
    ``rec.Record``; replaced with a proper optional annotation.)

    :return: a new ``rec.Record`` on success, otherwise ``None``.
    """
    firstname, err = get_firstname()
    if err:
        return None
    lastname, err = get_lastname()
    if err:
        return None
    phone, err = get_phone()
    if err:
        return None
    birthday, err = get_birthday()
    # Birthday is optional: ms.SKIP is not treated as an error.
    if err != ms.SKIP and err is not None:
        return None
    return rec.Record(firstname=firstname, lastname=lastname,
                      phone=phone, birthday=birthday)
def new_entry():
    """Prompt for a category and description and insert a record dated today."""
    today = str(date.today())
    category = input("Enter category: ")
    description = input("Description: ")
    db.Insert(record.Record(today, category, description))
    restart()
def test_build_docs(testdirectory):
    """Sphinx-build the cpp_coffee docs and record the log plus API JSON."""
    cpp_coffee = testdirectory.copy_dir(directory='test/data/cpp_coffee')

    # Make it a git repo - we will try to fetch the project root from git
    cpp_coffee.run(['git', 'init'])
    cpp_coffee.run(['git', 'add', '.'])
    cpp_coffee.run([
        'git', '-c', 'user.name=John', '-c', '[email protected]',
        'commit', '-m', 'oki'
    ])

    docs = cpp_coffee.join('docs')
    docs.run('sphinx-build --no-color -w log.txt -vvv -b html . _build')

    # Ideally the log file is empty - i.e. no warnings or errors were
    # emitted. We are not quite there yet, so track its content instead.
    with open(os.path.join(docs.path(), 'log.txt'), 'r') as log:
        log_contents = log.read()

    mismatch_path = testdirectory.mkdir('mismatch')

    recorder = record.Record(
        filename='log.txt',
        recording_path='test/data/log_recordings',
        mismatch_path=mismatch_path.path())
    recorder.record(data=log_contents)

    # Track the final wurfapi JSON produced after parsing the sources and
    # running the various transformation steps on the output.
    wurfapi_json_file = os.path.join(
        docs.path(), '_build', '.doctrees', 'wurfapi_api.json')

    recorder = record.Record(
        filename='build_coffee_wurfapi.json',
        recording_path='test/data/',
        mismatch_path=mismatch_path.path())

    with open(wurfapi_json_file, 'r') as wurfapi_json:
        recorder.record(data=json.load(wurfapi_json))
def scrape_about(self, targeturl):
    """Scrape the about page of *targeturl* into a section/text record."""
    target = get_target(targeturl)
    output = record.Record(self._output_file(target, 'about'),
                           ['section', 'text'])

    def on_section(section, content):
        # One row per about-page section.
        output.add_record({'section': section, 'text': content})
        log.info('Scraped section %s with the following text:\n'
                 '#### START ####\n%s\n#### END ####', section, content)

    self.crawl_about(targeturl, on_section)
def __init__(self):
    """Set up routes, the record backend and startup logging for the server."""
    routes = [
        (r"/writeDirtyRecord", WriteDirtyRecordHandler),
        (r"/readRecord", ReadRecordHandler),
        (r"/cmd", CommandHandler),
    ]
    self.record_mod = record.Record(options.db_addr, options.db_port)
    self.running = True
    server_log.info(
        f'record server start on db[{options.db_addr}:{str(options.db_port)}]')
    tornado.web.Application.__init__(self, routes, debug=False)
def scrape_checkins(self, targeturl):
    """Scrape all check-ins of *targeturl* into a name/url record file."""
    target = get_target(targeturl)
    output = record.Record(self._output_file(target, 'checkins'),
                           ['name', 'url'])

    def on_checkin(name, url, i):
        output.add_record({'name': name, 'url': url})
        log.info('Scraped check in %d: %s', i, name)

    scraped = self.crawl_checkins(targeturl, on_checkin)
    log.info('Scraped %d checkins into %s', scraped, output.filename)
def scrape_likes(self, targeturl):
    """Scrape all likes of *targeturl* into a name/url record file."""
    target = get_target(targeturl)
    output = record.Record(self._output_file(target, 'likes'),
                           ['name', 'url'])
    log.info('Scraping likes into %s', output.filename)

    def on_like(name, page_url, i):
        output.add_record({'name': name, 'url': page_url})
        log.info('Scraped like %d: %s', i, name)

    likes_scraped = self.crawl_likes(targeturl, on_like)
    log.info('Scraped %d likes into %s', likes_scraped, output.filename)
def scrape_event_guests(self, eventurl, guest_filter=None):
    """Scrape event invitees into a response/name/profile record file."""
    # Derive the output file name from the event URL path.
    rec_name = os.path.join(self.output_dir,
                            path_safe(urlparse(eventurl).path))
    guests = record.Record(rec_name, ['response', 'name', 'profile'])

    def on_guest(label, name, url, imgurl, i):
        guests.add_record({'response': label, 'name': name, 'profile': url})
        log.info('%s is %s', name, label)

    scraped = self.crawl_event_guests(eventurl, on_guest, guest_filter)
    log.info('Scraped %d invitees for event %s', scraped, eventurl)
def main(): donor_info_dic = {} # A dictionary that stores the donor id as the key and the earliest # transaction date for that donor id as the value. results = {} with open(sys.argv[1], 'r') as in_file, open(sys.argv[3], 'a') as out_file: # Open and read the file with the percentile value (1-100) p_file = data_utils.set_up_percentile_input(sys.argv[2]) percentile = data_utils.get_percentile_input(p_file) for raw_line in in_file: line = data_utils.split_line(raw_line) donation_record = record.Record(line[0], line[7], line[10], line[13], line[14], line[15]) # Check whether the reocrd should be skipped if donation_record.exam_fields() is True: donor_info = donation_record.get_donor_info() donor_id = donor_info[0] donation_date = donor_info[1] # Check if the donor is a repeat donor if donor_id not in donor_info_dic: donor_info_dic[donor_id] = donation_date else: # If the donor is a repeat donor, check if the transaction # date is before the date of the earliest transaction # came in the file for that donor if donor_info_dic[donor_id] > donation_date: # If the transaction date is before the earliest # date in the file, ignore the record but update # the earliest date. donor_info_dic[donor_id] = donation_date else: output_id = donation_record.get_output_id() transaction_amt = float( donation_record.transaction_amt) # Check if there is a record associates with the output_id if output_id not in results: results[ output_id] = analytic_result.AnalyticResult( output_id, transaction_amt, percentile) else: results[output_id].update_result(transaction_amt) output_line = str(results[output_id]) out_file.write(output_line + '\n')
def scrape_friends(self, targeturl):
    """Scrape the friend list of *targeturl* into a name/profile record."""
    target = get_target(targeturl)
    output = record.Record(self._output_file(target, 'friends'),
                           ['name', 'profile'])
    log.info('Scraping friends into %s', output.filename)

    def on_friend(name, url, imgurl, i):
        # Strip query parameters so the stored profile URL is canonical.
        output.add_record({'name': name, 'profile': strip_query(url)})
        log.info('Scraped friend %d: %s', i, name)

    friends_scraped = self.crawl_friends(targeturl, on_friend)
    log.info('Scraped %d friends into %s', friends_scraped, output.filename)
def test_record_no_mapping(testdirectory):
    """A file extension without a registered mapping must raise."""
    recording_path = testdirectory.mkdir('recording')
    mismatch_path = testdirectory.mkdir('mismatch')

    with pytest.raises(NotImplementedError):
        recorder = record.Record(
            filename='test.tar.gz',
            recording_path=recording_path.path(),
            mismatch_path=mismatch_path.path())
        recorder.record(data="{'foo': 2, 'bar': 3}")
def set_tag_value(self, tagname, value):
    """Write *value* into the tag *tagname* as a fresh snapshot.

    Best-effort: failures are reported on stdout instead of raised,
    preserving the original behaviour.
    """
    try:
        tag = self.__mytagmgr.get_tag(tagname)
        snapshot = record.Record(int(time.time()), 0)
        # NOTE(review): 192 presumably encodes "good" quality — confirm
        # against the hyperdb quality constants.
        snapshot.quality = 192
        snapshot.value = value
        snapshot.tagtype = tag.tagtype
        error_code = tag.save_snapshot(snapshot)
        if error_code != hyperdb.hd_sucess:
            print('Failed to set tag value: {0}={1}'.format(tagname, value))
    except Exception:
        # Narrowed from a bare except, which also swallowed SystemExit
        # and KeyboardInterrupt.
        print('Failed to set tag value: {0}={1}'.format(tagname, value))
def choose_record(self):
    """Pick a random record from the adaptor and wrap it as a rec.Record."""
    records = self.adaptor.get_all_values()
    logging.info("got all records")

    idx = random.randint(0, len(records) - 1)
    chosen = records[idx]
    logging.info(f'"{chosen[0]}" was chosen')

    return rec.Record(
        num=str(idx + 1),
        word=chosen[0],
        category=chosen[1],
        mean=chosen[2],
        supplement=chosen[3],
        created_at=chosen[4])
def test_template_render_namespace(testdirectory):
    """Render the builtin namespace synopsis for the coffee API and record it."""
    renderer = wurfapi.template_render.TemplateRender(user_path=None)
    api = generate_coffee_api(testdirectory=testdirectory)

    output = renderer.render(
        selector='project::v1_0_0',
        api=api,
        filename='namespace_synopsis.rst')

    scratch = testdirectory.mkdir('mismatch')
    recorder = record.Record(
        filename='builtin_namespace_synopsis.rst',
        recording_path='test/data/template_recordings',
        mismatch_path=scratch.path())
    recorder.record(data=output)
def test_template_finder_builtin(testdirectory):
    """Render the builtin class synopsis for the coffee API and record it."""
    renderer = wurfdocs.template_render.TemplateRender(user_path=None)
    api = generate_coffee_api(testdirectory=testdirectory)

    output = renderer.render(
        selector="project::coffee::machine",
        api=api,
        filename='class_synopsis.rst')

    scratch = testdirectory.mkdir('mismatch')
    recorder = record.Record(
        filename='builtin_class_synopsis.rst',
        recording_path='test/data/template_recordings',
        mismatch_path=scratch.path())
    recorder.record(data=output)
def test_record_rst(testdirectory):
    """Recording identical rst data is idempotent; changed data raises."""
    recording_path = testdirectory.mkdir('recording')
    mismatch_path = testdirectory.mkdir('mismatch')

    recorder = record.Record(
        filename='test.rst',
        recording_path=recording_path.path(),
        mismatch_path=mismatch_path.path())

    original = "Hello\n=====\nWorld"
    recorder.record(data=original)
    # Re-recording the exact same content must not raise.
    recorder.record(data=original)

    # Different content must be flagged as a mismatch.
    with pytest.raises(record.RecordError):
        recorder.record(data="Hello\n=====\nWurfdocs")
def test_record_json(testdirectory):
    """JSON recordings compare by content, not key order; changes raise."""
    recording_path = testdirectory.mkdir('recording')
    mismatch_path = testdirectory.mkdir('mismatch')

    recorder = record.Record(
        filename='test.json',
        recording_path=recording_path.path(),
        mismatch_path=mismatch_path.path())

    recorder.record(data={'foo': 2, 'bar': 3})
    # Identical content is accepted, regardless of key ordering.
    recorder.record(data={'foo': 2, 'bar': 3})
    recorder.record(data={'bar': 3, 'foo': 2})

    # A changed value must be flagged as a mismatch.
    with pytest.raises(record.RecordError):
        recorder.record(data={'foo': 3, 'bar': 3})
def test_delete(self, paras, expected):
    """Log in, open the meeting-records page, delete a record and assert."""
    st = setup.Setup()
    lg = login.Login()
    lg.set_driver(st.driver)
    driver = lg.login_action('admin', 'admin')
    driver.find_element_by_link_text('※ 会议记录 ※').click()

    rec_page = record.Record()
    rec_page.set_driver(driver)
    rec_page.dele(paras)
    time.sleep(2)

    parts = expected.split('=')
    assert_type = parts[0]
    expected_str = parts[1]
    # Dispatch on the assertion type configured in the excel sheet.
    if assert_type == 'assert_msg':
        self.assert_add_msg(driver, expected_str)
    driver.quit()
def test_record_rst(testdirectory):
    """Recording identical rst data is idempotent; changed data raises."""
    recording_path = testdirectory.mkdir("recording")
    mismatch_path = testdirectory.mkdir("mismatch")

    recorder = record.Record(
        filename="test.rst",
        recording_path=recording_path.path(),
        mismatch_path=mismatch_path.path(),
    )

    original = "Hello\n=====\nWorld"
    recorder.record(data=original)
    # Re-recording the exact same content must not raise.
    recorder.record(data=original)

    # Different content must be flagged as a mismatch.
    with pytest.raises(record.RecordError):
        recorder.record(data="Hello\n=====\nwurfapi")
def test_template_render_namespace(testdirectory):
    """Render the builtin namespace synopsis for the coffee API and record it."""
    renderer = wurfapi.template_render.TemplateRender(user_path=None)
    api = generate_coffee_api(testdirectory=testdirectory)

    output = renderer.render(
        selector="project::v1_0_0", api=api, filename="namespace_synopsis.rst"
    )

    scratch = testdirectory.mkdir("mismatch")
    recorder = record.Record(
        filename="builtin_namespace_synopsis.rst",
        recording_path="test/data/template_recordings",
        mismatch_path=scratch.path(),
    )
    recorder.record(data=output)