def __init__(self, yaml):
    """Resolve and remember the YAML config file path.

    :param yaml: config file name, resolved via Path().get_config_path
    :raises FileNotFoundError: when the resolved path does not exist
    """
    config_path = Path().get_config_path(yaml)
    if not os.path.exists(config_path):
        raise FileNotFoundError('配置文件不存在')
    self.yaml = config_path
    self._data = None  # parsed contents, filled in lazily
def __init__(self, dirname: str) -> None:
    """Remember the source and target photo directories for *dirname*.

    :param dirname: Directory within the `raw` directory to collect photos
        from. Photos will be collected in this dirname in the `photos`
        directory.
    """
    # The two assignments are independent; resolve the target dir first.
    self.photos_dir = Path.to_src_photos_dir(dirname)
    self.raw_dir = Path.to_raw_photos_dir(dirname)
def __init__(self, excel, sheet=0, title_line=True):
    """Locate *excel* under <basepath>/data and remember read options.

    :param excel: workbook file name inside the data directory
    :param sheet: index of the sheet to read, defaults to the first
    :param title_line: whether the first row is a header line
    :raises FileNotFoundError: when the workbook file does not exist
    """
    excel_path = os.path.join(Path().basepath, 'data', excel)
    if not os.path.exists(excel_path):
        raise FileNotFoundError('文件不存在!')
    self.excel = excel_path
    self.sheet = sheet
    self.title_line = title_line
    self._data = list()  # parsed rows, filled in lazily
def __init__(self, package, file_obj):
    """Prepare an export of *package*; the zip is written to *file_obj*.

    'style_dir' is the directory where we can copy the stylesheets from;
    'output_dir' is the directory that will be [over]written with the website.
    """
    self.package = package
    self.file_obj = file_obj
    self.pages = []
    self.page_class = WebsitePage
    static_dir = Path(settings.STATIC_ROOT)
    self.style_dir = static_dir / "css" / "styles" / package.style
    self.scripts_dir = static_dir / "scripts"
    self.media_dir = Path(package.user.get_profile().media_path)
    # Export into a fresh temporary directory; removed again after zipping.
    self.output_dir = Path(tempfile.mkdtemp())
def save(self):
    """Save an imsmanifest file and Dublin Core metadata to self.output_dir.

    Fix: all three file handles are now managed by ``with`` blocks; the
    original opened them manually and leaked the handles on any exception.
    """
    with open(self.output_dir / "imsmanifest.xml", "wb") as out:
        out.write(self.createXML().encode('utf8'))
    # If the user did not supply a metadata title, description or creator,
    # fall back to the package's values; an empty package title falls back
    # to the package name, an empty description to the package title.
    lrm = model_to_dict(self.package.dublincore)
    if lrm.get('title', '') == '':
        lrm['title'] = self.package.title
    if lrm['title'] == '':
        lrm['title'] = self.package.name
    if lrm.get('description', '') == '':
        lrm['description'] = self.package.description
    if lrm['description'] == '':
        lrm['description'] = self.package.title
    if lrm.get('creator', '') == '':
        lrm['creator'] = self.package.author
    # Render the metadata template with the (possibly patched) fields.
    templateFilename = Path(
        settings.STATIC_ROOT) / 'templates' / 'dublincore.xml'
    with open(templateFilename, 'rb') as template_file:
        template = template_file.read()
    xml = template % lrm
    with open(self.output_dir / 'dublincore.xml', 'wb') as out:
        out.write(xml.encode('utf8'))
def set_file(self, filename):
    """Attach an upload file to the request, or flag a missing filename.

    Bug fix: the original evaluated ``filename != ''`` first, so a ``None``
    filename reached ``Path().basepath + filename`` and raised TypeError
    before the ``is None`` check could set ``self.state``.

    :param filename: path relative to ``Path().basepath``, or ''/None
    """
    if filename is None or filename == '':
        self.state = 1  # no file supplied
        return
    file_path = Path().basepath + filename
    # NOTE(review): the handle is intentionally left open for the HTTP
    # client library to stream from — confirm it is closed by the caller.
    self.files = {'file': open(file_path, 'rb')}
class OsUtilsTestCase(TestCase):
    """Tests for the ``ensure_empty_dir`` os utility."""

    # Scratch directory (under the photos root) used by these tests.
    dirname = 'OsUtilsTestCase'
    photos_dir = Path.to_src_photos_dir(dirname)

    def setUp(self) -> None:
        # Start every test from a clean slate: drop any leftover directory.
        if os.path.exists(self.photos_dir):
            shutil.rmtree(self.photos_dir)

    def test_that_ensure_empty_dir_creates_dir(self):
        # A missing directory must be created.
        self.assertFalse(os.path.exists(self.photos_dir))
        ensure_empty_dir(self.photos_dir)
        self.assertTrue(os.path.exists(self.photos_dir))

    def test_that_ensure_empty_dir_empties_dir(self):
        # Place a file in the directory to be emptied
        os.mkdir(self.photos_dir)
        src_full_path = Path.to_testphoto('wolf_low_res.jpg')
        dst_full_path = os.path.join(self.photos_dir, 'wolf.jpg')
        shutil.copyfile(src_full_path, dst_full_path)
        present_files = [entry.name for entry in os.scandir(self.photos_dir)]
        self.assertListEqual(['wolf.jpg'], present_files)
        ensure_empty_dir(self.photos_dir)
        present_files = [entry.name for entry in os.scandir(self.photos_dir)]
        self.assertListEqual([], present_files)
def copy_resources(self):
    """Copy all JS/CSS media referenced by the pages, plus package resources."""
    media_paths = []
    for page in self.pages:
        media_paths.extend(page.view_media._js)
        media_paths.extend(page.view_media._css.get('all', []))
    # Strip the STATIC_URL prefix so paths are relative to STATIC_ROOT.
    relative_paths = [path.replace(settings.STATIC_URL, "")
                      for path in media_paths]
    Path(settings.STATIC_ROOT).copylist(relative_paths, self.output_dir)
    self.media_dir.copylist(self.package.resources, self.output_dir)
def write_excel_xls(filename, sheet_name, value):
    """Write a 2-D sequence *value* into a new .xls workbook.

    :param filename: output file name, saved under <basepath>/result
    :param sheet_name: name of the sheet created in the new workbook
    :param value: sequence of rows, each row a sequence of cell values
    """
    workbook = xlwt.Workbook()  # create a new workbook
    sheet = workbook.add_sheet(sheet_name)  # create one sheet inside it
    # Idiom fix: enumerate over rows and cells instead of range(len(...)).
    for row_index, row in enumerate(value):
        for col_index, cell in enumerate(row):
            sheet.write(row_index, col_index, cell)
    path = os.path.join(Path().basepath, 'result', filename)
    workbook.save(path)
def get_pwd_cookie(type):
    """Refresh and return the password cookie dict.

    :param type: 'mul' selects mode 1, anything else mode 2.  (The parameter
        name shadows the builtin ``type``; kept for caller compatibility.)
    :return: dict of cookies parsed from the cookie file
    """
    import ast  # local import: keeps the module header untouched
    set_pwd_cookie(1 if type == 'mul' else 2)
    with open(Path().get_cookie_path(), 'r') as f:
        text = f.read()
    # Security fix: the cookie file holds a str()-ified dict; parse it with
    # ast.literal_eval instead of eval(), which would execute arbitrary code
    # if the file were ever tampered with.  Also dropped the redundant
    # f.close() inside the with block.
    return ast.literal_eval(text)
def test_that_pixelate_returns_pixelated_photo(self):
    """Pixelating the wolf photo must match the stored reference bitmap."""
    result = self.creator.pixelate(nr_pixels_in_x=50, nr_pixels_in_y=35)
    reference_file = Path.to_testphoto('wolf_pixelated_50_35.bmp')
    if self.RESET_EXPECTED_OUTPUT:
        result.save(reference_file)
    # Reference image is a bitmap, to prevent jpeg artefacts in comparison
    expected = Photo.open(reference_file)
    self.assertEqual(expected, result)
def __init__(self, *args, **kwargs):
    """Initialize the IMS exporter with its template and schema directories."""
    super(IMSExport, self).__init__(*args, **kwargs)
    root = Path(settings.STATIC_ROOT)
    self.templatesDir = root / "templates"
    self.schemasDir = root / "schemas/ims"
    self.page_class = IMSPage
def test_that_ensure_empty_dir_empties_dir(self):
    """ensure_empty_dir must remove existing files from the directory."""
    # Seed the directory with a single photo so there is something to remove.
    os.mkdir(self.photos_dir)
    source = Path.to_testphoto('wolf_low_res.jpg')
    destination = os.path.join(self.photos_dir, 'wolf.jpg')
    shutil.copyfile(source, destination)
    self.assertListEqual(
        ['wolf.jpg'],
        [entry.name for entry in os.scandir(self.photos_dir)])
    ensure_empty_dir(self.photos_dir)
    self.assertListEqual(
        [],
        [entry.name for entry in os.scandir(self.photos_dir)])
def set_pwd_cookie(type):
    """Request a fresh password cookie and persist it to the cookie file.

    :param type: '2' (or 2) selects ``url_pwd_url_s``, anything else
        ``url_pwd_url``.  (The parameter name shadows the builtin ``type``;
        kept for caller compatibility.)
    """
    url = url_pwd_url_s if str(type) == '2' else url_pwd_url
    session = requests.session()
    response = session.post(url)
    cookies = response.cookies.get_dict()
    # Fix: dropped the redundant f.close() inside the with block — the
    # context manager already closes the file.
    with open(Path().get_cookie_path(), 'w') as f:
        f.write(str(cookies))
def test_that_photo_pixelate_returns_photo_pixelated_photo(self):
    """photo_pixelate output must match the stored reference bitmap."""
    # Photo selection is randomised; pin the seed for reproducibility.
    random.seed(1)
    result = self.creator.photo_pixelate(
        src_dir=os.path.join(Path.testdata, 'cats'),
        nr_pixels_in_x=30,
        nr_pixels_in_y=30)
    reference_file = Path.to_testphoto('wolf_photo_pixelated_30_30.bmp')
    if self.RESET_EXPECTED_OUTPUT:
        result.save(reference_file)
    # Reference image is a bitmap, to prevent jpeg artefacts in comparison
    expected = Photo.open(reference_file)
    self.assertEqual(expected, result)
def __init__(self, *args, **kwargs):
    """Initialize the SCORM exporter.

    :keyword scorm_type: required; selects the SCORM flavour to export
    :raises TypeError: when ``scorm_type`` is missing
    """
    if "scorm_type" not in kwargs:
        raise TypeError("ScormExport requires a kw argument scorm_type")
    # Idiom: kwargs.pop replaces the original fetch-then-del pair.
    self.scorm_type = kwargs.pop("scorm_type")
    super(ScormExport, self).__init__(*args, **kwargs)
    static_dir = Path(settings.STATIC_ROOT)
    self.imagesDir = static_dir / "images"
    self.templatesDir = static_dir / "templates"
    self.schemasDir = static_dir / "schemas"
    self.hasForum = False
    self.page_class = ScormPage
def write_mul_insured(value):
    """Append rows in *value* to the result workbook, creating it on demand.

    NOTE(review): ``filename`` is read from module scope here, not passed in
    — confirm a module-level ``filename`` exists before calling.

    :param value: sequence of rows, each row a sequence of cell values
    """
    path = os.path.join(Path().basepath, 'result', filename)
    try:
        workbook = xlrd.open_workbook(path)  # open the existing workbook
        sheets = workbook.sheet_names()
        worksheet = workbook.sheet_by_name(sheets[0])  # first sheet
        rows_old = worksheet.nrows  # rows already present; append below them
        new_workbook = copy(workbook)  # convert the xlrd object to xlwt
        new_worksheet = new_workbook.get_sheet(0)
        for row_offset, row in enumerate(value):
            for col_index, cell in enumerate(row):
                new_worksheet.write(rows_old + row_offset, col_index, cell)
        new_workbook.save(path)
    except Exception:
        # Fix: narrowed the bare ``except:`` (it also swallowed SystemExit
        # and KeyboardInterrupt).  Any failure here means the workbook is
        # missing or unreadable, so create it with the title row and retry
        # the append through write_excel_xls_append.
        value_title = [["省份", "城市", "保险公司"]]
        write_excel_xls(filename, 'result', value_title)
        write_excel_xls_append(value)
def createMetaData(self):
    """Render the metadata XML for this package and return it as a string.

    If the user did not supply metadata title, description or creator,
    then use package title, description, or creator in imslrm.
    If they did not supply a package title, use the package name.
    If they did not supply a date, use today.

    :return: the rendered metadata XML
    :raises AttributeError: for an unsupported ``scorm_type``
    """
    # Pick the metadata template matching the SCORM flavour being exported.
    if self.scorm_type == SCORM12:
        template_name = 'imslrm.xml'
    elif self.scorm_type == COMMONCARTRIDGE:
        template_name = 'cc.xml'
    else:
        raise AttributeError("Can't create metadata for %s" % self.scorm_type)
    static_dir = Path(settings.STATIC_ROOT)
    templateFilename = static_dir / 'templates' / template_name
    # NOTE(review): file handle is never explicitly closed here.
    template = open(templateFilename, 'rb').read()
    lrm = model_to_dict(self.package.dublincore)
    # Fallback chain: empty metadata fields inherit from the package; an
    # empty package title falls back to the package name, and an empty
    # description falls back to the package title.  Order matters.
    if lrm.get('title', '') == '':
        lrm['title'] = self.package.title
    if lrm['title'] == '':
        lrm['title'] = self.package.name
    if lrm.get('description', '') == '':
        lrm['description'] = self.package.description
    if lrm['description'] == '':
        lrm['description'] = self.package.title
    if lrm.get('creator', '') == '':
        lrm['creator'] = self.package.author
    if lrm['date'] == '':
        lrm['date'] = time.strftime('%Y-%m-%d')
    # if they don't look like VCARD entries, coerce to fn:
    for f in ('creator', 'publisher', 'contributors'):
        if re.match('.*[:;]', lrm[f]) == None:
            lrm[f] = u'FN:' + lrm[f]
    xml = template % lrm
    return xml
def get_logger(self):
    """Attach handlers to the logger and return it.

    Adds two handlers — one streaming to the console and one writing to a
    daily-rotating log file — each with its own level taken from the
    configuration.  If the logger already has handlers it is returned
    unchanged, so repeated calls do not duplicate output.
    """
    if not self.logger.handlers:  # avoid duplicated log output
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(self.formatter)
        console_handler.setLevel(self.console_output_level)
        self.logger.addHandler(console_handler)
        # Rotate to a new log file daily ('D'), keeping at most
        # backup_count old files; delay=True defers opening the file
        # until the first record is emitted.
        file_handler = TimedRotatingFileHandler(
            filename=os.path.join(Path().get_log_path(), self.log_file_name),
            when='D',
            interval=1,
            backupCount=self.backup_count,
            delay=True,
            encoding='utf-8')
        file_handler.setFormatter(self.formatter)
        file_handler.setLevel(self.file_output_level)
        self.logger.addHandler(file_handler)
    return self.logger
class PhotoCollectorTestCase(TestCase):
    """Tests for PhotoCollector's collect/split/select/resize pipeline."""

    # Scratch directories (under photos/ and raw/) used by these tests.
    dirname = 'PhotoCollectorTestCase'
    photos_dir = Path.to_src_photos_dir(dirname)
    raw_dir = Path.to_raw_photos_dir(dirname)

    def setUp(self) -> None:
        """
        Ensure that dirs photos/PhotoCollectorTestCase and
        raw/PhotoCollectorTestCase do not exist before tests start
        """
        for test_dir in (self.photos_dir, self.raw_dir):
            if os.path.exists(test_dir):
                shutil.rmtree(test_dir)

    def tearDown(self) -> None:
        """
        Ensure that dirs photos/PhotoCollectorTestCase and
        raw/PhotoCollectorTestCase do not exist after tests are done
        """
        for test_dir in (self.photos_dir, self.raw_dir):
            if os.path.exists(test_dir):
                shutil.rmtree(test_dir)

    def test_that_collect_files_gathers_files_from_single_directory(self):
        self.setup_raw_dir_structure()
        collector = PhotoCollector(self.dirname)
        collector._clean_photos_dir()
        collector._collect_files()
        # Collected photos are renamed to zero-padded sequential names.
        present_files = sorted(entry.name
                               for entry in os.scandir(self.photos_dir))
        self.assertListEqual(['00000001.jpg', '00000002.jpg', '00000003.jpg',
                              '00000004.jpg', '00000005.jpg', '00000006.jpg'],
                             present_files)

    def test_that_split_by_size_ratio_results_in_correct_folder_structure(self):
        self.setup_photo_dir_structure()
        collector = PhotoCollector(self.dirname)
        most_common_resolution = collector._split_by_size_ratio()
        # Five photos are 455x455 (1:1), one photo is 474x296 (1:1.601)
        self.assertEqual(100, most_common_resolution)
        dirname = os.path.join(self.photos_dir, '100')
        nr_files_in_dir = sum(1 for _ in os.scandir(dirname))
        self.assertEqual(5, nr_files_in_dir)
        dirname = os.path.join(self.photos_dir, '160')
        nr_files_in_dir = sum(1 for _ in os.scandir(dirname))
        self.assertEqual(1, nr_files_in_dir)

    def test_that_select_images_selects_all_correct_images(self):
        self.setup_photo_dir_structure()
        collector = PhotoCollector(self.dirname)
        collector._split_by_size_ratio()
        # The mosaic dir must be created by _select_images itself.
        mosaic_dir = os.path.join(self.photos_dir, 'mosaic')
        self.assertFalse(os.path.exists(mosaic_dir))
        collector._select_images(desired_size_ratio=100)
        self.assertTrue(os.path.exists(mosaic_dir))
        nr_files_in_dir = sum(1 for _ in os.scandir(mosaic_dir))
        self.assertEqual(5, nr_files_in_dir)

    def test_that_resize_images_resizes_all_images(self):
        self.setup_photo_dir_structure()
        collector = PhotoCollector(self.dirname)
        collector._split_by_size_ratio()
        collector._select_images(desired_size_ratio=100)
        # Crop the first image, such that the original is not square anymore
        mosaic_dir = os.path.join(self.photos_dir, 'mosaic')
        first_cat = os.path.join(mosaic_dir, '00000001.jpg')
        img = Photo.open(first_cat)
        img = img.crop((0, 0, 400, 200))
        img.save(first_cat)
        collector._resize_images(desired_size_ratio=100, desired_width=100)
        # Every image, including the cropped one, must end up 100x100.
        for file in os.scandir(mosaic_dir):
            img = Photo.open(file.path)
            desired_size = (100, 100)
            self.assertTupleEqual(desired_size, img.size)

    def setup_raw_dir_structure(self):
        """
        Create the following directory structure and files inside
        `raw/PhotoCollectorTestCase`:
            cat_even/
                cat002.JPG
                cat004.JPG
            cat_odd/
                first_cat/
                    cat001.JPG
                cat003.JPG
                cat005.JPG
        """
        cat_even = os.path.join(self.raw_dir, 'cat_even')
        cat_odd = os.path.join(self.raw_dir, 'cat_odd')
        first_cat = os.path.join(cat_odd, 'first_cat')
        # Parent dirs must be created before their children.
        for raw_dir in (self.raw_dir, cat_even, cat_odd, first_cat):
            os.mkdir(raw_dir)
        cat002 = os.path.join(cat_even, 'cat002.JPG')
        cat004 = os.path.join(cat_even, 'cat004.JPG')
        cat006 = os.path.join(cat_even, 'cat006.JPG')
        cat001 = os.path.join(first_cat, 'cat001.JPG')
        cat003 = os.path.join(cat_odd, 'cat003.JPG')
        cat005 = os.path.join(cat_odd, 'cat005.JPG')
        src_dir = os.path.join(Path.testdata, 'cats')
        shutil.copyfile(os.path.join(src_dir, 'cat001.jpg'), cat001)
        shutil.copyfile(os.path.join(src_dir, 'cat002.jpg'), cat002)
        shutil.copyfile(os.path.join(src_dir, 'cat003.jpg'), cat003)
        shutil.copyfile(os.path.join(src_dir, 'cat004.jpg'), cat004)
        shutil.copyfile(os.path.join(src_dir, 'cat005.jpg'), cat005)
        shutil.copyfile(os.path.join(src_dir, 'cat006.jpg'), cat006)
        # A non-photo file that the collector should ignore.
        with open(os.path.join(self.raw_dir, 'info.txt'), 'w') as f:
            f.write('We now have 5 cat pictures in this directory')

    def setup_photo_dir_structure(self):
        # Build the raw structure, then run collection into the photos dir.
        self.setup_raw_dir_structure()
        collector = PhotoCollector(self.dirname)
        collector._clean_photos_dir()
        collector._collect_files()
def item(category, folder, page):
    """Image-viewer page: serve a photo stored outside static/ by embedding
    it as a base64 data URI."""
    folder_dict_value = folder_dict.get(category)
    if folder_dict_value:
        root = folder_dict_value.get('root')
        folders = folder_dict_value.get('folders')
        item_key = u'%s-%s' % (category, folder)
        filenames = item_dict.get(item_key)
        if filenames:
            category_dict = Path.get_category()
            category_echo = category_dict.get(category)
            item_length = len(filenames)
            folder_length = len(folders)
            folder_index = folders.index(folder)
            # previous / next folder links
            last_folder_index = max(1, folder_index - 1)
            next_folder_index = min(folder_length - 1, folder_index + 1)
            last_folder = folders[last_folder_index]
            next_folder = folders[next_folder_index]
            # clamp page into [1, item_length]
            page = min(page, item_length)
            page = max(page, 1)
            filename = filenames[page - 1]
            extend = filename.rsplit('.', 1)[-1]
            img_path = u'%s/%s/%s' % (root, folder, filename)
            src = img_b64_dict.get(img_path)
            if src:
                pass  # cache hit: reuse the already-encoded data URI
            else:
                img_base64 = Encrypt.img_b64encode(path=img_path)
                src = u'data:image/%s;base64,%s' % (extend, img_base64)
                if len(img_b64_dict) > 500:
                    img_b64_dict.popitem(last=False)  # evict oldest (FIFO)
                # Cache the data URI for the next visit; the cache is
                # bounded above so it cannot grow without limit.
                img_b64_dict[img_path] = src
            # previous / next page links
            last_page = max(1, page - 1)
            next_page = min(item_length, page + 1)
            # Narrow layout with fewer pager entries for mobile-ish agents.
            headers = request.headers
            agent = headers['User-Agent']
            width = '70%'
            echo = 5
            if 'linux' in agent.lower():
                width = '100%'
                echo = 1
            item_page_list = Page.get_list(page=page, pages=item_length,
                                           echo=echo)
            return render_template('/image/item.html', category=category,
                                   category_echo=category_echo, folder=folder,
                                   last_folder=last_folder,
                                   next_folder=next_folder,
                                   last_page=last_page, next_page=next_page,
                                   src=src, folder_length=folder_length,
                                   item_length=item_length, width=width,
                                   page=page, item_page_list=item_page_list,
                                   current_page=page)
    # unknown category or item_key: redirect to the home page
    return redirect('/')
def setUpClass(cls) -> None:
    """Create one shared MosaicCreator for every test in this case."""
    super().setUpClass()
    source_photo = Path.to_testphoto('wolf_high_res')
    cls.creator = MosaicCreator(source_photo, max_output_size=500)
class WebsiteExport(object):
    """
    WebsiteExport will export a package as a website of HTML pages
    """
    title = "Website (Zip)"

    def __init__(self, package, file_obj):
        """
        'style_dir' is the directory where we can copy the stylesheets from
        'output_dir' is the directory that will be [over]written
        with the website
        """
        static_dir = Path(settings.STATIC_ROOT)
        self.package = package
        self.style_dir = static_dir / "css" / "styles" / package.style
        self.scripts_dir = static_dir / "scripts"
        self.pages = []
        self.file_obj = file_obj
        self.media_dir = Path(package.user.get_profile().media_path)
        self.page_class = WebsitePage
        # Fresh temporary directory; removed again at the end of export().
        self.output_dir = Path(tempfile.mkdtemp())

    def export(self):
        """
        Export web site
        Cleans up the previous packages pages and performs the export
        """
        self.create_pages()
        self.save_pages()
        self.copy_files()
        # Zip up the website package
        self.doZip()
        # Clean up the temporary dir
        self.output_dir.rmtree()

    def doZip(self):
        """
        Actually saves the zip data. Called by 'Path.safeSave'
        """
        zipped = ZipFile(self.file_obj, "w")
        for scormFile in self.output_dir.files():
            zipped.write(scormFile, scormFile.basename().
                         encode('utf8'), ZIP_DEFLATED)
        zipped.close()

    def copy_style_files(self):
        """Copy style files to the export package"""
        # base.css and popup_bg.gif live one level above the style dir.
        style_files = ["%s/../base.css" % self.style_dir]
        style_files.append("%s/../popup_bg.gif" % self.style_dir)
        style_files += self.style_dir.files("*.css")
        style_files += self.style_dir.files("*.jpg")
        style_files += self.style_dir.files("*.gif")
        style_files += self.style_dir.files("*.svg")
        style_files += self.style_dir.files("*.png")
        style_files += self.style_dir.files("*.js")
        style_files += self.style_dir.files("*.html")
        self.style_dir.copylist(style_files, self.output_dir)

    def copy_licence(self):
        """Copy licence file"""
        # NOTE(review): self.templatesDir is not set by this class's
        # __init__; it appears to be provided by subclasses — confirm this
        # branch is never reached on a bare WebsiteExport.
        if self.package.license == "GNU Free Documentation License":
            # include a copy of the GNU Free Documentation Licence
            (self.templatesDir / 'fdl.html').copyfile(
                self.output_dir / 'fdl.html')

    def copy_files(self):
        """
        Copy all the files used by the website.
        """
        # Copy the style sheet files to the output dir
        self.copy_style_files()
        self.copy_resources()
        self.scripts_dir.copylist(('libot_drag.js', 'jquery.js'),
                                  self.output_dir)
        self.copy_players()
        self.copy_licence()

    def create_pages(self, additional_kwargs={}):
        # NOTE(review): mutable default argument — safe only while callers
        # never mutate additional_kwargs.
        self.pages.append(self.page_class(self.package.root, 1,
                                          exporter=self,
                                          **additional_kwargs))
        self.generate_pages(self.package.root, 2, additional_kwargs)

    def save_pages(self):
        # Write every generated page into the output directory.
        for page in self.pages:
            page.save(self.output_dir)

    def copy_players(self):
        # Scan all idevices for media players, stopping early once all
        # three have been seen.
        # NOTE(review): the computed flags are never used afterwards — the
        # actual copy step appears to be missing or handled elsewhere.
        has_flowplayer = False
        has_magnifier = False
        has_xspfplayer = False
        is_break = False
        for page in self.pages:
            if is_break:
                break
            for idevice in page.node.idevices.all():
                resources = idevice.as_child().system_resources
                if (has_flowplayer and has_magnifier and has_xspfplayer):
                    is_break = True
                    break
                if not has_flowplayer:
                    if 'flowPlayer.swf' in resources:
                        has_flowplayer = True
                if not has_magnifier:
                    if 'magnifier.swf' in resources:
                        has_magnifier = True
                if not has_xspfplayer:
                    if 'xspf_player.swf' in resources:
                        has_xspfplayer = True

    def copy_resources(self):
        # Gather the JS/CSS media referenced by every page.
        view_media = []
        for page in self.pages:
            view_media += page.view_media._js
            view_media += page.view_media._css.get('all', [])
        # Strip STATIC_URL so the paths are relative to STATIC_ROOT.
        view_media = [medium.replace(settings.STATIC_URL, "")
                      for medium in view_media]
        Path(settings.STATIC_ROOT).copylist(view_media, self.output_dir)
        self.media_dir.copylist(self.package.resources, self.output_dir)

    def generate_pages(self, node, depth, kwargs={}):
        """
        Recursively generate pages and store in pages member variable
        for retrieving later. Kwargs will be used at page creation.
        """
        for child in node.children.all():
            page = self.page_class(child, depth, exporter=self,
                                   has_children=child.children.exists(),
                                   **kwargs)
            # Thread pages into a prev/next chain as they are appended.
            last_page = self.pages[-1] if self.pages else None
            if last_page:
                page.prev_page = last_page
                last_page.next_page = page
            self.pages.append(page)
            self.generate_pages(child, depth + 1)
def test_that_real_photo_returns_correct_color(self):
    """avg_color of the low-res wolf photo must equal the known value."""
    wolf = Photo.open(Path.to_testphoto('wolf_low_res'))
    self.assertTupleEqual((127, 111, 102), wolf.avg_color)
class WebsiteExport(object):
    """
    WebsiteExport will export a package as a website of HTML pages
    """
    title = "Website (Zip)"

    def __init__(self, package, file_obj):
        """
        'style_dir' is the directory where we can copy the stylesheets from
        'output_dir' is the directory that will be [over]written
        with the website
        """
        static_dir = Path(settings.STATIC_ROOT)
        self.package = package
        self.style_dir = static_dir / "css" / "styles" / package.style
        self.scripts_dir = static_dir / "scripts"
        self.pages = []
        self.file_obj = file_obj
        self.media_dir = Path(package.user.get_profile().media_path)
        self.page_class = WebsitePage
        # Fresh temporary directory; removed again at the end of export().
        self.output_dir = Path(tempfile.mkdtemp())

    def export(self):
        """
        Export web site
        Cleans up the previous packages pages and performs the export
        """
        self.create_pages()
        self.copyFiles()
        # Zip up the website package
        self.doZip()
        # Clean up the temporary dir
        self.output_dir.rmtree()

    def doZip(self):
        """
        Actually saves the zip data. Called by 'Path.safeSave'
        """
        zipped = ZipFile(self.file_obj, "w")
        for scormFile in self.output_dir.files():
            zipped.write(scormFile, scormFile.basename().encode('utf8'),
                         ZIP_DEFLATED)
        zipped.close()

    def copy_style_files(self):
        """Copy style files (css, images, js, html) to the export package."""
        # base.css and popup_bg.gif live one level above the style dir.
        styleFiles = ["%s/../base.css" % self.style_dir]
        styleFiles.append("%s/../popup_bg.gif" % self.style_dir)
        styleFiles += self.style_dir.files("*.css")
        styleFiles += self.style_dir.files("*.jpg")
        styleFiles += self.style_dir.files("*.gif")
        styleFiles += self.style_dir.files("*.svg")
        styleFiles += self.style_dir.files("*.png")
        styleFiles += self.style_dir.files("*.js")
        styleFiles += self.style_dir.files("*.html")
        self.style_dir.copylist(styleFiles, self.output_dir)

    def copy_licence(self):
        """Copy the GNU FDL text into the export when the licence requires it."""
        if self.package.license == "GNU Free Documentation License":
            # include a copy of the GNU Free Documentation Licence
            # Bug fix: the original expression
            #     self.templatesDir / 'fdl.html'.copyfile(...)
            # binds the attribute access first ('fdl.html'.copyfile), and
            # str has no copyfile method, so it raised AttributeError at
            # runtime.  The parentheses restore the intended path object.
            (self.templatesDir / 'fdl.html').copyfile(
                self.output_dir / 'fdl.html')

    def copyFiles(self):
        """
        Copy all the files used by the website.
        """
        # Copy the style sheet files to the output dir
        self.copy_style_files()
        self.copy_resources()
        self.scripts_dir.copylist(('libot_drag.js', 'jquery.js'),
                                  self.output_dir)
        self.copy_players()
        self.copy_licence()

    def create_pages(self, additional_kwargs={}):
        # NOTE(review): mutable default argument — safe only while callers
        # never mutate additional_kwargs.
        self.pages.append(
            self.page_class(self.package.root, 1, exporter=self,
                            **additional_kwargs))
        self.generate_pages(self.package.root, 2, additional_kwargs)
        # This variant saves the pages as part of page creation.
        for page in self.pages:
            page.save(self.output_dir)

    def copy_players(self):
        # Scan all idevices for media players, stopping early once all
        # three have been seen.
        # NOTE(review): the computed flags are never used afterwards — the
        # actual copy step appears to be missing or handled elsewhere.
        hasFlowplayer = False
        hasMagnifier = False
        hasXspfplayer = False
        isBreak = False
        for page in self.pages:
            if isBreak:
                break
            for idevice in page.node.idevices.all():
                resources = idevice.as_child().system_resources
                if (hasFlowplayer and hasMagnifier and hasXspfplayer):
                    isBreak = True
                    break
                if not hasFlowplayer:
                    if 'flowPlayer.swf' in resources:
                        hasFlowplayer = True
                if not hasMagnifier:
                    if 'magnifier.swf' in resources:
                        hasMagnifier = True
                if not hasXspfplayer:
                    if 'xspf_player.swf' in resources:
                        hasXspfplayer = True

    def copy_resources(self):
        # Gather the JS/CSS media referenced by every page.
        view_media = []
        for page in self.pages:
            view_media += page.view_media._js
            view_media += page.view_media._css.get('all', [])
        # Strip STATIC_URL so the paths are relative to STATIC_ROOT.
        view_media = [medium.replace(settings.STATIC_URL, "")
                      for medium in view_media]
        Path(settings.STATIC_ROOT).copylist(view_media, self.output_dir)
        self.media_dir.copylist(self.package.resources, self.output_dir)

    def generate_pages(self, node, depth, kwargs={}):
        """
        Recursively generate pages and store in pages member variable
        for retrieving later. Kwargs will be used at page creation.
        """
        for child in node.children.all():
            page = self.page_class(child, depth, exporter=self,
                                   has_children=child.children.exists(),
                                   **kwargs)
            # Thread pages into a prev/next chain as they are appended.
            last_page = self.pages[-1] if self.pages else None
            if last_page:
                page.prev_page = last_page
                last_page.next_page = page
            self.pages.append(page)
            self.generate_pages(child, depth + 1)
# my library to operate paths inside disk space and site url in uniform way from utils.path import Path # third party html template engine from mako.lookup import TemplateLookup # root of everything we need root = Path.from_file(__file__).up() # here we'll search our source files with templates and pages markup data templ_dir = root / 'source' # directory for temporary files created by mako template engine cache_dir = root / 'cache' # directory where out generated site pages will be placed by this script output_dir = root / 'output' # mako shit for template rendering lookup = TemplateLookup( directories=[templ_dir.str()], module_directory=cache_dir.str(), input_encoding='utf-8', output_encoding='utf-8', ) def factory(filename): return lookup.get_template(filename) # function used inside teplates to tell them where is root of their subsite # in out case this subside is /pooh
    :param total_nr_pixels: Total number of pixels to distribute
    :param nr_boxes: Total number of boxes to distribute over
    :return: List of tuples containing the box borders, e.g.
        [(0, 77), (77, 155), (155, 232), (232, 310), (310, 387),
         (387, 464), (464, 542), (542, 619), (619, 697), (697, 774)]
    """
    nr_pixels_per_box: float = total_nr_pixels / nr_boxes
    return [(round(nr_pixels_per_box * index),
             round(nr_pixels_per_box * (index + 1)))
            for index in range(nr_boxes)]


if __name__ == '__main__':
    # Ad-hoc manual driver: build a photo mosaic of the wolf picture.
    c = MosaicCreator(Path.to_photo('wolf_high_res'), max_output_size=774)
    cheat_parameter = 50
    img = c.photo_pixelate(Path.to_src_photos_dir('cats'),
                           nr_pixels_in_x=40, nr_pixels_in_y=40)
    img.show()
    img.save('wolf_photo_pixelated.jpg')
    import sys
    sys.exit()
    # NOTE(review): everything below is unreachable (after sys.exit());
    # kept as a parked experiment with a different source photo.
    max_output_size = 12500
    src_photo = 'papmam1'
    c = MosaicCreator(Path.to_photo(src_photo),
                      max_output_size=max_output_size,
                      cheat_parameter=cheat_parameter)
    # img = c.pixelate(nr_pixels_in_x=18, nr_pixels_in_y=24)
def get_xubao_xml(
    filename,
    province,
    city,
    iscity,
    district,
    date,
    company='',
):
    """Build a renewal-quote XML data file from the next Excel record.

    Reads the record at the counter stored in num.yml, advances the
    counter, merges in insured data from config.yml, injects the region
    and contact fields, and writes everything out as <dates><date .../>
    elements.

    NOTE(review): ``iscity`` and ``date`` are accepted but unused here —
    kept for signature compatibility with get_xubao; confirm.

    :param filename: output XML file name (written under the data dir)
    """
    config = YamlReader('config.yml')
    reader = ExcelReader('suzhou_single.xls')
    n = int(YamlReader('num.yml').get_data('num'))
    dictdata = reader.data[n]
    # advance the record counter for the next call
    n = n + 1
    YamlReader('num.yml').set_data(n)
    dictinsure = config.get_data('insured')
    dictdata = dict(dictdata, **dictinsure)
    dictdata['version'] = config.get_data('app').get('version')
    dictdata['insurance_company'] = company
    dictdata['city'] = city
    # Substitute placeholders for missing vehicle fields.
    if dictdata['engine_no'] == '':
        dictdata['engine_no'] = '321352'
    if dictdata['seat_num'] == 0:
        dictdata['seat_num'] = 4
    dictdata['district'] = district
    dictdata['addressee_name'] = '张汉'
    dictdata['addressee_mobile'] = '17621100888'
    dictdata['addressee_province'] = province
    dictdata['addressee_city'] = city
    dictdata['addressee_area'] = district
    dictdata['addressee_detail'] = '南京路新华小区102室'
    dictdata['insured_mobile'] = '17621100888'
    dictdata['applicant_mobile'] = '17621100888'
    dictdata['owner_mobile'] = '17621100888'
    dictdata['insured_province'] = province
    dictdata['insured_city'] = city
    dictdata['insured_district'] = district
    dictdata['insured_detail'] = '南京路新华小区102室'
    # Serialize every key/value pair as a <date name=... value=...> element.
    impl = minidom.getDOMImplementation()
    doc = impl.createDocument(None, None, None)
    rootElement = doc.createElement('dates')
    for key, value in dictdata.items():
        childElement = doc.createElement('date')
        childElement.setAttribute('name', str(key))
        childElement.setAttribute('value', str(value))
        rootElement.appendChild(childElement)
    doc.appendChild(rootElement)
    filename = os.path.join(Path().get_data_path(), filename)
    # Fix: 'w' mode already truncates, so the explicit seek/truncate pair
    # was redundant; the with block guarantees the handle is closed.
    with open(filename, 'w', encoding='UTF-8') as f:
        doc.writexml(f, addindent=' ', newl='\n', encoding='UTF-8')
def item(category, folder, page):
    """Image-viewer page: serve a photo stored outside static/ by handing
    the template a path-derived src."""
    folder_dict_value = folder_dict.get(category)
    if folder_dict_value:
        root = folder_dict_value.get('root')
        folders = folder_dict_value.get('folders')
        item_key = u'%s-%s' % (category, folder)
        filenames = item_dict.get(item_key)
        if filenames:
            category_dict = Path.get_category()
            category_echo = category_dict.get(category)
            item_length = len(filenames)
            folder_length = len(folders)
            folder_index = folders.index(folder)
            # previous / next folder links
            last_folder_index = max(1, folder_index - 1)
            next_folder_index = min(folder_length - 1, folder_index + 1)
            last_folder = folders[last_folder_index]
            next_folder = folders[next_folder_index]
            # clamp page into [1, item_length]
            page = min(page, item_length)
            page = max(page, 1)
            filename = filenames[page - 1]
            extend = filename.rsplit('.', 1)[-1]
            img_path = u'%s/%s/%s' % (root, folder, filename)
            # Drop any drive-letter prefix (e.g. 'C:') from the path.
            src = img_path.split(':', 1)[-1]
            # previous / next page links
            last_page = max(1, page - 1)
            next_page = min(item_length, page + 1)
            # Narrow layout with fewer pager entries for mobile-ish agents.
            headers = request.headers
            agent = headers['User-Agent']
            width = '70%'
            echo = 5
            if 'linux' in agent.lower():
                width = '100%'
                echo = 1
            item_page_list = Page.get_list(page=page, pages=item_length,
                                           echo=echo)
            return render_template('/mm/item.html', category=category,
                                   category_echo=category_echo, folder=folder,
                                   last_folder=last_folder,
                                   next_folder=next_folder,
                                   last_page=last_page, next_page=next_page,
                                   src=src, folder_length=folder_length,
                                   item_length=item_length, width=width,
                                   page=page, item_page_list=item_page_list,
                                   current_page=page)
    # unknown category or item_key: redirect to the home page
    return redirect('/')
def __init__(self, filename=None):
    """Resolve the data file path; default to the new-car data file.

    :param filename: file name inside <basepath>/data, or None for the
        default new-car path
    """
    self.__filename = (Path().get_newcar_path()
                       if filename is None
                       else os.path.join(Path().basepath, 'data', filename))
# _*_ coding:utf-8 _*_ from flask import render_template, redirect, request from mm import mm import os import logging from utils.is_login import is_login from utils.path import Path from utils.page import Page from utils.encrypt import Encrypt import collections #######设置static_folder,便于直接访问 # root_dict=Path.get_img_folder_by_category() category_dict = Path.get_category() folder_dict = Path.read_json_to_dict(name=u'folder') item_dict = Path.read_json_to_dict(name=u'item') img_b64_dict = collections.OrderedDict() ##用于存放最近访问过的img的base64 @mm.route('/') @is_login def category(): return render_template('/mm/category.html', category_dict=category_dict) @mm.route('/<category>/<int:page>') @is_login
def get_xubao(
    filename,
    province,
    city,
    iscity,
    district,
    date,
    company='',
):
    """Query the latest renewal record from MySQL and write it out as XML.

    :param filename: output XML file name (written under the data dir)
    :param province: province filter and value injected into the record
    :param city: city filter/value
    :param iscity: 1 to additionally filter by a city-code prefix
    :param district: district value injected into the record
    :param date: date filter passed to the renewal query
    :param company: insurance company filter, '' for any
    """
    # Fix: dropped the original no-op self-assignments
    # (province = province, city = city, ...).
    config = YamlReader('config.yml')
    # Hoisted the repeated config.get_data('mysql') lookups into one dict.
    mysql_conf = config.get_data('mysql')
    helper = MysqlHelper(host=mysql_conf.get('host'),
                         port=mysql_conf.get('port'),
                         db=mysql_conf.get('db'),
                         user=mysql_conf.get('user'),
                         passwd=mysql_conf.get('passwd'))
    if iscity == 1:
        # Match every city whose code starts with the province+city prefix.
        liben = get_li_beg(province=province, city=city) + '%'
        result = helper.get_xubao(province=province, city=liben,
                                  company=company, date=date)
    else:
        result = helper.get_xubao(province=province, company=company,
                                  date=date)
    # Fall back to an empty skeleton record when the query returns nothing.
    if len(result) == 0:
        dictdata = {'engine_no': '', 'seat_num': ''}
    else:
        dictdata = result[0]
    print(dictdata)  # NOTE(review): debug output — consider logging instead
    dictinsure = config.get_data('insured')
    dictdata = dict(dictdata, **dictinsure)
    dictdata['version'] = config.get_data('app').get('version')
    dictdata['insurance_company'] = company
    dictdata['city'] = city
    # Substitute placeholders for missing vehicle fields.
    if dictdata['engine_no'] == '':
        dictdata['engine_no'] = '321352'
    if dictdata['seat_num'] == 0:
        dictdata['seat_num'] = 4
    dictdata['district'] = district
    dictdata['addressee_name'] = '张汉'
    dictdata['addressee_mobile'] = '17621100888'
    dictdata['addressee_province'] = province
    dictdata['addressee_city'] = city
    dictdata['addressee_area'] = district
    dictdata['addressee_detail'] = '南京路新华小区102室'
    dictdata['insured_mobile'] = '17621100888'
    dictdata['applicant_mobile'] = '17621100888'
    dictdata['owner_mobile'] = '17621100888'
    dictdata['insured_province'] = province
    dictdata['insured_city'] = city
    dictdata['insured_district'] = district
    dictdata['insured_detail'] = '南京路新华小区102室'
    # Serialize every key/value pair as a <date name=... value=...> element.
    impl = minidom.getDOMImplementation()
    doc = impl.createDocument(None, None, None)
    rootElement = doc.createElement('dates')
    for key, value in dictdata.items():
        childElement = doc.createElement('date')
        childElement.setAttribute('name', str(key))
        childElement.setAttribute('value', str(value))
        rootElement.appendChild(childElement)
    doc.appendChild(rootElement)
    filename = os.path.join(Path().get_data_path(), filename)
    # Fix: 'w' mode already truncates, so the explicit seek/truncate pair
    # was redundant; the with block guarantees the handle is closed.
    with open(filename, 'w', encoding='UTF-8') as f:
        doc.writexml(f, addindent=' ', newl='\n', encoding='UTF-8')