def __init__(self, package_name, package_path='templates', encoding='utf-8'):
    """Bind this loader to *package_name*.

    Templates are looked up under *package_path* inside the package and
    decoded with *encoding*.  ``filesystem_bound`` records whether the
    package lives on the real filesystem (DefaultProvider) rather than in
    a zip archive, which callers can use to short-circuit to direct file
    access.
    """
    from pkg_resources import DefaultProvider, ResourceManager, get_provider

    pkg_provider = get_provider(package_name)
    self.provider = pkg_provider
    self.manager = ResourceManager()
    self.package_path = package_path
    self.encoding = encoding
    # True when resources are plain files on disk (not zipped eggs).
    self.filesystem_bound = isinstance(pkg_provider, DefaultProvider)
def __init__(self, packages):
    """Set up a template loader over the given *packages*.

    A FileSystemLoader over ``['templates']`` is attempted first; any
    failure is logged and swallowed so construction still succeeds.
    """
    self.searchpath = ['templates']
    try:
        self.fsl = loaders.FileSystemLoader(self.searchpath)
    except Exception as exc:
        # Best-effort: a missing templates directory must not abort startup.
        log.error(exc)
    self.manager = ResourceManager()
    self.modules = packages
    self.packages = {}
    self.package_path = "templates"
    self.encoding = 'utf-8'
def get_example_data(dataset_name):
    """
    This is a smart package loader that locates text files inside our package

    :param dataset_name: file name under the package's ``example_data`` folder
    :return: raw contents of the resource (bytes)
    """
    manager = ResourceManager()
    provider = get_provider('ebu_tt_live')
    return provider.get_resource_string(manager, 'example_data/' + dataset_name)
def get_data_file(filename):
    """Return full path to specified data file or None if not found.

    If a valid absolute path is provided it will be returned.
    Lookup order: the path as given, then relative to ``SOURCE_PATH``,
    then inside the installed ``netsink`` distribution.
    """
    if os.path.exists(filename):
        return filename
    bundled = os.path.join(SOURCE_PATH, filename)
    if os.path.exists(bundled):
        return bundled
    try:
        return ResourceManager().resource_filename(
            Requirement.parse("netsink"), filename)
    except DistributionNotFound:
        # netsink is not installed as a distribution; nothing more to try.
        return None
def __init__(self):
    """Validate the configured attributes, then prepare the package
    provider, resource manager and compiled template/copy patterns."""
    # (value, required type, message) triples, checked in the original order
    # so the first invalid attribute produces the same AssertionError.
    checks = (
        (self.pathDocumentation, str, 'Invalid documentation path %s'),
        (self.packageName, str, 'Invalid package name %s'),
        (self.pathsTemplates, list, 'Invalid templates paths %s'),
        (self.patternTemplate, str, 'Invalid template pattern %s'),
        (self.patternCopy, str, 'Invalid template copy %s'),
        (self.packagePath, str, 'Invalid package path %s'),
    )
    for value, required, message in checks:
        assert isinstance(value, required), message % value
    super().__init__()
    self._packageProvider = get_provider(self.packageName)
    self._manager = ResourceManager()
    self._rPatternTemplate = re.compile(self.patternTemplate)
    self._rPatternCopy = re.compile(self.patternCopy)
def executeProcess(self, assembly):
    """Create a Processing from *assembly*, feed it the packaged
    config_test.xml and return the filled solicit argument."""
    processing = assembly.create(solicit=TestSolicit)
    assert isinstance(processing, Processing)
    # use the package provider (not the os module) to access files from
    # inside the package (like config_test.xml), so zipped installs work too
    provider = get_provider(__name__)
    manager = ResourceManager()
    self.assertTrue(provider.has_resource('config_test.xml'), 'Xml Config file missing')
    stream = provider.get_resource_stream(manager, 'config_test.xml')
    solicit = processing.ctx.solicit(stream=stream, uri='file://%s' % 'config_test.xml')
    arg = processing.execute(FILL_ALL, solicit=solicit)
    assert isinstance(arg.solicit, TestSolicit)
    stream.close()
    return arg
def get_package_loader(self, package, package_path):
    """Return a ``loader(path)`` callable serving files bundled inside
    *package* under *package_path*.

    The loader returns ``(basename, opener)`` for an existing resource and
    ``(None, None)`` otherwise.  For filesystem-backed packages the opener
    comes from ``self._opener``; for zipped packages a lambda yielding
    ``(stream, loadtime, 0)`` is returned (size unknown, reported as 0).
    """
    from pkg_resources import DefaultProvider, ResourceManager, get_provider
    loadtime = datetime.utcnow()
    provider = get_provider(package)
    manager = ResourceManager()
    filesystem_bound = isinstance(provider, DefaultProvider)

    def loader(path):
        # FIX: guard against None *before* joining — the original joined
        # first, so posixpath.join(package_path, None) raised TypeError and
        # the None check afterwards was unreachable dead code.
        if path is None:
            return (None, None)
        path = posixpath.join(package_path, path)
        if not provider.has_resource(path):
            return (None, None)
        basename = posixpath.basename(path)
        if filesystem_bound:
            return (basename,
                    self._opener(provider.get_resource_filename(manager, path)))
        return (basename,
                lambda: (provider.get_resource_stream(manager, path), loadtime, 0))

    return loader
def getFilesToImport(self, ext=None, context=None):
    """
    Return list of configuration files of given extension with full pathnames.

    Returns None when *ext* is unknown or has no configuration files.
    When *context* is given and a metacmd config is present without a
    provider object, a ZMSMetacmdProvider is created in *context*.
    """
    if ext in self.pkg_names:
        files = self.pkg_confs[self.pkg_names.index(ext)]
        if files:
            filenames = []
            # One manager is enough; the original built one per file.
            manager = ResourceManager()
            for f in files:
                filename = manager.resource_filename(ext, 'conf/' + f)
                filenames.append(filename)
                # if ZMSActions are included but no Provider available - create it
                if context is not None:
                    # FIX: the original read "fox x in ..." (twice) — a
                    # syntax error for "for x in ...".  Also compute the
                    # meta_type list once instead of twice.
                    meta_types = [x.meta_type for x in context.objectValues()]
                    if ('.metacmd.' in f) \
                            and ('ZMSMetacmdProvider' not in meta_types) \
                            and ('ZMSMetacmdProviderAcquired' not in meta_types):
                        context.REQUEST.set('meta_type', 'ZMSMetacmdProvider')
                        context.manage_customizeSystem('Add', 'Manager',
                                                       context.REQUEST['lang'],
                                                       context.REQUEST)
            return filenames
def __init__(self, app=None, prefix='/static', cache_max_age=60, route_name='static'):
    """Register this instance in the class registry under *app* and
    initialise static-serving state; binds to *app* immediately when one
    is supplied."""
    self.app = app
    self.prefix = prefix
    self.route_name = route_name
    #: Default ``Cache-Control: max-age`` value
    self.cache_max_age = cache_max_age
    #: Add "Access-Control-Allow-Origin: *" header?
    self.access_control_allow_origin = '*'
    self.sources = OrderedDict()
    self.resource_manager = ResourceManager()
    # One instance per app, tracked on the class itself.
    self.__class__.__registry__[app] = self
    if app is not None:
        self.init_app(app)
def __init__(self):
    """Scan EXTENSIONS and record, per package: name, availability, hint,
    info, URL, installed version (normalized), readiness, and any bundled
    ``conf`` .xml/.zip files.  (Python 2 code: iteritems.)"""
    self.pkg = {}
    self.pkg_names = []
    self.pkg_available = []
    self.pkg_hints = []
    self.pkg_infos = []
    self.pkg_ready = []
    self.pkg_confs = []
    self.pkg_installed = []
    self.pkg_urls = []
    for name, info in sorted(EXTENSIONS.iteritems()):
        self.pkg_names.append(name)
        self.pkg_available.append(info[0])
        self.pkg_hints.append(info[1])
        self.pkg_infos.append(info[3])
        self.pkg_urls.append(info[2])
        # str(dist) is "name version"; a 2-token result means installed.
        package = str(WorkingSet().find(Requirement.parse(name))).split()
        if ((name in package) and (len(package) == 2)):
            # TODO: **Normalize Versions** acc. to `PEP 440`: http://legacy.python.org/dev/peps/pep-0440/
            # The version specified requires normalization, consider using '3.2.0.dev3' instead of '3.2.0dev3' etc. +
            # pip v6.0.6 does not append svn revision specified in `setup.cfg` as v1.5.6 before
            # => `zms.zms_version()` have to be adjusted too...
            self.pkg_installed.append(package[1].replace('.dev', 'dev').replace('dev0', 'dev'))
            self.pkg_ready.append(True)
            try:
                confres = ResourceManager().resource_listdir(name, 'conf')
            except Exception:
                # FIX: was a bare "except:", which also swallowed
                # KeyboardInterrupt/SystemExit.  A missing conf dir just
                # means no bundled configuration.
                confres = None
            if confres:
                confxml = [ob for ob in confres
                           if ob.endswith('.xml') or ob.endswith('.zip')]
                if len(confxml) > 0:
                    self.pkg_confs.append(confxml)
                else:
                    self.pkg_confs.append(None)
            else:
                self.pkg_confs.append(None)
        else:
            self.pkg_installed.append(None)
            self.pkg_confs.append(None)
            self.pkg_ready.append(False)
def get_package_loader(self, package, package_path):
    """Build a ``loader(path) -> (basename, opener)`` callable for files
    shipped inside *package* under *package_path*; yields ``(None, None)``
    for a missing or None path."""
    from pkg_resources import DefaultProvider, ResourceManager, get_provider

    load_time = datetime.utcnow()
    pkg_provider = get_provider(package)
    res_manager = ResourceManager()
    # Plain files on disk can be opened directly; zipped eggs must be
    # read into memory as a string.
    on_filesystem = isinstance(pkg_provider, DefaultProvider)

    def loader(path):
        if path is None:
            return None, None
        full_path = posixpath.join(package_path, path)
        if not pkg_provider.has_resource(full_path):
            return None, None
        name = posixpath.basename(full_path)
        if on_filesystem:
            real_file = pkg_provider.get_resource_filename(res_manager, full_path)
            return name, self._opener(real_file)
        data = pkg_provider.get_resource_string(res_manager, full_path)
        return name, lambda: (BytesIO(data), load_time, len(data))

    return loader
def build(output_basename, input_modules, selfinclude=False, separator="."):
    """
    Creates a build of javascript files.

    Concatenates *input_modules* into ``<output_basename>.js`` (optionally
    wrapped with the modularjs bootstrap when *selfinclude* is set) and then
    produces ``<output_basename><separator>compressed.js`` via the bundled
    YUI Compressor jar.
    """
    distribution = get_distribution('modularjs')
    with open('%s.js' % output_basename, 'w') as output:
        if selfinclude:
            output.write('var __build__ = true;\n')
        include('include', output)
        for input_module in input_modules:
            include(input_module, output)
        if selfinclude:
            output.write('\nmodularjs.init();')
    modularjslogger.info('Wrote %s.js' % output_basename)
    compressed_name = '%s%scompressed.js' % (output_basename, separator)
    with open(compressed_name, 'w') as output:
        yui = os.path.join('lib', 'yuicompressor-2.4.2.jar')
        # Resolve the jar even when modularjs is installed zipped.
        jar = distribution.get_resource_filename(ResourceManager(), yui)
        p = subprocess.Popen(['java', '-jar', jar, '%s.js' % output_basename],
                             stdout=output)
        p.wait()
    # FIX: the original log hard-coded ".compressed", which was wrong
    # whenever separator != "." — log the file actually written.
    modularjslogger.info('Wrote %s' % compressed_name)
def __init__(self, package_name, package_path):
    """Attach a pkg_resources provider/manager pair for *package_name*,
    rooting template lookups at *package_path*."""
    from pkg_resources import ResourceManager, get_provider

    super().__init__()
    self.package_path = package_path
    self.provider = get_provider(package_name)
    self.manager = ResourceManager()
def setUp(self):
    """Create converter/file-manager fixtures and locate the bundled
    samples directory."""
    self.converter = CharsetConverter()
    self.fm = FileManager()
    samples_dir = ResourceManager().resource_filename(__package__, "samples")
    self.samples = path(samples_dir)
"""Handle program wide resources (files, images, etc...)"""
import os
from pkg_resources import ResourceManager
from appdirs import user_data_dir

# Shared manager for resolving files bundled inside the mlox package.
resource_manager = ResourceManager()

# Per-user writable data directory; created on first import if absent.
user_path = user_data_dir('mlox', 'mlox')
if not os.path.isdir(user_path):
    os.makedirs(user_path)

# Rule files kept in the user data directory.
base_file = os.path.join(user_path, "mlox_base.txt")
user_file = os.path.join(user_path, "mlox_user.txt")

# For the updater
UPDATE_BASE = "mlox-data.7z"
update_file = os.path.join(user_path, UPDATE_BASE)
UPDATE_URL = 'https://svn.code.sf.net/p/mlox/code/trunk/downloads/' + UPDATE_BASE
def get_htdocs_dirs(self):
    """ITemplateProvider: map the 'statushistorychart' URL prefix onto the
    htdocs directory shipped with this plugin."""
    htdocs = ResourceManager().resource_filename(__name__, 'htdocs')
    return [('statushistorychart', htdocs)]
'''Unit tests for file chooser
'''
from guitest.gtktest import GtkTestCase
from pkg_resources import ResourceManager
from path import path

from baudot.widget import FileFolderChooser

# Directory of sample fixtures bundled with this test package.
SAMPLES = path(ResourceManager().resource_filename(__package__, "samples"))


class FileFolderChooserTest(GtkTestCase):
    '''Unit tests for FileFolderChooser class
    '''

    def test_init(self):
        '''Test __init__ method
        '''
        ffc = FileFolderChooser()
        self.assertIsNotNone(ffc)

    def test_selection(self):
        '''Test file selection scenarios
        '''
        ffc = FileFolderChooser()
        # A freshly created chooser has nothing selected.
        self.assertFalse(ffc.get_filenames())
        ffc.chooser.set_current_folder(SAMPLES / "dir1")
        empty_dir = SAMPLES / "dir1" / "empty"
        # NOTE(review): assumes select_filename returns truthy on success
        # per the GTK FileChooser API — confirm against the widget wrapper.
        self.assertTrue(ffc.chooser.select_filename(empty_dir))
def get_htdocs_dirs(self):
    """ITemplateProvider: expose this plugin's packaged htdocs under the
    'hidefieldchanges' URL prefix."""
    static_dir = ResourceManager().resource_filename(__name__, 'htdocs')
    return [('hidefieldchanges', static_dir)]
def get_contact_distance_map(structure_directory=INFO_DIRECTORY, westhof_vector=None, force_rebuild=False):
    """
    Returns contact distance map

    The contact distance map is cached it in the user directory and updated when newer files are found.

    :param structure_directory: directory to look up structure information text files
    :param westhof_vector: list of factors to apply different weights to the bonding family classes
                           (defaults to ``[1, 1, ... ]``)
    :param force_rebuild: force rebuilding the distance map
    """
    # default: same weight for all families
    if not westhof_vector:
        westhof_vector = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]

    nucleotides = ["A", "U", "G", "C"]

    # build a dict of filenames
    # if a local file is present in the user directory it will take precedence over the system wide shipped version
    structure_filenames = {}
    resource_manager = ResourceManager()
    for nt1 in nucleotides:
        for nt2 in nucleotides:
            ntpair = "%s-%s" % (nt1, nt2)
            local_file = structure_directory + os.sep + ntpair + ".txt"
            if os.path.isfile(local_file):
                structure_filenames[ntpair] = local_file
            else:
                # fall back to the copy shipped inside the package
                structure_filenames[ntpair] = resource_manager.resource_filename(__name__, "structure_info/%s.txt" % ntpair)

    # try to use a cached version of the distance map if found and recent and force_rebuild is False
    if not force_rebuild:
        try:
            cache_ok = True
            if os.path.isfile(CACHE_DISTANCEMAP):
                cache_timestamp = os.path.getmtime(CACHE_DISTANCEMAP)
                # any structure file newer than the cache invalidates it
                for d in structure_filenames.itervalues():
                    if os.path.getmtime(d) > cache_timestamp:
                        cache_ok = False
                        print "Contact map cache out of date. Rebuilding..."
                        break
                if cache_ok:
                    with open(CACHE_DISTANCEMAP, "r") as f:
                        return pickle.load(f)
        except (IOError, pickle.PickleError, AttributeError, EOFError, IndexError):
            # unreadable/corrupt cache: fall through to a rebuild
            print "Contact map cache broken. Rebuilding..."

    print "Building contact distance map:"

    pdb_structure_dict = {}
    distance_map = {}

    for nt1 in nucleotides:
        for nt2 in nucleotides:
            distance_map_res_pair = {}
            pdb_codes = []
            residues = []

            # read the structures for the 12 edge-to-edge bonding families
            # each line: "<pdbcode> <res1> <res2>" or "-" for a missing family
            for line in utils.read_file_line_by_line(structure_filenames[nt1 + '-' + nt2]):
                fields = line.split(" ")
                if fields[0] != "-":
                    pdb_codes.append(fields[0].upper())
                    residues.append((int(fields[1]), int(fields[2])))
                else:
                    pdb_codes.append(None)
                    residues.append(None)

            # loop over all pdbcodes and their index in the list (0-11)
            for index, pdb_code in enumerate(pdb_codes):
                # skip if we don't have any entry for this family
                if pdb_code is None:
                    continue

                # download pdb if necessary (parsed structures are memoised per code)
                if pdb_code not in pdb_structure_dict:
                    pdb_structure_dict[pdb_code] = pdbtools.parse_pdb(pdb_code, pdbtools.get_pdb_by_code(pdb_code))

                # extract model from pdb
                model = pdb_structure_dict[pdb_code][0]

                # try to find the residue contact specified. this is done by looping over all chains in the model,
                # and checking if the residue is in there and is the correct nucleotide
                def find_res(res, resname):
                    for chain in model:
                        try:
                            if chain[res].get_resname().strip() == resname:
                                return chain[res]
                        except KeyError:
                            pass
                    return None

                res1 = find_res(residues[index][0], nt1)
                res2 = find_res(residues[index][1], nt2)

                if not res1 or not res2:
                    raise Exception("Could not find residue contact in pdb file: %s-%s %s %s %s" % (nt1, nt2, pdb_code, residues[index][0], residues[index][1]))

                print "%s-%s %s %s %s" % (nt1, nt2, pdb_code, residues[index][0], residues[index][1])

                # add all atom-atom contacts to the distance map for the current residue pair
                # (hydrogens are excluded; distance is weighted by the family factor)
                for atom1 in res1:
                    for atom2 in res2:
                        if not (atom1.name.startswith('H') or atom2.name.startswith('H')):
                            contact_key = str(atom1.name) + '-' + str(atom2.name)
                            distance = westhof_vector[index] * (atom1 - atom2)
                            if contact_key not in distance_map_res_pair:
                                distance_map_res_pair[contact_key] = [distance]
                            else:
                                distance_map_res_pair[contact_key].append(distance)

            distance_map[nt1 + nt2] = distance_map_res_pair

    # save distance map in cache
    utils.mkdir_p(CACHE_DIRECTORY)
    with open(CACHE_DISTANCEMAP, "w") as f:
        pickle.dump(distance_map, f)

    return distance_map
def setUp(self):
    """Resolve the packaged ``samples`` directory for use in the tests."""
    samples_dir = ResourceManager().resource_filename(__package__, "samples")
    self.samples = path(samples_dir)
def get_templates_dirs(self):
    """ITemplateProvider: directories holding this plugin's templates."""
    templates = ResourceManager().resource_filename(__name__, 'templates')
    return [templates]
def get_htdocs_dirs(self):
    """ITemplateProvider: serve the packaged htdocs under the
    'epochfield' URL prefix."""
    static_dir = ResourceManager().resource_filename(__name__, 'htdocs')
    return [('epochfield', static_dir)]
def regenerate_web(self):
    """Regenerate the static HTML site: copy bundled static assets, then
    render the index, per-notebook pages and per-schedule calendar pages
    from the data in ``self.d`` using the packaged Jinja templates."""

    # --- path-building helpers (all paths relative to the output root) ---
    notebook_dirname = lambda x: 'n{:03}'.format(x)
    page_filename = lambda p, n: 'n{:03}/p{:06}.html'.format(n, p)
    notebook_covername = lambda x: 'images/note/n{:07x}_0.png'.format(x)
    schedule_dirname = lambda x: 's{:03}'.format(x)
    sch_page_filename = lambda p, n: 's{:03}/p{:06}.html'.format(n, p)
    schedule_covername = lambda x: 'images/schedule/s{:07x}_0.png'.format(x)

    def form_filename(id_, thumb=False):
        # Image path for a form, depending on where the form came from.
        notebook = self.d['forms'][id_]['notebook']
        if notebook == -1:  # uform
            return 'images/{thumb}uform/f{id:07x}_0.png'.format(
                thumb='thumbnail/' if thumb else '', id=id_)
        elif notebook == 0:  # built-in form
            return 'images/{thumb}form/f{id:07x}_0.png'.format(
                thumb='thumbnail/' if thumb else '', id=id_)
        else:  # imported form
            return 'images/{thumb}impt/n{nb:06x}/f{id:07x}_0.png'.format(
                thumb='thumbnail/' if thumb else '', id=id_, nb=notebook)

    def page_imagename(id_, notebook, layer, thumb=False):
        # Image path for one ink layer of a notebook page.
        return 'images/{thumb}page/n{nb:06x}/{tp}{id:07x}_{layer}.png'.format(
            thumb='thumbnail/' if thumb else '', tp='t' if thumb else 'p',
            id=id_, nb=notebook, layer=layer)

    def sch_form_filename(id_, schedule):
        # Background form image for a schedule page.
        return 'images/sch_form/s{sch:06x}/f{id:07x}_0.png'.format(
            sch=schedule, id=id_)

    def sch_page_imagename(id_, schedule, layer, thumb=False):
        # Image path for one ink layer of a schedule page.
        return 'images/{thumb}sch_page/s{sch:06x}/{tp}{id:07x}_{layer}.png'.format(
            thumb='thumbnail/' if thumb else '', tp='t' if thumb else 'p',
            id=id_, sch=schedule, layer=layer)

    # copy over static files
    self._mkdir('static')
    provider = get_provider('chicraccoon')
    static_dir = provider.get_resource_filename(ResourceManager(),
                                                'web_static')
    for entry in os.scandir(static_dir):
        shutil.copy(entry.path, self._path('static'))

    # generate HTML
    env = Environment(loader=PackageLoader('chicraccoon', 'web_templates'),
                      autoescape=select_autoescape(['html']),
                      trim_blocks=True, lstrip_blocks=True)

    # generate index page
    index_template = env.get_template('index.html')
    notebooks = []
    for id_ in self.d['notebooks']:
        cover = notebook_covername(id_)
        # fall back to the default cover when no cover image was exported
        if not os.path.exists(self._path(cover)):
            cover = 'static/notebook_default.png'
        notebooks.append({
            'link': '{}/index.html'.format(notebook_dirname(id_)),
            'cover': cover})
    schedules = []
    for id_ in self.d['schedules']:
        cover = schedule_covername(id_)
        if not os.path.exists(self._path(cover)):
            cover = 'static/schedule_default.png'
        schedules.append({
            'link': '{}/index.html'.format(schedule_dirname(id_)),
            'cover': cover})
    with open(self._path('index.html'), 'w') as f:
        f.write(index_template.render(notebooks=notebooks,
                                      schedules=schedules))

    # generate note and notebook pages
    notebook_template = env.get_template('notebook.html')
    page_template = env.get_template('notebook_page.html')
    for id_, notebook in self.d['notebooks'].items():
        self._mkdir(notebook_dirname(id_))
        pages = []
        page_ids = notebook['pages']
        for i, page_id in enumerate(page_ids):
            page = self.d['pages'][page_id]
            thumb_layers = [form_filename(page['form'], True)]
            layers = [form_filename(page['form'])]
            # ink layers 0 and 1 are only present when layer 0 was exported
            if os.path.exists(self._path(page_imagename(page_id, id_, 0))):
                thumb_layers.append(page_imagename(page_id, id_, 0, True))
                thumb_layers.append(page_imagename(page_id, id_, 1, True))
                layers.append(page_imagename(page_id, id_, 0))
                layers.append(page_imagename(page_id, id_, 1))
            prev_link = None
            if i != 0:
                prev_link = page_filename(page_ids[i - 1], id_)
            next_link = None
            if i != len(page_ids) - 1:
                next_link = page_filename(page_ids[i + 1], id_)
            with open(self._path(page_filename(page_id, id_)), 'w') as f:
                f.write(page_template.render(
                    layers=layers, base_dir='../', page_num=i + 1,
                    pages_total=len(page_ids), prev_link=prev_link,
                    next_link=next_link))
            pages.append({
                'layers': thumb_layers,
                'link': page_filename(page_id, id_)})
        with open(self._path(notebook_dirname(id_), 'index.html'), 'w') as f:
            f.write(notebook_template.render(pages=pages, base_dir='../'))

    # generate schedule pages
    one_day = datetime.timedelta(days=1)
    parse_date = lambda x: datetime.datetime.utcfromtimestamp(x).date()
    schedule_template = env.get_template('schedule.html')
    sch_page_template = env.get_template('schedule_page.html')
    for id_, schedule in self.d['schedules'].items():
        self._mkdir(schedule_dirname(id_))
        page_objects = list([(x, self.d['sch_pages'][x])
                             for x in schedule['pages']])
        start_date = parse_date(schedule['start_date'])
        end_date = parse_date(schedule['end_date'])
        calendar = []
        last_month = -1
        week = []
        date = start_date
        date -= one_day * date.weekday()  # go to beginning of the week
        page = 0
        while date <= end_date:
            # advance to the schedule page covering the current date
            while (page < len(page_objects) - 1) and \
                    (date > parse_date(page_objects[page][1]['end_date'])):
                page += 1
            week.append({
                'day': date.day,
                'date': date.strftime('%Y-%m-%d'),
                'link': sch_page_filename(page_objects[page][0], id_),
                'touched': page_objects[page][1]['touched']})
            if date.weekday() == 6:
                # week complete: label it with the month when a new month starts
                if last_month != date.month:
                    calendar.append({
                        'days': week,
                        'month': date.strftime('%B %Y')})
                    last_month = date.month
                else:
                    calendar.append({'days': week})
                week = []
            date += one_day
        if len(week) > 0:
            # NOTE(review): trailing partial week is appended as a bare list,
            # unlike the {'days': ...} dicts above — confirm the template
            # handles both shapes.
            calendar.append(week)
            week = []
        with open(self._path(schedule_dirname(id_), 'index.html'), 'w') as f:
            f.write(schedule_template.render(calendar=calendar,
                                             base_dir='../'))
        for i, (page_id, page) in enumerate(page_objects):
            layers = [sch_form_filename(page_id, id_)]
            if os.path.exists(
                    self._path(sch_page_imagename(page_id, id_, 0))):
                layers.append(sch_page_imagename(page_id, id_, 0))
                layers.append(sch_page_imagename(page_id, id_, 1))
            prev_link = None
            if i != 0:
                prev_link = sch_page_filename(page_objects[i - 1][0], id_)
            next_link = None
            if i != len(page_objects) - 1:
                next_link = sch_page_filename(page_objects[i + 1][0], id_)
            start_date = parse_date(page['start_date']).isoformat()
            end_date = parse_date(page['end_date']).isoformat()
            with open(self._path(sch_page_filename(page_id, id_)), 'w') as f:
                f.write(sch_page_template.render(
                    base_dir='../', layers=layers, prev_link=prev_link,
                    next_link=next_link, start_date=start_date,
                    end_date=end_date))
import re
import os
import tw2.core as twc
from pkg_resources import ResourceManager

# Shared manager used to resolve files shipped inside this package.
rm = ResourceManager()

# Expose the bundled TinyMCE tree and its bootstrap script as tw2 resources.
tinymce_dir = twc.DirLink(modname=__name__, filename="static/tiny_mce")
#tinymce_js = twc.JSLink(modname=__name__, filename='static/tinymce.js')
#tinymce_css = twc.CSSLink(modname=__name__, filename='static/tinymce.css')
tinymce_js = twc.JSLink(modname=__name__,
                        filename='static/tiny_mce/tiny_mce_src.js',
                        init=twc.js_function('tinyMCE.init'))


def _get_available_languages():
    """Return the language codes of the TinyMCE locale packs bundled under
    ``static/tiny_mce/langs`` (each pack is a ``<code>.js`` file)."""
    filename_re = re.compile(r'(\w+)\.js')
    langs = []
    locale_dir = rm.resource_filename(__name__, "static/tiny_mce/langs")
    for filename in os.listdir(locale_dir):
        match = filename_re.match(filename)
        if match:
            # first capture group is the language code
            langs.append(match.groups(0)[0])
    return langs

from formencode.validators import UnicodeString, Validator
from genshi.core import Markup, stripentities
def get_htdocs_dirs(self):
    """ITemplateProvider: packaged htdocs under the 'contextchrome'
    URL prefix."""
    static_dir = ResourceManager().resource_filename(__name__, 'htdocs')
    return [('contextchrome', static_dir)]
def get_htdocs_dirs(self):
    """ITemplateProvider: packaged htdocs under the 'ldrize' URL prefix."""
    static_dir = ResourceManager().resource_filename(__name__, 'htdocs')
    return [('ldrize', static_dir)]
def setUp(self):
    """Build a ConvertCommand fixture and locate the packaged samples
    directory."""
    self.cmd = ConvertCommand(None, None, None)
    samples_dir = ResourceManager().resource_filename(__package__, "samples")
    self.samples = path(samples_dir)
def get_htdocs_dirs(self):
    """ITemplateProvider: packaged htdocs under the
    'numberlisttoticketlist' URL prefix."""
    static_dir = ResourceManager().resource_filename(__name__, 'htdocs')
    return [('numberlisttoticketlist', static_dir)]
def get_htdocs_dirs(self):
    """ITemplateProvider: packaged htdocs under the 'querystatushelper'
    URL prefix."""
    static_dir = ResourceManager().resource_filename(__name__, 'htdocs')
    return [('querystatushelper', static_dir)]
from datetime import datetime from genshi.builder import tag from genshi.filters.transform import Transformer from pkg_resources import ResourceManager from trac.cache import cached from trac.config import ListOption from trac.core import Component, implements from trac.ticket.api import ITicketManipulator, TicketSystem from trac.ticket.model import Ticket from trac.util.datefmt import format_datetime, from_utimestamp, to_timestamp, \ format_date, format_time from trac.web.api import ITemplateStreamFilter, IRequestFilter from trac.web.chrome import add_script, ITemplateProvider, add_stylesheet import re is_trac_ja = ResourceManager().resource_exists('trac.wiki', 'default-pages/TracJa') # Is patch need or not, for trac-ja from interact # https://twitter.com/#!/jun66j5/status/180856879155658753 by @jun66j5; # "まともな方法がなくて、前にやったのは trac/wiki/default-pages/TracJa # があるかどうかを pkg_resources.resource_filename で調べてました" class EpochField(Component): implements(ITemplateStreamFilter, ITemplateProvider, IRequestFilter, ITicketManipulator) date_columns = ListOption('epochfield', 'date_columns', '.*_date', doc=""" field-names you want to translate from epoch to date-string in regular-expressions."""