def __setitem__(self, request, info, _to_fs=True):
    """Cache ``info`` under ``request``, writing it to disk unless ``_to_fs`` is False."""
    info_path = self._get_info_fp(request)
    if _to_fs:
        # Persist the serialized info (and any embedded ICC profile) to disk.
        logger.debug('request passed to __setitem__: %s', request)
        parent_dir = os.path.dirname(info_path)
        mkdir_p(parent_dir)
        logger.debug('Created %s', parent_dir)
        with open(info_path, 'w') as out:
            out.write(info.to_full_info_json())
        logger.debug('Created %s', info_path)
        if info.color_profile_bytes:
            profile_path = self._get_color_profile_fp(request)
            with open(profile_path, 'wb') as out:
                out.write(info.color_profile_bytes)
            logger.debug('Created %s', profile_path)
    # The on-disk info file must already exist at this point - its mtime is
    # what gets recorded as the last-modified value. In other words, nothing
    # outside this class should be using _to_fs=False.
    if self.size > 0:
        last_modified = datetime.utcfromtimestamp(os.path.getmtime(info_path))
        with self._lock:
            self._dict[request.url] = (info, last_modified)
            # Evict oldest entries until we are back under the size limit.
            while len(self._dict) > self.size:
                self._dict.popitem(last=False)
def __setitem__(self, request, info):
    """Write ``info`` for ``request`` to the filesystem cache, then record it in memory."""
    logger.debug('request passed to __setitem__: %s', request)
    info_path = self._get_info_fp(request)
    containing_dir = os.path.dirname(info_path)
    mkdir_p(containing_dir)
    logger.debug('Created %s', containing_dir)
    with open(info_path, 'w') as out:
        out.write(info.to_full_info_json())
    logger.debug('Created %s', info_path)
    if info.color_profile_bytes:
        profile_path = self._get_color_profile_fp(request)
        with open(profile_path, 'wb') as out:
            out.write(info.color_profile_bytes)
        logger.debug('Created %s', profile_path)
    # In-memory entry; last-modified is drawn from the file just written.
    last_modified = datetime.utcfromtimestamp(os.path.getmtime(info_path))
    with self._lock:
        # Make room first so the fresh entry is never evicted immediately.
        while len(self._dict) >= self.size:
            self._dict.popitem(last=False)
        self._dict[request.url] = (info, last_modified)
def copy_to_cache(self, ident):
    """Copy the source file for ``ident`` into the cache."""
    src = self.source_file_path(ident)
    dst = self.cache_file_path(ident)
    # Make sure the destination directory exists before copying.
    mkdir_p(dirname(dst))
    copy(src, dst)
    logger.info("Copied %s to %s", src, dst)
def __init__(self, config):
    """Configure JP2 transform options from ``config``.

    Disables sRGB profile mapping when the ImageCms module could not be
    imported, and creates the temporary working directory, exiting with
    status 77 if that fails (almost certainly a permissions problem).
    """
    self.map_profile_to_srgb = bool(config['map_profile_to_srgb'])
    self.mkfifo = config['mkfifo']
    self.tmp_dp = config['tmp_dp']
    if self.map_profile_to_srgb and \
        ('PIL.ImageCms' not in sys.modules and 'ImageCms' not in sys.modules):
        logger.warn('Could not import profileToProfile from ImageCms.')
        # BUG FIX: log message said "sSRGB"; the color space is "sRGB".
        logger.warn(
            'Images will not have their embedded color profiles mapped to sRGB.'
        )
        self.map_profile_to_srgb = False
    else:
        self.srgb_profile_fp = config['srgb_profile_fp']
    try:
        mkdir_p(self.tmp_dp)
    except OSError as ose:
        # Almost certainly a permissions error on one of the required dirs
        from sys import exit
        from os import strerror
        logger.fatal('%s (%s)', strerror(ose.errno), ose.filename)
        logger.fatal('Exiting')
        exit(77)
    super(_AbstractJP2Transformer, self).__init__(config)
def copy_to_cache(self, ident):
    """Download the source image for ``ident`` into the cache; return its local path.

    Also makes a best-effort attempt to fetch an auth rules file assumed
    to live next to the source image.

    Raises:
        ResolverException: if the source image cannot be retrieved.
    """
    ident = unquote(ident)
    cache_dir = self.cache_dir_path(ident)
    mkdir_p(cache_dir)
    # get source image and write to temporary file
    (source_url, options) = self._web_request_url(ident)
    with closing(requests.get(source_url, stream=True, **options)) as response:
        if not response.ok:
            public_message = 'Source image not found for identifier: %s. Status code returned: %s' % (
                ident, response.status_code)
            log_message = 'Source image not found at %s for identifier: %s. Status code returned: %s' % (
                source_url, ident, response.status_code)
            logger.warn(log_message)
            raise ResolverException(404, public_message)
        extension = self.cache_file_extension(ident, response)
        local_fp = join(cache_dir, "loris_cache." + extension)
        with tempfile.NamedTemporaryFile(dir=cache_dir, delete=False) as tmp_file:
            for chunk in response.iter_content(2048):
                tmp_file.write(chunk)
            tmp_file.flush()
    # now rename the tmp file to the desired file name if it still doesn't exist
    # (another process could have created it)
    if exists(local_fp):
        logger.info('another process downloaded src image %s', local_fp)
        remove(tmp_file.name)
    else:
        rename(tmp_file.name, local_fp)
        logger.info("Copied %s to %s", source_url, local_fp)
    # Check for rules file associated with image file
    # These files are < 2k in size, so fetch in one go.
    # Assumes that the rules will be next to the image
    # cache_dir is image specific, so this is easy
    bits = split(source_url)
    fn = bits[1].rsplit('.')[0] + "." + self.auth_rules_ext
    rules_url = bits[0] + '/' + fn
    try:
        resp = requests.get(rules_url)
        if resp.status_code == 200:
            local_rules_fp = join(cache_dir, "loris_cache." + self.auth_rules_ext)
            if not exists(local_rules_fp):
                # BUG FIX: was ``fh.write(r.text)`` - ``r`` is undefined
                # (NameError); the response is bound to ``resp``. Also use a
                # context manager so the handle is closed on error.
                with open(local_rules_fp, 'w') as fh:
                    fh.write(resp.text)
    except Exception:
        # No connection available; the rules file is strictly best-effort.
        # (Was a bare ``except:``, which also swallowed KeyboardInterrupt.)
        pass
    return local_fp
def _link(source, link_name):
    """Create a symlink at ``link_name`` pointing to ``source``, replacing any existing one."""
    if source == link_name:
        logger.warn('Circular symlink requested from %s to %s; not creating symlink', link_name, source)
        return
    mkdir_p(path.dirname(link_name))
    # Shouldn't normally exist already, but clearing it out helps debugging.
    if path.lexists(link_name):
        unlink(link_name)
    symlink(source, link_name)
    logger.debug('Made symlink from %s to %s', link_name, source)
def test_creates_directory(self, tmpdir):
    """mkdir_p creates a missing directory and tolerates an existing one."""
    target = str(tmpdir.join('test_creates_directory'))
    assert not os.path.exists(target)
    # Creating the directory brings it into existence.
    utils.mkdir_p(target)
    assert os.path.exists(target)
    # A second call on an already-existing directory must not raise.
    utils.mkdir_p(target)
def _copy_index_and_favicon(config):
    """Copy the bundled index page and favicon into the configured www directory."""
    www_dir = config['loris.Loris']['www_dp']
    src_root = os.path.join(_src_code_repo_root(), 'www')
    favicon_dir = os.path.join(www_dir, 'icons')
    # The icons subdirectory may not exist yet.
    mkdir_p(favicon_dir)
    shutil.copyfile(
        os.path.join(src_root, 'index.txt'),
        os.path.join(www_dir, 'index.txt')
    )
    shutil.copyfile(
        os.path.join(src_root, 'icons/favicon.ico'),
        os.path.join(favicon_dir, 'favicon.ico')
    )
def test_if_error_is_unexpected_then_is_raised(self, tmpdir):
    """
    If the error from ``os.makedirs()`` isn't because the directory
    already exists, we get an error.
    """
    target = str(tmpdir.join('test_if_error_is_unexpected_then_is_raised'))
    message = "Exception thrown in utils_t.py for TestMkdirP"
    fake_makedirs = mock.Mock(side_effect=OSError(-1, message))
    with mock.patch('loris.utils.os.makedirs', fake_makedirs):
        with pytest.raises(OSError):
            utils.mkdir_p(target)
def _make_directories(config):
    """Ensure every directory Loris needs at runtime exists."""
    required_dirs = [
        config['img.ImageCache']['cache_dp'],
        config['img_info.InfoCache']['cache_dp'],
        config['transforms']['jp2']['tmp_dp'],
        config['loris.Loris']['www_dp'],
        config['logging']['log_dir'],
    ]
    for directory in required_dirs:
        mkdir_p(directory)
def __init__(self, config):
    """Set up fifo/tmp-dir options; exit with status 77 if the tmp dir can't be made."""
    self.mkfifo = config['mkfifo']
    self.tmp_dp = config['tmp_dp']
    try:
        mkdir_p(self.tmp_dp)
    except OSError as ose:
        # Almost certainly a permissions error on one of the required dirs
        from os import strerror
        from sys import exit
        logger.fatal('%s (%s)', strerror(ose.errno), ose.filename)
        logger.fatal('Exiting')
        exit(77)
    super(_AbstractJP2Transformer, self).__init__(config)
def __init__(self, app_configs=None):
    '''The WSGI Application.
    Args:
        app_configs ({}):
            A dictionary of dictionaries that represents the
            loris.conf file.
    '''
    # FIX: was a mutable default argument (``app_configs={}``), which is
    # shared across calls. An omitted/empty config still fails below at
    # ``app_configs['logging']``, exactly as before.
    if app_configs is None:
        app_configs = {}
    self.app_configs = app_configs
    self.logger = configure_logging(app_configs['logging'])
    self.logger.debug('Loris initialized with these settings:')
    # FIX: was a list comprehension executed purely for its side effects;
    # plain loops are the idiomatic form and don't build a throwaway list.
    for key in self.app_configs:
        for sub_key in self.app_configs[key]:
            self.logger.debug(
                '%s.%s=%s', key, sub_key, self.app_configs[key][sub_key])
    # make the loris.Loris configs attrs for easier access
    _loris_config = self.app_configs['loris.Loris']
    self.tmp_dp = _loris_config['tmp_dp']
    try:
        mkdir_p(self.tmp_dp)
    except Exception as exc:
        raise ConfigError("Error creating tmp_dp %s: %r" % (self.tmp_dp, exc))
    self.www_dp = _loris_config['www_dp']
    self.enable_caching = _loris_config['enable_caching']
    self.redirect_canonical_image_request = _loris_config[
        'redirect_canonical_image_request']
    self.redirect_id_slash_to_info = _loris_config[
        'redirect_id_slash_to_info']
    self.proxy_path = _loris_config.get('proxy_path', None)
    self.cors_regex = _loris_config.get('cors_regex', None)
    if self.cors_regex:
        # Pre-compile once so per-request CORS checks are cheap.
        self.cors_regex = re.compile(self.cors_regex)
    self.transformers = self._load_transformers()
    self.resolver = self._load_resolver()
    self.authorizer = self._load_authorizer()
    self.max_size_above_full = _loris_config.get('max_size_above_full', 200)
    if self.enable_caching:
        self.info_cache = InfoCache(
            self.app_configs['img_info.InfoCache']['cache_dp'])
        cache_dp = self.app_configs['img.ImageCache']['cache_dp']
        self.img_cache = img.ImageCache(cache_dp)
def create_dir_and_return_file_path(self, image_request, image_info):
    """Ensure the cache directory for this request exists and return the target file path."""
    canonical_fp = self.get_canonical_cache_path(image_request, image_info)
    mkdir_p(path.dirname(canonical_fp))
    return canonical_fp
def copy_to_cache(self, ident):
    """Download the source image for ``ident`` into the cache; return its local path.

    Also makes a best-effort attempt to fetch an auth rules file assumed
    to live next to the source image.

    Raises:
        ResolverException: if the source image cannot be retrieved.
    """
    ident = unquote(ident)
    # get source image and write to temporary file
    (source_url, options) = self._web_request_url(ident)
    assert source_url is not None
    cache_dir = self.cache_dir_path(ident)
    mkdir_p(cache_dir)
    with closing(requests.get(source_url, stream=True, **options)) as response:
        if not response.ok:
            logger.warn(
                "Source image not found at %s for identifier: %s. "
                "Status code returned: %s.",
                source_url, ident, response.status_code)
            raise ResolverException(
                "Source image not found for identifier: %s. "
                "Status code returned: %s." % (ident, response.status_code))
        extension = self.cache_file_extension(ident, response)
        local_fp = join(cache_dir, "loris_cache." + extension)
        with tempfile.NamedTemporaryFile(dir=cache_dir, delete=False) as tmp_file:
            for chunk in response.iter_content(2048):
                tmp_file.write(chunk)
    # Now rename the temp file to the desired file name if it still
    # doesn't exist (another process could have created it).
    #
    # Note: This is purely an optimisation; if the file springs into
    # existence between the existence check and the copy, it will be
    # overridden.
    if exists(local_fp):
        logger.info('Another process downloaded src image %s', local_fp)
        remove(tmp_file.name)
    else:
        safe_rename(tmp_file.name, local_fp)
        logger.info("Copied %s to %s", source_url, local_fp)
    # Check for rules file associated with image file
    # These files are < 2k in size, so fetch in one go.
    # Assumes that the rules will be next to the image
    # cache_dir is image specific, so this is easy
    bits = split(source_url)
    fn = bits[1].rsplit('.')[0] + "." + self.auth_rules_ext
    rules_url = bits[0] + '/' + fn
    try:
        resp = requests.get(rules_url)
        if resp.status_code == 200:
            local_rules_fp = join(cache_dir, "loris_cache." + self.auth_rules_ext)
            if not exists(local_rules_fp):
                with open(local_rules_fp, 'w') as fh:
                    fh.write(resp.text)
    except Exception:
        # No connection available; the rules file is strictly best-effort.
        # FIX: was a bare ``except:``, which also swallowed
        # KeyboardInterrupt and SystemExit.
        pass
    return local_fp
def copy_to_cache(self, ident):
    """ downloads image source file from s3, if not in cache already."""
    ident = unquote(ident)
    # Resolve the S3 bucket/key for this identifier.
    (bucketname, keyname) = self.s3bucket_from_ident(ident)
    try:
        s3obj = self.s3.Object(bucketname, keyname)
        content_type = s3obj.content_type
    except Exception as e:
        msg = 'no content_type for s3 object ({}:{}): {}'.format(
            bucketname, keyname, e)
        logger.error(msg)
        raise ResolverException(msg)
    extension = self.cache_file_extension(ident, content_type)
    cache_dir = self.cache_dir_path(ident)
    mkdir_p(cache_dir)
    local_fp = os.path.join(cache_dir, "loris_cache." + extension)
    with tempfile.NamedTemporaryFile(dir=cache_dir, delete=False) as tmp_file:
        try:
            self.s3.Bucket(bucketname).download_fileobj(keyname, tmp_file)
        except Exception as e:
            msg = 'unable to access or save s3 object ({}:{}): {}'.format(
                bucketname, keyname, e)
            logger.error(msg)
            raise ResolverException(msg)
    # Rename the temp file into place unless another process already
    # downloaded the image. The existence check is purely an optimisation:
    # a file appearing between the check and the rename is overridden.
    if os.path.exists(local_fp):
        logger.info(
            'Another process downloaded src image {}'.format(local_fp))
        os.remove(tmp_file.name)
    else:
        safe_rename(tmp_file.name, local_fp)
        logger.info("Copied {}:{} to {}".format(
            bucketname, keyname, local_fp))
    # Best-effort fetch of the rules file associated with the image.
    # These files are < 2k in size, so fetch in one go; assumes the rules
    # sit next to the image, and cache_dir is image-specific so this is easy.
    bits = os.path.split(keyname)  # === bash basename
    fn = bits[1].rsplit('.')[0] + "." + self.auth_rules_ext
    rules_keyname = bits[0] + '/' + fn
    local_rules_fp = os.path.join(
        cache_dir, 'loris_cache.' + self.auth_rules_ext)
    try:
        self.s3.Object(bucketname, rules_keyname).download_file(
            local_rules_fp)
    except Exception as e:
        # no connection available?
        msg = 'ignoring rules file({}/{}) for ident({}): {}'.format(
            bucketname, rules_keyname, ident, e)
        logger.warn(msg)
    return local_fp