def _dist_activated(dist):
    """Register the ``httpencode.format`` entry points of a newly activated distribution.

    Entry point names come in two shapes:
      * ``name <format_name>`` -- registers a format alias in ``_format_names``.
      * ``<mimetype> to <type>`` -- registers a conversion in ``_format_mimetypes``
        and ``_format_types``.

    Raises:
        pkg_resources.VersionConflict: if a *different* distribution already
            registered the same format name or the same conversion.
    """
    entries = dist.get_entry_map('httpencode.format')
    for name in entries:
        data = (dist.key, name)
        if name.startswith('name '):
            format_name = name[5:].strip()
            # A different distribution already claimed this format name.
            if _format_names.get(format_name, data) != data:
                raise pkg_resources.VersionConflict(
                    "Distribution %r has identical format name (%r) as distribution %r"
                    # BUGFIX: was `format_nmame` (typo) -- raised NameError
                    # instead of the intended VersionConflict on this path.
                    % (dist, format_name, _format_names[format_name][0]))
            _format_names[format_name] = data
            continue
        parts = name.split()
        if len(parts) != 3 or parts[1] != 'to':
            # Malformed entry point: warn and skip rather than abort activation.
            warnings.warn(
                'Entry point [httpencode.format] %r in distribution '
                '%r is not a valid format' % (name, dist))
            continue
        mimetype, type = parts[0], parts[2]
        mdict = _format_mimetypes.setdefault(mimetype, {})
        if mdict.get(type, data) != data:
            raise pkg_resources.VersionConflict(
                "Distribution %r has an identical conversion (%r) as distribution %r"
                % (dist, name, mdict[type][0]))
        mdict[type] = data
        tdict = _format_types.setdefault(type, {})
        # If mdict didn't have a dup, this shouldn't
        # either
        assert tdict.get(mimetype, data) == data
        tdict[mimetype] = data
def require_version(requirement: str, hint: Optional[str] = None) -> None:
    """
    Perform a runtime check of the dependency versions, using the exact same syntax used by pip.

    The installed module version comes from the `site-packages` dir via `pkg_resources`.

    Args:
        requirement (:obj:`str`): pip style definition, e.g.,  "tokenizers==0.9.4", "tqdm>=4.27", "numpy"
        hint (:obj:`str`, `optional`): what suggestion to print in case of requirements not being met

    Raises:
        ValueError: if ``requirement`` is not in pip format or uses an unsupported operator.
        pkg_resources.DistributionNotFound: if the package is not installed at all.
        pkg_resources.VersionConflict: if the installed (or interpreter) version does not satisfy
            ``requirement``.
    """
    # note: while pkg_resources.require_version(requirement) is a much simpler way to do it, it
    # fails if some of the dependencies of the dependencies are not matching, which is not necessarily
    # bad, hence the more complicated check - which also should be faster, since it doesn't check
    # dependencies of dependencies.

    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2})(.+)", requirement)
        if not match:
            raise ValueError(
                f"requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but got {requirement}"
            )
        pkg, op, want_ver = match[0]
        if op not in ops:
            raise ValueError(f"need one of {list(ops.keys())}, but got {op}")

    # special case: "python" compares against the running interpreter version
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        if not ops[op](version.parse(got_ver), version.parse(want_ver)):
            raise pkg_resources.VersionConflict(
                # BUGFIX: append {hint} here too -- the original dropped the
                # caller-supplied hint on this branch, inconsistent with the
                # package-version branch below.
                f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
            )
        return

    # check if any version is installed
    try:
        got_ver = pkg_resources.get_distribution(pkg).version
    except pkg_resources.DistributionNotFound:
        raise pkg_resources.DistributionNotFound(requirement, ["this application", hint])

    # check that the right version is installed if version number was provided
    if want_ver is not None and not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise pkg_resources.VersionConflict(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )
def test_module_mixin_props(self):
    """Exercise the mixin's properties: dependency checking, version, names, init flag."""
    m = self.create_default_test_obj()
    with mock.patch('pkg_resources.require') as mock_require:
        # With require() mocked (no exception), dependencies are satisfied.
        res = m.dependencies_satisfied
        self.assertTrue(res)
        m._dependencies = self.test_dependencies
        # First check raises VersionConflict -> False; second succeeds -> True.
        mock_require.side_effect = [pkg_resources.VersionConflict(), None]
        # Reset the cached result so the property re-evaluates.
        m._dependencies_satisfied = None
        res = m.dependencies_satisfied
        self.assertFalse(res)
        m._dependencies_satisfied = None
        res = m.dependencies_satisfied
        self.assertTrue(res)
    self.assertEqual('0.0.1', m.version)
    self.assertEqual('TestObjectProcessingOperator (0.0.1)', m.full_name)
    self.assertEqual('TestObjectProcessingOperator', m.short_name)
    # initialize() flips is_initialized from False to True.
    self.assertFalse(m.is_initialized)
    m.initialize()
    self.assertTrue(m.is_initialized)
def check_previous_requirement(self, req, dist, prev_req):
    """Reconcile a newly seen requirement *req* with a previously recorded one.

    Returns a ``(dist, merged_req)`` pair:
      * ``(dist, None)``       -- prev_req already covers req; nothing to do.
      * ``(dist, merged_req)`` -- dist satisfies the merged requirement; the
        requirement graph has been rewired onto merged_req.
      * ``(None, merged_req)`` -- the caller must resolve merged_req afresh
        (previous pick was purged or backed out).

    Raises:
        VersionConflict: when prev_req and req cannot be merged at all.
    """
    log.debug(" -- checking previously found requirements: %s vs %s", prev_req, req)
    # Here is where we can possibly backtrack in our graph walking.
    # We need to check if we can merge the new requirement with ones
    # that we found previously. This merging is done on the rules of
    # specificity - ie, creating a new requirement that is bounded
    # by the most specific specs from both old and new.
    try:
        merged_req = dependency.merge_requirements(prev_req, req)
    except dependency.CannotMergeError:
        log.debug(" --- cannot merge requirements")
        raise VersionConflict(pkg_resources.VersionConflict(dist, req),
                              self.ws, prev_req, self.best, self.req_graph)
    log.debug(" --- merged requirement: %s" % merged_req)
    if dist is None:
        # prev_req was never satisfied; drop it and resolve the merged req instead.
        log.debug(' --- purging unsatisfied requirement %s' % prev_req)
        self.purge_req(prev_req)
        return None, merged_req
    log.debug(' --- already have dist: %r' % dist)
    # Unless the merged requirement is pinned (==) or we must reuse existing
    # dists, look for a better (newer) distribution that satisfies it.
    if not self.use_existing and all(op != '==' for op, _ in merged_req.specs):
        avail = next((dist for dist in self.installer._satisfied(merged_req)
                      if dist is not None), None)
        if avail is not None and avail != dist:
            # There is a better version available; use it.
            log.debug(' --- upgrading %r to %r' % (dist, avail))
            self.backout_requirement(prev_req)
            dist = self._get_dist(merged_req)
    if prev_req.hashCmp in (req.hashCmp, merged_req.hashCmp):
        # The req is the same as one we know about; probably it was in the
        # original working set.
        log.debug(
            " --- prev req {0} was more specific, ignoring {1}".format(
                prev_req, req))
        return dist, None
    if dist in merged_req:
        # The dist we've already picked matches the more new req
        log.debug(" --- upgrading to more specific requirement %s -> %s",
                  prev_req, merged_req)
        # Add a new node in our graph for the merged requirement.
        self.req_graph.add_node(merged_req)
        # Re-point prev_req's outgoing edges at the merged requirement.
        for i in self.req_graph.successors(prev_req):
            log.debug(" ---- adding edge from %s to %s" % (merged_req, i))
            self.req_graph.add_edge(merged_req, i)
        return dist, merged_req
    # The dist doesn't match, back it out and send the merged req back for
    # another pass.
    log.debug(" *** overriding requirement %r with %r" % (prev_req, req))
    self.backout_requirement(prev_req)
    return None, merged_req
def check_api_dependencies():
    """Verify that every requirement in requirements.txt is installed and satisfied.

    Reads requirements.txt, then asks pkg_resources to validate each entry.

    Raises:
        FileNotFoundError: if requirements.txt cannot be read.
        pkg_resources.DistributionNotFound: if a dependency is not installed.
        pkg_resources.VersionConflict: if an installed dependency has the wrong version.
    """
    try:
        # `with` closes the file automatically; no explicit close() needed.
        with open('requirements.txt', 'r') as f:
            dependencies = [line.strip() for line in f]
    except OSError as exc:
        # BUGFIX: was a bare `except:` (swallowed SystemExit/KeyboardInterrupt
        # and hid the real cause); catch only I/O errors and chain the cause.
        raise FileNotFoundError("requirements.txt file not found or errored.") from exc
    for dependency in dependencies:
        dependency_version = get_dependency_version(dependency)
        try:
            pkg_resources.require(dependency)
        except pkg_resources.DistributionNotFound:
            raise pkg_resources.DistributionNotFound(
                f'[{dependency}] not found, please check that it is properly installed.')
        except pkg_resources.VersionConflict:
            raise pkg_resources.VersionConflict(
                f'[{dependency}] version errored, since required version is {dependency_version}')
def test_module_initialize(self, mock_require):
    """initialize() must fail on unmet dependencies, wrap _initialize errors, then succeed."""
    operator = self.create_default_test_obj()
    operator._dependencies = self.test_dependencies
    # First require() call conflicts, the second one passes.
    mock_require.side_effect = [pkg_resources.VersionConflict(), True]
    with self.assertRaises(DependenciesNotSatisfiedError):
        operator.initialize()
    self.assertFalse(operator.is_initialized)
    # Clear the cached dependency-check result before the next attempt.
    operator._dependencies_satisfied = None
    with mock.patch.object(operator, '_initialize') as init_mock:
        # A failure inside _initialize must surface as InitializationError
        # with the original exception attached as the cause.
        init_mock.side_effect = [ValueError()]
        with self.assertRaises(InitializationError) as err_ctx:
            operator.initialize()
        self.assertIsInstance(err_ctx.exception.cause, ValueError)
        self.assertFalse(operator.is_initialized)
    # With the real _initialize restored, initialization completes.
    operator.initialize()
    self.assertTrue(operator.is_initialized)
def test_pipeline_props(self):
    """Pipeline properties: init state aggregation, version/name, dependency checks."""
    m1 = TestOperator1(auto_init=False)
    m2 = TestOperator2(auto_init=False)
    m3 = TestOperator3()
    pipeline = TestObjectProcessingOperatorsPipeline([m1, m2, m3])
    # Pipeline is initialized only once every member is.
    self.assertFalse(pipeline.is_initialized)
    m1.initialize()
    m2.initialize()
    self.assertTrue(pipeline.is_initialized)
    self.assertEqual('0.0.1', pipeline.version)
    self.assertEqual('TestObjectProcessingOperatorsPipeline (0.0.1)', pipeline.full_name)
    self.assertEqual('TestObjectProcessingOperatorsPipeline', pipeline.short_name)
    with mock.patch('pkg_resources.require') as mock_require:
        res = pipeline.dependencies_satisfied
        self.assertTrue(res)
        # Append a member with real dependencies to exercise the check.
        m4 = TestOperator3(auto_init=False)
        m4._dependencies = self.test_dependencies
        pipeline.append(m4)
        # First require() call conflicts -> False; second passes -> True.
        mock_require.side_effect = [pkg_resources.VersionConflict(), None]
        res = pipeline.dependencies_satisfied
        self.assertFalse(res)
        # Reset the member's cached result so the pipeline re-checks it.
        m4._dependencies_satisfied = None
        res = pipeline.dependencies_satisfied
        self.assertTrue(res)
def install(self, specs, working_set=None):
    """Resolve *specs* (pip/setuptools requirement strings) into a working set.

    Fetches each requested distribution plus its transitive dependencies,
    constraining every requirement via ``self._constrain``.  Returns the
    populated ``pkg_resources.WorkingSet``.

    Raises:
        VersionConflict: when a chosen distribution cannot satisfy a
            (constrained) requirement.
    """
    logger.debug('Installing %s.', repr(specs)[1:-1])
    # A caller-supplied working set signals we are installing buildout itself
    # (see the conflict-handling note in the loop below).
    for_buildout_run = bool(working_set)

    path = self._path
    dest = self._dest
    if dest is not None and dest not in path:
        path.insert(0, dest)

    requirements = [
        self._constrain(pkg_resources.Requirement.parse(spec))
        for spec in specs
    ]

    if working_set is None:
        ws = pkg_resources.WorkingSet([])
    else:
        ws = working_set

    # First pass: obtain the explicitly requested distributions.
    for requirement in requirements:
        for dist in self._get_dist(requirement, ws,
                                   for_buildout_run=for_buildout_run):
            ws.add(dist)
            self._maybe_add_setuptools(ws, dist)

    # OK, we have the requested distributions and they're in the working
    # set, but they may have unmet requirements. We'll resolve these
    # requirements. This is code modified from
    # pkg_resources.WorkingSet.resolve. We can't reuse that code directly
    # because we have to constrain our requirements (see
    # versions_section_ignored_for_dependency_in_favor_of_site_packages in
    # zc.buildout.tests).
    requirements.reverse()  # Set up the stack.
    processed = {}  # This is a set of processed requirements.
    best = {}  # This is a mapping of package name -> dist.
    # Note that we don't use the existing environment, because we want
    # to look for new eggs unless what we have is the best that
    # matches the requirement.
    env = pkg_resources.Environment(ws.entries)

    while requirements:
        # Process dependencies breadth-first.
        current_requirement = requirements.pop(0)
        req = self._constrain(current_requirement)
        if req in processed:
            # Ignore cyclic or redundant dependencies.
            continue
        dist = best.get(req.key)
        if dist is None:
            try:
                dist = env.best_match(req, ws)
            except pkg_resources.VersionConflict as err:
                logger.debug(
                    "Version conflict while processing requirement %s "
                    "(constrained to %s)", current_requirement, req)
                # Installing buildout itself and its extensions and
                # recipes requires the global
                # ``pkg_resources.working_set`` to be active, which also
                # includes all system packages. So there might be
                # conflicts, which are fine to ignore. We'll grab the
                # correct version a few lines down.
                if not for_buildout_run:
                    raise VersionConflict(err, ws)
        if dist is None:
            # Nothing suitable in the environment; fetch it.
            if dest:
                logger.debug('Getting required %r', str(req))
            else:
                logger.debug('Adding required %r', str(req))
            _log_requirement(ws, req)
            for dist in self._get_dist(req, ws,
                                       for_buildout_run=for_buildout_run):
                ws.add(dist)
                self._maybe_add_setuptools(ws, dist)
        if dist not in req:
            # Oops, the "best" so far conflicts with a dependency.
            raise VersionConflict(
                pkg_resources.VersionConflict(dist, req), ws)
        best[req.key] = dist
        # Queue this dist's own requirements (reversed to keep pop(0) order).
        requirements.extend(dist.requires(req.extras)[::-1])
        processed[req] = True
    return ws
def set_auth_properties(info: RedshiftProperty):
    """
    Helper function to handle IAM and Native Auth connection properties and
    ensure required parameters are specified.

    Parameters
    ----------
    info : RedshiftProperty
        Connection properties to validate and normalize in place.

    Raises
    ------
    InterfaceError
        When required properties are missing or mutually exclusive ones are combined.
    pkg_resources.VersionConflict
        When the installed boto3 is too old for auth-profile authentication.
    """
    import pkg_resources
    from packaging.version import Version

    if info is None:
        raise InterfaceError(
            "Invalid connection property setting. info must be specified")

    # IAM requires an SSL connection to work.
    # Make sure that is set to SSL level VERIFY_CA or higher.
    if info.ssl is True:
        if info.sslmode not in SupportedSSLMode.list():
            # Unknown sslmode value: silently coerce to the default.
            info.put("sslmode", SupportedSSLMode.default())
            _logger.debug(
                "A non-supported value: {} was provides for sslmode. Falling back to default value: {}"
                .format(info.sslmode, SupportedSSLMode.default()))
    else:
        # SSL disabled: blank out sslmode entirely.
        info.put("sslmode", "")

    # NOTE(review): dead commented-out validation kept from the original;
    # consider deleting it or re-enabling it deliberately.
    # elif (info.iam is False) and any(
    #     (info.credentials_provider, info.access_key_id, info.secret_access_key, info.session_token, info.profile)
    # ):
    #     raise InterfaceError(
    #         "Invalid connection property setting. IAM must be enabled when using credential_provider, "
    #         "AWS credentials, Amazon Redshift authentication profile, or AWS profile"
    #     )

    if info.iam is True:
        _logger.debug("boto3 version: {}".format(
            Version(pkg_resources.get_distribution("boto3").version)))
        _logger.debug("botocore version: {}".format(
            Version(pkg_resources.get_distribution("botocore").version)))

        if info.cluster_identifier is None and not info.is_serverless_host:
            raise InterfaceError(
                "Invalid connection property setting. cluster_identifier must be provided when IAM is enabled"
            )

        # Exactly one credential source may be used: a credentials provider,
        # an AWS profile, or explicit AWS keys.
        if info.credentials_provider is not None:
            if info.auth_profile is None and any(
                    (info.access_key_id, info.secret_access_key,
                     info.session_token, info.profile)):
                raise InterfaceError(
                    "Invalid connection property setting. It is not valid to provide both Credentials provider and "
                    "AWS credentials or AWS profile")
            elif not isinstance(info.credentials_provider, str):
                raise InterfaceError(
                    "Invalid connection property setting. It is not valid to provide a non-string value to "
                    "credentials_provider.")
        elif info.profile is not None:
            if info.auth_profile is None and any(
                    (info.access_key_id, info.secret_access_key,
                     info.session_token)):
                raise InterfaceError(
                    "Invalid connection property setting. It is not valid to provide any of access_key_id, "
                    "secret_access_key, or session_token when profile is provided"
                )
        elif info.access_key_id is not None:
            if info.secret_access_key is not None:
                pass
            elif info.password != "":
                # Allow the password field to stand in for the secret key.
                info.put("secret_access_key", info.password)
                _logger.debug(
                    "Value of password will be used for secret_access_key")
            else:
                raise InterfaceError(
                    "Invalid connection property setting. "
                    "secret access key must be provided in either secret_access_key or password field"
                )
            # Log only the presence of credentials, never their values.
            _logger.debug(
                "AWS Credentials access_key_id: {} secret_access_key: {} session_token: {}"
                .format(bool(info.access_key_id),
                        bool(info.secret_access_key),
                        bool(info.session_token)))
        elif info.secret_access_key is not None:
            raise InterfaceError(
                "Invalid connection property setting. access_key_id is required when secret_access_key is "
                "provided")
        elif info.session_token is not None:
            raise InterfaceError(
                "Invalid connection property setting. access_key_id and secret_access_key are required when "
                "session_token is provided")

        if info.db_groups and info.force_lowercase:
            # Redshift stores db group names lowercased when force_lowercase is set.
            info.put("db_groups", [group.lower() for group in info.db_groups])

        # Check for IAM keys and AuthProfile first
        if info.auth_profile is not None:
            if Version(pkg_resources.get_distribution(
                    "boto3").version) < Version("1.17.111"):
                raise pkg_resources.VersionConflict(
                    "boto3 >= 1.17.111 required for authentication via Amazon Redshift authentication profile. "
                    "Please upgrade the installed version of boto3 to use this functionality."
                )
            if not all(
                    (info.access_key_id, info.secret_access_key, info.region)):
                raise InterfaceError(
                    "Invalid connection property setting. access_key_id, secret_access_key, and region are required "
                    "for authentication via Redshift auth_profile")
            else:
                # info.put("region", info.region)
                # info.put("endpoint_url", info.endpoint_url)
                # Merge the server-side auth profile contents into the
                # connection properties.
                resp = IdpAuthHelper.read_auth_profile(
                    auth_profile=typing.cast(str, info.auth_profile),
                    iam_access_key_id=typing.cast(str, info.access_key_id),
                    iam_secret_key=typing.cast(str, info.secret_access_key),
                    iam_session_token=info.session_token,
                    info=info,
                )
                info.put_all(resp)
def install(installer, specs, working_set=None):
    """
    A monkeypatch on zc.buildout.easy_install.Installer's install method.

    I'd tell you to look at the original method's docstring for more
    information about what this method does, but it doesn't have one. >.<

    Resolves *specs* and their transitive dependencies into *working_set*
    (or a fresh WorkingSet), honoring ``autoextras_requirements`` for
    extras expansion.  NOTE: this is Python 2 code (``except X, err``).
    """
    # Yanked from zc.buildout==1.5.2, but attempts to be
    # backwards compatible.
    ##installer_logger.debug('Installing %s.', repr(specs)[1:-1])
    path = installer._path
    destination = installer._dest
    if destination is not None and destination not in path:
        path.insert(0, destination)

    requirements = [
        installer._constrain(pkg_resources.Requirement.parse(spec))
        for spec in specs
    ]

    if working_set is None:
        ws = pkg_resources.WorkingSet([])
    else:
        ws = working_set

    # First pass: obtain the explicitly requested distributions.
    for requirement in requirements:
        for dist in installer._get_dist(requirement, ws):
            ws.add(dist)
            installer._maybe_add_setuptools(ws, dist)

    # OK, we have the requested distributions and they're in the working
    # set, but they may have unmet requirements. We'll resolve these
    # requirements. This is code modified from
    # pkg_resources.WorkingSet.resolve. We can't reuse that code directly
    # because we have to constrain our requirements (see
    # versions_section_ignored_for_dependency_in_favor_of_site_packages in
    # zc.buildout.tests).
    requirements.reverse()  # Set up the stack.
    processed = {}  # This is a set of processed requirements.
    best = {}  # This is a mapping of key -> dist.
    # Note that we don't use the existing environment, because we want
    # to look for new eggs unless what we have is the best that
    # matches the requirement.
    env = pkg_resources.Environment(ws.entries)

    while requirements:
        # Process dependencies breadth-first.
        req = installer._constrain(requirements.pop(0))
        if req in processed:
            # Ignore cyclic or redundant dependencies.
            continue
        dist = best.get(req.key)
        if dist is None:
            # Find the best distribution and add it to the map.
            dist = ws.by_key.get(req.key)
            if dist is None:
                try:
                    dist = best[req.key] = env.best_match(req, ws)
                except pkg_resources.VersionConflict, err:
                    raise pkg_resources.VersionConflict(err, ws)
                if dist is None or (
                        dist.location in installer._site_packages and not
                        installer.allow_site_package_egg(
                            dist.project_name)):
                    # If we didn't find a distribution in the
                    # environment, or what we found is from site
                    # packages and not allowed to be there, try
                    # again.
                    if destination:
                        installer_logger.debug('Getting required %r',
                                               str(req))
                    else:
                        installer_logger.debug('Adding required %r',
                                               str(req))
                    #self._log_requirement(ws, req)
                    for dist in installer._get_dist(req, ws):
                        ws.add(dist)
                        installer._maybe_add_setuptools(ws, dist)
        if req in autoextras_requirements:
            # NOTE(review): `self` is undefined in this module-level function
            # (and `autoextras_requirements` comes from enclosing scope) --
            # this branch would raise NameError if taken; confirm intended
            # owner of get_wanted_extras.
            extras = self.get_wanted_extras(dist)
            if extras is None:
                extras = []
            extras.extend(req.extras)
        else:
            extras = req.extras
        if dist not in req:
            # Oops, the "best" so far conflicts with a dependency.
            raise pkg_resources.VersionConflict(
                pkg_resources.VersionConflict(dist, req), ws)
        # Queue this dist's own requirements (reversed to keep pop(0) order).
        requirements.extend(dist.requires(extras)[::-1])
        processed[req] = True
        # BBB zc.buildout<1.5 doesn't have a _site_packages attr.
        if hasattr(installer, '_site_packages'
                   ) and dist.location in installer._site_packages:
            installer_logger.debug('Egg from site-packages: %s', dist)