def run(self, alert_id=None, alias=None):
    """
    Retrieve details of alerts in OpsGenie.

    Args:
    - alert_id: Alert id of the alert.
    - alias: Alias of the alert.

    Raises:
    - ValueError: If alert_id and alias are None.

    Returns:
    - dict: Data from OpsGenie.
    """
    payload = {}

    if alert_id:
        identifier = pathname2url(alert_id)
        payload['identifierType'] = 'id'
    elif alias:
        identifier = pathname2url(alias)
        payload['identifierType'] = 'alias'
    else:
        raise ValueError("Need one of alert_id or alias.")

    data = self._req(
        "GET",
        "v2/alerts/" + identifier,
        payload=payload)
    return data
def __init__(self, file):
    base = urljoin("file:", pathname2url(os.getcwd()))
    system_id = URIRef(urljoin("file:", pathname2url(file.name)), base=base)
    super(FileInputSource, self).__init__(system_id)
    self.file = file
    self.setByteStream(file)
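# A minimal usage sketch for the constructor above (the file name is
# hypothetical): wrapping an already-open file yields an input source whose
# system id is a file: URL resolved against the current working directory.
#
#     with open('data.rdf', 'rb') as fh:
#         source = FileInputSource(fh)
#         # the system id is something like file:///<cwd>/data.rdf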
def setup_psd_pregenerated(workflow, tags=None):
    '''
    Setup CBC workflow to use pregenerated psd files.
    The file given in cp.get('workflow-psd', 'psd-pregenerated-file-(ifo)')
    will be used as the --psd-file argument to geom_nonspinbank,
    geom_aligned_bank and pycbc_plot_psd_file.

    Parameters
    ----------
    workflow: pycbc.workflow.core.Workflow
        An instanced class that manages the constructed workflow.
    tags : list of strings
        If given these tags are used to uniquely name and identify output files
        that would be produced in multiple calls to this function.

    Returns
    --------
    psd_files : pycbc.workflow.core.FileList
        The FileList holding the PSD files
    '''
    if tags is None:
        tags = []
    psd_files = FileList([])

    cp = workflow.cp
    global_seg = workflow.analysis_time
    user_tag = "PREGEN_PSD"

    # Check for one psd for all ifos
    try:
        pre_gen_file = cp.get_opt_tags('workflow-psd',
                                       'psd-pregenerated-file', tags)
        pre_gen_file = resolve_url(pre_gen_file)
        file_url = urljoin('file:', pathname2url(pre_gen_file))
        curr_file = File(workflow.ifos, user_tag, global_seg, file_url,
                         tags=tags)
        curr_file.PFN(file_url, site='local')
        psd_files.append(curr_file)
    except ConfigParser.Error:
        # Check for one psd per ifo
        for ifo in workflow.ifos:
            try:
                pre_gen_file = cp.get_opt_tags(
                    'workflow-psd',
                    'psd-pregenerated-file-%s' % ifo.lower(), tags)
                pre_gen_file = resolve_url(pre_gen_file)
                file_url = urljoin('file:', pathname2url(pre_gen_file))
                curr_file = File(ifo, user_tag, global_seg, file_url,
                                 tags=tags)
                curr_file.PFN(file_url, site='local')
                psd_files.append(curr_file)
            except ConfigParser.Error:
                # It's unlikely, but not impossible, that only some ifos
                # will have pregenerated PSDs
                logging.warn("No psd file specified for IFO %s." % (ifo,))
                pass

    return psd_files
def resolve(self, parent_path=None):
    """Calculates the location at which this response should be stored
    as a file."""
    filepath = self.filepath
    if not isinstance(filepath, string_types):
        raise ValueError("Invalid filepath [%r]" % filepath)
    if parent_path and isinstance(parent_path, string_types):
        return pathname2url(relate(filepath, parent_path))
    return pathname2url(filepath)
def test_data_utils(in_tmpdir):
    """Tests get_file from a url, plus extraction and validation.
    """
    dirname = 'data_utils'

    with open('test.txt', 'w') as text_file:
        text_file.write('Float like a butterfly, sting like a bee.')

    with tarfile.open('test.tar.gz', 'w:gz') as tar_file:
        tar_file.add('test.txt')

    with zipfile.ZipFile('test.zip', 'w') as zip_file:
        zip_file.write('test.txt')

    origin = urljoin('file://', pathname2url(os.path.abspath('test.tar.gz')))

    path = get_file(dirname, origin, untar=True)
    filepath = path + '.tar.gz'
    data_keras_home = os.path.dirname(os.path.dirname(os.path.abspath(filepath)))
    os.remove(filepath)

    _keras_home = os.path.join(os.path.abspath('.'), '.keras')
    if not os.path.exists(_keras_home):
        os.makedirs(_keras_home)
    os.environ['KERAS_HOME'] = _keras_home
    path = get_file(dirname, origin, untar=True)
    filepath = path + '.tar.gz'
    data_keras_home = os.path.dirname(os.path.dirname(os.path.abspath(filepath)))
    os.environ.pop('KERAS_HOME')
    shutil.rmtree(_keras_home)

    path = get_file(dirname, origin, untar=True)
    filepath = path + '.tar.gz'
    hashval_sha256 = _hash_file(filepath)
    hashval_md5 = _hash_file(filepath, algorithm='md5')
    path = get_file(dirname, origin, md5_hash=hashval_md5, untar=True)
    path = get_file(filepath, origin, file_hash=hashval_sha256, extract=True)
    assert os.path.exists(filepath)
    assert validate_file(filepath, hashval_sha256)
    assert validate_file(filepath, hashval_md5)
    os.remove(filepath)
    os.remove('test.tar.gz')

    origin = urljoin('file://', pathname2url(os.path.abspath('test.zip')))

    hashval_sha256 = _hash_file('test.zip')
    hashval_md5 = _hash_file('test.zip', algorithm='md5')
    path = get_file(dirname, origin, md5_hash=hashval_md5, extract=True)
    path = get_file(dirname, origin, file_hash=hashval_sha256, extract=True)
    assert os.path.exists(path)
    assert validate_file(path, hashval_sha256)
    assert validate_file(path, hashval_md5)
    os.remove(path)
    os.remove(os.path.join(os.path.dirname(path), 'test.txt'))
    os.remove('test.txt')
    os.remove('test.zip')
def resolve(self, parent_path=None):
    """Returns a relative url at which this resource should be accessed
    by the parent file.
    """
    filepath = self.filepath
    if not isinstance(filepath, string_types):
        raise ValueError("Invalid filepath [%r]" % filepath)
    if parent_path and isinstance(parent_path, string_types):
        return pathname2url(relate(filepath, parent_path))
    return pathname2url(filepath)
def run(self, alert_id=None, alias=None, user=None, note=None,
        source="StackStorm"):
    """
    Close alert request is used to close open alerts in OpsGenie.

    Args:
    - alert_id: Id of the alert that will be closed.
    - alias: Alias of the alert that will be closed.
    - user: Default owner of the execution.
    - note: Additional alert note.
    - source: User defined field to specify source of close action.

    Returns:
    - dict: Data from OpsGenie.

    Raises:
    - ValueError: If alias and alert_id are None.
    """
    body = {"source": source}
    parameters = {}

    if alert_id:
        identifier = pathname2url(alert_id)
        # default
        parameters['identifierType'] = 'id'
    elif alias:
        identifier = pathname2url(alias)
        parameters['identifierType'] = 'alias'
    else:
        raise ValueError("Need one of alias or alert_id to be set.")

    if user:
        if len(user) > 100:
            raise ValueError("user is too long, can't be over 100 chars.")
        else:
            body['user'] = user

    if note:
        if len(note) > 25000:
            raise ValueError(
                "note is too long, can't be over 25000 chars.")
        else:
            body['note'] = note

    data = self._req(
        "POST",
        "v2/alerts/" + identifier + "/close",
        body=body,
        payload=parameters)
    return data
def test_get_file_and_validate_it(self):
    """Tests get_file from a url, plus extraction and validation.
    """
    dest_dir = self.get_temp_dir()
    orig_dir = self.get_temp_dir()

    text_file_path = os.path.join(orig_dir, 'test.txt')
    zip_file_path = os.path.join(orig_dir, 'test.zip')
    tar_file_path = os.path.join(orig_dir, 'test.tar.gz')

    with open(text_file_path, 'w') as text_file:
        text_file.write('Float like a butterfly, sting like a bee.')

    with tarfile.open(tar_file_path, 'w:gz') as tar_file:
        tar_file.add(text_file_path)

    with zipfile.ZipFile(zip_file_path, 'w') as zip_file:
        zip_file.write(text_file_path)

    origin = urljoin('file://', pathname2url(os.path.abspath(tar_file_path)))

    path = keras.utils.data_utils.get_file('test.txt', origin,
                                           untar=True, cache_subdir=dest_dir)
    filepath = path + '.tar.gz'
    hashval_sha256 = keras.utils.data_utils._hash_file(filepath)
    hashval_md5 = keras.utils.data_utils._hash_file(filepath,
                                                    algorithm='md5')
    path = keras.utils.data_utils.get_file(
        'test.txt', origin, md5_hash=hashval_md5,
        untar=True, cache_subdir=dest_dir)
    path = keras.utils.data_utils.get_file(
        filepath, origin, file_hash=hashval_sha256,
        extract=True, cache_subdir=dest_dir)
    self.assertTrue(os.path.exists(filepath))
    self.assertTrue(keras.utils.data_utils.validate_file(filepath,
                                                         hashval_sha256))
    self.assertTrue(keras.utils.data_utils.validate_file(filepath,
                                                         hashval_md5))
    os.remove(filepath)

    origin = urljoin('file://', pathname2url(os.path.abspath(zip_file_path)))

    hashval_sha256 = keras.utils.data_utils._hash_file(zip_file_path)
    hashval_md5 = keras.utils.data_utils._hash_file(zip_file_path,
                                                    algorithm='md5')
    path = keras.utils.data_utils.get_file(
        'test', origin, md5_hash=hashval_md5,
        extract=True, cache_subdir=dest_dir)
    path = keras.utils.data_utils.get_file(
        'test', origin, file_hash=hashval_sha256,
        extract=True, cache_subdir=dest_dir)
    self.assertTrue(os.path.exists(path))
    self.assertTrue(keras.utils.data_utils.validate_file(path,
                                                         hashval_sha256))
    self.assertTrue(keras.utils.data_utils.validate_file(path,
                                                         hashval_md5))
def path_to_url(path):
    """Convert a system path to a URL."""

    if os.path.sep == '/':
        return path

    return pathname2url(path)
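# Rough illustration of the two branches above (paths are hypothetical).
# On POSIX (os.path.sep == '/') the path is returned untouched; on Windows
# pathname2url flips the backslash separators:
#
#     path_to_url('docs/index.md')     # POSIX   -> 'docs/index.md'
#     path_to_url('docs\\index.md')    # Windows -> 'docs/index.md'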
def get_openapi_schema(version):
    path = 'resources/schemas/v{0}/schema.json'.format(version)
    path_resource = resource_filename('connexion.openapi_spec_validator', path)
    path_full = os.path.join(os.path.dirname(__file__), path_resource)
    schema = read_yaml_file(path_full)
    schema_url = parse.urljoin('file:', request.pathname2url(path_full))
    return schema, schema_url
def _quoteLocalPath(filename):
    """Quote local paths to file URL-s.

    CifFile reads files with urlopen, which fails for Windows paths or
    for paths containing ":".

    Parameters
    ----------
    filename : str
        The path to be corrected.

    Returns
    -------
    str
        The fixed URL if `filename` contains ":" and does not form an
        http, https or ftp URL; otherwise `filename` unchanged.
    """
    rv = filename
    cnvflag = False
    if ':' in filename:
        head = filename.split(':', 1)[0].lower()
        cnvflag = head.isalpha() and head not in ('http', 'https', 'ftp')
    if cnvflag:
        from six.moves.urllib.request import pathname2url
        rv = pathname2url(filename)
    return rv
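# Hedged examples of which inputs _quoteLocalPath converts (the Windows path
# is hypothetical, and the exact quoting is platform dependent):
#
#     _quoteLocalPath('sample.cif')                # no ':'     -> unchanged
#     _quoteLocalPath('http://host/sample.cif')    # http URL   -> unchanged
#     _quoteLocalPath(r'C:\data\sample.cif')       # drive path -> pathname2url result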
def test_create_with_json_file_uri(self):
    # The contents of env_v2.json must be equivalent to ENVIRONMENT
    path = pkg.resource_filename(
        'mistralclient',
        'tests/unit/resources/env_v2.json'
    )

    # Convert the file path to file URI
    uri = parse.urljoin('file:', request.pathname2url(path))
    data = collections.OrderedDict(
        utils.load_content(
            utils.get_contents_if_file(uri)
        )
    )

    self.requests_mock.post(self.TEST_URL + URL_TEMPLATE,
                            status_code=201,
                            json=data)

    file_input = {'file': uri}
    env = self.environments.create(**file_input)

    self.assertIsNotNone(env)

    expected_data = copy.deepcopy(data)
    expected_data['variables'] = json.dumps(expected_data['variables'])

    self.assertEqual(expected_data, self.requests_mock.last_request.json())
def test_generate_hashes_with_editable():
    small_fake_package_dir = os.path.join(
        os.path.split(__file__)[0], 'test_data', 'small_fake_package')
    small_fake_package_url = 'file:' + pathname2url(small_fake_package_dir)
    runner = CliRunner()
    with runner.isolated_filesystem():
        with open('requirements.in', 'w') as fp:
            fp.write('-e {}\n'.format(small_fake_package_url))
            fp.write('pytz==2017.2\n')
        out = runner.invoke(
            cli, [
                '--generate-hashes',
                '--index-url', PyPIRepository.DEFAULT_INDEX_URL,
            ],
        )
    expected = (
        '#\n'
        '# This file is autogenerated by pip-compile\n'
        '# To update, run:\n'
        '#\n'
        '# pip-compile --generate-hashes --output-file requirements.txt requirements.in\n'
        '#\n'
        '-e {}\n'
        'pytz==2017.2 \\\n'
        ' --hash=sha256:d1d6729c85acea5423671382868627129432fba9a89ecbb248d8d1c7a9f01c67 \\\n'
        ' --hash=sha256:f5c056e8f62d45ba8215e5cb8f50dfccb198b4b9fbea8500674f3443e4689589\n'
    ).format(small_fake_package_url)
    assert out.exit_code == 0
    assert expected in out.output
def test_get_downloads_with_channels(self):
    """
    Testing whether the API returns the right download when a download is added
    """
    test_channel_name = 'testchan'

    def verify_download(downloads):
        downloads_json = json.loads(downloads)
        self.assertEqual(len(downloads_json['downloads']), 3)
        self.assertEqual(test_channel_name,
                         [d for d in downloads_json["downloads"]
                          if d["channel_download"]][0]["name"])

    video_tdef, _ = self.create_local_torrent(
        os.path.join(TESTS_DATA_DIR, 'video.avi'))
    self.session.start_download_from_tdef(video_tdef, DownloadStartupConfig())
    self.session.start_download_from_uri("file:" + pathname2url(
        os.path.join(TESTS_DATA_DIR, "bak_single.torrent")))

    with db_session:
        my_channel = self.session.lm.mds.ChannelMetadata.create_channel(
            test_channel_name, 'test')
        my_channel.add_torrent_to_channel(video_tdef)
        torrent_dict = my_channel.commit_channel_torrent()
        self.session.lm.gigachannel_manager.updated_my_channel(
            TorrentDef.TorrentDef.load_from_dict(torrent_dict))

    self.should_check_equality = False
    return self.do_request('downloads?get_peers=1&get_pieces=1',
                           expected_code=200).addCallback(verify_download)
def run(self, scheduleIdentifier=None, scheduleIdentifierType=None,
        flat=None, date=None):
    """
    List current oncall participants of schedule.

    Args:
    - scheduleIdentifier: Identifier of the schedule.
    - scheduleIdentifierType: Type of the given identifier.
    - flat: If set, return only the names of on-call participants.
    - date: Date to list on-call participants for.

    Raises:
    - ValueError: If scheduleIdentifier is None.

    Returns:
    - dict: Data from OpsGenie.
    """
    payload = {}

    if scheduleIdentifier:
        identifier = pathname2url(scheduleIdentifier)
    else:
        # Without an identifier the request URL cannot be built.
        raise ValueError("scheduleIdentifier is required.")

    if scheduleIdentifierType:
        payload["scheduleIdentifierType"] = scheduleIdentifierType

    if flat:
        payload["flat"] = flat

    if date:
        payload["date"] = date

    data = self._req(
        "GET",
        "v2/schedules/" + identifier + "/on-calls",
        payload=payload)
    return data
def get_ipn_sky_files(workflow, file_url, tags=None):
    '''
    Retrieve the sky point files for searching over the IPN error box and
    populating it with injections.

    Parameters
    ----------
    workflow: pycbc.workflow.core.Workflow
        An instanced class that manages the constructed workflow.
    file_url : string
        The URL of the IPN sky points file.
    tags : list of strings
        If given these tags are used to uniquely name and identify output files
        that would be produced in multiple calls to this function.

    Returns
    --------
    sky_points_file : pycbc.workflow.core.File
        File object representing the IPN sky points file.
    '''
    tags = tags or []
    ipn_sky_points = resolve_url(file_url)
    sky_points_url = urljoin("file:", pathname2url(ipn_sky_points))
    sky_points_file = File(workflow.ifos, "IPN_SKY_POINTS",
                           workflow.analysis_time, file_url=sky_points_url,
                           tags=tags)
    sky_points_file.PFN(sky_points_url, site="local")

    return sky_points_file
def get_swagger_spec(settings):
    """Return a :class:`bravado_core.spec.Spec` constructed from
    the swagger specs in `pyramid_swagger.schema_directory`. If
    `pyramid_swagger.enable_swagger_spec_validation` is enabled the schema
    will be validated before returning it.

    :param settings: a pyramid registry settings with configuration for
        building a swagger schema
    :type settings: dict
    :rtype: :class:`bravado_core.spec.Spec`
    """
    schema_dir = settings.get('pyramid_swagger.schema_directory', 'api_docs/')
    schema_filename = settings.get('pyramid_swagger.schema_file',
                                   'swagger.json')
    schema_path = os.path.join(schema_dir, schema_filename)
    schema_url = urlparse.urljoin('file:',
                                  pathname2url(os.path.abspath(schema_path)))

    handlers = build_http_handlers(None)  # don't need http_client for file:
    file_handler = handlers['file']
    spec_dict = file_handler(schema_url)

    return Spec.from_dict(
        spec_dict,
        config=create_bravado_core_config(settings),
        origin_url=schema_url)
def get_search_data(helper):
    search_query = None
    search_name = helper.settings.get('search_name')
    results_link = helper.settings.get('results_link')
    search_uri = helper.settings.get('search_uri')

    helper.log_info('Alert name is ' + search_name)
    helper.log_info('Search URI is ' + search_uri)
    helper.log_info('Manually created Search URI is ' +
                    '/services/saved/searches/' + quote(search_name))

    if not search_name:
        helper.log_info('Creating search uri')
        search_app_name = helper.settings.get('app', '')
        if '|' in search_app_name:
            search_name = '//|'.join(search_app_name.split('|'))
        search_uri = pathname2url('/services/saved/searches/' +
                                  quote(search_name))

    r = splunk.rest.simpleRequest(search_uri,
                                  sessionKey=helper.session_key,
                                  getargs={'output_mode': 'json'},
                                  method='GET')
    result_op = json.loads(r[1])
    if len(result_op['entry']) > 0:
        search_query = result_op['entry'][0]['content']['qualifiedSearch']

    helper.log_info('Search query is ' + search_query)
    return search_query, search_name, results_link
def test_locally_available_editable_package_is_not_archived_in_cache_dir(
        tmpdir):
    """
    piptools will not create an archive for a locally available editable
    requirement
    """
    cache_dir = tmpdir.mkdir('cache_dir')

    fake_package_dir = os.path.join(
        os.path.split(__file__)[0], 'test_data', 'small_fake_package')
    fake_package_dir = 'file:' + pathname2url(fake_package_dir)

    with mock.patch('piptools.repositories.pypi.CACHE_DIR',
                    new=str(cache_dir)):
        runner = CliRunner()
        with runner.isolated_filesystem():
            with open('requirements.in', 'w') as req_in:
                req_in.write('-e ' + fake_package_dir)  # require editable fake package

            out = runner.invoke(cli, ['-n'])

            assert out.exit_code == 0
            assert fake_package_dir in out.output
            assert 'six==1.10.0' in out.output

    # we should not find any archived file in {cache_dir}/pkgs
    assert not os.listdir(os.path.join(str(cache_dir), 'pkgs'))
def make_veto_table(workflow, out_dir, vetodef_file=None, tags=None):
    """ Creates a node in the workflow for writing the veto_definer
    table. Returns a File instance for the output file.
    """
    if vetodef_file is None:
        if not workflow.cp.has_option_tags("workflow-segments",
                                           "segments-veto-definer-file", []):
            return None
        vetodef_file = workflow.cp.get_opt_tags("workflow-segments",
                                                "segments-veto-definer-file",
                                                [])
        file_url = urljoin('file:', pathname2url(vetodef_file))
        vdf_file = File(workflow.ifos, 'VETO_DEFINER',
                        workflow.analysis_time, file_url=file_url)
        vdf_file.PFN(file_url, site='local')
    else:
        vdf_file = vetodef_file

    if tags is None:
        tags = []
    makedir(out_dir)

    node = PlotExecutable(workflow.cp, 'page_vetotable', ifos=workflow.ifos,
                          out_dir=out_dir, tags=tags).create_node()
    node.add_input_opt('--veto-definer-file', vdf_file)
    node.new_output_file_opt(workflow.analysis_time, '.html', '--output-file')
    workflow += node
    return node.output_files[0]
def test_stress_file_publish(self):
    """Publish lots of packages rapidly ensuring that file
    publication can handle it."""

    location = self.dc.get_repodir()
    location = os.path.abspath(location)
    location = urlunparse(("file", "", pathname2url(location), "", "", ""))

    repouriobj = publisher.RepositoryURI(location)
    repo = publisher.Repository(origins=[repouriobj])
    pub = publisher.Publisher(prefix="repo1", repository=repo)
    xport_cfg = transport.GenericTransportCfg()
    xport_cfg.add_publisher(pub)
    xport = transport.Transport(xport_cfg)

    # Each version number must be unique since multiple packages
    # will be published within the same second.
    for i in range(100):
        pf = fmri.PkgFmri("foo@{0:d}.0".format(i))
        t = trans.Transaction(location, pkg_name=str(pf),
                              xport=xport, pub=pub)
        t.open()
        pkg_fmri, pkg_state = t.close()
        self.debug("{0}: {1}".format(pkg_fmri, pkg_state))
def path_to_local_file_uri(path):
    """Convert local filesystem path to local file uri."""
    path = pathname2url(path)
    if path == posixpath.abspath(path):
        return 'file://{path}'.format(path=path)
    else:
        return 'file:{path}'.format(path=path)
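# Behaviour sketch for the helper above (inputs hypothetical): absolute POSIX
# paths survive pathname2url unchanged and take the 'file://' branch, while
# relative paths fall through to the bare-scheme form.
#
#     path_to_local_file_uri('/tmp/mlruns')   # -> 'file:///tmp/mlruns'
#     path_to_local_file_uri('mlruns')        # -> 'file:mlruns'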
def path_to_local_sqlite_uri(path):
    """
    Convert local filesystem path to sqlite uri.
    """
    path = posixpath.abspath(pathname2url(os.path.abspath(path)))
    prefix = "sqlite://" if sys.platform == "win32" else "sqlite:///"
    return prefix + path
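# Illustrative results (paths hypothetical; the POSIX case is shown exactly,
# the Windows case assumes pathname2url yields '///C:/...' there, which
# posixpath.abspath collapses to '/C:/...'):
#
#     path_to_local_sqlite_uri('/tmp/mlruns.db')   # POSIX   -> 'sqlite:////tmp/mlruns.db'
#     path_to_local_sqlite_uri('C:\\mlruns.db')    # Windows -> 'sqlite:///C:/mlruns.db'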
def test_create_with_json_file_uri(self):
    # The contents of env_v2.json must be equivalent to ENVIRONMENT
    path = pkg.resource_filename(
        'mistralclient',
        'tests/unit/resources/env_v2.json'
    )

    # Convert the file path to file URI
    uri = parse.urljoin('file:', request.pathname2url(path))
    data = OrderedDict(
        utils.load_content(
            utils.get_contents_if_file(uri)
        )
    )

    mock = self.mock_http_post(content=data)
    file_input = {'file': uri}
    env = self.environments.create(**file_input)

    self.assertIsNotNone(env)

    expected_data = copy.deepcopy(data)
    expected_data['variables'] = json.dumps(expected_data['variables'])

    mock.assert_called_once_with(URL_TEMPLATE, json.dumps(expected_data))
def get_swagger_spec(settings):
    """Return a :class:`bravado_core.spec.Spec` constructed from
    the swagger specs in `pyramid_swagger.schema_directory`. If
    `pyramid_swagger.enable_swagger_spec_validation` is enabled the schema
    will be validated before returning it.

    :param settings: a pyramid registry settings with configuration for
        building a swagger schema
    :type settings: dict
    :rtype: :class:`bravado_core.spec.Spec`
    """
    schema_dir = settings.get('pyramid_swagger.schema_directory', 'api_docs/')
    schema_filename = settings.get('pyramid_swagger.schema_file',
                                   'swagger.json')
    schema_path = os.path.join(schema_dir, schema_filename)
    schema_url = urlparse.urljoin('file:',
                                  pathname2url(os.path.abspath(schema_path)))

    handlers = build_http_handlers(None)  # don't need http_client for file:
    file_handler = handlers['file']
    spec_dict = file_handler(schema_url)

    return Spec.from_dict(spec_dict,
                          config=create_bravado_core_config(settings),
                          origin_url=schema_url)
def path_to_file_uri(path):
    """Convert local filesystem path to legal File URIs as described in:
    http://en.wikipedia.org/wiki/File_URI_scheme
    """
    x = pathname2url(os.path.abspath(path))
    if os.name == 'nt':
        x = x.replace('|', ':')  # http://bugs.python.org/issue5861
    return 'file:///%s' % x.lstrip('/')
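# A quick sanity sketch (path hypothetical, POSIX): spaces are percent-quoted
# by pathname2url and the result always carries the three-slash prefix.
#
#     path_to_file_uri('/tmp/some file.txt')
#     # -> 'file:///tmp/some%20file.txt'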
def path_to_url(path):
    """
    Convert a path to a file: URL. The path will be made absolute and have
    quoted path parts.
    """
    path = os.path.normpath(os.path.abspath(path))
    url = urllib_parse.urljoin('file:', urllib_request.pathname2url(path))
    return url
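# Hedged example of the helper above (cwd and path hypothetical):
#
#     # with the current directory at /home/user
#     path_to_url('spam eggs')
#     # -> 'file:///home/user/spam%20eggs'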
def _authenticated_server_proxy(self):
    r"""
    Get an XML-RPC proxy object that is authenticated using the user's
    username and password.

    EXAMPLES::

        sage: dev.trac._authenticated_server_proxy # not tested
        Trac username: username
        Trac password:
        Should I store your password in a configuration file for future sessions? (This configuration file might be readable by privileged users on this system.) [yes/No]
        <ServerProxy for trac.sagemath.org/login/xmlrpc>

    TESTS:

    To make sure that doctests do not tamper with the live trac server, it
    is an error to access this property during a doctest (The ``dev``
    object during doctests is also modified to prevent this)::

        sage: from sage.dev.test.config import DoctestConfig
        sage: from sage.dev.test.user_interface import DoctestUserInterface
        sage: from sage.dev.trac_interface import TracInterface
        sage: config = DoctestConfig()
        sage: trac = TracInterface(config['trac'], DoctestUserInterface(config['UI']))
        sage: trac._authenticated_server_proxy
        Traceback (most recent call last):
        ...
        AssertionError: doctest tried to access an authenticated session to trac
    """
    import sage.doctest
    assert not sage.doctest.DOCTEST_MODE, \
        "doctest tried to access an authenticated session to trac"

    self._check_password_timeout()

    if self.__authenticated_server_proxy is None:
        from sage.env import REALM
        realm = self._config.get('realm', REALM)
        from sage.env import TRAC_SERVER_URI
        server = self._config.get('server', TRAC_SERVER_URI)

        url = urljoin(server, pathname2url(os.path.join('login', 'xmlrpc')))

        while True:
            from xmlrpclib import ServerProxy
            from digest_transport import DigestTransport
            from trac_error import TracAuthenticationError
            transport = DigestTransport()
            transport.add_authentication(realm=realm, url=server,
                                         username=self._username,
                                         password=self._password)
            proxy = ServerProxy(url, transport=transport)
            try:
                proxy.system.listMethods()
                break
            except TracAuthenticationError:
                self._UI.error("Invalid username/password")
                self.reset_username()

        self.__authenticated_server_proxy = proxy

    self._postpone_password_timeout()
    return self.__authenticated_server_proxy
def absolutize(self, uri, defrag=1):
    base = urljoin("file:", pathname2url(os.getcwd()))
    result = urljoin("%s/" % base, uri, allow_fragments=not defrag)
    if defrag:
        result = urldefrag(result)[0]
    if not defrag:
        if uri and uri[-1] == "#" and result[-1] != "#":
            result = "%s#" % result
    return URIRef(result)
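# Sketch of how a relative identifier is absolutized (cwd and the `graph`
# receiver are hypothetical): with the working directory at
# /home/user/project,
#
#     graph.absolutize('data.ttl')
#     # -> URIRef('file:///home/user/project/data.ttl')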
def make_element(self, elem, attr, url, pos):
    LOGGER.debug(
        "Generating element for tag <%s>:[%s] [url] <%s> [attr] <%s> [pos] <%s>"
        % (elem.tag, elem, url, attr, pos)
    )

    if self._validate_url(url):
        LOGGER.debug("Url was valid: [%s]" % url)
    else:
        LOGGER.debug('Url was not valid: [%s]' % url)
        return

    utx = self._get_utx()

    assert utx is not None, "WebPage utx not set."
    assert utx.file_path is not None, "WebPage file_path is not generated by utx!"

    tag = getattr(elem, 'tag', 'default')
    klass = self._make_element(tag)
    if klass is None:
        return

    # Populate the object with basic properties
    obj = klass(url, base_url=utx.base_url, base_path=utx.base_path)
    #
    # obj.tag = tag  # A tag specifier is required
    #
    # assert obj.file_path is not None, "File Path was not generated by the handler."
    #
    # #: Calculate a path relative from the parent WebPage object
    # obj.rel_path = cached_path2url_relate(obj.file_path, utx.file_path)
    #
    rel_path = pathname2url(obj.relative_to(utx.file_path))

    assert rel_path is not None, "Relative Path was not generated by the handler."

    # Remove integrity or cors check from the file
    elem.attrib.pop('integrity', None)
    elem.attrib.pop('crossorigin', None)

    # Change the url in the object depending on the case
    if attr is None:
        new = elem.text[:pos] + rel_path + elem.text[len(url) + pos:]
        elem.text = new
    else:
        cur = elem.get(attr)
        if not pos and len(cur) == len(url):
            new = rel_path  # most common case
        else:
            new = cur[:pos] + rel_path + cur[pos + len(url):]
        elem.set(attr, new)

    LOGGER.debug("Remapped url [%s] to the path [%s]" % (url, rel_path))
    return obj
def path_to_local_file_uri(path):
    """
    Convert local filesystem path to a local file uri.
    """
    path = pathname2url(path)
    if path == posixpath.abspath(path):
        return "file://{path}".format(path=path)
    else:
        return "file:{path}".format(path=path)
def setUp(self):
    """Setup."""
    self.gcold = gc.isenabled()
    gc.collect()
    gc.disable()
    self.graph = Graph(store=self.store)
    if self.path is None:
        self.path = pathname2url(mkdtemp())
    self.graph.open(self.path)
    self.input = Graph()
def run(self, alert_id=None, alias=None, user=None, source="StackStorm"):
    """
    Delete an OpsGenie alert.

    Args:
    - alert_id: Id of the alert that will be deleted.
    - alias: Alias of the alert that will be deleted.
    - user: Default owner of the execution.
    - source: User defined field to specify source of delete action.

    Returns:
    - dict: Data from OpsGenie.

    Raises:
    - ValueError: If alert_id and alias are None.
    """
    payload = {}

    if source:
        if len(source) > 100:
            raise ValueError(
                "source is too long, can't be over 100 chars.")
        else:
            payload['source'] = source

    if alert_id:
        identifier = pathname2url(alert_id)
        payload['identifierType'] = 'id'
    elif alias:
        identifier = pathname2url(alias)
        payload['identifierType'] = 'alias'
    else:
        raise ValueError("Need one of alert_id or alias.")

    if user:
        if len(user) > 100:
            raise ValueError("User is too long, can't be over 100 chars.")
        else:
            payload["user"] = user

    data = self._req("DELETE", "v2/alerts/" + identifier, payload=payload)
    return data
def read(self, request, vendor, name, version):
    from_version = request.GET.get('from')
    if from_version is not None:
        try:
            from_version = Version(from_version)
        except:
            return build_error_response(
                request, 422,
                _("Missing parameter: template_uri or file"))

    resource = get_object_or_404(CatalogueResource, vendor=vendor,
                                 short_name=name, version=version)

    resource_info = resource.get_processed_info(process_urls=False)
    if resource_info['changelog'] == '':
        raise Http404

    doc_relative_path = url2pathname(resource_info['changelog'])
    doc_base_url = force_trailing_slash(
        urljoin(resource.get_template_url(request=request, for_base=True),
                pathname2url(os.path.dirname(doc_relative_path))))
    doc_path = os.path.join(
        catalogue_utils.wgt_deployer.get_base_dir(vendor, name, version),
        doc_relative_path)

    (doc_filename_root, doc_filename_ext) = os.path.splitext(doc_path)
    localized_doc_path = (doc_filename_root + '.' + get_language() +
                          doc_filename_ext)
    try:
        doc_code = download_local_file(localized_doc_path).decode('utf-8')
    except:
        try:
            doc_code = download_local_file(doc_path).decode('utf-8')
        except:
            msg = _('Error opening the changelog file')
            doc_code = '<div class="margin-top: 10px"><p>%s</p></div>' % msg

    doc_pre_html = markdown.markdown(
        doc_code,
        output_format='xhtml5',
        extensions=[
            'markdown.extensions.codehilite',
            'markdown.extensions.fenced_code',
        ])

    if from_version:
        doc_pre_html = filter_changelog(doc_pre_html, from_version)
        if doc_pre_html.strip() == '':
            raise Http404

    doc = clean_html(doc_pre_html, base_url=doc_base_url)
    return HttpResponse(doc, content_type='application/xhtml+xml; charset=UTF-8')
def test_data_utils(in_tmpdir):
    """Tests get_file from a url, plus extraction and validation.
    """
    dirname = 'data_utils'

    with open('test.txt', 'w') as text_file:
        text_file.write('Float like a butterfly, sting like a bee.')

    with tarfile.open('test.tar.gz', 'w:gz') as tar_file:
        tar_file.add('test.txt')

    with zipfile.ZipFile('test.zip', 'w') as zip_file:
        zip_file.write('test.txt')

    origin = urljoin('file://', pathname2url(os.path.abspath('test.tar.gz')))

    path = get_file(dirname, origin, untar=True)
    filepath = path + '.tar.gz'
    hashval_sha256 = _hash_file(filepath)
    hashval_md5 = _hash_file(filepath, algorithm='md5')
    path = get_file(dirname, origin, md5_hash=hashval_md5, untar=True)
    path = get_file(filepath, origin, file_hash=hashval_sha256, extract=True)
    assert os.path.exists(filepath)
    assert validate_file(filepath, hashval_sha256)
    assert validate_file(filepath, hashval_md5)
    os.remove(filepath)
    os.remove('test.tar.gz')

    origin = urljoin('file://', pathname2url(os.path.abspath('test.zip')))

    hashval_sha256 = _hash_file('test.zip')
    hashval_md5 = _hash_file('test.zip', algorithm='md5')
    path = get_file(dirname, origin, md5_hash=hashval_md5, extract=True)
    path = get_file(dirname, origin, file_hash=hashval_sha256, extract=True)
    assert os.path.exists(path)
    assert validate_file(path, hashval_sha256)
    assert validate_file(path, hashval_md5)
    os.remove(path)
    os.remove('test.txt')
    os.remove('test.zip')
def test_editable_package(tmpdir):
    """ piptools can compile an editable """
    fake_package_dir = os.path.join(os.path.split(__file__)[0],
                                    'test_data', 'small_fake_package')
    fake_package_dir = 'file:' + pathname2url(fake_package_dir)
    runner = CliRunner()
    with runner.isolated_filesystem():
        with open('requirements.in', 'w') as req_in:
            req_in.write('-e ' + fake_package_dir)  # require editable fake package

        out = runner.invoke(cli, ['-n'])

        assert out.exit_code == 0
        assert fake_package_dir in out.output
        assert 'six==1.10.0' in out.output
def on_confirm_add_directory_dialog(self, action):
    if action == 0:
        for torrent_file in self.selected_torrent_files:
            escaped_uri = u"file:%s" % pathname2url(torrent_file.encode('utf-8'))
            self.perform_start_download_request(
                escaped_uri,
                self.window().tribler_settings['download_defaults']['anonymity_enabled'],
                self.window().tribler_settings['download_defaults']['safeseeding_enabled'],
                self.tribler_settings['download_defaults']['saveas'],
                [], 0)

    if self.dialog:
        self.dialog.close_dialog()
        self.dialog = None
def test_live_downloads(self):
    QTest.mouseClick(window.left_menu_button_home, Qt.LeftButton)
    QTest.mouseClick(window.home_tab_torrents_button, Qt.LeftButton)
    self.screenshot(window, name="home_page_torrents_loading")

    # Start downloading some torrents
    if 'TORRENTS_DIR' in os.environ:
        torrent_dir = os.environ.get('TORRENTS_DIR')
    else:
        torrent_dir = os.path.join(
            os.path.join(os.path.dirname(__file__), os.pardir),
            "data", "linux_torrents")
    window.selected_torrent_files = [
        pathname2url(torrent_file)
        for torrent_file in glob.glob(torrent_dir + "/*.torrent")]
    window.on_confirm_add_directory_dialog(0)

    self.go_to_and_wait_for_downloads()
    QTest.qWait(2000)

    with open(output_file, "w") as output:
        output.write("time, upload, download\n")

        def download_refreshed(_):
            line = "%s, %s, %s\n" % (time.time(),
                                     window.downloads_page.total_upload / 1000,
                                     window.downloads_page.total_download / 1000)
            output.write(line)

        window.downloads_page.received_downloads.connect(download_refreshed)
        QTest.qWait(test_timeout)

        # Stop downloads after timeout
        window.downloads_page.received_downloads.disconnect()
        window.downloads_page.stop_loading_downloads()
        QTest.qWait(5000)

    # Plot graph
    data = numpy.genfromtxt(output_file, delimiter=',', skip_header=1,
                            skip_footer=0,
                            names=['time', 'upload', 'download'])
    figure = plot.figure()
    subplot = figure.add_subplot(111)
    subplot.set_title("Live downloads plot")
    subplot.set_xlabel('Time (seconds)')
    subplot.set_ylabel('Speed (kB/s)')
    subplot.plot(data['time'], data['upload'], color='g', label='upload')
    subplot.plot(data['time'], data['download'], color='r', label='download')
    subplot.legend()
    figure.savefig(output_file + '.png', bbox_inches='tight')
def get_contents_if_file(contents_or_file_name):
    """Get a UTF-8 decoded string from the given file name.
    """
    try:
        if parse.urlparse(contents_or_file_name).scheme:
            definition_url = contents_or_file_name
        else:
            path = os.path.abspath(contents_or_file_name)
            # 'file:///home/jf/scripts/test/my_async.yaml'
            definition_url = parse.urljoin(
                'file:',
                request.pathname2url(path)
            )
        return request.urlopen(definition_url).read().decode('utf8')
    except Exception:
        return contents_or_file_name
def get_link_path(target, base):
    """Returns a relative path to a target from a base.

    If base is an existing file, then its parent directory is considered.
    Otherwise, base is assumed to be a directory.

    Rationale: os.path.relpath is not available before Python 2.6
    """
    path = _get_pathname(target, base)
    # Windows Python 3 pathname2url doesn't accept bytes
    url = pathname2url(path if PY3 else path.encode('UTF-8'))
    if os.path.isabs(path):
        url = 'file:' + url
    # At least Jython seems to use 'C|/Path' and not 'C:/Path'
    if os.sep == '\\' and '|/' in url:
        url = url.replace('|/', ':/', 1)
    return url.replace('%5C', '/').replace('%3A', ':').replace('|', ':')
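# Hedged example (paths hypothetical, POSIX; assumes the _get_pathname helper
# returns a path relative to base when the target lies underneath it).
# Absolute results from _get_pathname would instead gain a 'file:' prefix.
#
#     get_link_path('/home/user/logs/log.html', '/home/user')
#     # -> 'logs/log.html'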
def deploy(self, wgt_file):
    template_content = wgt_file.get_template()
    template_parser = TemplateParser(template_content)

    widget_rel_dir = os.path.join(
        template_parser.get_resource_vendor(),
        template_parser.get_resource_name(),
        template_parser.get_resource_version(),
    )
    widget_dir = os.path.join(self._root_dir, widget_rel_dir)
    template_parser.set_base(pathname2url(widget_rel_dir) + '/')

    self._create_folders(widget_dir)
    wgt_file.extract(widget_dir)

    return template_parser
def write_item(self, item):
    title = item.get('title', 'Untitled')
    header = """<html lang="en">
<head>
<meta charset="utf-8" />
<title>%s</title>
</head>
<body>
""" % title
    body = self.make_body(item, title)
    closer = """
</body>
</html>
"""
    url = item['location']
    media_guid = hashlib.sha1(url).hexdigest()
    media_ext = '.html'
    path = 'full/%s%s' % (media_guid, media_ext)
    absolute_path = os.path.join(self.store.basedir, path)

    with codecs.open(absolute_path, 'wb', 'utf-8') as f:
        f.write(header)
        f.write(body)
        f.write(closer)

    item['inline_urls'] = [urljoin('file://', pathname2url(absolute_path))]
    item['inline_metas'] = [{
        'link_url': item['request_url'],
        'location': item['location'],
        'title': title,
        'content_type': 'text/html',
    }]

    checksum = None
    with open(absolute_path, 'rb') as f:
        checksum = md5sum(f)

    # Compatible with Twisted Deferred results
    results = [(True, {'url': url, 'path': path, 'checksum': checksum})]
    item = self.item_completed(results, item, self.spiderinfo)
    return item
def validate_swagger_schema(schema_dir, resource_listing):
    """Validate the structure of Swagger schemas against the spec.

    **Valid only for Swagger v1.2 spec**

    Note: It is possible that resource_listing is not present in
    the schema_dir. The path is passed in the call so that ssv
    can fetch the api-declaration files from the path.

    :param resource_listing: Swagger Spec v1.2 resource listing
    :type resource_listing: dict
    :param schema_dir: A path to Swagger spec directory
    :type schema_dir: string
    :raises: :py:class:`swagger_spec_validator.SwaggerValidationError`
    """
    schema_filepath = os.path.join(schema_dir, API_DOCS_FILENAME)
    swagger_spec_validator.validator12.validate_spec(
        resource_listing,
        urlparse.urljoin('file:',
                         pathname2url(os.path.abspath(schema_filepath))),
    )
def test_file_string_or_uri(self):
    data = '{ "some": "data here"}'
    with tempfile.NamedTemporaryFile(delete=False) as tmp:
        tmp.write(data.encode('utf-8'))
        tmp.close()

    output = _load_file_string_or_uri(tmp.name, 'test')
    self.assertEqual(get_file_json(tmp.name), output)

    uri = urljoin('file:', pathname2url(tmp.name))
    output = _load_file_string_or_uri(uri, 'test')
    self.assertEqual(get_file_json(tmp.name), output)

    os.unlink(tmp.name)

    output = _load_file_string_or_uri(data, 'test')
    self.assertEqual(shell_safe_json_parse(data), output)

    self.assertEqual(None, _load_file_string_or_uri(None, 'test',
                                                    required=False))
    self.assertRaises(CLIError, _load_file_string_or_uri, None, 'test')
def test_locally_available_editable_package_is_not_archived_in_cache_dir(
        tmpdir):
    """
    piptools will not create an archive for a locally available editable
    requirement
    """
    cache_dir = tmpdir.mkdir('cache_dir')

    fake_package_dir = os.path.join(os.path.split(__file__)[0],
                                    'test_data', 'small_fake_package')
    fake_package_dir = 'file:' + pathname2url(fake_package_dir)

    with mock.patch('piptools.repositories.pypi.CACHE_DIR',
                    new=str(cache_dir)):
        runner = CliRunner()
        with runner.isolated_filesystem():
            with open('requirements.in', 'w') as req_in:
                req_in.write('-e ' + fake_package_dir)  # require editable fake package

            out = runner.invoke(cli, ['-n'])

            assert out.exit_code == 0
            assert fake_package_dir in out.output
            assert 'six==1.10.0' in out.output

    # we should not find any archived file in {cache_dir}/pkgs
    assert not os.listdir(os.path.join(str(cache_dir), 'pkgs'))
def get_pbm_wsdl_location(vc_version):
    """Return PBM WSDL file location corresponding to VC version.

    :param vc_version: a dot-separated version string. For example, "1.2".
    :return: the pbm wsdl file location.
    """
    if not vc_version:
        return
    ver = vc_version.split('.')
    major_minor = ver[0]
    if len(ver) >= 2:
        major_minor = '%s.%s' % (major_minor, ver[1])
    curr_dir = os.path.abspath(os.path.dirname(__file__))
    pbm_service_wsdl = os.path.join(curr_dir, 'wsdl', major_minor,
                                    'pbmService.wsdl')
    if not os.path.exists(pbm_service_wsdl):
        LOG.warning(_LW("PBM WSDL file %s not found."), pbm_service_wsdl)
        return
    pbm_wsdl = urlparse.urljoin('file:', urllib.pathname2url(pbm_service_wsdl))
    LOG.debug("Using PBM WSDL location: %s.", pbm_wsdl)
    return pbm_wsdl
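# Usage sketch (version strings illustrative): only the major.minor part of
# the version selects the wsdl subdirectory next to this module.
#
#     get_pbm_wsdl_location('5.5.1')  # looks up wsdl/5.5/pbmService.wsdl
#     get_pbm_wsdl_location('6.0')    # looks up wsdl/6.0/pbmService.wsdl
#     get_pbm_wsdl_location(None)     # -> None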
def test_update_with_file_uri(self):
    self.requests_mock.put(self.TEST_URL + URL_TEMPLATE_SCOPE,
                           json={'workflows': [WORKFLOW]})

    # The contents of wf_v2.yaml must be identical to WF_DEF
    path = pkg.resource_filename(
        'mistralclient',
        'tests/unit/resources/wf_v2.yaml'
    )

    # Convert the file path to file URI
    uri = parse.urljoin('file:', request.pathname2url(path))

    wfs = self.workflows.update(uri)

    self.assertIsNotNone(wfs)
    self.assertEqual(WF_DEF, wfs[0].definition)

    last_request = self.requests_mock.last_request
    self.assertEqual(WF_DEF, last_request.text)
    self.assertEqual('text/plain', last_request.headers['content-type'])
def test_update_with_file_uri(self):
    mock = self.mock_http_put(content={'workflows': [WORKFLOW]})

    # The contents of wf_v2.yaml must be identical to WF_DEF
    path = pkg.resource_filename(
        'mistralclient',
        'tests/unit/resources/wf_v2.yaml'
    )

    # Convert the file path to file URI
    uri = parse.urljoin('file:', request.pathname2url(path))

    wfs = self.workflows.update(uri)

    self.assertIsNotNone(wfs)
    self.assertEqual(WF_DEF, wfs[0].definition)

    mock.assert_called_once_with(
        URL_TEMPLATE_SCOPE,
        WF_DEF,
        headers={'content-type': 'text/plain'}
    )
def test_update_with_file_uri(self):
    mock = self.mock_http_put(content={'actions': [ACTION]})

    # The contents of action_v2.yaml must be identical to ACTION_DEF
    path = pkg.resource_filename(
        'mistralclient',
        'tests/unit/resources/action_v2.yaml'
    )

    # Convert the file path to file URI
    uri = parse.urljoin('file:', request.pathname2url(path))

    actions = self.actions.update(uri)

    self.assertIsNotNone(actions)
    self.assertEqual(ACTION_DEF, actions[0].definition)

    mock.assert_called_once_with(
        URL_TEMPLATE_SCOPE,
        ACTION_DEF,
        headers={'content-type': 'text/plain'}
    )
def test_create_with_file_uri(self):
    mock = self.mock_http_post(content=WORKBOOK)

    # The contents of wb_v2.yaml must be identical to WB_DEF
    path = pkg.resource_filename(
        'mistralclient',
        'tests/unit/resources/wb_v2.yaml'
    )

    # Convert the file path to file URI
    uri = parse.urljoin('file:', request.pathname2url(path))

    wb = self.workbooks.create(uri)

    self.assertIsNotNone(wb)
    self.assertEqual(WB_DEF, wb.definition)

    mock.assert_called_once_with(
        URL_TEMPLATE,
        WB_DEF,
        headers={'content-type': 'text/plain'}
    )
def test_update_with_file_uri(self):
    self.requests_mock.put(self.TEST_URL + URL_TEMPLATE,
                           json={'actions': [ACTION]})

    # The contents of action_v2.yaml must be identical to ACTION_DEF
    path = pkg.resource_filename(
        'mistralclient',
        'tests/unit/resources/action_v2.yaml'
    )

    # Convert the file path to file URI
    uri = parse.urljoin('file:', request.pathname2url(path))

    actions = self.actions.update(uri)

    self.assertIsNotNone(actions)
    self.assertEqual(ACTION_DEF, actions[0].definition)

    last_request = self.requests_mock.last_request
    self.assertEqual('scope=private', last_request.query)
    self.assertEqual('text/plain', last_request.headers['content-type'])
    self.assertEqual(ACTION_DEF, last_request.text)
def _build_swagger_20_schema_views(config):
    spec = config.registry.settings['pyramid_swagger.schema20']

    walker = NodeWalkerForRefFiles()
    all_files = walker.walk(spec)

    file_map = {}

    def view_for_swagger_schema(request):
        _, ext = os.path.splitext(request.path)
        ext = ext.lstrip('.')
        base_path = config.registry.settings\
            .get('pyramid_swagger.base_path_api_docs', '').rstrip('/')
        key_path = request.path_info[len(base_path):]
        actual_fname = file_map[key_path]
        with spec.resolver.resolving(actual_fname) as spec_dict:
            clean_response = strip_xscope(spec_dict)
            ref_walker = NodeWalkerForCleaningRefs()
            fixed_spec = ref_walker.walk(clean_response, ext)
            return fixed_spec

    for ref_fname in all_files:
        ref_fname_parts = os.path.splitext(pathname2url(ref_fname))
        for schema_format in ['yaml', 'json']:
            route_name = 'pyramid_swagger.swagger20.api_docs.{0}.{1}'\
                .format(ref_fname.replace('/', '.'), schema_format)
            path = '/{0}.{1}'.format(ref_fname_parts[0], schema_format)
            file_map[path] = ref_fname
            yield PyramidEndpoint(
                path=path,
                route_name=route_name,
                view=view_for_swagger_schema,
                renderer=schema_format,
            )
def get_contents_if_file(contents_or_file_name):
    """Get the contents of a file.

    If the value passed in is a file name or file URI, return the
    contents. If not, or there is an error reading the file contents,
    return the value passed in as the contents.

    For example, a workflow definition will be returned if either the
    workflow definition file name, or file URI are passed in, or the
    actual workflow definition itself is passed in.
    """
    try:
        if parse.urlparse(contents_or_file_name).scheme:
            definition_url = contents_or_file_name
        else:
            path = os.path.abspath(contents_or_file_name)
            definition_url = parse.urljoin(
                'file:',
                request.pathname2url(path)
            )
        return request.urlopen(definition_url).read().decode('utf8')
    except Exception:
        return contents_or_file_name
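# Minimal sketch of the fallback contract above (file name hypothetical):
# a readable path or URI yields the file body, anything else is returned
# untouched.
#
#     get_contents_if_file('workflows/wf.yaml')    # -> contents of wf.yaml
#     get_contents_if_file('version: "2.0"')       # -> 'version: "2.0"'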
def run_and_export(self, spider_cls, settings=None):
    """ Run spider with specified settings; return exported data. """
    tmpdir = tempfile.mkdtemp()
    res_path = os.path.join(tmpdir, 'res')
    res_uri = urljoin('file:', pathname2url(res_path))
    defaults = {
        'FEED_URI': res_uri,
        'FEED_FORMAT': 'csv',
    }
    defaults.update(settings or {})
    try:
        with MockServer() as s:
            runner = CrawlerRunner(Settings(defaults))
            spider_cls.start_urls = [s.url('/')]
            yield runner.crawl(spider_cls)

        with open(res_path, 'rb') as f:
            content = f.read()

    finally:
        shutil.rmtree(tmpdir, ignore_errors=True)

    defer.returnValue(content)