def apply_absolute_prefix(theme_doc, absolute_prefix):
    """Rewrite relative resource URLs in *theme_doc* against *absolute_prefix*.

    Touches ``src``, ``srcset`` and ``href`` attributes, ``@import`` URLs in
    inline ``<style>`` blocks, and conditional (IE) comments.  No-op when
    *absolute_prefix* is falsy.
    """
    if not absolute_prefix:
        return
    # urljoin() only treats the prefix as a directory when it ends with '/'.
    if not absolute_prefix.endswith('/'):
        absolute_prefix = absolute_prefix + '/'
    for node in theme_doc.xpath('//*[@src]'):
        url = urljoin(absolute_prefix, node.get('src'))
        node.set('src', url)
    for node in theme_doc.xpath('//*[@srcset]'):
        srcset = node.get('srcset')
        # Rewrite only the URL part of each srcset candidate; the
        # width/density descriptors are passed through unchanged.
        srcset = SRCSET.sub(
            lambda match: match.group('descriptors') + urljoin(
                absolute_prefix, match.group('url')), srcset)
        node.set('srcset', srcset)
    for node in theme_doc.xpath('//*[@href]'):
        # anchor_safe_urljoin presumably leaves pure '#fragment' links
        # untouched — that is why plain urljoin is not used here.
        url = anchor_safe_urljoin(absolute_prefix, node.get('href'))
        node.set('href', url)
    for node in theme_doc.xpath('//style'):
        if node.text is None:
            continue
        node.text = IMPORT_STYLESHEET.sub(
            lambda match: match.group('before') + urljoin(
                absolute_prefix, match.group('url')) + match.group('after'),
            node.text)
    # Conditional comments ("[if IE]...") can contain both stylesheet
    # imports and src attributes; rewrite both kinds.
    for node in theme_doc.xpath('//comment()[starts-with(., "[if")]'):
        node.text = IMPORT_STYLESHEET.sub(
            lambda match: match.group('before') + urljoin(
                absolute_prefix, match.group('url')) + match.group('after'),
            node.text)
        node.text = CONDITIONAL_SRC.sub(
            lambda match: match.group('before') + urljoin(
                absolute_prefix, match.group('url')) + match.group('after'),
            node.text)
def Initialize(self):
    """Initializes state in preparation for running end-to-end tests.

    Only needs to be called once.
    """
    # Optional Appveyor build-worker API endpoint taken from the environment.
    appveyor_root_url = os.environ.get(self.APPVEYOR_API_VARNAME, None)
    if appveyor_root_url:
        logging.info("Using Appveyor API at %s", appveyor_root_url)
        # See https://www.appveyor.com/docs/build-worker-api/
        self._appveyor_tests_endpoint = urlparse.urljoin(appveyor_root_url,
                                                         "api/tests")
        self._appveyor_messages_endpoint = urlparse.urljoin(
            appveyor_root_url, "api/build/messages")
    logging.info("Connecting to GRR API at %s", self._api_endpoint)
    password = self._api_password
    if not password:
        # Prompt interactively when no API password was configured.
        password = getpass.getpass(prompt="Please enter the API password for "
                                   "user '%s': " % self._api_user)
    self._grr_api = api.InitHttp(
        api_endpoint=self._api_endpoint, auth=(self._api_user, password))
    # Make sure binaries required by tests are uploaded to the datastore.
    if self._upload_test_binaries:
        binary_paths = self._GetUploadedBinaries()
        if self.LINUX_TEST_BINARY_PATH not in binary_paths:
            self._UploadBinary(self.LINUX_TEST_BINARY_NAME,
                               self.LINUX_TEST_BINARY_PATH)
        if self.WINDOWS_TEST_BINARY_PATH not in binary_paths:
            self._UploadBinary(self.WINDOWS_TEST_BINARY_NAME,
                               self.WINDOWS_TEST_BINARY_PATH)
def __init__(self, url, username, password, user):
    """Store connection settings and derive the ODK Briefcase endpoints."""
    self.url = url
    self.user = user
    self.auth = HTTPDigestAuth(username, password)
    # Aggregate API endpoints, all relative to the server base URL.
    self.form_list_url = urljoin(self.url, 'formList')
    self.submission_list_url = urljoin(self.url, 'view/submissionList')
    self.download_submission_url = urljoin(self.url,
                                           'view/downloadSubmission')
    # Local directory where fetched forms are stored for this user.
    self.forms_path = os.path.join(self.user.username, 'briefcase', 'forms')
    self.resumption_cursor = 0
    self.logger = logging.getLogger('console_logger')
def enketo_url(form_url, id_string, instance_xml=None, instance_id=None,
               return_url=None, **kwargs):
    """Return Enketo webform URL."""
    # Bail out unless the Enketo integration is fully configured.
    if (not hasattr(settings, 'ENKETO_URL') or
            not hasattr(settings, 'ENKETO_API_SURVEY_PATH') or
            not hasattr(settings, 'ENKETO_API_TOKEN') or
            settings.ENKETO_API_TOKEN == ''):
        return False
    url = urljoin(settings.ENKETO_URL, settings.ENKETO_API_SURVEY_PATH)
    values = {'form_id': id_string, 'server_url': form_url}
    if instance_id is not None and instance_xml is not None:
        # Editing an existing submission uses the instance endpoint instead.
        url = urljoin(settings.ENKETO_URL, settings.ENKETO_API_INSTANCE_PATH)
        values.update({
            'instance': instance_xml,
            'instance_id': instance_id,
            # convert to unicode string in python3 compatible way
            'return_url': u'%s' % return_url
        })
    if kwargs:
        # Kwargs need to take note of xform variable paths i.e.
        # kwargs = {'defaults[/widgets/text_widgets/my_string]': "Hey Mark"}
        values.update(kwargs)
    response = requests.post(
        url,
        data=values,
        auth=(settings.ENKETO_API_TOKEN, ''),
        verify=getattr(settings, 'VERIFY_SSL', True))
    resp_content = response.content
    # response.content is bytes under Python 3; normalise to text for json.
    resp_content = resp_content.decode('utf-8') if hasattr(
        resp_content, 'decode') else resp_content
    if response.status_code in [200, 201]:
        try:
            data = json.loads(resp_content)
        except ValueError:
            # Fall through to the error handler on malformed JSON.
            pass
        else:
            url = (data.get('edit_url') or data.get('offline_url') or
                   data.get('url'))
            if url:
                return url
    handle_enketo_error(response)
def expand_themes(rules_doc, parser=None, absolute_prefix=None,
                  read_network=False):
    """Expand <theme href='...'/> nodes with the theme html."""
    prefix = '' if absolute_prefix is None else absolute_prefix
    base = rules_doc.docinfo.URL
    html_parser = etree.HTMLParser() if parser is None else parser
    remote_schemes = ('ftp://', 'ftps://', 'http://', 'https://')
    for element in rules_doc.xpath('//diazo:theme[@href]',
                                   namespaces=namespaces):
        url = urljoin(base, element.get('href'))
        is_remote = url.startswith(remote_schemes)
        # Remote themes are only fetched when network access is allowed.
        if is_remote and not read_network:
            raise ValueError(
                "Supplied theme '{url}', but network access denied.".format(
                    url=url,
                ),
            )
        theme = urlopen(url) if is_remote else url
        theme_doc = etree.parse(theme, parser=html_parser, base_url=url)
        expand_theme(element, theme_doc, prefix)
    return rules_doc
def logout(self):
    """
    Logout from the backend

    :return: return True if logout is successfull, otherwise False
    :rtype: bool
    """
    # Nothing to do when we never logged in (or already logged out).
    if not self.token or not self.authenticated:
        logger.warning("Unnecessary logout ...")
        return True
    logger.debug("request backend logout")
    try:
        # The token doubles as the HTTP basic-auth username, empty password.
        response = requests.post(
            urljoin(self.url_endpoint_root, 'logout'),
            auth=HTTPBasicAuth(self.token, '')
        )
        response.raise_for_status()
    except Timeout as e:  # pragma: no cover - need specific backend tests
        logger.error("Backend connection timeout, error: %s", str(e))
        raise BackendException(1002, "Backend connection timeout")
    except HTTPError as e:  # pragma: no cover - need specific backend tests
        logger.error("Backend HTTP error, error: %s", str(e))
        raise BackendException(1003, "Backend HTTPError: %s / %s" % (type(e), str(e)))
    except Exception as e:  # pragma: no cover - security ...
        logger.error("Backend connection exception, error: %s / %s",
                     type(e), str(e))
        raise BackendException(1000, "Backend exception: %s / %s" % (type(e), str(e)))
    # Clear the cached session state once the server confirmed the logout.
    self.authenticated = False
    self.token = None
    return True
def update_project_members(self, name, users):
    """Replace the member list of project *name* with *users* via PUT."""
    endpoint = "/api/v1/projects/{}/members".format(name)
    # NOTE(review): list(users) fails for a single non-iterable user object;
    # this looks intended only for non-list iterables — confirm input types.
    if not isinstance(users, list):
        users = list(users)
    payload = [
        {'id': user.id, 'admin': user.get('admin', False)}
        for user in users
    ]
    target = urljoin(self.api_base_url, endpoint)
    return self._request(target, method='PUT', json=payload)
def safe_put_data(ranking, resource, data, operation):
    """Send some data to ranking using a PUT request.

    ranking (bytes): the URL of ranking server.
    resource (bytes): the relative path of the entity.
    data (dict): the data to JSON-encode and send.
    operation (unicode): a human-readable description of the operation
        we're performing (to produce log messages).

    raise (CannotSendError): in case of communication errors.
    """
    try:
        url = urljoin(ranking, resource)
        # XXX With requests-1.2 auth is automatically extracted from
        # the URL: there is no need for this.
        auth = urlsplit(url)
        res = requests.put(url, json.dumps(data),
                           auth=(auth.username, auth.password),
                           headers={'content-type': 'application/json'},
                           verify=config.https_certfile)
    except requests.exceptions.RequestException as error:
        msg = "%s while %s: %s." % (type(error).__name__, operation, error)
        logger.warning(msg)
        raise CannotSendError(msg)
    # Any 4xx/5xx status also counts as a failed send.
    if 400 <= res.status_code < 600:
        msg = "Status %s while %s." % (res.status_code, operation)
        logger.warning(msg)
        raise CannotSendError(msg)
def get(self, url):
    """Navigate to *url* (joined against the base URL) via a deferred load.

    Loads about:blank first, tags window.name with DEFER_LABEL so the
    client-side bootstrap is deferred, waits for the navigation to finish,
    then resumes the Angular bootstrap.
    """
    super(WebDriverMixin, self).get('about:blank')
    full_url = urljoin(str(self._base_url), str(url))
    # Prefix window.name so the target page defers its Angular bootstrap.
    self.execute_script(
        """
        window.name = "{}" + window.name;
        window.location.replace("{}");
        """.format(DEFER_LABEL, full_url)
    )
    # Block until we have actually left about:blank.
    wait = WebDriverWait(self, self._test_timeout)
    wait.until_not(self._location_equals, 'about:blank')
    if not self.ignore_synchronization:
        # _test_for_angular() returns (found, message).
        test_result = self._test_for_angular()
        angular_on_page = test_result[0]
        if not angular_on_page:
            message = test_result[1]
            raise AngularNotFoundException(
                'Angular could not be found on page: {}:'
                ' {}'.format(full_url, message)
            )
        # TODO: inject scripts here
        # return self.execute_script(
        #     'angular.resumeBootstrap(arguments[0]);'
        # )
        self.execute_script('angular.resumeBootstrap();')
def _find_feeds_worker(self, url, curr_depth=0, soup=None):
    """Recursively crawl *url* and return the set of feed URLs discovered."""
    try:
        soup, url = self._fetch_and_parse(url)
    except (requests.RequestException, exceptions.ForagerException):
        # Shallow failures abort the crawl; deeper ones are silently skipped.
        if curr_depth <= self._raise_to_depth:
            raise
    if not soup:
        return set()
    if Forager._soup_contains_feed(soup):
        return {url}
    if not Forager._soup_contains_html(soup):
        return set()
    feeds = set()
    for link in self._get_relevant_links(soup, curr_depth):
        candidate = self._fix_url(urljoin(url, link))
        feeds |= self._find_feeds_worker(candidate,
                                         curr_depth=curr_depth + 1)
    return feeds
def handle_node(path, size_handler, is_dir):
    """
    Generic helper function for handling a remote file system node
    """
    # NOTE: files_only, url_prefix, sftp, get_size, private_key,
    # private_key_pass and entries are closed over from the enclosing scope.
    if is_dir and files_only:
        return
    url = urljoin(url_prefix, sftp.normalize(path))
    title = remotepath.basename(path)
    entry = Entry(title, url)
    if get_size:
        try:
            size = size_handler(path)
        except Exception as e:
            log.error('Failed to get size for %s (%s)' % (path, e))
            # -1 signals "size unknown" to downstream consumers.
            size = -1
        entry['content_size'] = size
    # Propagate SFTP credentials so downstream plugins can fetch the node.
    if private_key:
        entry['private_key'] = private_key
    if private_key_pass:
        entry['private_key_pass'] = private_key_pass
    entries.append(entry)
def reconfigResource(self, new_config):
    """Refresh avatar settings from *new_config* and reset the cache."""
    self.avatarMethods = new_config.www.get('avatar_methods', [])
    self.defaultAvatarFullUrl = urljoin(new_config.buildbotURL,
                                        self.defaultAvatarUrl)
    self.cache = {}
    # A single AvatarBase instance is normalised to a one-element tuple so
    # that avatarMethods is always iterable.
    if isinstance(self.avatarMethods, AvatarBase):
        self.avatarMethods = (self.avatarMethods, )
def guess_icon(self):
    """Guesses an icon from itself.

    Returns the matched icon URL joined under the ``.silkicon`` endpoint,
    or the string 'ERROR' when no request/URL context is available.

    Fix: removed a trailing ``return get_icon_url()`` that was unreachable
    (both branches of the try/except above already return).
    """
    def get_icon_url():
        # Walk the icon map (autoindex's map first, when present) and
        # return the first icon whose rule matches this entry.
        try:
            if self.autoindex:
                icon_map = self.autoindex.icon_map + self.icon_map
            else:
                icon_map = self.icon_map
            for icon, rule in icon_map:
                # A falsy rule with a callable icon means the icon callable
                # itself decides (and supplies) the match.
                if not rule and callable(icon):
                    matched = icon = icon(self)
                else:
                    matched = rule(self)
                if matched:
                    return icon
        except AttributeError:
            pass
        try:
            return self.default_icon
        except AttributeError:
            raise GuessError('There is no matched icon.')
    try:
        return urljoin(url_for('.silkicon', filename=''), get_icon_url())
    except (AttributeError, RuntimeError):
        # No Flask request/app context available to build the URL.
        return 'ERROR'
def test_deleted_project_link_not_included(self, project_deleted,
                                           create_tmp_directory):
    """A deleted project's URL must not appear in the generated sitemap."""
    with mock.patch('website.settings.STATIC_FOLDER', create_tmp_directory):
        sitemap_urls = get_all_sitemap_urls()
        absent_url = urljoin(settings.DOMAIN, project_deleted.url)
        assert absent_url not in sitemap_urls
def base_url(self):
    """str: The base url of the REST API."""
    root = '{}://{}:{}'.format(self.server_protocol, self.host,
                               self.port or 80)
    joined = urljoin(root, self.endpoint_prefix)
    # Guarantee a trailing slash so later urljoin() calls keep the prefix.
    return joined if joined.endswith('/') else joined + '/'
def _new_dbx_auth_flow(session):
    # Build a Dropbox OAuth2 flow bound to this Flask session, redirecting
    # back to the route_finish endpoint under the configured public base URL.
    return dropbox.DropboxOAuth2Flow(
        _APP.config['DBX_APP_KEY'],
        _APP.config['DBX_APP_SECRET'],
        urljoin(_APP.config['BASE_URL'], flask.url_for('route_finish')),
        session,
        'dbx-auth-csrf-token',
    )
def test_unconfirmed_user_not_included(self, create_tmp_directory,
                                       user_unconfirmed):
    """An unconfirmed user's URL must not appear in the sitemap."""
    with mock.patch('website.settings.STATIC_FOLDER', create_tmp_directory):
        sitemap_urls = get_all_sitemap_urls()
        absent_url = urljoin(settings.DOMAIN, user_unconfirmed.url)
        assert absent_url not in sitemap_urls
def enketo_url(form_url, id_string, instance_xml=None, instance_id=None,
               return_url=None, **kwargs):
    """Return Enketo webform URL."""
    # Bail out unless the Enketo integration is fully configured.
    if (not hasattr(settings, 'ENKETO_URL') or
            not hasattr(settings, 'ENKETO_API_SURVEY_PATH') or
            not hasattr(settings, 'ENKETO_API_TOKEN') or
            settings.ENKETO_API_TOKEN == ''):
        return False
    url = urljoin(settings.ENKETO_URL, settings.ENKETO_API_SURVEY_PATH)
    values = {'form_id': id_string, 'server_url': form_url}
    if instance_id is not None and instance_xml is not None:
        # Editing an existing submission uses the instance endpoint instead.
        url = urljoin(settings.ENKETO_URL, settings.ENKETO_API_INSTANCE_PATH)
        values.update({
            'instance': instance_xml,
            'instance_id': instance_id,
            # convert to unicode string in python3 compatible way
            'return_url': u'%s' % return_url
        })
    if kwargs:
        # Kwargs need to take note of xform variable paths i.e.
        # kwargs = {'defaults[/widgets/text_widgets/my_string]': "Hey Mark"}
        values.update(kwargs)
    response = requests.post(url,
                             data=values,
                             auth=(settings.ENKETO_API_TOKEN, ''),
                             verify=getattr(settings, 'VERIFY_SSL', True))
    if response.status_code in [200, 201]:
        try:
            data = json.loads(response.content)
        except ValueError:
            # Fall through to the error handler on malformed JSON.
            pass
        else:
            url = (data.get('edit_url') or data.get('offline_url') or
                   data.get('url'))
            if url:
                return url
    handle_enketo_error(response)
def trigger_dag(dag_id, run_id, api_url, conf):
    # Payload for Airflow's experimental dag_runs endpoint.
    json_data = {
        "run_id": run_id,
        "conf": conf
    }
    # Sign the payload with the module-level RS256 private key so the DAG
    # can verify the request really originated from us.
    json_data["check_payload"] = jwt.encode(
        json_data, private_key, algorithm='RS256').decode("utf-8")
    # The leading '/' makes urljoin replace any path component of api_url.
    return requests.post(
        url=urljoin(api_url, f"""/api/experimental/dags/{dag_id}/dag_runs"""),
        json=json_data)
def test_collection_link_not_included(self, collection,
                                      create_tmp_directory):
    """A collection's URL must not appear in the generated sitemap."""
    with mock.patch('website.settings.STATIC_FOLDER', create_tmp_directory):
        sitemap_urls = get_all_sitemap_urls()
        absent_url = urljoin(settings.DOMAIN, collection.url)
        assert absent_url not in sitemap_urls
def search_user(self, name_or_phone):
    """Search users by phone number (all-digit query) or by name.

    :param name_or_phone: phone number or user name to search for
    :return: API response from the user-search endpoint

    Bug fix: the phone/name decision previously tested ``''.isdigit()``
    (always False), so every query was sent as a name search.
    """
    if name_or_phone.isdigit():
        params = {"phone": name_or_phone}
    else:
        params = {"name": name_or_phone}
    url = urljoin(self.api_base_url, "/api/v1/search/users")
    return self._request(url, params=params)
def get_csv_url(self, r):
    """Build the APEX CSV-export URL from the page state and request it."""
    p_instance = self.get_value_of('pInstance', r)
    p_flow_id = self.get_value_of('pFlowId', r)
    p_flow_step_id = self.get_value_of('pFlowStepId', r)
    # f?p=<flow>:<step>:<instance>:CSV:::: selects the CSV download view.
    query = "f?p={}:{}:{}:CSV::::".format(p_flow_id, p_flow_step_id,
                                          p_instance)
    yield Request(url=urljoin(URL_PREFIX, query), callback=self.parse)
def parse_js_msg_list(self):
    """Evaluate the page's JS message list and flatten it into article dicts.

    Every entry carries a fingerprint id, absolute article URL, title,
    abstract and the publication time of its containing message group.
    """
    msg_list = self.ctx.call('r_msg_list')
    app_msg_ext_info_list = [i['app_msg_ext_info'] for i in msg_list]
    # Publication time comes from the group-level comm_msg_info record.
    comm_msg_info_date_time_list = [
        time.strftime("%Y-%m-%d %H:%M:%S",
                      time.localtime(i['comm_msg_info']['datetime']))
        for i in msg_list
    ]
    # msg_id_list = [i['comm_msg_info']['id'] for i in msg_list]
    # Lead article of every message group.
    msg_data_list = [
        {
            # 'article_id': '%s_000' % msg_id_list[index],
            'article_id': get_finger(i['title']),
            'article_url': urljoin('https://mp.weixin.qq.com',
                                   un_escape(i['content_url'])),
            'article_title': i['title'],
            'article_abstract': i['digest'],
            'article_pub_time': comm_msg_info_date_time_list[index],
        }
        for index, i in enumerate(app_msg_ext_info_list)
    ]
    # Secondary articles attached to a group share its publication time.
    msg_ext_list = [
        i['multi_app_msg_item_list'] for i in app_msg_ext_info_list
    ]
    for index_j, j in enumerate(msg_ext_list):
        for index_i, i in enumerate(j):
            msg_data_list.append({
                # 'article_id': '%s_%03d' % (msg_id_list[index_j], index_i + 1),
                'article_id': get_finger(i['title']),
                'article_url': urljoin('https://mp.weixin.qq.com',
                                       un_escape(i['content_url'])),
                'article_title': i['title'],
                'article_abstract': i['digest'],
                'article_pub_time': comm_msg_info_date_time_list[index_j],
            })
    return msg_data_list
def get_token(self):
    """Authenticate against the API and cache the token and its expiry."""
    url = urljoin(self.api_base_url, "/api/v1/auth/token")
    credentials = {
        "username": self.username,
        "password": self.password,
    }
    data = self._request(url, method='POST', json=credentials)
    self._token = data["token"]
    self._token_expired = parse_date(data["expired"])
def append_url(base_url, path):
    """
    Append path to base_url in a sensible way.
    """
    # Normalise both sides so urljoin appends instead of replacing: the base
    # must end with '/', and the path must not start with one.
    base = base_url if base_url.endswith("/") else base_url + "/"
    relative = path[1:] if path[0] == "/" else path
    return urljoin(base, relative)
def get_field(self, pid, field=None):
    """Return metadata on analysis.

    :param pid: analysis PID
    :param field: optional dotted path into the metadata, e.g.
        ``obj.nested_arr.0`` (numeric components index into lists)
    :return: the whole metadata dict, or the value at *field*

    Bug fix: components were previously looked up as ``dct[x or int(x)]``;
    since any non-empty string is truthy, numeric components were never
    converted to int, so list indices like ``.0`` raised TypeError.
    """
    dct = self._make_request(url=urljoin('deposits/', pid),
                             headers={'Accept': 'application/basic+json'
                                      })['metadata']
    fields = field.split('.') if field else []
    for x in fields:
        dct = dct[int(x) if x.isdigit() else x]
    return dct
def test_embargoed_registration_link_not_included(self,
                                                  registration_embargoed,
                                                  create_tmp_directory):
    """An embargoed registration's URL must not appear in the sitemap."""
    with mock.patch('website.settings.STATIC_FOLDER', create_tmp_directory):
        sitemap_urls = get_all_sitemap_urls()
        absent_url = urljoin(settings.DOMAIN, registration_embargoed.url)
        assert absent_url not in sitemap_urls
def commit_draft(draft_uuid):
    """
    Commit the draft, saving the files to the Blockstore bundle.
    """
    url = urljoin(settings.BLOCKSTORE_API_URL,
                  'drafts/{}/commit'.format(draft_uuid))
    log.debug("POST %s", url)
    # Surface API failures immediately instead of returning a bad response.
    requests.post(url).raise_for_status()
def api_request(self, path, data, headers=None):
    """POST *data* to *path* under the session base URL; return the JSON.

    :param path: path joined against ``self.base_url``
    :param data: request body forwarded to the HTTP client
    :param headers: optional extra HTTP headers
    :raises requests.HTTPError: on a 4xx/5xx response

    Fix: ``headers`` previously used a mutable ``{}`` default (shared
    between calls); ``None`` with a local fallback is equivalent and safe.
    """
    url = urljoin(self.base_url, path)
    # NOTE(review): verify=False disables TLS certificate verification —
    # confirm this is intentional for this API.
    r = self.s.post(url, data=data, headers=headers or {}, timeout=10,
                    verify=False)
    r.raise_for_status()
    return r.json()
def handle_entry(self, entry, sftp, config, url_prefix):
    """Upload *entry*'s local file over SFTP according to *config*.

    Renders the destination path, creates remote directories as needed,
    uploads the file and optionally deletes the local original.  Failures
    mark the entry as failed rather than raising.

    Fixes: ``entry.fail`` was referenced without being called in several
    error branches, and two log calls had mismatched/missing format
    arguments (raising TypeError when the branch was hit).
    """
    location = entry['location']
    filename = localpath.basename(location)
    to = config['to']
    if to:
        try:
            to = render_from_entry(to, entry)
        except RenderError as e:
            log.error('Could not render path: %s', to)
            entry.fail(e)
            return
    destination = remotepath.join(to, filename)
    destination_url = urljoin(url_prefix, destination)
    if not os.path.exists(location):
        log.warning('File no longer exists: %s', location)
        return
    if not sftp.lexists(to):
        try:
            sftp.makedirs(to)
        except Exception as e:
            log.error('Failed to create remote directory %s (%s)' % (to, e))
            entry.fail(e)
            return
    if not sftp.isdir(to):
        log.error('Not a directory: %s' % to)
        # was: bare `entry.fail` (never called)
        entry.fail('Not a directory: %s' % to)
        return
    try:
        sftp.put(localpath=location, remotepath=destination)
        log.verbose('Successfully uploaded %s to %s' %
                    (location, destination_url))
    except OSError:
        log.warning('File no longer exists: %s', location)
        return
    except IOError as e:
        # was: '%s (%s)' % to (single value for two placeholders) and a
        # bare `entry.fail` reference
        log.error('Remote directory does not exist: %s (%s)' % (to, e))
        entry.fail(e)
        return
    except Exception as e:
        log.error('Failed to upload %s (%s)' % (location, e))
        entry.fail(e)
        return
    if config['delete_origin']:
        try:
            os.remove(location)
        except Exception as e:
            # was: no format arguments supplied at all
            log.error('Failed to delete file %s (%s)' % (location, e))
def set(self, pid, value, field=None):
    """Update analysis metadata.

    :param pid: analysis PID
    :type pid: str
    :param value: value to set
    :type value: JSON serializable object
    :param field: set specific field, eg. obj.nested_arr.0
    :type field: str, optional
    :return: updated analysis metadata
    :rtype: dict
    """
    if field:
        # use JSON patch to patch fields
        res = self._make_request(url=urljoin('deposits/', pid),
                                 method='patch',
                                 headers={
                                     'Content-Type': 'application/json-patch+json',
                                     'Accept': 'application/basic+json'
                                 },
                                 data=json.dumps([{
                                     "op": "replace",
                                     # dotted path -> JSON-Pointer path
                                     "path": '/' + field.replace('.', '/'),
                                     "value": value,
                                 }]))
    else:
        # use PUT request to update the whole object
        if not isinstance(value, dict):
            raise UsageError('Not a JSON object.')
        res = self._make_request(
            url=urljoin('deposits/', pid),
            method='put',
            headers={
                'Content-Type': 'application/json',
                'Accept': 'application/basic+json'
            },
            data=json.dumps(value),
        )
    return res
def create_pool(self, name, slots, description):
    """Create an Airflow pool; return its (name, slots, description)."""
    url = urljoin(self._api_base_url, '/api/experimental/pools')
    payload = {
        'name': name,
        'slots': slots,
        'description': description,
    }
    pool = self._request(url, method='POST', json=payload)
    return pool['pool'], pool['slots'], pool['description']
def process_html(self, current_url, response):
    """Extract links from an HTML response and queue them for crawling.

    Script/style URLs go to the back of the queue (with a speculative
    ``.map`` probe for Webpack sourcemaps); page links go to the front.
    """
    if response.body in (None, b'', ''):
        return
    content_type = response.headers.get('Content-Type', '')
    if 'text/html' in content_type.lower():
        soup = BeautifulSoup(response.body, 'html5lib')
        script_list = [t.get('src') for t in soup.find_all('script')]
        style_list = [t.get('href') for t in soup.find_all('link')]
        a_list = [t.get('href') for t in soup.find_all('a')]
        script_style_list = script_list + style_list
        # Drop tags that lacked the attribute and de-duplicate.
        script_style_list = set(
            [t for t in script_style_list if t is not None])
        a_list = set([t for t in a_list if t is not None])
        new_a_list = []
        for t in a_list:
            # Pure fragment links never leave the page.
            if t.startswith('#'):
                continue
            # Apache directory-listing sort links.
            if t in ('?C=N;O=A', '?C=N;O=D', '?C=M;O=A', '?C=M;O=D',
                     '?C=D;O=A', '?C=D;O=D', '?C=S;O=A', '?C=S;O=D'):
                continue
            url = urljoin(current_url, t)
            url_parsed = urlparse(url)
            if url_parsed.scheme in ('http', 'https'):
                # Strip params and fragment; keep the query string.
                url = urlunparse(
                    (url_parsed.scheme, url_parsed.netloc, url_parsed.path,
                     '', url_parsed.query, ''))
                new_a_list.append(url)
        a_list = set(new_a_list)
        for t in script_style_list:
            url = urljoin(current_url, t)
            self.task_queue.append(url)
            # Probe for Webpack sourcemap files.
            if (t.endswith('.js') or t.endswith('.css')) and not t.endswith('.map'):
                self.task_queue.appendleft(url + '.map')
        for url in a_list:
            self.task_queue.appendleft(url)
def trigger_dag(self, dag_id, run_id=None, conf=None, execution_date=None):
    """Trigger a DAG run via the experimental API; return its message."""
    url = urljoin(self._api_base_url,
                  '/api/experimental/dags/{}/dag_runs'.format(dag_id))
    body = {
        "run_id": run_id,
        "conf": conf,
        "execution_date": execution_date,
    }
    return self._request(url, method='POST', json=body)['message']
def _get_bucket_link(self, pid): """Make request to server to fetch link to analysis bucket. :param pid: analysis PID :type pid: str :return: url to analysis bucket :rtype: str """ ana = self._make_request(urljoin('deposits/', pid)) return ana['links']['bucket']
def login(self, username, password=None):
    """Log in to the Web Services

    If username and password are provided, use the WS login function
    to authenticate the user

    Else, if password is not provided, store the token that will be used
    in the HTTP authentication
    """
    logger.info("login, connection requested, login: %s", username)
    # Backend without authentication: the supplied username IS the token.
    if not self.authenticated:
        self.token = username
        self.connected = True
        logger.info(
            "Alignak WS, no authentication configured, token: %s", self.token)
        return self.connected
    if not password:
        # Set authentication token (no login request).
        self.token = username
        self.connected = True
        logger.info("Alignak WS, no password provided, token: %s", self.token)
        return self.connected
    self.connected = False
    try:
        # WS login
        logger.info(
            "Requesting Web Service authentication, username: %s", username)
        headers = {'Content-Type': 'application/json'}
        params = {'username': username, 'password': password}
        response = requests.post(urljoin(self.alignak_endpoint, 'login'),
                                 json=params, headers=headers)
        resp = response.json()
        if '_result' in resp:
            # The returned token is used for subsequent authenticated calls.
            self.token = resp['_result'][0]
    except RequestsConnectionError as exp:
        message = "configured Web service connection failed with " \
                  "provided login information: %s (%s)" % (self.alignak_endpoint, username)
        logger.warning(message)
        logger.debug("Exception: %s", str(exp))
        return False
    except Exception as exp:  # pragma: no cover, should not happen
        logger.exception("WS user login exception: %s", exp)
        return False
    # NOTE(review): self.connected is still False here even after a
    # successful login response — confirm this is intended.
    logger.info("login result: %s", self.connected)
    return self.connected
def __make_api_request(self, http_method, path, data=None,
                       response_callback=lambda x: x, writer=False,
                       interrupted_event=None, trace=False, json=None,
                       maintenance_timeouts=None, maintenance_msg=None):
    """Send one API request, retrying on network errors and maintenance.

    Retries sleep according to the timeout generators; once a generator is
    exhausted the corresponding error is raised.  *interrupted_event* (if
    given) stops the retry loop as soon as it is set.
    """
    url = urljoin(self.base_url, path)
    # Stream of unique, ordered request ids derived from one UUID (tracing).
    ids = id_gen(str(uuid.uuid4()))
    if json:
        request = requests.Request(
            http_method, url, json=json,
            headers={'User-Agent': self.user_agent}, params=self.params)
    else:
        request = requests.Request(
            http_method, url, data=data,
            headers={'User-Agent': self.user_agent}, params=self.params)
    network_timeouts = self.network_timeouts()
    maintenance_timeouts = maintenance_timeouts or self.maintenance_timeouts()
    maintenance_msg = maintenance_msg or "%s is under maintenance" % (
        self._base_url)
    while interrupted_event is None or not interrupted_event.is_set():
        try:
            # NOTE: ids.next() is Python-2 style iterator advancement.
            response = self.__send_single_request(request, ids.next(),
                                                  trace=trace)
            return response_callback(response)
        except (Timeout, ConnectionError, ProtocolError):
            logger.warn(traceback.format_exc())
            try:
                timeout = next(network_timeouts)
                logger.warn("Network error, will retry in %ss..." % timeout)
                time.sleep(timeout)
                continue
            except StopIteration:
                # Retry budget for network errors exhausted.
                raise self.NetworkError()
        except self.UnderMaintenance as e:
            try:
                timeout = next(maintenance_timeouts)
                logger.warn(maintenance_msg)
                logger.warn("Retrying in %ss..." % timeout)
                time.sleep(timeout)
                continue
            except StopIteration:
                # Retry budget for maintenance windows exhausted.
                raise e
def delete(self, endpoint, headers):
    """
    Method to delete an item or all items

    headers['If-Match'] must contain the _etag identifier of the
    element to delete

    :param endpoint: endpoint (API URL)
    :type endpoint: str
    :param headers: headers (example: Content-Type)
    :type headers: dict
    :return: response (deletion information)
    :rtype: dict
    """
    if not self.token:
        logger.error("Authentication is required for deleting an object.")
        raise BackendException(1001,
                               "Access denied, please login before trying to delete")
    logger.debug("delete, endpoint: %s",
                 urljoin(self.url_endpoint_root, endpoint))
    logger.debug("delete, headers: %s", headers)
    try:
        # The token doubles as the HTTP basic-auth username, empty password.
        response = requests.delete(
            urljoin(self.url_endpoint_root, endpoint),
            headers=headers,
            auth=HTTPBasicAuth(self.token, '')
        )
        logger.debug("delete, response: %s", response)
        # 204 No Content is the only expected success status.
        if response.status_code != 204:  # pragma: no cover - should not happen ...
            response.raise_for_status()
        response = {"_status": "OK"}
        return response
    except Timeout as e:  # pragma: no cover - need specific backend tests
        logger.error("Backend connection timeout, error: %s", str(e))
        raise BackendException(1002, "Backend connection timeout")
    except HTTPError as e:  # pragma: no cover - need specific backend tests
        logger.error("Backend HTTP error, error: %s", str(e))
        raise BackendException(1003, "Backend HTTPError: %s / %s" % (type(e), str(e)))
    except Exception as e:  # pragma: no cover - security ...
        logger.error("Backend connection exception, error: %s / %s",
                     type(e), str(e))
        raise BackendException(1000, "Backend exception: %s / %s" % (type(e), str(e)))
    return {}
def get(self, url):
    """Navigate to *url* relative to the base URL and resume Angular."""
    target = urljoin(str(self._base_url), str(url))
    super(WebDriverMixin, self).get(target)
    if self.ignore_synchronization:
        return
    test_result = self._test_for_angular()
    if 'message' in test_result:
        raise AngularNotFoundException(
            "Angular could not be found on page: {}:"
            " {}".format(target, test_result['message']))
    elif test_result['ver'] == 1:
        # AngularJS 1.x defers bootstrap until we resume it explicitly.
        self.execute_script("angular.resumeBootstrap();")
def delete(self, pid):
    """Delete draft analysis.

    :param pid: analysis PID
    :type pid: str
    """
    # A successful deletion answers 204 No Content.
    self._make_request(url=urljoin('deposits/', pid),
                       method='delete',
                       expected_status_code=204)
def expand_theme(element, theme_doc, absolute_prefix):
    """Insert the prepared theme document into *element*."""
    prefix = urljoin(absolute_prefix, element.get('prefix', ''))
    apply_absolute_prefix(theme_doc, prefix)
    escape_curly_brackets(theme_doc)
    root = theme_doc.getroot()
    # Nodes outside the root (comments, PIs) must keep document order:
    # everything before the root first, then the root, then the rest.
    before = list(root.itersiblings(preceding=True))[::-1]
    after = list(root.itersiblings(preceding=False))
    element.extend(before)
    element.append(root)
    element.extend(after)
def extract_url(self, r, xpq):
    """Extract exactly one URL via XPath *xpq*; absolutise relative ones."""
    matches = r.xpath(xpq).extract()
    if not matches or len(matches) != 1:
        # A changed page layout is fatal: stop the whole spider.
        msg = 'URL not found. Page structure of "{}" probably changed!'.format(
            r.url)
        self.logger.critical(msg)
        raise scrapy.exceptions.CloseSpider(msg)
    url = matches[0].strip()
    return url if url[0:4] == 'http' else urljoin(URL_PREFIX, url)
def handle_entry(self, entry, sftp, config, url_prefix):
    """Upload *entry*'s local file over SFTP according to *config*.

    Renders the destination path, creates remote directories as needed,
    uploads the file and optionally deletes the local original.  Failures
    mark the entry as failed rather than raising.

    Fixes: ``entry.fail`` was referenced without being called in the
    IOError/Exception branches, and two log calls had mismatched/missing
    format arguments (raising TypeError when the branch was hit).
    """
    location = entry['location']
    filename = localpath.basename(location)
    to = config['to']
    if to:
        try:
            to = render_from_entry(to, entry)
        except RenderError as e:
            log.error('Could not render path: %s', to)
            entry.fail(e)
            return
    destination = remotepath.join(to, filename)
    destination_url = urljoin(url_prefix, destination)
    if not os.path.exists(location):
        log.warning('File no longer exists: %s', location)
        return
    if not sftp.lexists(to):
        try:
            sftp.makedirs(to)
        except Exception as e:
            log.error('Failed to create remote directory %s (%s)' % (to, e))
            entry.fail(e)
            return
    if not sftp.isdir(to):
        log.error('Not a directory: %s' % to)
        entry.fail('Not a directory: %s' % to)
        return
    try:
        sftp.put(localpath=location, remotepath=destination)
        log.verbose('Successfully uploaded %s to %s' %
                    (location, destination_url))
    except OSError:
        log.warning('File no longer exists: %s', location)
        return
    except IOError as e:
        # was: '%s (%s)' % to (single value for two placeholders) and a
        # bare `entry.fail` reference
        log.error('Remote directory does not exist: %s (%s)' % (to, e))
        entry.fail(e)
        return
    except Exception as e:
        log.error('Failed to upload %s (%s)' % (location, e))
        entry.fail(e)
        return
    if config['delete_origin']:
        try:
            os.remove(location)
        except Exception as e:
            # was: no format arguments supplied at all
            log.error('Failed to delete file %s (%s)' % (location, e))
def add_file_to_draft(draft_uuid, path, data):
    """
    Add the specified file data to the draft
    """
    url = urljoin(settings.BLOCKSTORE_API_URL,
                  'drafts/{}'.format(draft_uuid))
    log.debug("PATCH %s", url)
    payload = {'files': {path: encode_str_for_draft(data)}}
    response = requests.patch(url, json=payload)
    response.raise_for_status()
def create(self):
    """Create a new load-testing job via the API and remember its number."""
    self._number, self._token = self.api_client.new_job(
        task=self.task,
        person=self.person,
        tank=self.tank,
        loadscheme=self.load_scheme,
        target_host=self.target_host,
        target_port=self.target_port,
        detailed_time=self.detailed_time,
        notify_list=self.notify_list,
        trace=self.log_other_requests)
    logger.info('Job created: {}'.format(self._number))
    # Human-readable link to the job page.
    self.web_link = urljoin(self.api_client.base_url, str(self._number))
def __make_api_request(
        self, http_method, path, data=None, response_callback=lambda x: x,
        writer=False, interrupted_event=None, trace=False, json=None,
        maintenance_timeouts=None, maintenance_msg=None):
    """Send one API request, retrying on network errors and maintenance.

    Retries sleep according to the timeout generators; once a generator is
    exhausted the corresponding error is raised.  Both *interrupted_event*
    and the core's interrupt flag stop the retry loop early.
    """
    url = urljoin(self.base_url, path)
    # Stream of unique, ordered request ids derived from one UUID (tracing).
    ids = id_gen(str(uuid.uuid4()))
    if json:
        request = requests.Request(
            http_method, url, json=json,
            headers={'User-Agent': self.user_agent}, params=self.params)
    else:
        request = requests.Request(
            http_method, url, data=data,
            headers={'User-Agent': self.user_agent}, params=self.params)
    network_timeouts = self.network_timeouts()
    maintenance_timeouts = maintenance_timeouts or self.maintenance_timeouts()
    maintenance_msg = maintenance_msg or "%s is under maintenance" % (self._base_url)
    while interrupted_event is None or not interrupted_event.is_set():
        try:
            # NOTE: ids.next() is Python-2 style iterator advancement.
            response = self.__send_single_request(request, ids.next(),
                                                  trace=trace)
            return response_callback(response)
        except (Timeout, ConnectionError, ProtocolError):
            logger.warn(traceback.format_exc())
            # Do not keep retrying when the whole core was interrupted.
            if not self.core_interrupted.is_set():
                try:
                    timeout = next(network_timeouts)
                    logger.warn(
                        "Network error, will retry in %ss..." % timeout)
                    time.sleep(timeout)
                    continue
                except StopIteration:
                    raise self.NetworkError()
            else:
                break
        except self.UnderMaintenance as e:
            if not self.core_interrupted.is_set():
                try:
                    timeout = next(maintenance_timeouts)
                    logger.warn(maintenance_msg)
                    logger.warn("Retrying in %ss..." % timeout)
                    time.sleep(timeout)
                    continue
                except StopIteration:
                    raise e
            else:
                break
def _request(self, url, method, params): methods = { "GET": self._get, "POST": self._post, "PUT": self._put, "DELETE": self._delete, "HEAD": self._head } request_method = methods[method.upper()] url = urljoin(self.connection.endpoint, url) return request_method(url, params=params)
def request(self, method, url, data=None, headers=None, **kwargs):
    """Issue *method* on *url* (relative to the host) with our user agent."""
    target = urljoin(self.host, url)
    if not headers:
        headers = {}
    headers['user-agent'] = self.__user_agent()
    rsp = super(MendeleySession, self).request(method, target, data,
                                               headers, **kwargs)
    if not rsp.ok:
        raise MendeleyApiException(rsp)
    return rsp
def login(self, username, password=None):
    """Log in to the Web Services

    If username and password are provided, use the WS login function
    to authenticate the user

    Else, if password is not provided, store the token that will be used
    in the HTTP authentication
    """
    logger.info("login, connection requested, login: %s", username)
    # Backend without authentication: the supplied username IS the token.
    if not self.authenticated:
        self.token = username
        self.connected = True
        logger.info("Alignak WS, no authentication configured, token: %s",
                    self.token)
        return self.connected
    if not password:
        # Set authentication token (no login request).
        self.token = username
        self.connected = True
        logger.info("Alignak WS, no password provided, token: %s", self.token)
        return self.connected
    self.connected = False
    try:
        # WS login
        logger.info("Requesting Web Service authentication, username: %s",
                    username)
        headers = {'Content-Type': 'application/json'}
        params = {'username': username, 'password': password}
        response = requests.post(urljoin(self.alignak_endpoint, 'login'),
                                 json=params, headers=headers)
        resp = response.json()
        if '_result' in resp:
            # The returned token is used for subsequent authenticated calls.
            self.token = resp['_result'][0]
    except RequestsConnectionError as exp:
        message = "configured Web service connection failed with " \
                  "provided login information: %s (%s)" % (self.alignak_endpoint, username)
        logger.warning(message)
        logger.debug("Exception: %s", str(exp))
        return False
    except Exception as exp:  # pragma: no cover, should not happen
        logger.exception("WS user login exception: %s", exp)
        return False
    # NOTE(review): self.connected is still False here even after a
    # successful login response — confirm this is intended.
    logger.info("login result: %s", self.connected)
    return self.connected
def trigger_dag(self, dag_id, run_id=None, conf=None):
    """Trigger a DAG run through Airflow's experimental REST API.

    :param dag_id: DAG to trigger
    :param run_id: optional explicit run id
    :param conf: optional JSON-serialisable run configuration
    :return: the API's status message
    :raises IOError: when the API answers with a non-2xx status

    Improvement: the previous bare ``raise IOError()`` carried no context;
    the status code and response body are now included.
    """
    url = urljoin(self._api_base_url,
                  '/api/experimental/dags/{}/dag_runs'.format(dag_id))
    resp = requests.post(url, auth=self._auth, json={
        "run_id": run_id,
        "conf": conf,
    })
    if not resp.ok:
        raise IOError('Failed to trigger DAG {}: HTTP {} - {}'.format(
            dag_id, resp.status_code, resp.text))
    return resp.json()['message']
def single_request(self, host, handler, request_body, verbose=0):
    """Send one XML-RPC request, falling back between Digest and Basic auth.

    A 401 response means the currently-selected auth scheme is wrong: flip
    the scheme, retry once, then parse the (hopefully successful) response.
    """
    url = urljoin('{0}://{1}'.format(self.__scheme, host), handler)
    auth = self.get_auth()
    response = self.send_request(url, auth, request_body)
    # if status code is 401, it means we used the wrong auth method
    if response.status_code == 401:
        log.warning('%s auth failed. Retrying with %s. Please change your config.',
                    'Digest' if self.__digest_auth else 'Basic',
                    'Basic' if self.__digest_auth else 'Digest')
        # Persist the flipped scheme so later requests use it directly.
        self.__digest_auth = not self.__digest_auth
        auth = self.get_auth()
        response = self.send_request(url, auth, request_body)
    response.raise_for_status()
    return self.parse_response(response)
def request(self, method, url, data=None, headers=None, **kwargs):
    """Issue an authenticated request, transparently refreshing the token.

    On TokenExpiredError the request is retried once after refreshing;
    without a refresher the expiry propagates.  Non-2xx responses raise
    MendeleyApiException.
    """
    full_url = urljoin(self.host, url)
    if not headers:
        headers = {}
    headers['user-agent'] = self.__user_agent()
    try:
        rsp = self.__do_request(data, full_url, headers, kwargs, method)
    except TokenExpiredError:
        if self.refresher:
            # Refresh the OAuth token and retry the request exactly once.
            self.refresher.refresh(self)
            rsp = self.__do_request(data, full_url, headers, kwargs, method)
        else:
            raise
    if rsp.ok:
        return rsp
    else:
        raise MendeleyApiException(rsp)
def start_test(self):
    """Start the reporting workers for the running job and publish its link."""
    # Make sure worker threads are joined even if startup fails later.
    self.add_cleanup(self.join_threads)
    self.status_sender.start()
    self.upload.start()
    self.monitoring.start()
    if self.core.error_log:
        self.events.start()
    self.web_link = urljoin(self.lp_job.api_client.base_url,
                            str(self.lp_job.number))
    logger.info("Web link: %s", self.web_link)
    # Expose the job number and link to other plugins.
    self.publish("jobno", self.lp_job.number)
    self.publish("web_link", self.web_link)
    jobno_file = self.get_option("jobno_file", '')
    if jobno_file:
        logger.debug("Saving jobno to: %s", jobno_file)
        with open(jobno_file, 'w') as fdes:
            fdes.write(str(self.lp_job.number))
        self.core.add_artifact_file(jobno_file)
    self.__save_conf()