def check_generate_pages_html(self, current_page, total_pages,
                              window=7, check_middle=False):
    extra_links = 4  # first, prev, next, last
    search = "'>\"/><img src=x onerror=alert(1)>"
    html_str = utils.generate_pages(current_page, total_pages,
                                    search=search)

    self.assertNotIn(search, html_str,
                     "The raw search string shouldn't appear in the output")
    self.assertIn('search=%27%3E%22%2F%3E%3Cimg+src%3Dx+onerror%3Dalert%281%29%3E',
                  html_str)

    self.assertTrue(
        callable(html_str.__html__),
        "Should return something that is HTML-escaping aware"
    )

    dom = BeautifulSoup(html_str, 'html.parser')
    self.assertIsNotNone(dom)

    ulist = dom.ul
    ulist_items = ulist.find_all('li')
    self.assertEqual(min(window, total_pages) + extra_links, len(ulist_items))

    page_items = ulist_items[2:-2]
    mid = int(len(page_items) / 2)
    for i, item in enumerate(page_items):
        a_node = item.a
        href_link = a_node['href']
        node_text = a_node.string
        if node_text == str(current_page + 1):
            if check_middle:
                self.assertEqual(mid, i)
            self.assertEqual('javascript:void(0)', href_link)
            self.assertIn('active', item['class'])
        else:
            self.assertRegex(href_link, r'^\?', 'Link is page-relative')
            query = parse_qs(href_link[1:])
            self.assertListEqual(query['page'], [str(int(node_text) - 1)])
            self.assertListEqual(query['search'], [search])
def check_generate_pages_html(self, current_page, total_pages,
                              window=7, check_middle=False):
    extra_links = 4  # first, prev, next, last
    search = "'>\"/><img src=x onerror=alert(1)>"
    html_str = utils.generate_pages(current_page, total_pages,
                                    search=search)

    assert search not in html_str, \
        "The raw search string shouldn't appear in the output"
    assert 'search=%27%3E%22%2F%3E%3Cimg+src%3Dx+onerror%3Dalert%281%29%3E' in html_str

    assert callable(html_str.__html__), \
        "Should return something that is HTML-escaping aware"

    dom = BeautifulSoup(html_str, 'html.parser')
    assert dom is not None

    ulist = dom.ul
    ulist_items = ulist.find_all('li')
    assert min(window, total_pages) + extra_links == len(ulist_items)

    page_items = ulist_items[2:-2]
    mid = int(len(page_items) / 2)
    for i, item in enumerate(page_items):
        a_node = item.a
        href_link = a_node['href']
        node_text = a_node.string
        if node_text == str(current_page + 1):
            if check_middle:
                assert mid == i
            assert 'javascript:void(0)' == href_link
            assert 'active' in item['class']
        else:
            assert re.search(r'^\?', href_link), 'Link is page-relative'
            query = parse_qs(href_link[1:])
            assert query['page'] == [str(int(node_text) - 1)]
            assert query['search'] == [search]
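# A minimal sketch (not taken from the source) of how this helper might be
# driven from individual test cases inside the same test class; the test
# names and page counts below are hypothetical.
def test_generate_pages_first_page(self):
    self.check_generate_pages_html(current_page=0, total_pages=10)

def test_generate_pages_middle_page(self):
    self.check_generate_pages_html(current_page=5, total_pages=10,
                                   check_middle=True)

def test_generate_pages_last_page(self):
    self.check_generate_pages_html(current_page=9, total_pages=10)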
def check_generate_pages_html(self, current_page, total_pages,
                              window=7, check_middle=False):
    extra_links = 4  # first, prev, next, last
    html_str = utils.generate_pages(current_page, total_pages)

    # dom parser has issues with special « and »
    html_str = html_str.replace('«', '')
    html_str = html_str.replace('»', '')

    dom = minidom.parseString(html_str)
    self.assertIsNotNone(dom)

    ulist = dom.getElementsByTagName('ul')[0]
    ulist_items = ulist.getElementsByTagName('li')
    self.assertEqual(min(window, total_pages) + extra_links, len(ulist_items))

    def get_text(nodelist):
        rc = []
        for node in nodelist:
            if node.nodeType == node.TEXT_NODE:
                rc.append(node.data)
        return ''.join(rc)

    page_items = ulist_items[2:-2]
    mid = int(len(page_items) / 2)
    for i, item in enumerate(page_items):
        a_node = item.getElementsByTagName('a')[0]
        href_link = a_node.getAttribute('href')
        node_text = get_text(a_node.childNodes)
        if node_text == str(current_page + 1):
            if check_middle:
                self.assertEqual(mid, i)
            self.assertEqual('javascript:void(0)',
                             a_node.getAttribute('href'))
            self.assertIn('active', item.getAttribute('class'))
        else:
            link_str = '?page=' + str(int(node_text) - 1)
            self.assertEqual(link_str, href_link)
def index(self, session=None):
    DM = models.DagModel

    # restrict the dags shown if filter_by_owner and current user is not superuser
    do_filter = FILTER_BY_OWNER
    owner_mode = conf.get('webserver', 'OWNER_MODE').strip().lower()

    hide_paused_dags_by_default = conf.getboolean(
        'webserver', 'hide_paused_dags_by_default')
    show_paused_arg = request.args.get('showPaused', 'None')

    def get_int_arg(value, default=0):
        try:
            return int(value)
        except ValueError:
            return default

    arg_current_page = request.args.get('page', '0')
    arg_search_query = request.args.get('search', None)

    dags_per_page = PAGE_SIZE
    current_page = get_int_arg(arg_current_page, default=0)

    if show_paused_arg.strip().lower() == 'false':
        hide_paused = True
    elif show_paused_arg.strip().lower() == 'true':
        hide_paused = False
    else:
        hide_paused = hide_paused_dags_by_default

    # read orm_dags from the db
    query = session.query(DM)
    query = query.filter(~DM.is_subdag, DM.is_active)

    # optionally filter out "paused" dags
    if hide_paused:
        query = query.filter(~DM.is_paused)

    if arg_search_query:
        query = query.filter(
            DM.dag_id.ilike('%' + arg_search_query + '%') |
            DM.owners.ilike('%' + arg_search_query + '%'))

    query = query.order_by(DM.dag_id)

    start = current_page * dags_per_page
    end = start + dags_per_page
    dags = query.offset(start).limit(dags_per_page).all()

    import_errors = session.query(errors.ImportError).all()
    for ie in import_errors:
        flash(
            "Broken DAG: [{ie.filename}] {ie.stacktrace}".format(ie=ie),
            "error")

    from airflow.plugins_manager import import_errors as plugin_import_errors
    for filename, stacktrace in plugin_import_errors.items():
        flash(
            "Broken plugin: [{filename}] {stacktrace}".format(
                stacktrace=stacktrace,
                filename=filename),
            "error")

    num_of_all_dags = query.count()
    num_of_pages = int(math.ceil(num_of_all_dags / float(dags_per_page)))

    auto_complete_data = set()
    for row in query.with_entities(DM.dag_id, DM.owners):
        auto_complete_data.add(row.dag_id)
        auto_complete_data.add(row.owners)

    return self.render(
        'airflow/dags.html',
        dags=dags,
        hide_paused=hide_paused,
        current_page=current_page,
        search_query=arg_search_query if arg_search_query else '',
        page_size=dags_per_page,
        num_of_pages=num_of_pages,
        num_dag_from=min(start + 1, num_of_all_dags),
        num_dag_to=min(end, num_of_all_dags),
        num_of_all_dags=num_of_all_dags,
        paging=wwwutils.generate_pages(current_page, num_of_pages,
                                       search=arg_search_query,
                                       showPaused=not hide_paused),
        auto_complete_data=auto_complete_data)
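# A simplified, hypothetical sketch of a generate_pages-style helper; this is
# not the actual implementation used above. It only illustrates the two
# properties the tests rely on: the return value is HTML-escaping aware
# (markupsafe.Markup exposes __html__), and extra parameters such as `search`
# are URL-encoded into each page-relative link rather than interpolated raw.
# The markup and class names here are illustrative, and the first/prev/next/last
# links and window handling are omitted.
from urllib.parse import urlencode

from markupsafe import Markup, escape


def generate_page_links(current_page, num_of_pages, **query_params):
    items = []
    for page in range(num_of_pages):
        if page == current_page:
            # the active page links nowhere and is highlighted
            items.append(
                '<li class="active"><a href="javascript:void(0)">{}</a></li>'
                .format(page + 1))
        else:
            # page-relative link "?page=N&search=...": the page index is
            # zero-based while the link text is one-based, and the query
            # string is percent-encoded before going into the attribute
            qs = urlencode(dict(query_params, page=page))
            items.append('<li><a href="?{}">{}</a></li>'.format(
                escape(qs), page + 1))
    return Markup('<ul class="pagination">{}</ul>'.format(''.join(items)))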