class ConfluenceTests(unittest.TestCase):
    """Integration tests for the Confluence wrapper.

    Requires a reachable server configured under the "confluence" profile.
    Fixed: the hex-dump diagnostics used Python-2 `print` statements, which
    are a syntax error under Python 3.
    """

    # _multiprocess_can_split_ = True

    def setUp(self):
        from confluence import Confluence
        self.conf = Confluence(profile="confluence")

    # def test_getPage(self):
    #     page = self.conf.getPage(page='test', space='ds')

    def test_renderContent(self):
        # Rendered page must contain an element with id="Content" and match
        # the expected sample markup exactly.
        result = self.conf.renderContent(page='test', space='ds')
        x = parseString(result)
        e = x.getElementById('Content')
        self.assertFalse(
            e is None,
            "Unable to find element with id=Content in: '%s'" % result)
        e.toxml()
        self.assertEqual(
            result, SAMPLE_XML,
            "Got '%s' while expecting '%s'." % (result, SAMPLE_XML))

    def test_storePageContent(self):
        # Round-trip: store wiki markup and read it back; the hex dumps help
        # diagnose encoding mismatches when the comparison fails.
        self.conf.storePageContent(page='test', space='ds',
                                   content=SAMPLE_WIKI)
        result = self.conf.getPage(page='test', space='ds')['content']
        print(":".join("{0:x}".format(ord(c)) for c in result))
        print(":".join("{0:x}".format(ord(c)) for c in SAMPLE_XML))
        self.assertEqual(
            result, SAMPLE_WIKI,
            "Got '%s' while expecting '%s'." % (result, SAMPLE_XML))
def form(form_id, label, title):
    """Build the HTML for the Trello-to-Confluence modal form.

    :param form_id: value for the ``<form id="...">`` attribute
    :param label: the ``<label for="...">`` target
    :param title: visible label text
    :return: complete HTML string for the form (also pretty-printed)
    """
    trel = Trello()
    conf = Confluence()
    # Start the accumulator with the opening tag and the label.
    # Fixed: `html` was never initialised before the first `+=` (raising
    # UnboundLocalError) and the <form>/<label> strings were built but
    # discarded.
    html = '<form id="{}">'.format(form_id)
    html += '<label for="{}">{}</label>'.format(label, title)
    # NOTE(review): `board_id` is not a parameter of this function —
    # presumably a module-level constant; confirm it is defined in scope.
    resp = trel.get_a_on_b(a='lists', b='boards', b_id=board_id)
    lists = {l['name']: l['id'] for l in resp}
    html += select('trello', 'trelloList', lists)
    spaces = conf.get_spaces()
    html += select('confluence', 'confSpace', spaces)
    html += button('submit', 'mod-primary', 'Create Page!')
    # close form
    html += '</form>'
    html += '<script src="{}"></script>'.format('./js/modal.js')
    pprint(html)
    return html
def process(html, title_format, space, parent_page_name):
    """Publish *html* to Confluence under a timestamp-derived title.

    :param html: page body (already-rendered HTML, not wiki markup)
    :param title_format: strftime pattern used to build the page title
    :param space: target Confluence space key
    :param parent_page_name: title of the parent page to attach under
    :return: whatever storePageContent returns for the stored page
    """
    client = Confluence(profile='confluence')
    title = datetime.datetime.now().strftime(title_format)
    # Echo the effective parameters for operator visibility.
    print(f'title: {title}')
    print(f'space: {space}')
    print(f'parent_page: {parent_page_name}')
    return client.storePageContent(page=title,
                                   space=space,
                                   content=html,
                                   convert_wiki=False,
                                   parent_page=parent_page_name)
def ConfluenceWritePage(space, title, content, parent):
    """Update a Confluence page, falling back to creating it on failure.

    :param space: space key
    :param title: page title
    :param content: page body
    :param parent: parent page id — NOTE(review): only aliased locally and
        never passed on; confirm whether write_page should receive it.
    """
    confl = Confluence(profile='confluence')
    token = confl._token
    server = confl._server
    parent_id = parent
    try:
        existing_page = confl.storePageContent(title, space, content)
        # Updated the page
    except Exception:
        # Fixed: the bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit; keep the create-on-failure fallback but narrow it.
        write_page(server, token, space, title, content)
        # Created a page
    return
class ConfluenceClient(object):
    """Thin convenience wrapper around a Confluence REST client."""

    def __init__(self, url, username, password):
        self.confluence = Confluence(url, username, password)

    def get_page_names(self, space_key):
        """Return a list of all page titles (including children) in a space.

        Follows the paginated `_links` cursor until no `next` link remains.
        (Docstrings were previously bare strings *preceding* each def, so
        they were never attached to the methods; moved inside.)
        """
        page_names = []
        start = 0
        limit = 25
        while True:
            pages = self.confluence.get_pages(space_key, start, limit).json()
            for page in pages['results']:
                page_names.append(page['title'])
            if 'next' not in pages['_links']:
                break
            start = pages['start'] + pages['limit']
        return page_names

    def get_page_count(self, space_key):
        """Return the number of pages (including children) in a space."""
        return len(self.get_page_names(space_key))

    def create_empty_page(self, space_key, title, parent_page_id=None):
        """Create an empty page in the given Confluence space.

        :param parent_page_id: optional ancestor page id; None for top level
        """
        if parent_page_id is not None:
            ancestors = [{'id': parent_page_id}]
        else:
            ancestors = None
        return self.confluence.create_page(space_key, title, ancestors, '')

    @staticmethod
    def write_to_file(filepath, content):
        """Write *content* to *filepath*.

        Fixed: the method lacked `self`, making it uncallable through an
        instance; it is now a staticmethod. The file handle also leaked if
        write() raised; a `with` block guarantees closure.
        """
        with open(filepath, 'w') as outfile:
            outfile.write(content)
def testHeaders(self):
    """Custom "Name: value" header strings must reach the HTTP session."""
    raw_headers = ['Cookie: NAME=VALUE', 'X-CUSTOM-HEADER: VALUE']
    expected = {'Cookie': 'NAME=VALUE', 'X-CUSTOM-HEADER': 'VALUE'}
    api = Confluence(api_url='https://wiki.example.com/rest/api',
                     headers=raw_headers)
    self.assert_headers(api._session.headers, expected)
def main():
    """Deploy changed posts to Confluence.

    Posts come either from the --posts argument (each path must exist) or,
    failing that, from the files touched by the latest commit of the --git
    repository.
    """
    args = parse_args()
    confluence = Confluence(api_url=args.api_url,
                            username=args.username,
                            password=args.password,
                            headers=args.headers,
                            dry_run=args.dry_run)
    if args.posts:
        # Explicit file list: normalise and validate every path up front.
        changed_posts = [os.path.abspath(post) for post in args.posts]
        for post_path in changed_posts:
            if not (os.path.exists(post_path) and os.path.isfile(post_path)):
                log.error('File doesn\'t exist: {}'.format(post_path))
                sys.exit(1)
    else:
        # No explicit list: take whatever the latest commit modified.
        repo = git.Repo(args.git)
        changed_posts = [
            os.path.join(args.git, post) for post in get_last_modified(repo)
        ]
    if not changed_posts:
        log.info('No post created/modified in the latest commit')
        return
    for post in changed_posts:
        log.info('Attempting to deploy {}'.format(post))
        deploy_file(post, args, confluence)
def create(streamIds, **kwargs):
    """
    Creates and loads data into a Confluence, which is a collection of River
    Streams.

    :param streamIds: (list) Each data id in this list is a list of strings:
                      1. river name
                      2. stream name
                      3. field name
    :param kwargs: Passed into Confluence constructor
    :return: (Confluence)
    """
    # Fixed: this used a Python-2 `print` statement (syntax error on py3);
    # the single-argument call form below works on both interpreters.
    print("Creating Confluence for the following RiverStreams:"
          "\n\t%s" % ",\n\t".join([":".join(row) for row in streamIds]))
    confluence = Confluence(streamIds, **kwargs)
    confluence.load()
    return confluence
def testHeadersNoValue(self):
    """Headers given without a value must default to the empty string."""
    raw_headers = ['X-CUSTOM-HEADER-1:', 'X-CUSTOM-HEADER-2']
    expected = {'X-CUSTOM-HEADER-1': '', 'X-CUSTOM-HEADER-2': ''}
    api = Confluence(api_url='https://wiki.example.com/rest/api',
                     headers=raw_headers)
    self.assert_headers(api._session.headers, expected)
def excel():
    """
    Post an excel file and fetch its configuration
    """
    excel_file = request.files['file']
    sheet = request.values.get('sheet')
    # Conditional formatting is on unless the client explicitly sends 'false'
    # (equivalent to the original False-if-'false'-else-True expression).
    conditional_formatting = (
        request.values.get('conditional_formatting') != 'false')
    excel = Excel(get_config(),
                  excel_file._file,
                  conditional_formatting=conditional_formatting)
    confluence = Confluence(get_config())
    content = excel.parse(sheet=sheet)
    content['header'] = request.values.get('header')
    content['source'] = confluence.source_from_data(
        content.get('data', {}), header=content.get('header'))
    return json.dumps(content, default=json_serial)
def connect(self, params=None):
    """
    Connect to Confluence

    :param params: dict with 'url' and a 'credentials' dict holding
        'username' and 'password'. Fixed: the default was a mutable `{}`
        shared across every call; a None sentinel is used instead
        (behaviour for callers passing a dict is unchanged).
    """
    params = params or {}
    self.logger.info("Connecting to Confluence: %s", params.get('url'))
    self.client = Confluence(
        url=params.get('url'),
        username=params.get('credentials').get('username'),
        password=params.get('credentials').get('password'))
def main():
    """Mirror every Confluence space into Notion, fanning out over threads."""
    args = parse_args()
    # Notion side: client plus the page all spaces will be written under.
    client = NotionClient(token_v2=os.environ.get("NOTION_TOKEN"))
    parent_page = client.get_block(args.notion_page)
    # Confluence side: basic-auth credentials come from the environment.
    auth = HTTPBasicAuth(
        os.environ.get("CONFLUENCE_API_USERNAME"),
        os.environ.get("CONFLUENCE_API_TOKEN"),
    )
    confluence = Confluence(args.confluence_url, auth)
    exporter = Confluence2Notion(confluence, parent_page)
    with concurrent.futures.ThreadPoolExecutor(
            max_workers=args.concurrency) as executor:
        executor.map(exporter.write_space, confluence.get_spaces())
def connect(self, params=None):
    """
    Connect to Confluence

    :param params: dict with "url" and a "credentials" dict holding
        "username" and "password". Fixed: the default was a mutable `{}`
        shared across every call; a None sentinel is used instead
        (behaviour for callers passing a dict is unchanged).
    """
    params = params or {}
    self.logger.info("Connecting to Confluence: %s", params.get("url"))
    self.client = Confluence(
        url=params.get("url"),
        username=params.get("credentials").get("username"),
        password=params.get("credentials").get("password"),
    )
def confluenceConnector(conflConnector):
    # Build a Confluence client from a connector definition plus stored
    # LDAP credentials.
    #
    # NOTE(review): `options` is assembled but never passed to Confluence
    # below, so the connector's url/verify settings are currently ignored —
    # confirm whether the 'confluence' profile is meant to supply them.
    options = {
        'server': conflConnector['url'],
        'verify': conflConnector['verify']
    }
    # Credentials come from the password store's 'officeLdap' entry.
    keyValue = pwdCaller('officeLdap')['data']
    confl = Confluence(profile='confluence',
                       username=keyValue['user'],
                       password=keyValue['password'])
    return confl
class ConfluenceTests(unittest.TestCase):
    # Integration tests; require a live server behind the "confluence"
    # profile plus module-level SAMPLE_XML / SAMPLE_WIKI fixtures.

    # _multiprocess_can_split_ = True

    def setUp(self):
        from confluence import Confluence
        self.conf = Confluence(profile="confluence")

    # def test_getPage(self):
    #     page = self.conf.getPage(page='test',space='ds')
    #     print page

    def test_renderContent(self):
        # Rendered page must contain an element with id="Content" and match
        # the expected sample markup exactly.
        result = self.conf.renderContent(page="Welcome to Confluence",
                                         space="ds")
        # tree = ElementTree.fromstring(result)
        # html = lxml.html.fromstring(result)
        x = parseString(result)
        e = x.getElementById("Content")
        self.assertFalse(
            e is None,
            "Unable to find element with id=Content in: '%s'" % result)
        e.toxml()
        # result = html.get_element_by_id("Content").__str__()
        self.assertEqual(
            result, SAMPLE_XML,
            "Got '%s' while expecting '%s'." % (result, SAMPLE_XML))

    def test_storePageContent(self):
        # Round-trip store/get; the hex dumps help diagnose encoding
        # mismatches when the comparison fails.
        self.conf.storePageContent(page="test", space="ds",
                                   content=SAMPLE_WIKI)
        result = self.conf.getPage(page="test", space="ds")["content"]
        print(":".join("{0:x}".format(ord(c)) for c in result))
        print(":".join("{0:x}".format(ord(c)) for c in SAMPLE_XML))
        self.assertEqual(
            result, SAMPLE_WIKI,
            "Got '%s' while expecting '%s'." % (result, SAMPLE_XML))
def testHeadersDuplicates(self):
    """Last duplicate wins: header names are case-insensitive, so of three
    spellings of the same header only the final value must survive."""
    raw_headers = [
        'X-CUSTOM-HEADER: foo', 'X-Custom-Header: bar',
        'x-custom-header: baz'
    ]
    expected = {'x-custom-header': 'baz'}
    api = Confluence(api_url='https://wiki.example.com/rest/api',
                     headers=raw_headers)
    self.assert_headers(api._session.headers, expected)
def _connect(self):
    """Open the Confluence connection using the configured credentials.

    Raises ImportError when the confluence package is missing, otherwise a
    generic Exception describing the connection failure.
    """
    try:
        self.confluence = Confluence(
            url=self.config.confluence_server_url,
            username=self.config.confluence_server_user,
            password=self.config.confluence_server_pass)
    except ImportError:
        raise ImportError(
            "Must install confluence PyPi package to publish")
    except Exception as ex:
        # Chain the original exception so the root cause isn't lost in the
        # traceback (previously the chain was implicit only).
        raise Exception(
            "Could not connect, check remote API is configured. %s"
            % ex) from ex
def main(list_id, space_id):
    """Create a Confluence page from a Trello list.

    :param list_id: Trello list id to render into page HTML
    :param space_id: Confluence space key for the new page
    """
    trel = Trello()
    conf = Confluence()
    page = trel.format_page(list_id)  # html pageData
    # Fixed: previously passed the undefined name `space_key`, which raised
    # NameError at call time; the parameter is `space_id`.
    conf.create_page(page, space_id)  # creates page
    print('\n\n\n page.py script is done!!! \n\n\n')  # TEST
# main script to make Conf page from Trello list
from trello import Trello
from confluence import Confluence

if __name__ == "__main__":
    t = Trello()
    conf = Confluence()
    # NOTE(review): `mmain` looks like a typo for `main` — confirm against
    # the Trello class before renaming, since it may be deliberate.
    conf.create_page(t.mmain())
    # Fixed typo in the completion message ("sript" -> "script").
    print('main script done!')
def main():
    """Publish a Pandoc Markdown file to Confluence.

    Reads the file's YAML metadata block to locate the target page (by page
    URL or base URL), converts the body to Confluence Storage Format via
    pandoc, posts it, rewrites the metadata with the resulting page URL and
    version, and finally attaches the source file to the page.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("file", type=str, help="Pandoc's Markdown file path.")
    args = parser.parse_args()
    file = pathlib.Path(args.file)

    # Read Pandoc markdown file (utf_8_sig tolerates a leading BOM).
    with file.open('r', encoding='utf_8_sig') as fd:
        lines = fd.readlines()

    # Extract YAML metadata block from Pandoc markdown.
    # http://pandoc.org/README.html#extension-yaml_metadata_block
    metadata_content = ''
    if len(lines) > 0 and lines[0] == '---\n':
        lines.pop(0)
        while True:
            if len(lines) == 0:
                raise Exception('No YAML metadata block end')
            line = lines.pop(0)
            if line in ('...\n', '---\n'):
                metadata_end_line = line
                break
            metadata_content += line
    else:
        raise Exception('No YAML metadata block')
    yaml_preserve_order()
    metadata = yaml.safe_load(metadata_content)
    confluenceMetadata = metadata.get(CONFLUENCE)  # type: Optional[dict]
    if confluenceMetadata is None:
        raise Exception(
            'No `{0}` section in YAML metadata block'.format(CONFLUENCE))

    # Parse username, baseUrl, pageId, spaceKey, title from the metadata.
    if CONFLUENCE_PAGE_URL in confluenceMetadata:
        urlstr = confluenceMetadata[CONFLUENCE_PAGE_URL]
        url = urllib.parse.urlsplit(urlstr)  # type: urllib.parse.SplitResult
        path = pathlib.PurePosixPath(url.path)
        query = urllib.parse.parse_qs(url.query)
        plen = len(path.parts)
        username = url.username
        if plen >= 4 and path.parts[
                plen - 3] == 'display':  # e.g. ['/', 'confluence', 'display', '~jsmith', 'Test+page']
            basePath = str(path.parents[2]).rstrip('/')
            pageId = None
            spaceKey = urllib.parse.unquote_plus(path.parts[plen - 2])
            title = urllib.parse.unquote_plus(path.parts[plen - 1])
        elif plen >= 3 and path.parts[plen - 2] == 'pages' and path.parts[
                plen - 1] == 'viewpage.action':  # e.g. ['/', 'confluence', 'pages', 'viewpage.action']
            basePath = str(path.parents[1]).rstrip('/')
            pageId = int(query['pageId'][0])
            spaceKey = None
            title = None
        else:
            raise Exception(
                'Unknown Confluence page URL format: {0}'.format(urlstr))
    elif CONFLUENCE_BASE_URL in confluenceMetadata:
        urlstr = confluenceMetadata[CONFLUENCE_BASE_URL]
        url = urllib.parse.urlsplit(urlstr)  # type: urllib.parse.SplitResult
        username = url.username
        basePath = url.path.rstrip('/')
        pageId = None
        spaceKey = None
        title = None
    else:
        raise Exception(
            'No `{0}` or `{1}` in `{2}` section of YAML metadata block'.format(
                CONFLUENCE_PAGE_URL, CONFLUENCE_BASE_URL, CONFLUENCE))
    # Two base URLs: one keeping the user@ part (written back to metadata),
    # one stripped of it (used to talk to the API).
    baseUrlWithUsername = urllib.parse.urlunsplit(
        (url.scheme, url.netloc, basePath, None, None))
    baseUrl = urllib.parse.urlunsplit(
        (url.scheme, url.netloc.rpartition("@")[2], basePath, None, None))
    newTitle = metadata.get('title')  # type: Optional[str]
    authors = metadata.get('author', [])  # type: List[str]

    # Set default user name: first author as "John Smith" -> "jsmith",
    # otherwise the OS login name.
    if username is None:
        if len(authors) > 0:
            author = authors[0]
            firstname, lastname = author.split()  # type: str
            username = firstname[0].lower() + lastname.lower()
        else:
            username = getpass.getuser()

    # Set default space key (the user's personal space).
    if spaceKey is None:
        spaceKey = '~' + username

    # Convert Pandoc's Markdown file to Confluence Storage Format (CSF)
    # using the `pandoc` utility and the csf.lua custom writer shipped next
    # to this script.
    cmd = [
        "pandoc",
        "--from=markdown+hard_line_breaks+lists_without_preceding_blankline+compact_definition_lists+smart+autolink_bare_uris",
        "--to",
        os.path.join(os.path.dirname(sys.argv[0]), "csf.lua"),
        str(file)
    ]
    res = subprocess.run(cmd, stdout=subprocess.PIPE)
    content = res.stdout.decode('utf-8')
    if confluenceMetadata.get(CONFLUENCE_NOTE_AUTOGEN, False):  # type: bool
        # Fixed: the template hard-coded a placeholder file name, leaving the
        # `filename` format argument unused; it now interpolates the actual
        # source file name into the attachment link.
        note = textwrap.dedent("""\
            <ac:structured-macro ac:name="info" ac:schema-version="1">
            <ac:rich-text-body><p>This page is generated automatically from <ac:link><ri:attachment ri:filename="{filename}"/></ac:link> using <a href="{project_url}">{project_name}</a>.</p></ac:rich-text-body>
            </ac:structured-macro>
            """).format(filename=html.escape(file.name),
                        project_name=html.escape(PROJECT_NAME),
                        project_url=html.escape(PROJECT_URL))
        content = note + content

    # Ask username and password.
    confluence = Confluence(baseUrl, username)

    # Request Confluence API to edit or create a page.
    try:
        info = confluence.post_page(pageId, spaceKey, title, newTitle,
                                    content)
    except requests.exceptions.HTTPError as ex:
        response = ex.response  # type: requests.models.Response
        if response.status_code == 401:
            print('Authentication failed.')
        else:
            print(ex)
            print(response.text)
        return
    else:
        if info is None:
            return

    # Update metadata with the page's canonical URL and new version.
    confluenceMetadata.pop(CONFLUENCE_BASE_URL, None)
    confluenceMetadata[
        CONFLUENCE_PAGE_URL] = baseUrlWithUsername + info['_links']['webui']
    confluenceMetadata[CONFLUENCE_PAGE_VERSION] = info['version']['number']

    # Rewrite Pandoc markdown file with updated YAML metadata block; write
    # to a temp file in the same directory so the final os.replace is an
    # atomic same-filesystem rename.
    fd = tempfile.NamedTemporaryFile(
        'w', encoding='utf_8', delete=False, dir=str(file.parent),
        suffix='.tmp')  # type: io.TextIOWrapper
    with fd:
        fd.write('---\n')
        yaml.dump(metadata, fd, default_flow_style=False, allow_unicode=True)
        fd.write(metadata_end_line)
        fd.writelines(lines)
    os.replace(fd.name, str(file))  # src and dst are on the same filesystem

    # Attach source file (best effort: report HTTP errors, don't fail).
    try:
        confluence.attach_file(info, file, content_type="text/markdown",
                               comment="Source code of this page.")
    except requests.exceptions.HTTPError as ex:
        response = ex.response  # type: requests.models.Response
        print(ex)
        print(response.text)
def setUp(self):
    # Fresh client per test, configured via the "confluence-test" profile.
    from confluence import Confluence as _Confluence
    self.conf = _Confluence(profile="confluence-test")
def setUp(self):
    # Fresh client per test, configured via the "confluence" profile.
    from confluence import Confluence as _Confluence
    self.conf = _Confluence(profile="confluence")
def __init__(self, url, username, password):
    # Underlying Confluence client bound to the given server and credentials.
    self.confluence = Confluence(url, username, password)
def setUp(self):
    # Shared fixtures: page slug, target space, and an API client with
    # placeholder credentials (the session is mocked per test).
    self.slug = "example-page"
    self.space = 'SPACE'
    self.api = Confluence(
        api_url='https://wiki.example.com/rest/api',
        username='******',
        password='******',
    )
class TestConfluence(unittest.TestCase):
    # Unit tests for the Confluence API wrapper; every test swaps the
    # client's HTTP session for a MockConfluenceClient canned response.

    def setUp(self):
        self.space = 'SPACE'
        self.slug = "example-page"
        self.api = Confluence(api_url='https://wiki.example.com/rest/api',
                              username='******',
                              password='******')

    def testPostExists(self):
        # One matching result in the search payload -> exists() is truthy.
        response = {
            "results": [
                {
                    "id": "1234567",
                    "type": "page",
                    "status": "current",
                    "title": "Example Page",
                    "restrictions": {},
                },
            ],
            "start": 0,
            "limit": 25,
            "size": 1
        }
        client = MockConfluenceClient(response=response, status=200,
                                      is_json=True)
        self.api._session = client
        got = self.api.exists(slug=self.slug)
        self.assertTrue(got)

    def testPostDoesntExist(self):
        # Empty result set -> exists() is falsy.
        response = {
            "results": [],
            "start": 0,
            "limit": 25,
            "size": 0,
        }
        client = MockConfluenceClient(response=response, status=200,
                                      is_json=True)
        self.api._session = client
        got = self.api.exists(slug=self.slug)
        self.assertFalse(got)

    def testLabelCreation(self):
        # create_labels must post one label per slug/tag, all under the
        # default prefix, in a single JSON payload (order-insensitive).
        slug = 'example-post'
        tags = ['knowledge', 'testing']
        expected = [{
            'prefix': DEFAULT_LABEL_PREFIX,
            'name': slug
        }, {
            'prefix': DEFAULT_LABEL_PREFIX,
            'name': 'knowledge'
        }, {
            'prefix': DEFAULT_LABEL_PREFIX,
            'name': 'testing'
        }]
        client = MockConfluenceClient(response={}, status=200, is_json=True)
        self.api._session = client
        self.api.create_labels(page_id='12345', slug=slug, tags=tags)
        # First captured request holds the labels payload.
        sent = self.api._session.requests[0]
        self.assertEqual(len(sent['json']), len(expected))
        for label in expected:
            self.assertIn(label, sent['json'])

    def testGetAuthor(self):
        # get_author must surface the userKey from the API response.
        userKey = '1234567890'
        expected = {
            "type": "known",
            "username": "******",
            "userKey": userKey,
            "profilePicture": {
                "path": "/download/attachments/123456/user-avatar",
                "width": 48,
                "height": 48,
                "isDefault": False
            },
            "displayName": "Foo Bar"
        }
        client = MockConfluenceClient(response=expected, status=200,
                                      is_json=True)
        self.api._session = client
        got = self.api.get_author('foo')
        self.assertEqual(got['userKey'], userKey)
def connect():
    """Return a Confluence connection built from module-level credentials."""
    return Confluence(url=confluence_server,
                      username=confluence_user,
                      password=confluence_password)
class DNSUpdaterNG(object):
    # Regenerates DNS records from address tables embedded in a Confluence
    # page: fetch page -> hash-compare to skip no-op runs -> parse tables
    # with BeautifulSoup -> push batched record changes via `nsupdate`.

    def __init__(self, base_url, username, password, page_id, verify_ssl=True):
        self.page_id = page_id
        self.c = Confluence(base_url, username, password,
                            verify_ssl=verify_ssl)

    def update(self):
        # Entry point: fetch, short-circuit on unchanged content, otherwise
        # parse every address table and apply the batched zone updates.
        page = self.c.get_page(self.page_id)
        if not page:
            raise Exception("Failed to fetch confluence page")
        data = BeautifulSoup(page, "lxml")
        stored_hash = None
        page_hash = hashlib.sha512(
            page.encode('ascii', errors='ignore')).hexdigest()
        try:
            hf = open(PERSISTENT_HASH_FILE)
            stored_hash = hf.read()
            hf.close()
        except:
            # Missing/unreadable hash file simply forces a full update.
            pass
        if stored_hash == page_hash:
            logger.info('equality via hash, no-op')
            return
        else:
            hf = open(PERSISTENT_HASH_FILE, 'w')
            hf.write(page_hash)
            hf.close()
        logger.info('updating DNS')
        addrtables = self._locate_addrtables(data)
        addrtable_data = []
        for addrtable in addrtables:
            for addrtable_info in self._parse_addrtable(addrtable):
                addrtable_data.append(addrtable_info)
        zone_updates = self._build_batch_update(addrtable_data)
        self._update_all_zones(zone_updates)
        logger.info('all done')

    def _update_all_zones(self, zone_updates):
        # Feed one nsupdate batch per zone over stdin; `commands` is reset
        # after each zone so every subprocess sees only its own zone.
        commands = []
        for zonename in zone_updates.keys():
            commands.append('server 127.1')
            commands.append('zone %s' % (zonename))
            updates = zone_updates[zonename]
            for update in updates:
                # Each update is a [delete-spec, add-spec] pair.
                commands.append('update delete %s' % (update[0]))
                commands.append('update add %s' % (update[1]))
            commands.append('send')
            cmd_out = '\n'.join(commands).encode('ascii', errors='ignore')
            logger.debug(cmd_out.decode("ascii"))
            p = Popen(['nsupdate'], stdout=PIPE, stdin=PIPE, stderr=PIPE)
            stdout, stderr = p.communicate(input=cmd_out)[0:2]
            ser = stderr.decode('ascii', errors='ignore')
            if ser.strip() != '':
                logger.error("%s: %s" % (zonename, ser.strip()))
            commands = []

    def _build_batch_update(self, addrtable_data):
        # Convert parsed table rows into per-zone [delete, add] record
        # pairs. '-' marks "no zone configured" for a field.
        zone_updates = {}
        for item in addrtable_data:
            zi = item['_zoneinfo']
            if zi['dns_zone'] != '-' and zi['dns_zone'] not in zone_updates:
                zone_updates[zi['dns_zone']] = []
            if zi['dns_reverse4'] != '-' and zi['dns_reverse4'] not in zone_updates:
                zone_updates[zi['dns_reverse4']] = []
            if zi['dns_reverse6'] != '-' and zi['dns_reverse6'] not in zone_updates:
                zone_updates[zi['dns_reverse6']] = []
            if item['Name'] == '':
                continue
            if item['A'] != '':
                # Forward A record plus (when the address fits the
                # configured reverse zone) the matching PTR.
                if zi['dns_zone'] != '-':
                    zone_updates[zi['dns_zone']].append([
                        "%s.%s. A" % (item['Name'], zi['dns_suffix']),
                        "%s.%s. 60 A %s" % (item['Name'], zi['dns_suffix'],
                                            item['A'])
                    ])
                if zi['dns_reverse4'] != '-':
                    this_reverse = IPy.IP(item['A']).reverseNames()[0]
                    if this_reverse.endswith(zi['dns_reverse4']):
                        zone_updates[zi['dns_reverse4']].append([
                            "%s PTR" % (IPy.IP(item['A']).reverseNames()[0]),
                            "%s 60 PTR %s.%s." %
                            (IPy.IP(item['A']).reverseNames()[0],
                             item['Name'], zi['dns_suffix'])
                        ])
                    else:
                        logger.error("IP address %s don't match reverse %s" %
                                     (item['A'], zi['dns_reverse4']))
            if item['AAAA'] != '':
                # Same for IPv6; a trailing /prefix on the cell is dropped.
                ipv6 = item['AAAA']
                if '/' in ipv6:
                    ipv6, crap = ipv6.split('/',1)
                if zi['dns_zone'] != '-':
                    zone_updates[zi['dns_zone']].append([
                        "%s.%s. AAAA" % (item['Name'], zi['dns_suffix']),
                        "%s.%s. 60 AAAA %s" % (item['Name'],
                                               zi['dns_suffix'], ipv6)
                    ])
                if zi['dns_reverse6'] != '-':
                    this_reverse = IPy.IP(ipv6).reverseNames()[0]
                    if this_reverse.endswith(zi['dns_reverse6']):
                        zone_updates[zi['dns_reverse6']].append([
                            "%s PTR" % (IPy.IP(ipv6).reverseNames()[0]),
                            "%s 60 PTR %s.%s." %
                            (IPy.IP(ipv6).reverseNames()[0], item['Name'],
                             zi['dns_suffix'])
                        ])
                    else:
                        logger.error("IPv6 address %s don't match reverse %s"
                                     % (ipv6, zi['dns_reverse6']))
            # NOTE(review): CNAME/SRV below index zone_updates[zi['dns_zone']]
            # without the "'-'" guard used above — a row whose zone is '-'
            # but that carries CNAME/SRV entries would raise KeyError.
            for cname in item['CNAME']:
                zone_updates[zi['dns_zone']].append([
                    "%s 60 CNAME" % (cname),
                    "%s 60 CNAME %s.%s." % (cname, item['Name'],
                                            zi['dns_suffix'])
                ])
            for srv in item['SRV']:
                # SRV cells are "description:port" pairs.
                srv_components = srv.split(':')
                if len(srv_components) != 2:
                    continue
                srv_desc, srv_port = srv_components
                zone_updates[zi['dns_zone']].append([
                    "%s.%s SRV 0 0 %s %s.%s." %
                    (srv_desc, zi['dns_zone'], srv_port, item['Name'],
                     zi['dns_suffix']),
                    "%s.%s 60 SRV 0 0 %s %s.%s." %
                    (srv_desc, zi['dns_zone'], srv_port, item['Name'],
                     zi['dns_suffix'])
                ])
        for zone, data in zone_updates.items():
            for item in data:
                logger.debug("%s" % (item,))
        return zone_updates

    def _parse_addrtable(self, addrtable):
        # Parse one HTML table into row dicts; $DNSGEN-SUBZONE control cells
        # set the zone info attached to all subsequent rows.
        ret = []
        dns_suffix = None
        dns_zone = None
        dns_reverse4 = None
        dns_reverse6 = None
        keys = []
        for row in addrtable.find_all('tr'):
            rowcells = []
            if row.find('th'):
                # Header row defines the column keys for later zip().
                for cell in row.find_all('th'):
                    keys.append(cell.getText())
                continue
            for cell in row.find_all('td'):
                plaintext = cell.getText()
                if plaintext.startswith('$'):
                    # Control cell: "$DNSGEN-<COMMAND> [args]".
                    argv = plaintext.strip('$').split(' ', 1)
                    dnsgen_command = argv[0]
                    if dnsgen_command == 'DNSGEN-ADDRTABLE':
                        pass
                    elif dnsgen_command == 'DNSGEN-SUBZONE' and len(argv) > 1:
                        args = argv[1].split(' ')
                        if len(args) != 4:
                            break
                        dns_suffix, dns_zone, dns_reverse4, dns_reverse6 = args
                    break
                else:
                    # Strip NBSPs that Confluence inserts into cells.
                    rowcells.append(plaintext.replace('\xa0', ' ').strip())
            if len(rowcells) == 0:
                continue
            rowdata = dict(zip(keys, rowcells))
            if rowdata['CNAME'] == '':
                rowdata['CNAME'] = []
            else:
                rowdata['CNAME'] = rowdata['CNAME'].split()
            if rowdata['SRV'] == '':
                rowdata['SRV'] = []
            else:
                rowdata['SRV'] = rowdata['SRV'].split(' ')
            rowdata['_zoneinfo'] = {
                'dns_zone': dns_zone,
                'dns_suffix': dns_suffix,
                'dns_reverse4': dns_reverse4,
                'dns_reverse6': dns_reverse6
            }
            ret.append(rowdata)
        return ret

    def _locate_addrtables(self, bs_data):
        # Find tables whose first cell carries the $DNSGEN-ADDRTABLE marker.
        ret = []
        tbls = bs_data.find_all('table')
        for tbl in tbls:
            try:
                first_td = tbl.find('td')
                if first_td is not None:
                    hint = first_td.next_element.getText()
                    if hint == '$DNSGEN-ADDRTABLE':
                        ret.append(tbl)
                # NOTE(review): this returns inside the first loop iteration,
                # so only the first table is ever inspected — confirm whether
                # the return belongs after the loop.
                return ret
            except:
                pass
def distributed_confluence(self, output_ij_list):
    """Distributed runoff routing: route every grid cell's runoff to the
    given outlet cells and return the combined discharge series.

    :param output_ij_list: list of (i, j) outlet cell coordinates
    :return: discharge time series (numpy array, one value per time step)

    NOTE(review): relies on free names (``fdr``, ``fac``, ``ncols``,
    ``day_evap``, ``river_mark_list``, ``find_input``, ``ij_toid``) —
    presumably module-level globals; confirm where this method lives.
    """
    # Obtain per-cell runoff components.
    grid_runoff = self._distributed_runoff()
    # Instantiate the routing (confluence) model.
    confluence = Confluence(self.dt, self.dx, self.a3, self.b3, self.a4,
                            self.b4, self.CS, self.CI, self.CG)

    # Channel-cell routing computation.
    def river_flow(i, j):
        # Find every cell draining into this channel cell.
        k, grid_input = find_input(i, j, fdr)
        not_river_list = [
            item for item in grid_input if item not in river_mark_list
        ]
        river_list = [
            item for item in grid_input if item in river_mark_list
        ]
        # Linearly superpose the hillslope-routed flow of all non-channel
        # contributing cells (series values).
        R_not_river = np.zeros(day_evap.size)
        for ij in not_river_list:
            grid_id = ij_toid(ij, ncols)
            RS, RI, RG = grid_runoff[grid_id]
            # Hillslope routing of surface / interflow / groundwater runoff.
            RS_slope = confluence.surface_confluence(RS)
            RI_slope = confluence.interflow_confluence(RI)
            RG_slope = confluence.underground_confluence_1(RG)
            R_not_river += (RS_slope + RI_slope + RG_slope)
        if not river_list:
            # Headwater channel cell: its own net rainfall is treated as
            # lateral inflow.
            # NOTE(review): ij_toid is called without `ncols` here, unlike
            # above — confirm the helper's signature.
            grid_id = ij_toid([i, j])
            RS, RI, RG = grid_runoff[grid_id]
            qlat = (RS + RI + RG) * self.U  # lateral inflow
            # All upstream (hillslope) cells, superposed, form the inflow
            # series; route it with Muskingum–Cunge.
            R = np.zeros(day_evap.size)
            for dt_id in range(1, day_evap.size):
                R[dt_id] = confluence.musking_cunge(
                    R_not_river[dt_id - 1], R_not_river[dt_id],
                    R[dt_id - 1], qlat[dt_id], fac[i, j])
            return R  # outflow series of this channel cell
        else:
            # Non-headwater cell: upstream channel cells still flow in.
            # Their outflow series, superposed, form this cell's inflow.
            R_in = np.zeros(day_evap.size)
            for ij in river_list:
                R = river_flow(ij[0], ij[1])  # recursion — core of the algorithm
                R_in += R
            # Hillslope outflow plus this cell's own runoff act as the
            # lateral inflow.
            grid_id = ij_toid([i, j])
            RS, RI, RG = grid_runoff[grid_id]  # local runoff
            qlat = R_not_river + (RS + RI + RG) * self.U  # lateral inflow
            R = np.zeros(day_evap.size)
            for dt_id in range(1, day_evap.size):
                R[dt_id] = confluence.musking_cunge(
                    R_in[dt_id - 1], R_in[dt_id], R[dt_id - 1],
                    qlat[dt_id], fac[i, j])
            return R

    # Sum the routed discharge over all requested outlet cells.
    q = np.zeros(day_evap.size)
    for ij_tuple in output_ij_list:
        q += river_flow(ij_tuple[0], ij_tuple[1])
    return q
class DNSUpdaterNG(object):
    # Regenerates DNS records from address tables embedded in a Confluence
    # page (duplicate of the class above, auto-formatted differently):
    # fetch page -> hash-compare to skip no-op runs -> parse tables with
    # BeautifulSoup -> push batched record changes via `nsupdate`.

    def __init__(self, base_url, username, password, page_id, verify_ssl=True):
        self.page_id = page_id
        self.c = Confluence(base_url, username, password,
                            verify_ssl=verify_ssl)

    def update(self):
        # Entry point: fetch, short-circuit on unchanged content, otherwise
        # parse every address table and apply the batched zone updates.
        page = self.c.get_page(self.page_id)
        if not page:
            raise Exception("Failed to fetch confluence page")
        data = BeautifulSoup(page, "lxml")
        stored_hash = None
        page_hash = hashlib.sha512(
            page.encode('ascii', errors='ignore')).hexdigest()
        try:
            hf = open(PERSISTENT_HASH_FILE)
            stored_hash = hf.read()
            hf.close()
        except:
            # Missing/unreadable hash file simply forces a full update.
            pass
        if stored_hash == page_hash:
            logger.info('equality via hash, no-op')
            return
        else:
            hf = open(PERSISTENT_HASH_FILE, 'w')
            hf.write(page_hash)
            hf.close()
        logger.info('updating DNS')
        addrtables = self._locate_addrtables(data)
        addrtable_data = []
        for addrtable in addrtables:
            for addrtable_info in self._parse_addrtable(addrtable):
                addrtable_data.append(addrtable_info)
        zone_updates = self._build_batch_update(addrtable_data)
        self._update_all_zones(zone_updates)
        logger.info('all done')

    def _update_all_zones(self, zone_updates):
        # One nsupdate batch per zone over stdin; `commands` is reset after
        # each zone so every subprocess sees only its own zone.
        commands = []
        for zonename in zone_updates.keys():
            commands.append('server 127.1')
            commands.append('zone %s' % (zonename))
            updates = zone_updates[zonename]
            for update in updates:
                # Each update is a [delete-spec, add-spec] pair.
                commands.append('update delete %s' % (update[0]))
                commands.append('update add %s' % (update[1]))
            commands.append('send')
            cmd_out = '\n'.join(commands).encode('ascii', errors='ignore')
            logger.debug(cmd_out.decode("ascii"))
            p = Popen(['nsupdate'], stdout=PIPE, stdin=PIPE, stderr=PIPE)
            stdout, stderr = p.communicate(input=cmd_out)[0:2]
            ser = stderr.decode('ascii', errors='ignore')
            if ser.strip() != '':
                logger.error("%s: %s" % (zonename, ser.strip()))
            commands = []

    def _build_batch_update(self, addrtable_data):
        # Convert parsed rows into per-zone [delete, add] pairs; '-' marks
        # "no zone configured" for a field.
        zone_updates = {}
        for item in addrtable_data:
            zi = item['_zoneinfo']
            if zi['dns_zone'] != '-' and zi['dns_zone'] not in zone_updates:
                zone_updates[zi['dns_zone']] = []
            if zi['dns_reverse4'] != '-' and zi[
                    'dns_reverse4'] not in zone_updates:
                zone_updates[zi['dns_reverse4']] = []
            if zi['dns_reverse6'] != '-' and zi[
                    'dns_reverse6'] not in zone_updates:
                zone_updates[zi['dns_reverse6']] = []
            if item['Name'] == '':
                continue
            if item['A'] != '':
                # Forward A record plus matching PTR when the address fits
                # the configured reverse zone.
                if zi['dns_zone'] != '-':
                    zone_updates[zi['dns_zone']].append([
                        "%s.%s. A" % (item['Name'], zi['dns_suffix']),
                        "%s.%s. 60 A %s" % (item['Name'], zi['dns_suffix'],
                                            item['A'])
                    ])
                if zi['dns_reverse4'] != '-':
                    this_reverse = IPy.IP(item['A']).reverseNames()[0]
                    if this_reverse.endswith(zi['dns_reverse4']):
                        zone_updates[zi['dns_reverse4']].append([
                            "%s PTR" % (IPy.IP(item['A']).reverseNames()[0]),
                            "%s 60 PTR %s.%s." %
                            (IPy.IP(item['A']).reverseNames()[0],
                             item['Name'], zi['dns_suffix'])
                        ])
                    else:
                        logger.error("IP address %s don't match reverse %s" %
                                     (item['A'], zi['dns_reverse4']))
            if item['AAAA'] != '':
                # Same for IPv6; a trailing /prefix on the cell is dropped.
                ipv6 = item['AAAA']
                if '/' in ipv6:
                    ipv6, crap = ipv6.split('/', 1)
                if zi['dns_zone'] != '-':
                    zone_updates[zi['dns_zone']].append([
                        "%s.%s. AAAA" % (item['Name'], zi['dns_suffix']),
                        "%s.%s. 60 AAAA %s" % (item['Name'],
                                               zi['dns_suffix'], ipv6)
                    ])
                if zi['dns_reverse6'] != '-':
                    this_reverse = IPy.IP(ipv6).reverseNames()[0]
                    if this_reverse.endswith(zi['dns_reverse6']):
                        zone_updates[zi['dns_reverse6']].append([
                            "%s PTR" % (IPy.IP(ipv6).reverseNames()[0]),
                            "%s 60 PTR %s.%s." %
                            (IPy.IP(ipv6).reverseNames()[0], item['Name'],
                             zi['dns_suffix'])
                        ])
                    else:
                        logger.error("IPv6 address %s don't match reverse %s"
                                     % (ipv6, zi['dns_reverse6']))
            # NOTE(review): CNAME/SRV below index zone_updates[zi['dns_zone']]
            # without the "'-'" guard used above — a row whose zone is '-'
            # but that carries CNAME/SRV entries would raise KeyError.
            for cname in item['CNAME']:
                zone_updates[zi['dns_zone']].append([
                    "%s 60 CNAME" % (cname),
                    "%s 60 CNAME %s.%s." % (cname, item['Name'],
                                            zi['dns_suffix'])
                ])
            for srv in item['SRV']:
                # SRV cells are "description:port" pairs.
                srv_components = srv.split(':')
                if len(srv_components) != 2:
                    continue
                srv_desc, srv_port = srv_components
                zone_updates[zi['dns_zone']].append([
                    "%s.%s SRV 0 0 %s %s.%s." %
                    (srv_desc, zi['dns_zone'], srv_port, item['Name'],
                     zi['dns_suffix']),
                    "%s.%s 60 SRV 0 0 %s %s.%s." %
                    (srv_desc, zi['dns_zone'], srv_port, item['Name'],
                     zi['dns_suffix'])
                ])
        for zone, data in zone_updates.items():
            for item in data:
                logger.debug("%s" % (item, ))
        return zone_updates

    def _parse_addrtable(self, addrtable):
        # Parse one HTML table into row dicts; $DNSGEN-SUBZONE control cells
        # set the zone info attached to all subsequent rows.
        ret = []
        dns_suffix = None
        dns_zone = None
        dns_reverse4 = None
        dns_reverse6 = None
        keys = []
        for row in addrtable.find_all('tr'):
            rowcells = []
            if row.find('th'):
                # Header row defines the column keys for later zip().
                for cell in row.find_all('th'):
                    keys.append(cell.getText())
                continue
            for cell in row.find_all('td'):
                plaintext = cell.getText()
                if plaintext.startswith('$'):
                    # Control cell: "$DNSGEN-<COMMAND> [args]".
                    argv = plaintext.strip('$').split(' ', 1)
                    dnsgen_command = argv[0]
                    if dnsgen_command == 'DNSGEN-ADDRTABLE':
                        pass
                    elif dnsgen_command == 'DNSGEN-SUBZONE' and len(argv) > 1:
                        args = argv[1].split(' ')
                        if len(args) != 4:
                            break
                        dns_suffix, dns_zone, dns_reverse4, dns_reverse6 = args
                    break
                else:
                    # Strip NBSPs that Confluence inserts into cells.
                    rowcells.append(plaintext.replace('\xa0', ' ').strip())
            if len(rowcells) == 0:
                continue
            rowdata = dict(zip(keys, rowcells))
            if rowdata['CNAME'] == '':
                rowdata['CNAME'] = []
            else:
                rowdata['CNAME'] = rowdata['CNAME'].split()
            if rowdata['SRV'] == '':
                rowdata['SRV'] = []
            else:
                rowdata['SRV'] = rowdata['SRV'].split(' ')
            rowdata['_zoneinfo'] = {
                'dns_zone': dns_zone,
                'dns_suffix': dns_suffix,
                'dns_reverse4': dns_reverse4,
                'dns_reverse6': dns_reverse6
            }
            ret.append(rowdata)
        return ret

    def _locate_addrtables(self, bs_data):
        # Find tables whose first cell carries the $DNSGEN-ADDRTABLE marker.
        ret = []
        tbls = bs_data.find_all('table')
        for tbl in tbls:
            try:
                first_td = tbl.find('td')
                if first_td is not None:
                    hint = first_td.next_element.getText()
                    if hint == '$DNSGEN-ADDRTABLE':
                        ret.append(tbl)
                # NOTE(review): this returns inside the first loop iteration,
                # so only the first table is ever inspected — confirm whether
                # the return belongs after the loop.
                return ret
            except:
                pass
def main() -> None:
    """CLI entry point: create or edit a Confluence page from a file or TEXT.

    Fixed: when --space was omitted the default was built as
    ``'~' + args.user`` even if --user was also omitted (args.user is None),
    raising TypeError; that case now produces a clean argparse error.
    """
    usage = (
        '\n{0} --baseurl BASEURL [--user USER] ([--space SPACE] --title TITLE | --pageid PAGEID) [--new-title NEW_TITLE] ([--file FILE] | --text TEXT)'
        '\n{0} --baseurl BASEURL [--user USER] ([--space SPACE] --new-title NEW_TITLE ([--file FILE] | --text TEXT)'
        '\n{0} (-h | --help)')
    parser = argparse.ArgumentParser(
        usage=usage.format(os.path.basename(sys.argv[0])))
    parser.add_argument(
        "--baseurl",
        required=True,
        help='Conflunce base URL. Format: https://example.com/confluence')
    parser.add_argument(
        "--user",
        default=None,
        help=
        "User name to log into Confluence. Default: from the environment or password database."
    )
    parser.add_argument("--pageid", type=int,
                        help="Conflunce page id to edit page.")
    parser.add_argument(
        "--space",
        default=None,
        help=
        "Conflunce space key to create/edit page. Default: the user's home.")
    parser.add_argument("--title", default=None,
                        help="Conflunce page title to edit page.")
    parser.add_argument(
        "--new-title",
        default=None,
        help=
        "New title to create/edit page. By default title is not changed on edit page."
    )
    group = parser.add_mutually_exclusive_group()
    group.add_argument(
        "--file",
        type=argparse.FileType('r'),
        default=sys.stdin,
        help="Write the content of FILE to the confluence page. Default: STDIN."
    )
    group.add_argument(
        "--text",
        help=
        "Write the TEXT in Confluence Storage Format (XHTML-like) to the confluence page."
    )
    args = parser.parse_args()

    # At least one of pageid/title/new-title must identify the target page.
    if (args.pageid, args.title, args.new_title).count(None) == 3:
        parser.print_usage()
        exit()

    # Default space is the user's personal space — which needs a user name.
    if args.space is None:
        if args.user is None:
            parser.error('--space is required when --user is not given')
        args.space = '~' + args.user

    if args.text is not None:
        content = args.text
    else:
        content = args.file.read()

    confluence = Confluence(args.baseurl, args.user)
    confluence.post_page(args.pageid, args.space, args.title, args.new_title,
                         content)
class DNSUpdater(object):
    """Generate BIND zone files from network tables stored in a Confluence page.

    The page is expected to contain one table describing the zones (ZONE /
    hostmaster / master / servers columns) and further tables listing hosts
    per network (Name / v4 / v6 / CNAME / Description columns), where rows
    whose Name starts with '# ' open a new network section.
    """

    def __init__(self, base_url, username, password, page_id, verify_ssl=True):
        """Remember the page id and open the Confluence client.

        :param verify_ssl: pass False to skip TLS certificate verification.
        """
        self.page_id = page_id
        self.c = Confluence(base_url, username, password, verify_ssl=verify_ssl)

    def update(self):
        """Fetch the tables and (re)write every forward and reverse zone file."""
        t, zonelist = self.get_table()
        if not t:
            logger.error("Cannot get network list.")
            # BUGFIX: previously fell through and crashed on the None table.
            return
        records = self.generate_forward_records(t, zones=zonelist)
        reverses = self.generate_reverse_records(zones=zonelist, records=records)
        for zone in records:
            self.generate_zonefile(zone, records[zone]['records'], records[zone]['zone'])
        for zone in reverses:
            self.generate_zonefile(zone, reverses[zone]['records'], reverses[zone]['zone'])

    def generate_zonefile(self, name, records, zone):
        """Write zones/<name> with SOA, NS, the given records and a $INCLUDE.

        :param name: zone name, used as the output file name.
        :param records: iterable of record objects/strings, one per line.
        :param zone: zone metadata dict with 'master', 'hostmaster', 'servers'.
        """
        path = os.path.join(os.path.dirname(__file__), 'zones/%s' % name)
        # BUGFIX: context manager guarantees the file handle is closed.
        with open(path, 'w') as zonefile:
            out = lambda x: zonefile.write("%s\n" % x)
            out(";; %s" % zone)
            out(";; Autogenerated by %s at %s" % (__file__, datetime.now()))
            out("$TTL 60")
            # time.time() as the serial: monotonically increasing per run.
            out("@ IN SOA %s. %s. %d 3H 2H 2W 6H" % (zone['master'].rstrip('.'),
                                                     zone['hostmaster'].rstrip('.'),
                                                     time.time()))
            out("")
            for dns_server in zone['servers']:
                out("@ IN NS %s." % dns_server.rstrip('.'))
            out("")
            for record in records:
                out(record)
            out("")
            # Hand-maintained extras live next to the generated data.
            out("$INCLUDE /etc/bind/zones/%s.manual" % name.rstrip('.'))

    def get_table(self):
        """Parse the Confluence page into (networks, zones).

        Returns a dict mapping network name -> list of host rows, and a dict
        mapping zone name -> {'hostmaster', 'master', 'servers'}. On failure
        to fetch the page, returns (None, None).
        """
        page = self.c.get_page(self.page_id)
        if not page:
            logger.error("Cannot get page %s" % self.page_id)
            # BUGFIX: was a bare `return` (None) while update() unpacks two
            # values, which raised TypeError before the error was handled.
            return None, None
        soup = BeautifulSoup(page, 'lxml')
        networks = {}
        zones = {}
        network = None
        for table in soup.find_all('table'):
            lines = parse_xml_table(repr(table))
            if 'ZONE' in lines[0]:
                # Zone-definition table: one zone per row.
                for line in lines:
                    if not 'ZONE' in line or line['ZONE'] is None:
                        continue
                    zone_name = line[_('ZONE')].split("#", 1)[-1].strip()
                    zones[zone_name] = {'hostmaster': line[_('hostmaster')].strip(),
                                        'master': line[_('master')].strip(),
                                        'servers': [x.strip() for x in fix_confluence_spaces(line[_('servers')]).split()]}
                continue
            # Host tables are recognized by a '# network' marker in row 0.
            if _('Name') not in lines[0] or lines[0][_('Name')] is None or not lines[0][_('Name')].startswith('# '):
                continue
            for line in lines:
                skip = False
                for item in ['Name', 'v4', 'v6', 'CNAME', 'Description']:
                    item = _(item)
                    if not item in line or line[item] is None:
                        skip = True
                if skip:
                    # Incomplete rows either open a new network section
                    # ('# <name>') or are ignored.
                    if _('Name') in line and line[_('Name')] and line[_('Name')].startswith("# "):
                        network = line[_('Name')].split("#", 1)[1].strip()
                        if network not in networks:
                            networks[network] = []
                    continue
                networks[network].append(line)
        return networks, zones

    def generate_forward_records(self, table, zones):
        """Build A/AAAA/CNAME records for every non-.arpa zone.

        :param table: networks dict as produced by get_table().
        :param zones: zone-metadata dict as produced by get_table().
        :return: dict zone -> {'records': [Record], 'keys': [names], 'zone': meta}.
        """
        zones = {k: {'records': [], 'keys': [], 'zone': v}
                 for k, v in zones.items()
                 if not k.endswith('.arpa') and not k.endswith('.arpa.')}
        for network, items in table.items():
            zone = find_longest_match(network, zones.keys())
            if not zone:
                logger.error("Cannot find zone for network %s" % network)
                continue
            # Part of the network name that is *not* covered by the zone.
            network_part = network[:len(network) - len(zone)].strip('.')
            for item in items:
                if network_part:
                    name = "%s.%s" % (item[_('Name')], network_part)
                else:
                    name = item[_('Name')]
                zones[zone]['keys'].append(name)
                if 'v4' in item and item['v4'] is not None:
                    if is_valid_ipv4_address(item['v4']):
                        zones[zone]['records'].append(Record(name, 'A', item['v4'], zone))
                if 'v6' in item and item['v6'] is not None:
                    if is_valid_ipv6_address(item['v6']):
                        zones[zone]['records'].append(Record(name, 'AAAA', item['v6'], zone))
                if 'CNAME' in item and item['CNAME'] is not None:
                    for cname in item['CNAME'].split():
                        cname = cname.strip().rstrip('.')
                        cname_network = find_longest_match(cname, zones.keys())
                        if not cname_network:
                            # Unqualified alias: anchor it in the host's zone.
                            logger.info("Network not found for %s" % cname)
                            cname_network = zone
                            cname = '%s.%s' % (cname, cname_network)
                        cname_part = cname[:len(cname) - len(cname_network) - 1]
                        if cname_part in zones[cname_network]['keys']:
                            # A name cannot carry a CNAME next to other records.
                            logger.error("Cannot add CNAME for %s, address have already other records!" % cname)
                            continue
                        zones[cname_network]['records'].append(Record(cname_part, 'CNAME', "%s.%s." % (name, zone), cname_network))
                        zones[cname_network]['keys'].append(cname_part)
        return zones

    def generate_reverse_records(self, zones, records):
        """Build PTR records for every .arpa zone from the forward records.

        :param zones: zone-metadata dict (only .arpa entries are used).
        :param records: output of generate_forward_records().
        :return: dict reverse-zone -> {'records': [Record], 'keys': [], 'zone': meta}.
        """
        zones = {"%s." % k.rstrip('.'): {'records': [], 'keys': [], 'zone': v}
                 for k, v in zones.items()
                 if k.endswith('.arpa') or k.endswith('.arpa.')}
        for network in records:
            for record in records[network]['records']:
                # Only address records get a reverse mapping.
                if record.type not in ['A', 'AAAA']:
                    continue
                reverse = record.reverse()
                reverse_zone = find_longest_match(reverse, zones.keys())
                if not reverse_zone:
                    logger.error("Cannot find reverse zone for %s (%s)" % (reverse, record.fqdn))
                    continue
                key = reverse[:len(reverse) - len(reverse_zone) - 1]
                zones[reverse_zone]['records'].append(Record(key, 'PTR', record.fqdn, reverse_zone))
        return zones
def __init__(self, base_url, username, password, page_id, verify_ssl=True):
    """Remember the target page id and open a Confluence client.

    :param base_url: base URL of the Confluence instance.
    :param username: login user name.
    :param password: login password.
    :param page_id: id of the page this updater works on.
    :param verify_ssl: pass False to skip TLS certificate verification.
    """
    self.page_id = page_id
    # Client handle used for all subsequent page operations.
    self.c = Confluence(base_url, username, password, verify_ssl=verify_ssl)
def test_Confluence(self):
    """Smoke-test the package entry point: it must return its fixed greeting."""
    expected = (
        "This is Main module for Confluence Project that do nothing.\n"
        "Read more about DevOpsHQ Community here: "
        "https://github.com/devopshq/ExampleProject"
    )
    assert Confluence.Confluence() == expected