class ConfluenceManager:
    """Wraps a remote Confluence site plus two local caches: raw pages and
    a simplified-text variant.

    Page keys are either a plain page id or a (space, title) tuple.
    """

    def __init__(self, credentials, path):
        # credentials = (url, username, password)
        self.remote = Confluence(url=credentials[0],
                                 username=credentials[1],
                                 password=credentials[2])

        def converter_to_local_key(key):
            # Tuple keys (space, title) and plain id keys must map to
            # distinct local cache entries.
            if isinstance(key, tuple):
                return 'name_' + key[0] + key[1]
            return 'id_' + key

        self.cached = CachedDataProvider(
            self, path, converter_to_local_key=converter_to_local_key)
        self.simplified = CachedDataProvider(
            ConfluenceSimplifiedTextProvider(self.cached),
            path + '.simplified',
            converter_to_local_key=converter_to_local_key)

    def extract_url(self, raw_page):
        """Absolute web URL for a raw page record."""
        return self.remote.url + raw_page['_links']['webui']

    def extract_title(self, raw_page):
        """Title of a raw page record."""
        return raw_page['title']

    def __get_page_id(self, id_):
        # Resolve a (space, title) tuple to a page id; plain ids pass through.
        if isinstance(id_, tuple):
            return self.remote.get_page_id(id_[0], id_[1])
        return id_

    def get_page(self, id_, full_info=True):
        """Fetch a page; full_info additionally pulls version and body."""
        expand = ('version,body.storage,children.page.id'
                  if full_info else 'children.page.id')
        return self.remote.get_page_by_id(self.__get_page_id(id_),
                                          expand=expand)

    def get_simple_text(self, id_):
        """Simplified (cached) text of a page."""
        return self.simplified[id_]

    def get_page_tree_ids(self, id_):
        """Ids of the page and all of its descendants, depth-first."""
        page = self.get_page(id_, full_info=False)
        tree = [page['id']]
        for child in page['children']['page']['results']:
            tree += self.get_page_tree_ids(child['id'])
        return tree

    def __getitem__(self, key):
        """Full remote page for an id or a (space, title) tuple."""
        page_id = (self.remote.get_page_id(key[0], key[1])
                   if isinstance(key, tuple) else key)
        return self.remote.get_page_by_id(
            page_id, expand='version,body.storage,children.page.id')
def update_html_content(login, passw, html):
    """Replace the Jenkins page body with *html*; return True on success."""
    client = Confluence(BASE_URL, login, passw)
    # The page title must be preserved, so fetch the current page first.
    current = client.get_page_by_id(page_id=JENKINS_PAGE_ID,
                                    expand='body.storage')
    response = client.update_existing_page(page_id=JENKINS_PAGE_ID,
                                           body=html,
                                           title=current['title'])
    return response.ok
def check_last_updated_by(page_id: int, username_to_check: str,
                          confluence_instance: "Confluence") -> "tuple[bool, str]":
    """Checks which user last updated `page_id`.

    If it's not `username_to_check` — return False

    :param page_id: ID of the page to check
    :param username_to_check: compare this username against the one that
        last updated the page
    :param confluence_instance: instance of Confluence to run the check
    :return: (match, last_updater) — *match* is True when the page was last
        updated by *username_to_check*; *last_updater* is the username
        (server) or email (cloud) of the actual last updater
    """
    page_last_updated_by = confluence_instance.get_page_by_id(
        page_id, expand="version")["version"]["by"]
    # Cloud identifies users by email; server/DC by username.
    if confluence_instance.api_version == "cloud":
        page_last_updated_by = page_last_updated_by[
            "email"]  # pragma: no cover
    else:
        page_last_updated_by = page_last_updated_by["username"]
    return page_last_updated_by == username_to_check, page_last_updated_by
def main(oauth_token, space, parent_page, url, user_id, overwrite=True):
    """Publish the current working directory to Confluence under *parent_page*.

    Sets the module globals LUA, CONF and SPACE used by the page builder.
    When *overwrite* is true, every existing child of the parent page is
    removed (recursively) before publishing.
    """
    global LUA, CONF, SPACE
    SPACE = space
    # The lua filter ships alongside this module.
    LUA = __file__.replace("__main__.py", "confluence.lua")
    CONF = Confluence(url, user_id, oauth_token)

    parent = CONF.get_page_by_id(parent_page)
    # The API reports a missing page via a 'statusCode' entry in the response.
    if 'statusCode' in parent:
        raise Exception("parent_page %s not found" % (parent_page))

    if overwrite:
        for child in CONF.get_child_pages(parent_page):
            CONF.remove_page(child['id'], recursive=True)

    source_folder = os.getcwd()
    create_page(source_folder, parent_page)
def confluence_get_page():
    """Read connection settings from config.conf, fetch the birthday page
    from Confluence and return its first HTML table as a pandas DataFrame.

    Exits the process with status 1 when config.conf is missing.
    """
    # Config lives next to this module.
    base_path = os.path.dirname(os.path.abspath(__file__))
    config_path = os.path.join(base_path, "config.conf")
    if os.path.exists(config_path):
        cfg = configparser.ConfigParser()
        cfg.read(config_path)
    else:
        print(
            "Конфиг не обнаружен! см. документацию по настройке https://github.com/AndreyAgafonov/python/tree/master/birthday"
        )
        sys.exit(1)

    # Parse the config file.
    section = "confluence"
    confluence_url = cfg.get(section, "confluence_connection_string")
    confluence_pageid = cfg.get(section, "confluence_pageid")
    confluence_user = cfg.get(section, "confluence_user")
    confluence_passwd = cfg.get(section, "confluence_passwd")
    section = "general"
    general_col_name = cfg.get(section, "column_date_of_birth")

    # Connect to Confluence and fetch the page body in storage format.
    confluence = Confluence(url=confluence_url,
                            username=confluence_user,
                            password=confluence_passwd)
    content1 = confluence.get_page_by_id(confluence_pageid,
                                         expand="body.storage")

    # First column is a sequence number (used as the index); the header is
    # in the first row.  The birthday column is forced to str so dates are
    # not reinterpreted by the parser.
    df_list = pd.read_html(content1['body']['storage']['value'],
                           header=0,
                           index_col=0,
                           na_values=['No Acquirer'],
                           converters={general_col_name: str})
    table = df_list[0]
    table.index.name = "Number"
    return table
class ConfluencePlayground():
    """Small sandbox around a Confluence site configured via credentials.ini."""

    def __init__(self):
        # Credentials come from the [CONFLUENCE] section of credentials.ini.
        settings = configparser.ConfigParser()
        settings.read('credentials.ini')
        self.site = Confluence(url=settings.get('CONFLUENCE', 'url'),
                               username=settings.get('CONFLUENCE', 'username'),
                               password=settings.get('CONFLUENCE', 'password'))

    def get_rnu_in_page(self, page_id, rnu_id):
        """Return the JSON data of the 'QC - Read and Understood' macro
        identified by *rnu_id* on page *page_id*."""
        # Fetch the rendered (export view) HTML of the page.
        rendered = self.site.get_page_by_id(page_id, expand='body.export_view')
        html = rendered['body']['export_view']['value']
        markup = BeautifulSoup(html, "lxml")
        # The macro renders a marker <div> whose text embeds the id; the
        # JSON payload sits in a <span> under the same parent element.
        marker = markup.find(
            "div", text="read-and-understood-data-{id}".format(id=rnu_id))
        payload = marker.parent.find("span")
        return json.loads(payload.get_text())
def get_html_content(login, passw):
    """Return the storage-format HTML body of the Jenkins page."""
    client = Confluence(BASE_URL, login, passw)
    page = client.get_page_by_id(page_id=JENKINS_PAGE_ID,
                                 expand='body.storage')
    return page['body']['storage']['value']
from pprint import pprint
from atlassian import Confluence

confluence = Confluence(
    url='http://localhost:8090',
    username='******',
    password='******')

# If you know Space and Title
content1 = confluence.get_page_by_title(
    space='SPACE',  # fixed: a comma was missing between the keyword args
    title='page title')
pprint(content1)

# If you know page_id of the page
content2 = confluence.get_page_by_id(page_id=1123123123)
pprint(content2)
class ConfluenceSearch(MycroftSkill):
    """Mycroft skill: search Confluence page titles and post matching
    page links to a Telegram chat, two results at a time."""

    def __init__(self):
        MycroftSkill.__init__(self)
        # Becomes a title -> URL dict after a search (starts as '' before one).
        self.all_confluence_search_results = ''
        # Titles already sent to Telegram, pending removal from the dict above.
        self.delete_these_results = []
        # Search results, optionally narrowed by parent page.
        self.parse_these_results = {}

    def _establish_confluence_connection(self):
        """
        Setup the confluence object to be interacted with later
        :return:
        """
        # URL needs to be in the settingsmeta.yaml
        # confluence username needs to be in the settingsmeta.yaml
        # confluence password needs to be in the settingsmeta.yaml
        self.username = self.settings.get('user_name')
        self.password = self.settings.get('password')
        self.confluence_url = self.settings.get('confluence_url')
        self.confluence = Confluence(url=self.confluence_url,
                                     username=self.username,
                                     password=self.password)

    def _setup_telegram_bot(self):
        """
        Sets up the telegram bot key and the channel id to post to
        :return: Nothing
        """
        # Setup the telegram bot
        # botkey needs to be in the settingsmeta.yaml
        self.botkey = self.settings.get('telegram_bot_key')
        self.updater = Updater(token=self.botkey, use_context=True)
        # chat_id needs to be in the settingsmeta.yaml
        self.chat_id = self.settings.get('telegram_chat_id')

    def create_url_dict(self, confluence_search_results):
        """
        This is used to create a dictionary of results so that we can parse
        it later
        :param confluence_search_results: This is a json dict that is
            obtained from confluence
        :return: A simplified dictionary with the key equal to the Page
            Title and the URL for the user to click on
        """
        results_dict = {}
        for individual_result in confluence_search_results['results']:
            # Build an absolute link from the site URL plus the relative
            # webui path of each result.
            results_dict[individual_result[
                'title']] = self.confluence.url + individual_result['_links'][
                    'webui']
        return results_dict

    def handle_display_more_context(self):
        """
        This function deletes already processed items from the
        all_confluence_search_results object.  Then if there are items left
        in the list, it calls the process_url_list() to create a loop
        :return:
        """
        for entry in self.delete_these_results:
            del self.all_confluence_search_results[entry]
        if len(self.all_confluence_search_results) > 0:
            self.process_url_list()

    def process_url_list(self):
        """
        This function loops over the confluence list in order to send them
        to Telegram.  It also tracks the items that have sent to Telegram.
        Finally, it asks the user whether they want more urls to be
        displayed
        :return: Nothing
        """
        self.delete_these_results = []
        for index, title in enumerate(self.all_confluence_search_results):
            text = "%s \n%s" % (title,
                                self.all_confluence_search_results[title])
            self.send_message_to_chat(text)
            self.delete_these_results.append(title)
            # Only send 2 entries at a time. Once we get to the second
            # index, break the loop
            if (index + 1) % 2 == 0:
                break
        # wait for a few seconds to let the user review the results so they
        # know whether they got the page they wanted
        time.sleep(5)
        if len(self.all_confluence_search_results) > 2:
            response = self.get_response(
                "Would you like to display more results?")
            if response == "no" or response == "nope" or response is None:
                exit()
            self.handle_display_more_context()

    def send_message_to_chat(self, text):
        # Post *text* to the configured Telegram chat.
        self.updater.bot.send_message(chat_id=self.chat_id, text=text)

    @intent_file_handler("search.confluence.intent")
    def handle_search_confluence_title(self, message):
        """
        This is the main function.
        It searches confluence for a title containing the user-provided
        search terms.  It optionally filters the results based on the
        parent page specified by the user
        :param message: Mycroft data
        :return: Nothing
        """
        self._setup_telegram_bot()
        self._establish_confluence_connection()
        user_specified_title = message.data.get('page')
        # The parent page is optional, it will be None if not determined by
        # the intent
        parent_page = message.data.get('parentpage')
        url = 'rest/api/content/search'
        params = {}
        params['cql'] = 'type={type} AND title~"{title}"'.format(
            type='page', title=user_specified_title)
        params['start'] = 0
        params['limit'] = 10
        # call the atlassian library to get a list of all the possible
        # title matches
        response = self.confluence.get(url, params=params)
        # Do some extra work if we need to narrow the results by the parent
        # page
        if parent_page is not None:
            self.parse_these_results['results'] = []
            for individual_page_results in response['results']:
                page_id = individual_page_results['id']
                # get the parent page information
                parent_content = ((self.confluence.get_page_by_id(
                    page_id=page_id, expand='ancestors').get('ancestors')))
                # loop over the parent page titles and see if they match
                # the user's utterance
                for parent_page_results in parent_content:
                    if parent_page.lower(
                    ) == parent_page_results['title'].lower():
                        self.parse_these_results['results'].append(
                            individual_page_results)
            text = "Under the heading: %s \nI found the following results for the search containing: %s " \
                % (parent_page.upper(), user_specified_title.upper())
        else:
            self.parse_these_results = response
            text = "I found the following results for the search containing: %s" % user_specified_title.upper(
            )
        self.all_confluence_search_results = self.create_url_dict(
            self.parse_these_results)
        self.send_message_to_chat(text)
        self.process_url_list()
expand=None, include_archived_spaces=None, excerpt=None) page_list = page_list1['results'] #검색한 Wiki 페이지의 ID들을 저장할 변수 pageid = [] for i in range(0, len(page_list)): pageid.append(page_list[i]['content']['id']) #전체 테스트 결과를 저장할 변수 test_result = [] for i in range(0, len(pageid)): page_info_body1 = confluence.get_page_by_id(pageid[i], expand='body.storage') soup = BeautifulSoup(page_info_body1['body']['storage']['value'], 'html.parser') if soup.find('table'): table = soup.find_all('table') #페이지 내에서 두번째 테이블의 데이터를 저장 for j in range(0, len(table)): temp = [] table_th = table[j].find_all('th') table_td = table[j].find_all('td') if len(table_th ) != 0 and table_th[0].text == 'SDK Name(Device)': for k in range(0, len(table_th)): if table_th[k].text == 'SDK Name(Device)': temp.append(table_td[k].text) if table_th[k].text == 'SDK Name(Platform)': temp.append(table_td[k].text)
from atlassian import Confluence from bs4 import BeautifulSoup from datetime import datetime #Wiki auth confluence = Confluence(url='https://wiki.telechips.com:8443', username='******', password='******') #현재 날짜 저장 day = datetime.now() today = day.day #wiki page source code get page_info_body1 = confluence.get_page_by_id( 169361671, expand='space,body.view,version,container') #페이지의 항목만 html 형식으로 Parse temp = page_info_body1['body']['view']['value'] soup = BeautifulSoup(temp, 'html.parser') #QAC 사용현황 Table 코드만 find table = soup.find_all('table') #ip를 저장할 리스트 선언 text = [] #for문으로 현재 날짜 행의 td 태그의 text만 저장, qac_no 에는 qac 서버 번호를 입력하면 됨 QAC2=2, QAC4=3, QAC5=4 qac_no = 4 for j in range(0, len(table[qac_no].find_all('td'))): if table[qac_no].find_all('td')[j].text == str(today):
class Readme2Confluence:
    """ Main class that will create the Confluence page from Markdown """
    def __init__(self, url, username, password, space):
        # NOTE(review): only the first 500 spaces are inspected, so a space
        # beyond that limit would be rejected even if it exists — confirm.
        self.confluence = Confluence(url, username=username, password=password)
        spaces = self.confluence.get_all_spaces(start=0, limit=500)
        if any(s for s in spaces if s["name"] == space):
            self.space = space
        else:
            raise ValueError("{} is not valid Confluence Space".format(space))

    def get_page_id(self, title):
        """ Retrieve the ID of a page using it's title

        This is basically a small wrapper around confluence.get_page_id().
        The main reason for the wrapper is to better handle pages that
        aren't found by raising an exception """
        page_id = self.confluence.get_page_id(self.space, title)
        if page_id is None:
            raise ValueError("{} is not a valid page title in {}".format(
                title, self.space))
        return page_id

    def get_page_html(self, title):
        """ Retrieve a confluence page by title and return it.

        NOTE(review): despite the historical name, this returns the page
        resource from get_page_by_id() as-is; no ``expand`` is requested,
        so the HTML body is not included in the result. """
        page_id = self.get_page_id(title)
        return self.confluence.get_page_by_id(page_id)

    def send2confluence(self, title, parent_title=None, md_file=None,
                        md_text=""):
        """ Method that creates the confluence page.

        Content comes from *md_file* when given, otherwise *md_text*.
        Page will either be created or updated depending on what is
        required.  *parent_title*, when given, must name an existing
        page. """
        if md_file is None:
            body = md2html(md_text)
        else:
            body = md2html(get_md_text(md_file))
        parent_id = None if parent_title is None else self.get_page_id(
            parent_title)
        # Existing page -> update in place; otherwise create a new one.
        page_id = self.confluence.get_page_id(self.space, title)
        if page_id is None:
            ret = self.confluence.create_page(self.space, title, body,
                                              parent_id=parent_id,
                                              type='page',
                                              representation='storage')
        else:
            ret = self.confluence.update_page(page_id, title, body,
                                              parent_id=parent_id,
                                              type='page',
                                              representation='storage')
        return ret
def update_confluence(words, progress):
    """Segment every Confluence article listed in data\\id_confluence.txt,
    accumulate document frequencies into *words* (a Counter) and dump the
    per-article data to data/seg/articles_confluence.json.

    Side effects: mutates *words* in place and records the number of
    processed articles in progress['confluence'].
    """
    confluence = Confluence(url='http://docs.fscut.com',
                            username='******',
                            password='******')

    confluence_dicts = []
    # Read the page ids (the first token is a header and is skipped).
    with open(r'data\id_confluence.txt', "r", encoding="utf-8") as f:
        ids = f.read().split()
    for page_id in ids[1:]:
        # Fetch the rendered page for this id.
        page_info = confluence.get_page_by_id(
            page_id, expand='space,body.view,version,container')
        content = page_info['body']['view']['value']
        soup = BeautifulSoup(content, 'lxml')
        article = soup.text
        title = page_info['title']
        # Skip pages whose title is too short to carry date/author info.
        if len(title) < 15:
            continue
        confluence_dict = {}
        print(page_id)

        # Word segmentation, then normalise counts to frequencies.
        seg = segmentation(article.split())
        print(seg)
        word_count = Counter(seg)
        wordn = sum(word_count.values())
        for word in word_count:
            word_count[word] = word_count[word] / wordn
        # Document frequency: each distinct word counts once per article.
        words += Counter(word_count.keys())

        confluence_dict['id'] = page_id
        confluence_dict['seg'] = word_count
        confluence_dict['title'] = title
        confluence_dict['text'] = article
        # Title layout: "<date> ... (<author>-..." — parse date and author.
        confluence_dict['date'] = title.split(' ')[0]
        print(confluence_dict['date'])
        confluence_dict['author'] = re.split('[(-]', title)[-2]
        print(confluence_dict['author'])
        confluence_dict[
            'url'] = 'http://docs.fscut.com/pages/viewpage.action?pageId=' + page_id
        confluence_dicts.append(confluence_dict)

    confluence_df = pd.DataFrame(confluence_dicts)
    print(confluence_df)
    confluence_df.to_json('data/seg/articles_confluence.json')
    progress['confluence'] = len(confluence_dicts)
# Load saved settings, if any, from the local .keys JSON file; anything
# missing is prompted for interactively.
keys = {}
if path.exists('.keys'):
    with open('.keys') as f:
        keys = json.load(f)
url = keys.get('url') or input('Confluence URL:').strip()
email = keys.get('email') or input('Email address:').strip()
api_key = keys.get('api_key') or input(
    'API Key for Atlassian (https://id.atlassian.com/manage/api-tokens):'
).strip()
confluence = Confluence(url=url, username=email, password=api_key)
parent = keys.get('parent') or int(input("Parent page ID:").strip())
parent_page = confluence.get_page_by_id(parent)
# A non-dict result means the lookup failed (e.g. bad credentials):
# re-prompt for email/API key and retry until the page loads.
while not isinstance(parent_page, dict):
    email = input('Email address:').strip()
    api_key = input(
        'API Key for Atlassian (https://id.atlassian.com/manage/api-tokens):'
    ).strip()
    confluence = Confluence(url=url, username=email, password=api_key)
    parent_page = confluence.get_page_by_id(parent)
# Confirm the parent page with the user; on "no", ask for a different
# space/parent and check again.  NOTE(review): the space entered here is
# not used afterwards in this snippet — verify against later code.
while not input(f"Create page under {parent_page['title']}? [y/n]:").strip(
).lower().startswith('y'):
    space = input("Confluence Space ID:").strip()
    parent = input("Parent page ID:").strip()
    parent_page = confluence.get_page_by_id(parent)
boards = None
class ConfluencePublisher():
    """Publishes a tree of Markdown files (Hugo-style, with _index.md
    folder indexes) to Confluence.  Published ids and content hashes are
    tracked in a local key-value store so unchanged files are skipped and
    files removed locally can be deleted remotely."""

    def __init__(
            self, url, username, api_token,
            page_title_prefix, markdown_dir, db_path, space, parent_pageid,
            force_update=False, force_delete=False, skip_update=False,
            verbose=False):
        self.api = Confluence(url=url, username=username, password=api_token)
        self.page_title_prefix = page_title_prefix
        self.markdown_dir = markdown_dir
        # Local store mapping file path -> {'id', 'title', 'sha256'}.
        self.kv = KeyValue(db_path)
        self.space = space
        self.parent_pageid = parent_pageid
        self.force_update = force_update
        self.force_delete = force_delete
        self.skip_update = skip_update
        self.confluence_renderer = ConfluenceRenderer(verbose)
        # Markdown -> Confluence storage-format pipeline.
        self.renderer = mistune.create_markdown(
            renderer=self.confluence_renderer,
            plugins=[
                plugin_front_matter,
                DirectiveInclude(),
                HugoRefLinkPlugin(self.markdown_dir),
                'strikethrough',
                'footnotes',
                'table',
                'url',
                Admonition(),
                plugin_html_comment,
            ]
        )

    def __update_page(self, space, parentid, filepath, autoindex=False):
        """Render *filepath* and create/update its Confluence page under
        *parentid*.  With autoindex=True a generated index stands in for a
        missing _index.md.  Returns the page id, or None when the
        create/update call failed."""
        metadata = self.kv.load(filepath)
        current_title = metadata['title']
        current_hash = metadata['sha256']
        sha_hash = get_file_sha256(filepath)

        # --- Render (BEGIN)
        body = ''
        state = {'front_matter': {}}
        if autoindex:
            # Synthesised folder index: title derives from the directory name.
            body = generate_autoindex()
            state['front_matter']['title'] = \
                os.path.basename(os.path.dirname(filepath)).title()
        else:
            if filepath.endswith("_index.md"):
                body = generate_autoindex()
            body += self.renderer.read(filepath, state)

        title = '{}{}'.format(self.page_title_prefix,
                              state['front_matter']['title'])
        # --- Render (END)

        # Title changed since last publish: rename the existing page first.
        if current_title and current_title != title:
            print('REN => Title: ' + title)
            confluence_page_id = self.api.get_page_id(space, current_title)
            self.api.update_page(confluence_page_id, title, body)

        if current_hash != sha_hash or self.force_update:
            if autoindex:
                print('IDX => Title: ' + title)
            else:
                print('UPD => Title: ' + title)
            if self.api.update_or_create(
                parent_id=parentid,
                title=title,
                body=body,
                representation='storage'
            ):
                confluence_page_id = self.api.get_page_id(space, title)
                self.kv.save(filepath,
                             {'id': confluence_page_id,
                              'title': title,
                              'sha256': sha_hash})
                return confluence_page_id
            return None
        else:
            print('SKP => Title: ' + title)
            return self.api.get_page_id(space, title)

    def __delete_attachment(self, filepath):
        """Best-effort removal of the attachment previously published for
        *filepath*; API errors are deliberately swallowed."""
        metadata = self.kv.load(filepath)
        filename = os.path.basename(filepath)
        if metadata['id']:
            try:
                print('DEL Att. => Title: ' + filename)
                # https://confluence.atlassian.com/confkb/confluence-rest-api-lacks-delete-method-for-attachments-715361922.html
                # self.api.delete_attachment_by_id(metadata['id'], 1)
                self.api.remove_content(metadata['id'])
            except (HTTPError, ApiError):
                pass

    def __update_attachment(self, space, pageid, filepath):
        """Upload *filepath* as an attachment of page *pageid* and record
        its id in the local store.  Returns the attachment id."""
        filename = os.path.basename(filepath)
        print('UPD Att. => Title: ' + filename)
        results = self.api.attach_file(filepath, name=filename,
                                       page_id=pageid, space=space)
        # attach_file may return the content object or a results list.
        confluence_page_id = results['id'] if 'id' in results else results['results'][0]['id']
        self.kv.save(filepath, {'id': confluence_page_id,
                                'title': filename,
                                'sha256': None})
        return confluence_page_id

    def __publish_recursive(self, space, parentid, path, root=False):
        """Walk *path* depth-first publishing pages and attachments; each
        directory's _index.md (real or autogenerated) becomes the parent
        of that directory's contents.  The root directory itself gets no
        index page."""
        # File: _index.md
        index_parentid = parentid
        index_path = path + os.sep + '_index.md'
        if not root:
            if os.path.isfile(index_path):
                # Use local _index.md file
                index_parentid = self.__update_page(
                    space, parentid, index_path)
            else:
                # Autoindex simulate _index.md in Confluence if missing locally
                index_parentid = self.__update_page(
                    space, parentid, index_path, True)

        # Directories: */
        for f in os.scandir(path):
            if f.is_dir():
                self.__publish_recursive(space, index_parentid, f.path)

        # Files: *.* (Except _index.md)
        for f in os.scandir(path):
            if f.is_file():
                if f.path.endswith(".md"):
                    if not f.path.endswith(os.sep + '_index.md'):
                        self.__update_page(space, index_parentid, f.path)
                else:
                    self.__delete_attachment(f.path)
                    self.__update_attachment(space, index_parentid, f.path)

    def delete(self):
        """Delete remote pages/attachments whose local source files are
        gone (or everything tracked, when force_delete is set)."""
        for filepath in sorted(self.kv.keys()):
            metadata = self.kv.load(filepath)

            # Page has Sub-pages (Childs)?
            index_with_childs = False
            if filepath.endswith('_index.md'):
                childs = 0
                if os.path.isdir(os.path.dirname(filepath)):
                    for f in os.scandir(os.path.dirname(filepath)):
                        if f.path.endswith(".md") and \
                                not f.path.endswith('_index.md'):
                            childs = childs + 1
                index_with_childs = childs > 0

            if self.force_delete \
                    or (not os.path.isfile(filepath) and not index_with_childs):
                print('DEL => Id: ' + metadata['id']
                      + ', Title: ' + metadata['title'])
                if filepath.endswith(".md"):
                    try:
                        if self.api.get_page_by_id(metadata['id']):
                            self.api.remove_page(metadata['id'])
                    except HTTPError as ex:
                        # A 404 just means the page is already gone remotely.
                        code = ex.response.status_code
                        if code != 404:
                            print("DEL Pag. (Error):" + str(code))
                        else:
                            pass
                else:
                    self.__delete_attachment(filepath)
                self.kv.remove(filepath)

    def publish(self):
        """Publish the whole markdown_dir tree under parent_pageid."""
        self.__publish_recursive(
            self.space, self.parent_pageid, self.markdown_dir, root=True)
# coding: utf8 from atlassian import Confluence confluence = Confluence( url='http://localhost:8090', username='******', password='******') # If you know Space and Title content1 = confluence.get_page_by_title(space='SPACE', title='page title') print(content1) # If you know page_id of the page content2 = confluence.get_page_by_id(page_id=1123123123) print(content2)
class ConfluencePublisher():
    """Publishes a directory tree of Markdown files to Confluence, using a
    local key-value store of ids/hashes to skip unchanged files and to
    delete remote pages whose local sources disappeared."""

    def __init__(
            self,
            url,
            username,
            apiToken,
            pageTitlePrefix,
            markdownDir,
            dbPath,
            space,
            parentPageId,
            forceUpdate=False,
            forceDelete=False,
            skipUpdate=False,
    ):
        self.api = Confluence(url=url, username=username, password=apiToken)
        self.pageTitlePrefix = pageTitlePrefix
        self.markdownDir = markdownDir
        # Local store mapping file path -> {'id', 'title', 'sha256'}.
        self.kv = KeyValue(dbPath)
        self.space = space
        self.parentPageId = parentPageId
        self.forceUpdate = forceUpdate
        self.forceDelete = forceDelete
        self.skipUpdate = skipUpdate
        self.confluenceRenderer = ConfluenceRenderer(url)
        self.metadataPlugin = MetadataPlugin()
        self.renderer = mistune.create_markdown(
            renderer=self.confluenceRenderer,
            plugins=[
                'strikethrough', 'footnotes', 'table', 'url',
                self.metadataPlugin.plugin_metadata
            ])
        # Hack to allow metadata plugin to work (See mistune/block_parser.py)
        self.renderer.block.rules.remove('thematic_break')

    def __getFileContent(self, filepath):
        # Read a file's entire text content.
        file = open(filepath, mode='r')
        content = file.read()
        file.close()
        return content

    def __updatePage(self, space, parentId, filepath, autoindex=False):
        """Render *filepath* (or an autoindex) and create/update its page
        under *parentId*.  Returns the page id, or None on failure."""
        if autoindex:
            markdown = ''
        else:
            markdown = self.__getFileContent(filepath)
        metadata = self.kv.load(filepath)
        currentTitle = metadata['title']
        currentHash = metadata['sha256']
        hash = self.kv.sha256(markdown)

        # --- Render (BEGIN)
        # The metadata plugin captures a front-matter title, when present.
        self.metadataPlugin.stack['title'] = None
        if autoindex:
            body = self.confluenceRenderer.generate_autoindex()
        else:
            body = self.renderer(markdown)
        if self.metadataPlugin.stack['title'] is None:
            # Fallback titles: folder name for autoindexes, file name
            # otherwise.
            if autoindex:
                title = 'Folder ' + os.path.basename(os.path.dirname(filepath))
            else:
                title = os.path.basename(filepath)
        else:
            title = self.metadataPlugin.stack['title']
        title = self.pageTitlePrefix + title
        # >>> Removed: + " [" + self.kv.sha256(filepath)[-6:] + "]"
        # --- Render (END)

        # Title changed since last publish: rename the existing page first.
        if currentTitle and currentTitle != title:
            print('REN => Title: ' + title)
            pageId = self.api.get_page_id(space, currentTitle)
            self.api.update_page(pageId, title, body)

        if currentHash != hash or self.forceUpdate:
            if autoindex:
                print('IDX => Title: ' + title)
            else:
                print('UPD => Title: ' + title)
            if self.api.update_or_create(parent_id=parentId,
                                         title=title,
                                         body=body,
                                         representation='storage'):
                id = self.api.get_page_id(space, title)
                self.kv.save(filepath, {
                    'id': id,
                    'title': title,
                    'sha256': hash
                })
                return id
            else:
                return None
        else:
            print('SKP => Title: ' + title)
            return self.api.get_page_id(space, title)

    def __deleteAttachment(self, filepath):
        """Best-effort removal of the attachment previously published for
        *filepath*; API errors are deliberately swallowed."""
        metadata = self.kv.load(filepath)
        filename = os.path.basename(filepath)
        if metadata['id']:
            try:
                print('DEL Att. => Title: ' + filename)
                # https://confluence.atlassian.com/confkb/confluence-rest-api-lacks-delete-method-for-attachments-715361922.html
                # self.api.delete_attachment_by_id(metadata['id'], 1)
                self.api.remove_content(metadata['id'])
            except (HTTPError, ApiError):
                pass

    def __updateAttachment(self, space, pageId, filepath):
        """Upload *filepath* as an attachment of *pageId*; returns its id."""
        filename = os.path.basename(filepath)
        print('UPD Att. => Title: ' + filename)
        results = self.api.attach_file(filepath,
                                       name=filename,
                                       page_id=pageId,
                                       space=space)
        # attach_file may return the content object or a results list.
        id = results['id'] if 'id' in results else results['results'][0]['id']
        self.kv.save(filepath, {'id': id, 'title': filename, 'sha256': None})
        return id

    def __publishRecursive(self, space, parentId, path):
        """Depth-first publish of *path*: each directory's _index.md (real
        or simulated) becomes the parent of the directory's contents."""
        # File: _index.md
        indexParentId = parentId
        indexPath = path + os.sep + '_index.md'
        if os.path.isfile(indexPath):
            # Use local _index.md file
            indexParentId = self.__updatePage(space, parentId, indexPath)
        else:
            # Autoindex simulate _index.md in Confluence if missing locally
            # Except for (root) parentPageId because missing in markdownDir!
            if parentId != self.parentPageId:
                indexParentId = self.__updatePage(space, parentId, indexPath,
                                                  True)

        # Directories: */
        for f in os.scandir(path):
            if f.is_dir():
                self.__publishRecursive(space, indexParentId, f.path)

        # Files: *.* (Except _index.md)
        for f in os.scandir(path):
            if f.is_file():
                if f.path.endswith(".md"):
                    if not f.path.endswith(os.sep + '_index.md'):
                        self.__updatePage(space, indexParentId, f.path)
                else:
                    self.__deleteAttachment(f.path)
                    self.__updateAttachment(space, indexParentId, f.path)

    def delete(self):
        """Delete remote pages/attachments whose local sources are gone
        (or everything tracked, when forceDelete is set)."""
        for filepath in sorted(self.kv.keys()):
            metadata = self.kv.load(filepath)

            # Page has Sub-pages (Childs)?
            indexWithChilds = False
            if filepath.endswith('_index.md'):
                childs = 0
                if os.path.isdir(os.path.dirname(filepath)):
                    for f in os.scandir(os.path.dirname(filepath)):
                        if f.path.endswith(".md") and \
                                not f.path.endswith('_index.md'):
                            childs = childs + 1
                indexWithChilds = childs > 0

            if self.forceDelete \
                    or (not os.path.isfile(filepath) and not indexWithChilds):
                print('DEL => Id: ' + metadata['id']
                      + ', Title: ' + metadata['title'])
                if filepath.endswith(".md"):
                    try:
                        if self.api.get_page_by_id(metadata['id']):
                            self.api.remove_page(metadata['id'])
                    except HTTPError as ex:
                        # A 404 just means the page is already gone remotely.
                        code = ex.response.status_code
                        if code != 404:
                            print("DEL Pag. (Error):" + str(code))
                        else:
                            pass
                else:
                    self.__deleteAttachment(filepath)
                self.kv.remove(filepath)

    def publish(self):
        """Publish the whole markdownDir tree under parentPageId."""
        self.__publishRecursive(self.space, self.parentPageId,
                                self.markdownDir)
if art_url is None: art_url = input("Artifactory url: ") template_file = os.getenv('TEMPLATE_FILE') if template_file is None: template_file = os.path.expanduser( input("Template full file path (ENTER to skip): ")) blacklist_file = os.getenv('BLACKLIST_FILE') if blacklist_file is None: blacklist_file = input("Blacklist full file path (ENTER to skip): ") confluence = Confluence(url=confluence_url, username=confluence_username, password=confluence_password, verify_ssl=False) # TODO ssl verify confluence_page = confluence.get_page_by_id(page_id=confluence_page_id, expand='storage') resp = requests.get(art_url + '/api/repositories', verify=False) # TODO ssl verify resp.raise_for_status() repositories = resp.json() blacklist = list() if blacklist_file not in [None, '']: with open(blacklist_file) as f: blacklist = [line.rstrip() for line in f] # get all virt and remote repos virt_repos = dict() remote_repos = dict() for rep in repositories:
print( "########################################################################") print("Looking for page \"" + confluencePage + "\" in space " + confluenceSpace) pageID = confluence.get_page_id(confluenceSpace, confluencePage) if pageID is None: print("Page not found: " + confluencePage + " in space " + confluenceSpace) exit() else: print("Found page : " + confluencePage + " pageID: " + str(pageID)) print( "########################################################################") print("Getting page by ID: " + str(pageID)) contents = confluence.get_page_by_id(pageID, expand="body.storage,version", status="current") print("ID : " + str(contents["id"])) print("Status : " + contents["status"]) print("Title : " + contents["title"]) print("Version : " + str(contents["version"]["number"])) print("Last Updated : " + str(contents["version"]["when"])) print("Body : " + contents["body"]["storage"]["value"]) #print("Full Contents:") #print(contents) print( "########################################################################") print("Update page: " + str(pageID)) newBody = "<p>Hello World " + str(random.randint(0, 100)) + "</p>" try:
# Fetch a Telechips wiki page using credentials stored in a local SQLite DB.
from atlassian import Confluence
import pandas as pd
import sqlite3
from bs4 import BeautifulSoup

# Read the Confluence username/password from the local id_pw table.
con = sqlite3.connect('C:/Users/B180093/database/tcs.db')
user = pd.read_sql("SELECT * FROM id_pw", con)
user_info = user.values.tolist()
con.close()

#Wiki auth
confluence = Confluence(url='https://wiki.telechips.com:8443',
                        username=user_info[0][0],
                        password=user_info[0][1])

# Fetch the page body in storage format and parse it with BeautifulSoup.
page_info_body1 = confluence.get_page_by_id(175321610, expand='body.storage')
soup = BeautifulSoup(page_info_body1['body']['storage']['value'],
                     'html.parser')
print(page_info_body1)
#! /usr/bin/env python3 import re from atlassian import Confluence from six.moves.urllib.parse import urljoin from bs4 import BeautifulSoup WIKI_URL = "https://wiki.4paradigm.com" confluence = Confluence( url=WIKI_URL, username="******", password="******") ret = confluence.get_page_by_id("36220156", expand="body") web_view_url = ret["_links"]["webui"] url = urljoin(confluence.url, web_view_url) resp = confluence._session.request( method="GET", url=url, auth=(confluence.username, confluence.password), timeout=confluence.timeout, verify=confluence.verify_ssl) # p = re.compile(r"docker02:35000[a-zA-Z0-9./]+<") p = re.compile(r"docker02:35000[-.:/a-zA-Z0-9]+") soup = BeautifulSoup(resp.text, "html.parser") table = soup.find( "table", attrs={"class": "wrapped relative-table confluenceTable"})