Example #1
0
def test():
    """Ad-hoc smoke test: load and sort the page list, then dump one hard-coded page."""
    import importer
    from api import old_confluence_api, new_confluence_api
    import exporter

    all_pages = utils.load_pages()
    pages_in_order = utils.sort_pages(all_pages)
    exporter.dump_page('4358662')
def test():
    """Ad-hoc smoke test: load and sort the page list, then dump one hard-coded page."""
    import importer
    from api import old_confluence_api, new_confluence_api
    import exporter

    loaded = utils.load_pages()
    in_order = utils.sort_pages(loaded)
    exporter.dump_page("4358662")
Example #3
0
def batch_import(name, func, *args, **kwargs):
    """Apply *func* to every page id in sorted order, best-effort.

    Failures are logged at debug level and counted; the loop continues.
    A running success/fail/total tally is logged after each page and once
    more at the end.
    """
    catalogue = utils.load_pages()
    ok = 0
    bad = 0
    for entry in utils.sort_pages(catalogue):
        try:
            func(entry['id'], *args, **kwargs)
        except Exception as exc:
            logger.debug(exc)
            bad += 1
            continue
        # brief pause between pages -- presumably rate limiting; confirm
        time.sleep(0.1)
        ok += 1
        logger.info('import %s, page: %s, title: %s, s/f/t: %d/%d/%d' % (
            name, entry['id'], entry['title'], ok, bad, ok + bad))
    logger.info('import %s, s/f/t: %d/%d/%d' % (
        name, ok, bad, ok + bad))
Example #4
0
def import_pages():
    """Import all pages into the new space, parents before children.

    Resolves each page's new parent id (root pages use '0') and calls
    import_page().  An xmlrpc fault marked rollback-only is treated as a
    duplicate and skipped; any other fault is counted, logged, and
    re-raised.  Aborts entirely if a page references an unknown parent.
    """
    pages = utils.load_pages()
    ordered_pages = utils.sort_pages(pages)
    success_count = 0
    fail_count = 0
    # Set to an old page id to resume an interrupted run from that page.
    SKIP_TO_ID = None
    # Loop-invariant: map old page id -> title once, instead of rebuilding
    # the dict on every iteration (was accidentally O(n^2)).
    old_parent_id_title = {x['id']: x['title'] for x in pages}
    for page in ordered_pages:
        if SKIP_TO_ID is not None:
            if page['id'] == SKIP_TO_ID:
                SKIP_TO_ID = None
            else:
                continue
        try:
            if page['parentId'] not in old_parent_id_title and page['parentId'] != '0':
                logger.error('No old parent, title: %s, old page id: %s' % (
                    page['title'], page['parentId']))
                return
            if page['parentId'] == '0':
                # '0' marks a root page: keep it parentless in the new space.
                new_parent_id = '0'
            else:
                try:
                    new_parent_id = find_page_title_to_page_id(pages, page['parentId'])
                except Exception as e:
                    raise ValueError('cannot locate %s, e: %s' % (page['id'], e))
            import_page(page['id'], new_parent_id)
        except xmlrpc.client.Fault as e:
            if ('Transaction rolled back because it has been marked as rollback-only' in
                    e.faultString):
                logger.info('duplicate, page id: %s, title: %s' % (page['id'], page['title']))
            else:
                fail_count += 1
                logger.error('import error, page_id: %s, title: %s, e: %s' % (
                    page['id'], page['title'], e))
                # Bare raise preserves the original traceback.
                raise
        # NOTE(review): duplicates fall through and count as successes -- confirm intended.
        success_count += 1
        logger.info('import %s, page: %s, title: %s, s/f/t: %d/%d/%d' % (
            'page', page['id'], page['title'], success_count, fail_count,
            success_count + fail_count))
    logger.info('import %s, s/f/t: %d/%d/%d' % (
        'page', success_count, fail_count, success_count + fail_count))
def batch_import(name, func, *args, **kwargs):
    """Run *func* against every page id in sorted order, best-effort.

    Any exception from *func* is logged at debug level and counted as a
    failure; the loop then moves on.  A running success/fail/total tally
    is logged per page and once at the end.
    """
    catalogue = utils.load_pages()
    ok, bad = 0, 0
    for item in utils.sort_pages(catalogue):
        try:
            func(item["id"], *args, **kwargs)
        except Exception as exc:
            logger.debug(exc)
            bad += 1
            continue
        # brief pause between pages -- presumably rate limiting; confirm
        time.sleep(0.1)
        ok += 1
        logger.info(
            "import %s, page: %s, title: %s, s/f/t: %d/%d/%d"
            % (name, item["id"], item["title"], ok, bad, ok + bad)
        )
    logger.info("import %s, s/f/t: %d/%d/%d" % (name, ok, bad, ok + bad))
def import_pages():
    """Import all pages into the new space, parents before children.

    Resolves each page's new parent id (root pages use "0") and calls
    import_page().  An xmlrpc fault marked rollback-only is treated as a
    duplicate and skipped; any other fault is counted, logged, and
    re-raised.  Aborts entirely if a page references an unknown parent.
    """
    pages = utils.load_pages()
    ordered_pages = utils.sort_pages(pages)
    success_count = 0
    fail_count = 0
    # Set to an old page id to resume an interrupted run from that page.
    SKIP_TO_ID = None
    # Loop-invariant: map old page id -> title once, instead of rebuilding
    # the dict on every iteration (was accidentally O(n^2)).
    old_parent_id_title = {x["id"]: x["title"] for x in pages}
    for page in ordered_pages:
        if SKIP_TO_ID is not None:
            if page["id"] == SKIP_TO_ID:
                SKIP_TO_ID = None
            else:
                continue
        try:
            if page["parentId"] not in old_parent_id_title and page["parentId"] != "0":
                logger.error("No old parent, title: %s, old page id: %s" % (page["title"], page["parentId"]))
                return
            if page["parentId"] == "0":
                # "0" marks a root page: keep it parentless in the new space.
                new_parent_id = "0"
            else:
                try:
                    new_parent_id = find_page_title_to_page_id(pages, page["parentId"])
                except Exception as e:
                    raise ValueError("cannot locate %s, e: %s" % (page["id"], e))
            import_page(page["id"], new_parent_id)
        except xmlrpc.client.Fault as e:
            if "Transaction rolled back because it has been marked as rollback-only" in e.faultString:
                logger.info("duplicate, page id: %s, title: %s" % (page["id"], page["title"]))
            else:
                fail_count += 1
                logger.error("import error, page_id: %s, title: %s, e: %s" % (page["id"], page["title"], e))
                # Bare raise preserves the original traceback.
                raise
        # NOTE(review): duplicates fall through and count as successes -- confirm intended.
        success_count += 1
        logger.info(
            "import %s, page: %s, title: %s, s/f/t: %d/%d/%d"
            % ("page", page["id"], page["title"], success_count, fail_count, success_count + fail_count)
        )
    logger.info("import %s, s/f/t: %d/%d/%d" % ("page", success_count, fail_count, success_count + fail_count))
def batch_dump(name, func):
    """Dump every page with *func*, logging a running success/fail tally.

    Unlike batch_import, a dump failure here is fatal: the error is
    logged, counted, and re-raised, aborting the run.
    """
    pages = utils.load_pages()
    ordered_pages = utils.sort_pages(pages)
    success_count = 0
    fail_count = 0
    for page in ordered_pages:
        try:
            func(page['id'])
        except Exception:
            logger.error('dump %s fail, page id: %s' % (name, page['id']))
            fail_count += 1
            # Bare raise preserves the original traceback.
            raise
        # brief pause between pages -- presumably rate limiting; confirm
        time.sleep(0.01)
        success_count += 1
        logger.info('dump %s, page: %s, title: %s, s/f/t: %d/%d/%d' % (
            name, page['id'], page['title'], success_count, fail_count, success_count + fail_count))
    logger.info('dump %s, s/f/t: %d/%d/%d' % (
        name, success_count, fail_count, success_count + fail_count))