Example #1
    def test_job_log_messages(self):
        """ Test if job run information was correctly dumped into cronjob.json file if there are some additional messages.
        Check if structure is correct.
        """
        start_time = int(time.time())
        messages = [{'label': 'Message label', 'message': 'Message text'}]
        util.job_log(start_time,
                     yc_gc.temp_dir,
                     messages=messages,
                     status='Success',
                     filename=self.filename)
        file_content = self.load_cronjobs_json()

        job_log = file_content.get(self.filename, {})
        job_log_messages = job_log['messages']

        self.assertNotEqual(file_content, {})
        self.assertIn(self.filename, file_content)
        self.assertEqual('Success', job_log['status'])
        self.assertNotEqual(len(job_log_messages), 0)
        for prop in self.job_log_properties:
            self.assertIn(prop, job_log)

        for message in job_log_messages:
            self.assertIn('label', message)
            self.assertIn('message', message)

        self.clear_job_log()
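
The tests above exercise a shared util.job_log helper whose implementation is not part of this listing. From the call sites throughout these examples (start time, temp directory, the calling script's filename, optional messages, an optional error string and a status) and from the test assertions, its behaviour can be inferred: it stores one record per job, keyed by filename, in cronjob.json inside temp_dir. The following is only a reconstruction under those assumptions; the 'status' and 'messages' fields are confirmed by the tests, while 'start', 'end' and 'error' are guesses.

import json
import os
import time


def job_log(start_time, temp_dir, filename='', messages=None, error='', status=''):
    """Sketch: record one cron-job run under its filename in <temp_dir>/cronjob.json."""
    cronjob_path = os.path.join(temp_dir, 'cronjob.json')
    try:
        with open(cronjob_path, 'r') as f:
            file_content = json.load(f)
    except (FileNotFoundError, json.JSONDecodeError):
        file_content = {}
    file_content[filename] = {
        'start': start_time,        # assumed field name
        'end': int(time.time()),    # assumed field name
        'status': status,           # checked by the tests above
        'error': error,             # assumed field name
        'messages': messages or []  # checked by the tests above
    }
    with open(cronjob_path, 'w') as f:
        json.dump(file_content, f, indent=4)
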
Example #2
def main(scriptConf=None):
    start_time = int(time.time())
    if scriptConf is None:
        scriptConf = ScriptConfig()
    log_directory = scriptConf.log_directory
    LOGGER = log.get_logger('reviseTreeType',
                            '{}/parseAndPopulate.log'.format(log_directory))
    LOGGER.info('Starting Cron job for reviseTreeType')
    api_protocol = scriptConf.api_protocol
    ip = scriptConf.ip
    api_port = scriptConf.api_port
    is_uwsgi = scriptConf.is_uwsgi
    separator = ':'
    suffix = api_port
    if is_uwsgi == 'True':
        separator = '/'
        suffix = 'api'
    yangcatalog_api_prefix = '{}://{}{}{}/'.format(api_protocol, ip, separator,
                                                   suffix)
    credentials = scriptConf.credentials
    save_file_dir = scriptConf.save_file_dir
    direc = '/var/yang/tmp'
    yang_models = scriptConf.yang_models
    temp_dir = scriptConf.temp_dir
    json_ytree = scriptConf.json_ytree
    complicatedAlgorithms = ModulesComplicatedAlgorithms(
        log_directory, yangcatalog_api_prefix, credentials, save_file_dir,
        direc, {}, yang_models, temp_dir, json_ytree)
    response = requests.get('{}search/modules'.format(yangcatalog_api_prefix))
    if response.status_code != 200:
        LOGGER.error('Failed to fetch list of modules')
        job_log(start_time,
                temp_dir,
                os.path.basename(__file__),
                error=response.text,
                status='Fail')
        return
    modules_revise = []
    modules = response.json()['module']
    for module in modules:
        if module.get('tree-type') == 'nmda-compatible':
            if not complicatedAlgorithms.check_if_latest_revision(module):
                modules_revise.append(module)
    LOGGER.info('Resolving tree-types for {} modules'.format(
        len(modules_revise)))
    complicatedAlgorithms.resolve_tree_type({'module': modules_revise})
    complicatedAlgorithms.populate()
    LOGGER.info('Job finished successfully')
    job_log(start_time, temp_dir, os.path.basename(__file__), status='Success')
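
The block that derives yangcatalog_api_prefix from the protocol, IP, port and the is_uwsgi flag reappears almost verbatim in Examples #5, #8, #10 and #12 below. A small helper could factor it out; the function name below is illustrative only, while the logic mirrors the scripts:

def build_api_prefix(protocol, ip, port, is_uwsgi):
    """Sketch: build the yangcatalog API prefix used by the scripts in this listing."""
    if is_uwsgi == 'True':
        # behind uwsgi the API is served under the /api path
        separator, suffix = '/', 'api'
    else:
        # otherwise the API listens on a dedicated port
        separator, suffix = ':', port
    return '{}://{}{}{}/'.format(protocol, ip, separator, suffix)

For example, build_api_prefix('https', 'yangcatalog.org', 8443, 'True') yields 'https://yangcatalog.org/api/'.
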
Example #3
    def test_job_log_succes(self):
        """ Test if job run information was correctly dumped into cronjob.json file if status is Success.
        Check if structure is correct.
        """
        start_time = int(time.time())
        util.job_log(start_time,
                     yc_gc.temp_dir,
                     status='Success',
                     filename=self.filename)
        file_content = self.load_cronjobs_json()

        job_log = file_content.get(self.filename, {})

        self.assertNotEqual(file_content, {})
        self.assertIn(self.filename, file_content)
        self.assertEqual('Success', job_log['status'])
        for prop in self.job_log_properties:
            self.assertIn(prop, job_log)

        self.clear_job_log()
Example #4
def main(scriptConf=None):
    start_time = int(time.time())
    if scriptConf is None:
        scriptConf = ScriptConfig()
    args = scriptConf.args

    config_path = args.config_path
    config = create_config(config_path)
    yang_models = config.get('Directory-Section', 'yang-models-dir')
    token = config.get('Secrets-Section', 'yang-catalog-token')
    username = config.get('General-Section', 'repository-username')
    commit_dir = config.get('Directory-Section', 'commit-dir')
    config_name = config.get('General-Section', 'repo-config-name')
    config_email = config.get('General-Section', 'repo-config-email')
    log_directory = config.get('Directory-Section', 'logs')
    temp_dir = config.get('Directory-Section', 'temp')
    is_production = config.get('General-Section', 'is-prod')
    is_production = is_production == 'True'
    LOGGER = log.get_logger('ianaPull',
                            '{}/jobs/iana-pull.log'.format(log_directory))
    LOGGER.info('Starting job to pull IANA-maintained modules')

    repo_name = 'yang'
    repourl = 'https://{}@github.com/{}/{}.git'.format(token, username,
                                                       repo_name)
    commit_author = {'name': config_name, 'email': config_email}

    draftPullUtility.update_forked_repository(yang_models, LOGGER)
    repo = draftPullUtility.clone_forked_repository(repourl, commit_author,
                                                    LOGGER)

    if not repo:
        error_message = 'Failed to clone repository {}/{}'.format(
            username, repo_name)
        job_log(start_time,
                temp_dir,
                error=error_message,
                status='Fail',
                filename=os.path.basename(__file__))
        sys.exit()

    try:
        iana_temp_path = os.path.join(temp_dir, 'iana')
        if os.path.exists(iana_temp_path):
            shutil.rmtree(iana_temp_path)
        # call rsync to sync with rsync.iana.org::assignments/yang-parameters/
        subprocess.call([
            'rsync', '-avzq', '--delete',
            'rsync.iana.org::assignments/yang-parameters/', iana_temp_path
        ])
        draftPullUtility.set_permissions(iana_temp_path)
        iana_standard_path = os.path.join(repo.local_dir, 'standard/iana')
        if not os.path.exists(iana_standard_path):
            os.makedirs(iana_standard_path)
        xml_path = os.path.join(iana_temp_path, 'yang-parameters.xml')
        copy2(xml_path,
              '{}/standard/iana/yang-parameters.xml'.format(repo.local_dir))

        # Parse yang-parameters.xml file
        root = ET.parse(xml_path).getroot()
        tag = root.tag
        namespace = tag.split('registry')[0]
        modules = root.iter('{}record'.format(namespace))

        for module in modules:
            data = module.attrib
            for attributes in module:
                prop = attributes.tag.split(namespace)[-1]
                assert attributes.text is not None
                data[prop] = attributes.text

            if data.get('iana') == 'Y' and data.get('file'):
                src = '{}/{}'.format(iana_temp_path, data.get('file'))
                dst = '{}/standard/iana/{}'.format(repo.local_dir,
                                                   data.get('file'))
                copy2(src, dst)

        LOGGER.info('Checking module filenames without revision in {}'.format(
            iana_standard_path))
        draftPullUtility.check_name_no_revision_exist(iana_standard_path,
                                                      LOGGER)

        LOGGER.info(
            'Checking for early revision in {}'.format(iana_standard_path))
        draftPullUtility.check_early_revisions(iana_standard_path, LOGGER)

        messages = []
        try:
            # Add commit and push to the forked repository
            LOGGER.info('Adding all untracked files locally')
            untracked_files = repo.repo.untracked_files
            repo.add_untracked_remove_deleted()
            LOGGER.info('Committing all files locally')
            repo.commit_all('Cronjob - every day pull of iana yang files')
            LOGGER.info('Pushing files to forked repository')
            commit_hash = repo.repo.head.commit
            LOGGER.info('Commit hash {}'.format(commit_hash))
            with open(commit_dir, 'w+') as f:
                f.write('{}\n'.format(commit_hash))
            if is_production:
                LOGGER.info(
                    'Pushing untracked and modified files to remote repository'
                )
                repo.push()
            else:
                LOGGER.info(
                    'DEV environment - not pushing changes into remote repository'
                )
                LOGGER.debug(
                    'List of all untracked and modified files:\n{}'.format(
                        '\n'.join(untracked_files)))
        except GitCommandError as e:
            message = 'Error while pushing procedure - git command error: \n {} \n git command out: \n {}'.format(
                e.stderr, e.stdout)
            if 'Your branch is up to date' in e.stdout:
                LOGGER.warning(message)
                messages = [{
                    'label': 'Pull request created',
                    'message': 'False - branch is up to date'
                }]
            else:
                LOGGER.exception(
                    'Error while pushing procedure - Git command error')
                raise e
        except Exception as e:
            LOGGER.exception('Error while pushing procedure {}'.format(
                sys.exc_info()[0]))
            raise type(e)('Error while pushing procedure')
    except Exception as e:
        LOGGER.exception('Exception found while running draftPull script')
        job_log(start_time,
                temp_dir,
                error=str(e),
                status='Fail',
                filename=os.path.basename(__file__))
        raise e

    # Remove tmp folder
    LOGGER.info('Removing tmp directory')

    if len(messages) == 0:
        messages = [{
            'label': 'Pull request created',
            'message': 'True - {}'.format(commit_hash)  # pyright: ignore
        }]
    job_log(start_time,
            temp_dir,
            messages=messages,
            status='Success',
            filename=os.path.basename(__file__))
    LOGGER.info('Job finished successfully')
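
Most of the scripts in this listing repeat the reporting pattern shown above: on any exception the run is recorded with status 'Fail' and the error text (and re-raised), otherwise with status 'Success' and a list of messages. A hypothetical wrapper that factors this out could look like the following sketch; the actual scripts inline the pattern and this helper is not part of the project:

import time


def run_and_report(job, temp_dir, filename):
    """Sketch: run a callable job and record its outcome via the job_log helper.

    `job` is any callable returning a list of {'label': ..., 'message': ...} dicts.
    """
    start_time = int(time.time())
    try:
        messages = job() or []
    except Exception as e:
        job_log(start_time, temp_dir, error=str(e), status='Fail', filename=filename)
        raise
    job_log(start_time, temp_dir, messages=messages, status='Success', filename=filename)

A caller would then do run_and_report(pull_iana_modules, temp_dir, os.path.basename(__file__)), where pull_iana_modules is a hypothetical function holding the body of the try block above.
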
Example #5
def main(scriptConf=None):
    start_time = int(time.time())
    if scriptConf is None:
        scriptConf = ScriptConfig()
    revision_updated_modules = 0
    datatracker_failures = []
    args = scriptConf.args
    log_directory = scriptConf.log_directory
    temp_dir = scriptConf.temp_dir
    is_uwsgi = scriptConf.is_uwsgi
    LOGGER = log.get_logger(
        'resolveExpiration',
        '{}/jobs/resolveExpiration.log'.format(log_directory))

    separator = ':'
    suffix = args.api_port
    if is_uwsgi == 'True':
        separator = '/'
        suffix = 'api'
    yangcatalog_api_prefix = '{}://{}{}{}/'.format(args.api_protocol,
                                                   args.api_ip, separator,
                                                   suffix)
    redisConnection = RedisConnection()
    LOGGER.info('Starting Cron job resolve modules expiration')
    try:
        LOGGER.info('Requesting all the modules from {}'.format(
            yangcatalog_api_prefix))
        updated = False

        response = requests.get(
            '{}search/modules'.format(yangcatalog_api_prefix))
        if response.status_code < 200 or response.status_code > 299:
            LOGGER.error('Request on path {} failed with {}'.format(
                yangcatalog_api_prefix, response.text))
        else:
            LOGGER.debug('{} modules fetched from {} successfully'.format(
                len(response.json().get('module', [])),
                yangcatalog_api_prefix))
        modules = response.json().get('module', [])
        i = 1
        for module in modules:
            LOGGER.debug('{} out of {}'.format(i, len(modules)))
            i += 1
            ret = resolve_expiration(module, LOGGER, datatracker_failures,
                                     redisConnection)
            if ret:
                revision_updated_modules += 1
            if not updated:
                updated = ret
        if updated:
            redisConnection.populate_modules(modules)
            url = ('{}load-cache'.format(yangcatalog_api_prefix))
            response = requests.post(url,
                                     None,
                                     auth=(args.credentials[0],
                                           args.credentials[1]))
            LOGGER.info('Cache loaded with status {}'.format(
                response.status_code))
    except Exception as e:
        LOGGER.exception(
            'Exception found while running resolveExpiration script')
        job_log(start_time,
                temp_dir,
                error=str(e),
                status='Fail',
                filename=os.path.basename(__file__))
        raise e
    if len(datatracker_failures) > 0:
        LOGGER.debug(
            'Following references failed to get from the datatracker:\n {}'.
            format('\n'.join(datatracker_failures)))
    messages = [{
        'label': 'Modules with changed revision',
        'message': revision_updated_modules
    }, {
        'label': 'Datatracker modules failures',
        'message': len(datatracker_failures)
    }]
    job_log(start_time,
            temp_dir,
            messages=messages,
            status='Success',
            filename=os.path.basename(__file__))
    LOGGER.info('Job finished successfully')
Example #6
            result['message'] = '{} NOT OK'.format(response.status_code)
        messages.append(result)

        # GET 3
        # NOTE: Module should already be removed - 404 status code is expected
        result = {}
        result['label'] = 'GET 2 {}@2018-04-03'.format(check_module_name)
        response = confdService.get_module(new_module_key)

        if response.status_code == 404:
            result['message'] = '{} OK'.format(response.status_code)
        else:
            LOGGER.info('Module {}@2018-04-03 already in ConfD'.format(
                check_module_name))
            result['message'] = '{} NOT OK'.format(response.status_code)
        messages.append(result)

        job_log(start_time,
                temp_dir,
                messages=messages,
                status='Success',
                filename=os.path.basename(__file__))

    except Exception as e:
        LOGGER.exception(e)
        job_log(start_time,
                temp_dir,
                error=str(e),
                status='Fail',
                filename=os.path.basename(__file__))
Example #7
def main(scriptConf=None):
    start_time = int(time.time())
    if scriptConf is None:
        scriptConf = ScriptConfig()
    args = scriptConf.args

    config_path = args.config_path
    config = create_config(config_path)
    token = config.get('Secrets-Section', 'yang-catalog-token')
    username = config.get('General-Section', 'repository-username')
    commit_dir = config.get('Directory-Section', 'commit-dir')
    config_name = config.get('General-Section', 'repo-config-name')
    config_email = config.get('General-Section', 'repo-config-email')
    log_directory = config.get('Directory-Section', 'logs')
    temp_dir = config.get('Directory-Section', 'temp')
    exceptions = config.get('Directory-Section', 'exceptions')
    yang_models = config.get('Directory-Section', 'yang-models-dir')
    ietf_draft_url = config.get('Web-Section', 'ietf-draft-private-url')
    ietf_rfc_url = config.get('Web-Section', 'ietf-RFC-tar-private-url')
    is_production = config.get('General-Section', 'is-prod')
    is_production = is_production == 'True'
    LOGGER = log.get_logger('draftPull',
                            '{}/jobs/draft-pull.log'.format(log_directory))
    LOGGER.info('Starting Cron job IETF pull request')

    repo_name = 'yang'
    repourl = 'https://{}@github.com/{}/{}.git'.format(token, username,
                                                       repo_name)
    commit_author = {'name': config_name, 'email': config_email}

    draftPullUtility.update_forked_repository(yang_models, LOGGER)
    repo = draftPullUtility.clone_forked_repository(repourl, commit_author,
                                                    LOGGER)

    if not repo:
        error_message = 'Failed to clone repository {}/{}'.format(
            username, repo_name)
        job_log(start_time,
                temp_dir,
                error=error_message,
                status='Fail',
                filename=os.path.basename(__file__))
        sys.exit()

    try:
        # Get rfc.tgz file
        response = requests.get(ietf_rfc_url)
        tgz_path = '{}/rfc.tgz'.format(repo.local_dir)
        extract_to = '{}/standard/ietf/RFCtemp'.format(repo.local_dir)
        with open(tgz_path, 'wb') as zfile:
            zfile.write(response.content)
        tar_opened = draftPullUtility.extract_rfc_tgz(tgz_path, extract_to,
                                                      LOGGER)
        if tar_opened:
            diff_files = []
            new_files = []

            temp_rfc_yang_files = glob.glob(
                '{}/standard/ietf/RFCtemp/*.yang'.format(repo.local_dir))
            for temp_rfc_yang_file in temp_rfc_yang_files:
                file_name = os.path.basename(temp_rfc_yang_file)
                rfc_yang_file = temp_rfc_yang_file.replace('RFCtemp', 'RFC')

                if not os.path.exists(rfc_yang_file):
                    new_files.append(file_name)
                    continue

                same = filecmp.cmp(rfc_yang_file, temp_rfc_yang_file)
                if not same:
                    diff_files.append(file_name)

            shutil.rmtree('{}/standard/ietf/RFCtemp'.format(repo.local_dir))

            with open(exceptions, 'r') as exceptions_file:
                remove_from_new = exceptions_file.read().split('\n')
            new_files = [
                file_name for file_name in new_files
                if file_name not in remove_from_new
            ]

            if args.send_message:
                if new_files or diff_files:
                    LOGGER.info(
                        'new or modified RFC files found. Sending an E-mail')
                    mf = messageFactory.MessageFactory()
                    mf.send_new_rfc_message(new_files, diff_files)

        # Experimental draft modules
        try:
            os.makedirs('{}/experimental/ietf-extracted-YANG-modules/'.format(
                repo.local_dir))
        except OSError as e:
            # be happy if someone already created the path
            if e.errno != errno.EEXIST:
                raise
        experimental_path = '{}/experimental/ietf-extracted-YANG-modules'.format(
            repo.local_dir)

        LOGGER.info('Updating IETF drafts download links')
        draftPullUtility.get_draft_module_content(ietf_draft_url,
                                                  experimental_path, LOGGER)

        LOGGER.info('Checking module filenames without revision in {}'.format(
            experimental_path))
        draftPullUtility.check_name_no_revision_exist(experimental_path,
                                                      LOGGER)

        LOGGER.info(
            'Checking for early revision in {}'.format(experimental_path))
        draftPullUtility.check_early_revisions(experimental_path, LOGGER)

        messages = []
        try:
            # Add commit and push to the forked repository
            LOGGER.info('Adding all untracked files locally')
            untracked_files = repo.repo.untracked_files
            repo.add_untracked_remove_deleted()
            LOGGER.info('Committing all files locally')
            repo.commit_all(
                'Cronjob - every day pull of ietf draft yang files.')
            LOGGER.info('Pushing files to forked repository')
            commit_hash = repo.repo.head.commit
            LOGGER.info('Commit hash {}'.format(commit_hash))
            with open(commit_dir, 'w+') as f:
                f.write('{}\n'.format(commit_hash))
            if is_production:
                LOGGER.info(
                    'Pushing untracked and modified files to remote repository'
                )
                repo.push()
            else:
                LOGGER.info(
                    'DEV environment - not pushing changes into remote repository'
                )
                LOGGER.debug(
                    'List of all untracked and modified files:\n{}'.format(
                        '\n'.join(untracked_files)))
        except GitCommandError as e:
            message = 'Error while pushing procedure - git command error: \n {} \n git command out: \n {}'.format(
                e.stderr, e.stdout)
            if 'Your branch is up to date' in e.stdout:
                LOGGER.warning(message)
                messages = [{
                    'label': 'Pull request created',
                    'message': 'False - branch is up to date'
                }]
            else:
                LOGGER.exception(
                    'Error while pushing procedure - Git command error')
                raise e
        except Exception as e:
            LOGGER.exception('Error while pushing procedure {}'.format(
                sys.exc_info()[0]))
            raise type(e)('Error while pushing procedure')
    except Exception as e:
        LOGGER.exception('Exception found while running draftPull script')
        job_log(start_time,
                temp_dir,
                error=str(e),
                status='Fail',
                filename=os.path.basename(__file__))
        raise e

    if len(messages) == 0:
        messages = [{
            'label': 'Pull request created',
            'message': 'True - {}'.format(commit_hash)  # pyright: ignore
        }]
    job_log(start_time,
            temp_dir,
            messages=messages,
            status='Success',
            filename=os.path.basename(__file__))
    LOGGER.info('Job finished successfully')
Example #8
def main(scriptConf=None):
    start_time = int(time.time())
    if scriptConf is None:
        scriptConf = ScriptConfig()
    args = scriptConf.args

    config_path = args.config_path
    config = create_config(config_path)
    api_ip = config.get('Web-Section', 'ip')
    api_port = int(config.get('Web-Section', 'api-port'))
    credentials = config.get('Secrets-Section',
                             'confd-credentials').strip('"').split(' ')
    api_protocol = config.get('General-Section', 'protocol-api')
    is_uwsgi = config.get('General-Section', 'uwsgi')
    config_name = config.get('General-Section', 'repo-config-name')
    config_email = config.get('General-Section', 'repo-config-email')
    log_directory = config.get('Directory-Section', 'logs')
    temp_dir = config.get('Directory-Section', 'temp')
    openconfig_repo_url = config.get('Web-Section',
                                     'openconfig-models-repo-url')
    LOGGER = log.get_logger(
        'openconfigPullLocal',
        '{}/jobs/openconfig-pull.log'.format(log_directory))
    LOGGER.info('Starting Cron job openconfig pull request local')

    separator = ':'
    suffix = api_port
    if is_uwsgi == 'True':
        separator = '/'
        suffix = 'api'
    yangcatalog_api_prefix = '{}://{}{}{}/'.format(api_protocol, api_ip,
                                                   separator, suffix)

    commit_author = {'name': config_name, 'email': config_email}
    repo = draftPullUtility.clone_forked_repository(openconfig_repo_url,
                                                    commit_author, LOGGER)
    assert repo
    modules = []
    try:
        yang_files = glob('{}/release/models/**/*.yang'.format(repo.local_dir),
                          recursive=True)
        for yang_file in yang_files:
            basename = os.path.basename(yang_file)
            name = basename.split('.')[0].split('@')[0]
            revision = resolve_revision(yang_file)
            path = yang_file.split('{}/'.format(repo.local_dir))[-1]
            module = {
                'generated-from': 'not-applicable',
                'module-classification': 'unknown',
                'name': name,
                'revision': revision,
                'organization': 'openconfig',
                'source-file': {
                    'owner': 'openconfig',
                    'path': path,
                    'repository': 'public'
                }
            }
            modules.append(module)
        data = json.dumps({'modules': {'module': modules}})
    except Exception as e:
        LOGGER.exception(
            'Exception found while running openconfigPullLocal script')
        job_log(start_time,
                temp_dir,
                error=str(e),
                status='Fail',
                filename=os.path.basename(__file__))
        raise e
    LOGGER.debug(data)
    api_path = '{}modules'.format(yangcatalog_api_prefix)
    response = requests.put(api_path,
                            data,
                            auth=(credentials[0], credentials[1]),
                            headers=json_headers)

    status_code = response.status_code
    payload = json.loads(response.text)
    if status_code < 200 or status_code > 299:
        e = 'PUT /api/modules responded with status code {}'.format(
            status_code)
        job_log(start_time,
                temp_dir,
                error=str(e),
                status='Fail',
                filename=os.path.basename(__file__))
        LOGGER.info(
            'Job finished, but an error occurred while sending PUT to /api/modules'
        )
    else:
        messages = [{'label': 'Job ID', 'message': payload['job-id']}]
        job_log(start_time,
                temp_dir,
                messages=messages,
                status='Success',
                filename=os.path.basename(__file__))
        LOGGER.info('Job finished successfully')
Example #9
def main(scriptConf=None):
    start_time = int(time.time())
    if scriptConf is None:
        scriptConf = ScriptConfig()
    args = scriptConf.args

    config_path = args.config_path
    config = create_config(config_path)
    notify_indexing = config.get('General-Section', 'notify-index')
    config_name = config.get('General-Section', 'repo-config-name')
    config_email = config.get('General-Section', 'repo-config-email')
    log_directory = config.get('Directory-Section', 'logs')
    ietf_draft_url = config.get('Web-Section', 'ietf-draft-private-url')
    ietf_rfc_url = config.get('Web-Section', 'ietf-RFC-tar-private-url')
    temp_dir = config.get('Directory-Section', 'temp')
    LOGGER = log.get_logger(
        'draftPullLocal', '{}/jobs/draft-pull-local.log'.format(log_directory))
    LOGGER.info('Starting cron job IETF pull request local')

    messages = []
    notify_indexing = notify_indexing == 'True'
    populate_error = False
    repo = None
    try:
        # Clone YangModels/yang repository
        clone_dir = '{}/draftpulllocal'.format(temp_dir)
        if os.path.exists(clone_dir):
            shutil.rmtree(clone_dir)
        repo = repoutil.ModifiableRepoUtil(os.path.join(
            github_url, 'YangModels/yang.git'),
                                           clone_options={
                                               'config_username': config_name,
                                               'config_user_email':
                                               config_email,
                                               'local_dir': clone_dir
                                           })
        LOGGER.info('YangModels/yang repo cloned to local directory {}'.format(
            repo.local_dir))

        response = requests.get(ietf_rfc_url)
        tgz_path = '{}/rfc.tgz'.format(repo.local_dir)
        extract_to = '{}/standard/ietf/RFC'.format(repo.local_dir)
        with open(tgz_path, 'wb') as zfile:
            zfile.write(response.content)
        tar_opened = draftPullUtility.extract_rfc_tgz(tgz_path, extract_to,
                                                      LOGGER)

        if tar_opened:
            # Standard RFC modules
            direc = '{}/standard/ietf/RFC'.format(repo.local_dir)

            LOGGER.info(
                'Checking module filenames without revision in {}'.format(
                    direc))
            draftPullUtility.check_name_no_revision_exist(direc, LOGGER)

            LOGGER.info('Checking for early revision in {}'.format(direc))
            draftPullUtility.check_early_revisions(direc, LOGGER)

            execution_result = run_populate_script(direc, notify_indexing,
                                                   LOGGER)
            if execution_result == False:
                populate_error = True
                message = {
                    'label': 'Standard RFC modules',
                    'message': 'Error while calling populate script'
                }
                messages.append(message)
            else:
                message = {
                    'label': 'Standard RFC modules',
                    'message': 'populate script finished successfully'
                }
                messages.append(message)

        # Experimental modules
        experimental_path = '{}/experimental/ietf-extracted-YANG-modules'.format(
            repo.local_dir)

        LOGGER.info('Updating IETF drafts download links')
        draftPullUtility.get_draft_module_content(ietf_draft_url,
                                                  experimental_path, LOGGER)

        LOGGER.info('Checking module filenames without revision in {}'.format(
            experimental_path))
        draftPullUtility.check_name_no_revision_exist(experimental_path,
                                                      LOGGER)

        LOGGER.info(
            'Checking for early revision in {}'.format(experimental_path))
        draftPullUtility.check_early_revisions(experimental_path, LOGGER)

        execution_result = run_populate_script(experimental_path,
                                               notify_indexing, LOGGER)
        if execution_result == False:
            populate_error = True
            message = {
                'label': 'Experimental modules',
                'message': 'Error while calling populate script'
            }
            messages.append(message)
        else:
            message = {
                'label': 'Experimental modules',
                'message': 'populate script finished successfully'
            }
            messages.append(message)

        # IANA modules
        iana_path = '{}/standard/iana'.format(repo.local_dir)

        if os.path.exists(iana_path):
            LOGGER.info(
                'Checking module filenames without revision in {}'.format(
                    iana_path))
            draftPullUtility.check_name_no_revision_exist(iana_path, LOGGER)

            LOGGER.info('Checking for early revision in {}'.format(iana_path))
            draftPullUtility.check_early_revisions(iana_path, LOGGER)

            execution_result = run_populate_script(iana_path, notify_indexing,
                                                   LOGGER)
            if execution_result == False:
                populate_error = True
                message = {
                    'label': 'IANA modules',
                    'message': 'Error while calling populate script'
                }
                messages.append(message)
            else:
                message = {
                    'label': 'IANA modules',
                    'message': 'populate script finished successfully'
                }
                messages.append(message)

    except Exception as e:
        LOGGER.exception('Exception found while running draftPullLocal script')
        job_log(start_time,
                temp_dir,
                error=str(e),
                status='Fail',
                filename=os.path.basename(__file__))
        raise e
    if not populate_error:
        LOGGER.info('Job finished successfully')
    else:
        LOGGER.info(
            'Job finished, but errors found while calling populate script')
    job_log(start_time,
            temp_dir,
            messages=messages,
            status='Success',
            filename=os.path.basename(__file__))
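
The three nearly identical populate-and-report blocks above (standard RFC, experimental and IANA modules) differ only in the directory and the message label. They could be collapsed into one helper; this is purely a refactoring sketch and not part of the original script:

def populate_and_report(directory, label, notify_indexing, logger, messages):
    """Sketch: run the populate script for one directory and append a result message."""
    ok = run_populate_script(directory, notify_indexing, logger)
    messages.append({
        'label': label,
        'message': 'populate script finished successfully' if ok else 'Error while calling populate script'
    })
    return not ok  # True means the populate script failed

Each block above would then reduce to something like populate_error |= populate_and_report(iana_path, 'IANA modules', notify_indexing, LOGGER, messages).
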
Example #10
def main(scriptConf=None):
    start_time = int(time.time())
    if scriptConf is None:
        scriptConf = ScriptConfig()
    api_protocol = scriptConf.api_protocol
    ip = scriptConf.ip
    api_port = scriptConf.api_port
    is_uwsgi = scriptConf.is_uwsgi
    temp_dir = scriptConf.temp_dir
    log_directory = scriptConf.log_directory
    save_file_dir = scriptConf.save_file_dir
    yang_models = scriptConf.yang_models
    credentials = scriptConf.credentials
    json_ytree = scriptConf.json_ytree

    LOGGER = log.get_logger('sandbox', '{}/sandbox.log'.format(log_directory))

    separator = ':'
    suffix = api_port
    if is_uwsgi == 'True':
        separator = '/'
        suffix = 'api'

    yangcatalog_api_prefix = '{}://{}{}{}/'.format(api_protocol, ip, separator,
                                                   suffix)
    # yangcatalog_api_prefix = 'https://yangcatalog.org/api/'
    url = '{}search/modules'.format(yangcatalog_api_prefix)
    LOGGER.info('Getting all the modules from: {}'.format(url))
    response = requests.get(url, headers={'Accept': 'application/json'})

    all_existing_modules = response.json().get('module', [])

    global path
    path = '{}/semver_prepare.json'.format(temp_dir)

    all_modules = get_list_of_unique_modules(all_existing_modules)
    LOGGER.info('Number of unique modules: {}'.format(
        len(all_modules['module'])))

    # Uncomment the next line to read data from the file semver_prepare.json
    # all_modules = load_from_json(path)

    # Initialize ModulesComplicatedAlgorithms
    direc = '/var/yang/tmp'

    num_of_modules = len(all_modules['module'])
    chunk_size = 100
    chunks = (num_of_modules - 1) // chunk_size + 1
    for i in range(chunks):
        try:
            LOGGER.info('Processing chunk {} out of {}'.format(i, chunks))
            batch = all_modules['module'][i * chunk_size:(i + 1) * chunk_size]
            batch_modules = {'module': batch}
            recursion_limit = sys.getrecursionlimit()
            sys.setrecursionlimit(50000)
            complicatedAlgorithms = ModulesComplicatedAlgorithms(
                log_directory, yangcatalog_api_prefix, credentials,
                save_file_dir, direc, batch_modules, yang_models, temp_dir,
                json_ytree)
            complicatedAlgorithms.parse_semver()
            sys.setrecursionlimit(recursion_limit)
            complicatedAlgorithms.populate()
        except Exception:
            LOGGER.exception(
                'Exception occurred while running ModulesComplicatedAlgorithms'
            )
            continue

    messages = [{
        'label': 'Number of modules checked',
        'message': num_of_modules
    }]
    end = time.time()
    LOGGER.info(
        'Populate took {} seconds with the main and complicated algorithm'.
        format(int(end - start_time)))
    filename = os.path.basename(__file__).split('.py')[0]
    job_log(start_time,
            temp_dir,
            filename,
            messages=messages,
            status='Success')
    LOGGER.info('Job finished successfully')
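
The chunk count above, (num_of_modules - 1) // chunk_size + 1, is integer ceiling division: it gives the number of 100-module batches needed to cover every module, for example:

# Ceiling division as used in the loop above.
num_of_modules = 250
chunk_size = 100
chunks = (num_of_modules - 1) // chunk_size + 1  # 3: two full batches of 100 plus one of 50
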
Example #11
def main(scriptConf=None):
    start_time = int(time.time())
    if scriptConf is None:
        scriptConf = ScriptConfig()
    args = scriptConf.args
    cache_directory = scriptConf.cache_directory
    log_directory = scriptConf.log_directory
    temp_dir = scriptConf.temp_dir
    var_yang = scriptConf.var_yang
    confdService = ConfdService()

    confd_backups = os.path.join(cache_directory, 'confd')
    redis_backups = os.path.join(cache_directory, 'redis')
    redis_json_backup = os.path.join(cache_directory, 'redis-json')

    LOGGER = log.get_logger('recovery', os.path.join(log_directory, 'yang.log'))
    LOGGER.info('Starting {} process of Redis database'.format(args.type))

    if 'save' == args.type:
        # Redis dump.rdb file backup
        redis_backup_file = '{}/redis/dump.rdb'.format(var_yang)
        if not os.path.exists(redis_backups):
            os.mkdir(redis_backups)
        if os.path.exists(redis_backup_file):
            redis_copy_file = os.path.join(redis_backups, '{}.rdb.gz'.format(args.name_save))
            with gzip.open(redis_copy_file, 'w') as save_file:
                with open(redis_backup_file, 'rb') as original:
                    save_file.write(original.read())
            LOGGER.info('Backup of Redis dump.rdb file created')
        else:
            LOGGER.warning('Redis dump.rdb file does not exist')

        # Backup content of Redis into JSON file
        redisConnection = RedisConnection()
        redis_modules_raw = redisConnection.get_all_modules()
        redis_vendors_raw = redisConnection.get_all_vendors()
        redis_modules_dict = json.loads(redis_modules_raw)
        redis_modules = [i for i in redis_modules_dict.values()]
        redis_vendors = json.loads(redis_vendors_raw)

        if not os.path.exists(redis_json_backup):
            os.mkdir(redis_json_backup)
        with open(os.path.join(redis_json_backup, 'backup.json'), 'w') as f:
            data = {
                'yang-catalog:catalog': {
                    'modules': redis_modules,
                    'vendors': redis_vendors
                }
            }
            json.dump(data, f)

        num_of_modules = len(redis_modules)
        num_of_vendors = len(redis_vendors.get('vendor', []))
        messages = [
            {'label': 'Saved modules', 'message': num_of_modules},
            {'label': 'Saved vendors', 'message': num_of_vendors}
        ]
        LOGGER.info('Save completed successfully')
        filename = '{} - save'.format(os.path.basename(__file__).split('.py')[0])
        job_log(start_time, temp_dir, messages=messages, status='Success', filename=filename)
    else:
        file_name = ''
        if args.name_load:
            file_name = os.path.join(confd_backups, args.name_load)
        else:
            list_of_backups = get_list_of_backups(confd_backups)
            file_name = os.path.join(confd_backups, list_of_backups[-1])

        redisConnection = RedisConnection()
        redis_modules = redisConnection.get_all_modules()
        yang_catalog_module = redisConnection.get_module('yang-catalog@2018-04-03/ietf')

        if '{}' in (redis_modules, yang_catalog_module):
            # RDB does not exist - load from JSON
            backup_path = os.path.join(redis_json_backup, 'backup.json')
            modules = []
            vendors = []
            if os.path.exists(backup_path):
                with open(backup_path, 'r') as file_load:
                    catalog_data = json.load(file_load)
                    modules = catalog_data.get('yang-catalog:catalog', {}).get('modules', {}).get('module', [])
                    vendors = catalog_data.get('yang-catalog:catalog', {}).get('vendors', {}).get('vendor', [])
            else:
                if file_name.endswith('.gz'):
                    with gzip.open(file_name, 'r') as file_load:
                        LOGGER.info('Loading file {}'.format(file_load.name))
                        catalog_data = json.loads(file_load.read().decode())
                        modules = catalog_data.get('yang-catalog:catalog', {}).get('modules', {}).get('module', [])
                        vendors = catalog_data.get('yang-catalog:catalog', {}).get('vendors', {}).get('vendor', [])
                elif file_name.endswith('.json'):
                    with open(file_name, 'r') as file_load:
                        LOGGER.info('Loading file {}'.format(file_load.name))
                        catalog_data = json.load(file_load)
                        modules = catalog_data.get('yang-catalog:catalog', {}).get('modules', {}).get('module', [])
                        vendors = catalog_data.get('yang-catalog:catalog', {}).get('vendors', {}).get('vendor', [])
                else:
                    print('unable to load modules - ending')

            redisConnection.populate_modules(modules)
            redisConnection.populate_implementation(vendors)
            redisConnection.reload_modules_cache()
            redisConnection.reload_vendors_cache()
            LOGGER.info('All the modules data set to Redis successfully')

        # Retry the HEAD request until ConfD responds or the retries run out
        tries = 4
        while True:
            try:
                response = confdService.head_confd()
                LOGGER.info('Status code for HEAD request {} '.format(response.status_code))
                if response.status_code == 200:
                    yang_catalog_module = redisConnection.get_module('yang-catalog@2018-04-03/ietf')
                    error = feed_confd_modules([json.loads(yang_catalog_module)], confdService)
                    if error:
                        LOGGER.error('Error occurred while patching yang-catalog@2018-04-03/ietf module')
                    else:
                        LOGGER.info('yang-catalog@2018-04-03/ietf patched successfully')
                break
            except ConnectionError:
                if tries == 0:
                    LOGGER.exception('Unable to connect to ConfD for over 5 minutes')
                    break
                tries -= 1
                sleep(60)

    LOGGER.info('Job finished successfully')
Example #12
def main(scriptConf: t.Optional[ScriptConfig] = None):
    start_time = int(time.time())
    if scriptConf is None:
        scriptConf = ScriptConfig()
    args = scriptConf.args

    config_path = args.config_path
    config = create_config(config_path)
    protocol = config.get('General-Section', 'protocol-api')
    api_ip = config.get('Web-Section', 'ip')
    api_port = config.get('Web-Section', 'api-port')
    config_name = config.get('General-Section', 'repo-config-name')
    config_email = config.get('General-Section', 'repo-config-email')
    move_to = '{}/.'.format(config.get('Web-Section', 'public-directory'))
    is_uwsgi = config.get('General-Section', 'uwsgi')
    yang_models = config.get('Directory-Section', 'yang-models-dir')
    log_directory = config.get('Directory-Section', 'logs')
    temp_dir = config.get('Directory-Section', 'temp')
    private_dir = config.get('Web-Section', 'private-directory')

    global LOGGER
    LOGGER = log.get_logger('statistics',
                            '{}/statistics/yang.log'.format(log_directory))
    if is_uwsgi == 'True':
        separator = '/'
        suffix = 'api'
    else:
        separator = ':'
        suffix = api_port
    global yangcatalog_api_prefix
    yangcatalog_api_prefix = '{}://{}{}{}/'.format(protocol, api_ip, separator,
                                                   suffix)
    LOGGER.info('Starting statistics')
    repo = None

    # Fetch the list of all modules known by YangCatalog
    url = os.path.join(yangcatalog_api_prefix, 'search/modules')
    try:
        response = requests.get(url, headers=json_headers)
        if response.status_code != 200:
            LOGGER.error('Cannot access {}, response code: {}'.format(
                url, response.status_code))
            sys.exit(1)
        else:
            all_modules_data = response.json()
    except requests.exceptions.RequestException as e:
        LOGGER.error(
            'Cannot access {}, response code: {}\nRetrying in 120s'.format(
                url, e.response))
        time.sleep(120)
        response = requests.get(url, headers=json_headers)
        if response.status_code != 200:
            LOGGER.error('Cannot access {}, response code: {}'.format(
                url, response.status_code))
            sys.exit(1)
        else:
            all_modules_data = response.json()
            LOGGER.error('Success after retry on {}'.format(url))

    vendor_data = {}
    for module in all_modules_data['module']:
        for implementation in module.get('implementations',
                                         {}).get('implementation', []):
            if implementation['vendor'] == 'cisco':
                if implementation['os-type'] not in vendor_data:
                    vendor_data[implementation['os-type']] = {}
                version = implementation['software-version']
                if implementation['os-type'] in ('IOS-XE', 'IOS-XR'):
                    version = version.replace('.', '')
                elif implementation['os-type'] == 'NX-OS':
                    version = version.replace('(',
                                              '-').replace(')',
                                                           '-').rstrip('-')
                if version not in vendor_data[implementation['os-type']]:
                    vendor_data[implementation['os-type']][version] = set()
                vendor_data[implementation['os-type']][version].add(
                    implementation['platform'])

    try:
        # pull(yang_models) - no need to pull https://github.com/YangModels/yang as it is done daily via the SDO_analysis module

        # function needs to be renamed to something more descriptive (its purpose is not entirely clear)
        def process_platforms(versions: t.List[str], module_platforms,
                              os_type: str,
                              os_type_name: str) -> t.Tuple[list, dict]:
            platform_values = []
            json_output = {}
            for version in versions:
                path = '{}/vendor/cisco/{}/{}/platform-metadata.json'.format(
                    yang_models, os_type, version)
                try:
                    with open(path, 'r') as f:
                        data = json.load(f)
                        metadata_platforms = data['platforms']['platform']
                except Exception:
                    LOGGER.exception('Problem with opening {}'.format(path))
                    metadata_platforms = []
                values = [version]
                json_output[version] = {}
                for module_platform in module_platforms:
                    exist = '<i class="fa fa-times"></i>'
                    exist_json = False
                    if os_type_name in vendor_data:
                        if version in vendor_data[os_type_name]:
                            if module_platform in vendor_data[os_type_name][
                                    version]:
                                exist = '<i class="fa fa-check"></i>'
                                exist_json = True
                    for metadata_platform in metadata_platforms:
                        if (metadata_platform['name'] == module_platform
                                and metadata_platform['software-version']
                                == version):
                            values.append(
                                '<i class="fa fa-check"></i>/{}'.format(exist))
                            json_output[version][module_platform] = {
                                'yangcatalog': True,
                                'github': exist_json
                            }
                            break
                    else:
                        values.append(
                            '<i class="fa fa-times"></i>/{}'.format(exist))
                        json_output[version][module_platform] = {
                            'yangcatalog': False,
                            'github': exist_json
                        }
                platform_values.append(values)
            return platform_values, json_output

        os_types = (('xr', 'IOS-XR'), ('xe', 'IOS-XE'), ('nx', 'NX-OS'))

        platforms = {}
        for os_type, _ in os_types:
            platforms[os_type] = solve_platforms('{}/vendor/cisco/{}'.format(
                yang_models, os_type))

        versions = {}
        for os_type, _ in os_types:
            os_type_dir = os.path.join(yang_models, 'vendor/cisco', os_type)
            dirs = (dir for dir in os.listdir(os_type_dir)
                    if os.path.isdir(os.path.join(os_type_dir, dir)))
            versions[os_type] = sorted(dirs)

        values = {}
        json_output = {}
        for os_type, name in os_types:
            values[os_type], json_output[os_type] = process_platforms(
                versions[os_type], platforms[os_type], os_type, name)

        global all_modules_data_unique
        all_modules_data_unique = {}
        for mod in all_modules_data['module']:
            name = mod['name']
            revision = mod['revision']
            org = mod['organization']
            all_modules_data_unique['{}@{}_{}'.format(name, revision,
                                                      org)] = mod
        all_modules_data = len(all_modules_data['module'])

        # Vendors separately
        vendor_list = []

        def get_output(**kwargs) -> str:
            """run runYANGallstats with the provided kwargs as command line arguments.
            removedup is set to True by default.
            """
            kwargs.setdefault('removedup', True)
            script_conf = all_stats.ScriptConfig()
            for key, value in kwargs.items():
                setattr(script_conf.args, key, value)
            with redirect_stdout(io.StringIO()) as f:
                all_stats.main(script_conf=script_conf)
            return f.getvalue()

        for direc in next(os.walk(os.path.join(yang_models, 'vendor')))[1]:
            vendor_direc = os.path.join(yang_models, 'vendor', direc)
            if os.path.isdir(vendor_direc):
                LOGGER.info(
                    'Running runYANGallstats.py for directory {}'.format(
                        vendor_direc))
                out = get_output(rootdir=vendor_direc)
                process_data(out, vendor_list, vendor_direc, direc)

        # Vendors all together
        out = get_output(rootdir=os.path.join(yang_models, 'vendor'))
        vendor_modules = out.split(
            '{}/vendor : '.format(yang_models))[1].splitlines()[0]
        vendor_modules_ndp = out.split(
            '{}/vendor (duplicates removed): '.format(
                yang_models))[1].splitlines()[0]

        # Standard all together
        out = get_output(rootdir=os.path.join(yang_models, 'standard'))
        standard_modules = out.split(
            '{}/standard : '.format(yang_models))[1].splitlines()[0]
        standard_modules_ndp = out.split(
            '{}/standard (duplicates removed): '.format(
                yang_models))[1].splitlines()[0]

        # Standard separately
        sdo_list = []

        def process_sdo_dir(dir: str, name: str):
            out = get_output(rootdir=os.path.join(yang_models, dir))
            process_data(out, sdo_list, os.path.join(yang_models, dir), name)

        process_sdo_dir('standard/ietf/RFC', 'IETF RFCs')
        process_sdo_dir('standard/ietf/DRAFT', 'IETF drafts')
        process_sdo_dir('experimental/ietf-extracted-YANG-modules',
                        'IETF experimental drafts')
        process_sdo_dir('standard/iana', 'IANA standard')
        process_sdo_dir('standard/bbf/standard', 'BBF standard')
        process_sdo_dir('standard/etsi', 'ETSI standard')

        for direc in next(
                os.walk(os.path.join(yang_models,
                                     'standard/ieee/published')))[1]:
            ieee_direc = os.path.join(yang_models, 'standard/ieee/published',
                                      direc)
            if os.path.isdir(ieee_direc):
                process_sdo_dir(os.path.join('standard/ieee/published', direc),
                                'IEEE {} with par'.format(direc))

        for direc in next(
                os.walk(os.path.join(yang_models, 'standard/ieee/draft')))[1]:
            ieee_direc = os.path.join(yang_models, 'standard/ieee/draft',
                                      direc)
            if os.path.isdir(ieee_direc):
                process_sdo_dir(os.path.join('standard/ieee/draft', direc),
                                'IEEE draft {} with par'.format(direc))

        for direc in next(
                os.walk(os.path.join(yang_models, 'experimental/ieee')))[1]:
            ieee_direc = os.path.join(yang_models, 'experimental/ieee', direc)
            if os.path.isdir(ieee_direc):
                process_sdo_dir(os.path.join('experimental/ieee', direc),
                                'IEEE {} no par'.format(direc))

        process_sdo_dir('standard/mef/src/model/standard', 'MEF standard')
        process_sdo_dir('standard/mef/src/model/draft', 'MEF draft')

        # OpenConfig modules come from a different repo, which is why we need to clone their models from GitHub
        LOGGER.info('Cloning the repo')
        repo = repoutil.ModifiableRepoUtil(os.path.join(
            github_url, 'openconfig/public'),
                                           clone_options={
                                               'config_username': config_name,
                                               'config_user_email':
                                               config_email
                                           })

        out = get_output(
            rootdir=os.path.join(repo.local_dir, 'release/models'))
        process_data(out, sdo_list,
                     os.path.join(repo.local_dir, 'release/models'),
                     'openconfig')

        context = {
            'table_sdo': sdo_list,
            'table_vendor': vendor_list,
            'num_yang_files_vendor': vendor_modules,
            'num_yang_files_vendor_ndp': vendor_modules_ndp,
            'num_yang_files_standard': standard_modules,
            'num_yang_files_standard_ndp': standard_modules_ndp,
            'num_parsed_files': all_modules_data,
            'num_unique_parsed_files': len(all_modules_data_unique),
            'xr': platforms['xr'],
            'xe': platforms['xe'],
            'nx': platforms['nx'],
            'xr_values': values['xr'],
            'xe_values': values['xe'],
            'nx_values': values['nx'],
            'current_date': time.strftime('%d/%m/%y')
        }
        LOGGER.info('Rendering data')
        with open('{}/stats/stats.json'.format(private_dir), 'w') as f:
            for sdo in sdo_list:
                sdo['percentage_compile'] = float(
                    sdo['percentage_compile'].split(' ')[0])
                sdo['percentage_extra'] = float(
                    sdo['percentage_extra'].split(' ')[0])
            for vendor in vendor_list:
                vendor['percentage_compile'] = float(
                    vendor['percentage_compile'].split(' ')[0])
                vendor['percentage_extra'] = float(
                    vendor['percentage_extra'].split(' ')[0])
            output = {
                'table_sdo': sdo_list,
                'table_vendor': vendor_list,
                'num_yang_files_vendor': int(vendor_modules),
                'num_yang_files_vendor_ndp': int(vendor_modules_ndp),
                'num_yang_files_standard': int(standard_modules),
                'num_yang_files_standard_ndp': int(standard_modules_ndp),
                'num_parsed_files': all_modules_data,
                'num_unique_parsed_files': len(all_modules_data_unique),
                'xr': json_output['xr'],
                'xe': json_output['xe'],
                'nx': json_output['nx'],
                'current_date': time.strftime('%d/%m/%y')
            }
            json.dump(output, f)
        result = render(
            os.path.join(os.environ['BACKEND'],
                         'statistic/template/stats.html'), context)
        with open(
                os.path.join(os.environ['BACKEND'],
                             'statistic/statistics.html'), 'w+') as f:
            f.write(result)

        file_from = os.path.abspath(
            os.path.join(os.environ['BACKEND'], 'statistic/statistics.html'))
        file_to = os.path.join(os.path.abspath(move_to), 'statistics.html')
        resolved_path_file_to = os.path.realpath(file_to)
        if move_to != './':
            if os.path.exists(resolved_path_file_to):
                os.remove(resolved_path_file_to)
            shutil.move(file_from, resolved_path_file_to)
        end_time = int(time.time())
        total_time = end_time - start_time
        LOGGER.info('Final time in seconds to produce statistics {}'.format(
            total_time))
    except Exception as e:
        LOGGER.exception('Exception found while running statistics script')
        job_log(start_time,
                temp_dir,
                error=str(e),
                status='Fail',
                filename=os.path.basename(__file__))
        raise Exception(e)
    job_log(start_time,
            temp_dir,
            status='Success',
            filename=os.path.basename(__file__))
    LOGGER.info('Job finished successfully')
Example #13
0
def main():
    start_time = int(time.time())
    parser = argparse.ArgumentParser()

    parser.add_argument('--config-path',
                        type=str,
                        default=os.environ['YANGCATALOG_CONFIG_PATH'],
                        help='Set path to config file')
    parser.add_argument(
        '--compress',
        action='store_true',
        default=True,
        help='Set whether to compress snapshot files. Default is True')
    args = parser.parse_args()
    config_path = args.config_path
    config = create_config(config_path)
    log_directory = config.get('Directory-Section', 'logs')
    temp_dir = config.get('Directory-Section', 'temp')
    ys_users = config.get('Directory-Section', 'ys-users')
    cache_directory = config.get('Directory-Section', 'cache')
    es_aws = config.get('DB-Section', 'es-aws')

    log_file_path = os.path.join(log_directory, 'jobs', 'removeUnused.log')
    LOGGER = log.get_logger('removeUnused', log_file_path)
    LOGGER.info('Starting Cron job remove unused files')

    current_time = time.time()
    cutoff = current_time - DAY
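    # anything created more than one day ago is considered stale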
    try:
        LOGGER.info('Removing old tmp directory representing int folders')
        for dir in next(os.walk(temp_dir))[1]:
            if represents_int(dir):
                creation_time = os.path.getctime(os.path.join(temp_dir, dir))
                if creation_time < cutoff:
                    shutil.rmtree(os.path.join(temp_dir, dir))

        LOGGER.info('Removing old ys temporary users')
        dirs = os.listdir(ys_users)
        for dir in dirs:
            abs = os.path.abspath('{}/{}'.format(ys_users, dir))
            if not abs.endswith('yangcat') and not abs.endswith('yang'):
                try:
                    shutil.rmtree(abs)
                except Exception:
                    pass

        LOGGER.info('Removing old correlation ids')
        # removing correlation ids from file that are older than a day
        # Be lenient to missing files
        try:
            filename = open('{}/correlation_ids'.format(temp_dir), 'r')
            lines = filename.readlines()
            filename.close()
        except IOError:
            lines = []
        with open('{}/correlation_ids'.format(temp_dir), 'w') as filename:
            for line in lines:
                line_datetime = line.split(' -')[0]
                t = dt.strptime(line_datetime, '%a %b %d %H:%M:%S %Y')
                diff = dt.now() - t
                if diff.days == 0:
                    filename.write(line)

        LOGGER.info('Removing old yangvalidator cache dirs')
        yang_validator_cache = os.path.join(temp_dir, 'yangvalidator')
        cutoff = current_time - 2 * DAY
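        # yangvalidator cache directories are kept for two days before removal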
        dirs = os.listdir(yang_validator_cache)
        for dir in dirs:
            if dir.startswith('yangvalidator-v2-cache-'):
                creation_time = os.path.getctime(
                    os.path.join(yang_validator_cache, dir))
                if creation_time < cutoff:
                    try:
                        shutil.rmtree(os.path.join(yang_validator_cache, dir))
                    except PermissionError:
                        LOGGER.exception(
                            'Problem while deleting {}'.format(dir))
                        continue

        if es_aws != 'True':
            LOGGER.info('Removing old elasticsearch snapshots')
            es_manager = ESManager()
            es_manager.create_snapshot_repository(args.compress)
            sorted_snapshots = es_manager.get_sorted_snapshots()

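            # keep only the five most recent snapshots, delete the rest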
            for snapshot in sorted_snapshots[:-5]:
                es_manager.delete_snapshot(snapshot['snapshot'])

        def hash_file(path: str) -> bytes:
            sha1 = hashlib.sha1()

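            # read the file in BLOCK_SIZE chunks so large files are not loaded into memory at once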
            with open(path, 'rb') as byte_file:
                while True:
                    data = byte_file.read(BLOCK_SIZE)
                    if not data:
                        break
                    sha1.update(data)

            return sha1.digest()

        def hash_node(path: str) -> bytes:
            if os.path.isfile(path):
                return hash_file(path)
            elif os.path.isdir(path):
                sha1 = hashlib.sha1()
                for root, _, filenames in os.walk(path):
                    for filename in filenames:
                        file_path = os.path.join(root, filename)
                        # we only want to compare the contents, not the top directory name
                        relative_path = file_path[len(path):]
                        file_signature = relative_path.encode() + hash_file(
                            file_path)
                        sha1.update(file_signature)
                return sha1.digest()
            else:
                assert False

        # Backup retention policy: within the last two months, keep the newest backup for each
        # distinct content; for the four months before that (six months in total), keep only the
        # newest backup per month; remove everything older than six months.
        def remove_old_backups(subdir: str):
            backup_directory = os.path.join(cache_directory, subdir)
            list_of_backups = get_list_of_backups(backup_directory)
            backup_name_latest = os.path.join(backup_directory,
                                              list_of_backups[-1])

            def diff_month(later_datetime, earlier_datetime):
                return (later_datetime.year - earlier_datetime.year
                        ) * 12 + later_datetime.month - earlier_datetime.month

            to_remove = []
            last_six_months = {}
            last_two_months = {}

            today = dt.now()
            for backup in list_of_backups:
                backup_dt = dt.strptime(backup[:backup.index('.')],
                                        backup_date_format)
                month_difference = diff_month(today, backup_dt)
                if month_difference > 6:
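                    # backups older than six months are always removed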
                    to_remove.append(backup)
                elif month_difference > 2:
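                    # between two and six months old: keep only the newest backup per calendar month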
                    month = backup_dt.month
                    if month in last_six_months:
                        if last_six_months[month] > backup:
                            to_remove.append(backup)
                        else:
                            to_remove.append(last_six_months[month])
                            last_six_months[month] = backup
                    else:
                        last_six_months[month] = backup
                else:
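                    # within the last two months: keep the newest backup for each distinct content hash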
                    backup_path = os.path.join(backup_directory, backup)
                    currently_processed_backup_hash = hash_node(backup_path)
                    if currently_processed_backup_hash in last_two_months:
                        if last_two_months[currently_processed_backup_hash] > backup:
                            to_remove.append(backup)
                        else:
                            to_remove.append(last_two_months[currently_processed_backup_hash])
                            last_two_months[currently_processed_backup_hash] = backup
                    else:
                        last_two_months[currently_processed_backup_hash] = backup
            for backup in to_remove:
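                # the newest backup is never deleted, even if it was marked for removal above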
                backup_path = os.path.join(backup_directory, backup)
                if backup_path != backup_name_latest:
                    if os.path.isdir(backup_path):
                        shutil.rmtree(backup_path)
                    elif os.path.isfile(backup_path):
                        os.unlink(backup_path)

        LOGGER.info('Removing old cache json files')
        remove_old_backups('confd')
    except Exception as e:
        LOGGER.exception('Exception found while running removeUnused script')
        job_log(start_time,
                temp_dir,
                error=str(e),
                status='Fail',
                filename=os.path.basename(__file__))
        raise e
    job_log(start_time,
            temp_dir,
            status='Success',
            filename=os.path.basename(__file__))
    LOGGER.info('Job finished successfully')