Example #1
    def test_parse_dependents(self, mock_requests_get: mock.MagicMock):
        # 'new' modules are fed to the algorithm; 'existing' modules are returned by the mocked API call
        payload = self.payloads['parse_dependents']
        all_modules = {'module': payload[0]['new']}
        mock_requests_get.return_value.json.return_value = {'module': payload[0]['existing']}

        complicatedAlgorithms = ModulesComplicatedAlgorithms(yc_gc.logs_dir, self.yangcatalog_api_prefix,
                                                             yc_gc.credentials, self.save_file_dir,
                                                             yc_gc.temp_dir, all_modules, yc_gc.yang_models, yc_gc.temp_dir,
                                                             yc_gc.json_ytree)
        complicatedAlgorithms.parse_requests()
        new = complicatedAlgorithms.new_modules
        # e1 and n1 gain dependents; e2 and n2 are not updated for revision '1'
        self.assertIn({'name': 'n1', 'revision': '1'}, new['e1']['1']['dependents'])
        self.assertIn({'name': 'n2', 'revision': '1'}, new['e1']['1']['dependents'])
        self.assertNotIn('1', new['e2'])
        self.assertIn({'name': 'n2', 'revision': '1'}, new['n1']['1']['dependents'])
        self.assertIn({'name': 'e2', 'revision': '1'}, new['n1']['1']['dependents'])
        self.assertNotIn('1', new['n2'])
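
The test above relies on scaffolding that the excerpt does not show: the payloads fixture and the patched requests.get. A minimal sketch of that scaffolding, assuming a JSON fixture file and a plain mock.patch('requests.get') decorator (both assumptions, not taken from the example), might look like this:

import json
import os
import unittest
from unittest import mock

class TestModulesComplicatedAlgorithms(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        # Hypothetical fixture loading: the file name and location are assumptions.
        with open(os.path.join(os.path.dirname(__file__), 'resources', 'payloads.json')) as f:
            cls.payloads = json.load(f)

    @mock.patch('requests.get')
    def test_parse_dependents(self, mock_requests_get: mock.MagicMock):
        ...  # body as shown in Example #1 above
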
Example #2
                         set_key=key,
                         apiIp=args.api_ip)
    if not args.api:
        thread = None
        if not args.force_indexing:
            thread = threading.Thread(target=reload_cache_in_parallel)
            thread.start()
            LOGGER.info('Run complicated algorithms')
            complicatedAlgorithms = ModulesComplicatedAlgorithms(
                log_directory, yangcatalog_api_prefix, args.credentials,
                args.protocol, args.ip, args.port, args.save_file_dir, direc,
                None, yang_models, temp_dir)
            complicatedAlgorithms.parse_non_requests()
            LOGGER.info('Waiting for cache reload to finish')
            thread.join()
            complicatedAlgorithms.parse_requests()
            LOGGER.info('Populating with new data of complicated algorithms')
            complicatedAlgorithms.populate()
        else:
            url = (yangcatalog_api_prefix + 'load-cache')
            LOGGER.info('{}'.format(url))
            response = requests.post(url,
                                     None,
                                     auth=(args.credentials[0],
                                           args.credentials[1]))
            if response.status_code != 201:
                LOGGER.warning('Could not send a load-cache request')

        try:
            shutil.rmtree('{}'.format(direc))
        except OSError:
            pass  # the temporary directory may already have been removed
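
Example #2 starts reload_cache_in_parallel in a background thread while the complicated algorithms run locally, but the helper itself is not part of the excerpt. A minimal sketch, assuming it simply mirrors the explicit load-cache POST of the else branch and reuses the surrounding args, LOGGER and yangcatalog_api_prefix names, might be:

import requests

def reload_cache_in_parallel():
    # Hypothetical helper: posts the same load-cache request as the else branch
    # above, so the cache reload runs while parse_non_requests() keeps working.
    url = yangcatalog_api_prefix + 'load-cache'
    response = requests.post(url, None,
                             auth=(args.credentials[0], args.credentials[1]))
    if response.status_code != 201:
        LOGGER.warning('Could not send a load-cache request')
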
Example #3
def on_request(ch, method, props, body):
    """Function called when something was sent from API sender. This function
    will process all the requests that would take too long to process for API.
    When the processing is done we will sent back the result of the request
    which can be either 'Failed' or 'Finished successfully' with corespondent
    correlation id. If the request 'Failed' it will sent back also a reason why
    it failed.
            Arguments:
                :param body: (str) String of arguments that need to be processed
                separated by '#'.
    """
    try:
        if sys.version_info >= (3, 4):
            body = body.decode(encoding='utf-8', errors='strict')
        LOGGER.info('Received request with body {}'.format(body))
        arguments = body.split('#')
        if body == 'run_ietf':
            LOGGER.info('Running all ietf and openconfig modules')
            final_response = run_ietf()
        elif 'github' == arguments[-1]:
            LOGGER.info('Github automated message starting to populate')
            paths_plus = arguments[arguments.index('repoLocalDir'):]
            LOGGER.info('paths plus {}'.format(paths_plus))
            arguments = arguments[:arguments.index('repoLocalDir')]
            LOGGER.info('arguments {}'.format(arguments))
            paths = paths_plus[1:-2]
            LOGGER.info('paths {}'.format(paths))
            try:
                for path in paths:
                    with open(temp_dir + "/log_trigger.txt", "w") as f:
                        local_dir = paths_plus[-2]
                        arguments = arguments + [
                            "--dir", local_dir + "/" + path
                        ]
                        subprocess.check_call(arguments, stderr=f)
                final_response = __response_type[1]
            except subprocess.CalledProcessError as e:
                final_response = __response_type[0]
                mf = messageFactory.MessageFactory()
                mf.send_automated_procedure_failed(
                    arguments, temp_dir + "/log_no_sdo_api.txt")
                LOGGER.error(
                    'check log_trigger.txt Error calling process populate.py because {}\n\n with error {}'
                    .format(e.stdout, e.stderr))
            except Exception:
                final_response = __response_type[0]
                LOGGER.error(
                    "check log_trigger.txt failed to process github message with error {}"
                    .format(sys.exc_info()[0]))
        else:
            global all_modules
            all_modules = None
            # Dispatch on the argument layout: module/vendor deletion, SDO population or vendor population
            if arguments[-3] == 'DELETE':
                LOGGER.info('Deleting single module')
                if 'http' in arguments[0]:
                    final_response = process_module_deletion(arguments)
                    credentials = arguments[3:5]
                else:
                    final_response = process_vendor_deletion(arguments)
                    credentials = arguments[7:9]
            elif arguments[-3] == 'DELETE_MULTIPLE':
                LOGGER.info('Deleting multiple modules')
                final_response = process_module_deletion(arguments, True)
                credentials = arguments[3:5]
            elif '--sdo' in arguments[2]:
                final_response = process_sdo(arguments)
                credentials = arguments[11:13]
                direc = arguments[6]
                shutil.rmtree(direc)
            else:
                final_response = process_vendor(arguments)
                credentials = arguments[10:12]
                direc = arguments[5]
                shutil.rmtree(direc)
            # On success, reload the API cache and run the complicated algorithms on the new modules
            if final_response.split('#split#')[0] == __response_type[1]:
                res = make_cache(credentials)
                if res.status_code != 201:
                    final_response = __response_type[0] + '#split#Server error-> could not reload cache'

                if all_modules:
                    complicatedAlgorithms = ModulesComplicatedAlgorithms(
                        log_directory, yangcatalog_api_prefix, credentials,
                        confd_protocol, confd_ip, confdPort, save_file_dir,
                        None, all_modules, yang_models, temp_dir)
                    complicatedAlgorithms.parse_non_requests()
                    complicatedAlgorithms.parse_requests()
                    complicatedAlgorithms.populate()
    except Exception as e:
        final_response = __response_type[0]
        LOGGER.error("receiver failed with message {}".format(e))
    LOGGER.info('Receiver is done with id - {} and message = {}'.format(
        props.correlation_id, str(final_response)))

    with open('{}/correlation_ids'.format(temp_dir), 'r') as f:
        lines = f.readlines()
    with open('{}/correlation_ids'.format(temp_dir), 'w') as f:
        for line in lines:
            if props.correlation_id in line:
                new_line = '{} -- {} - {}\n'.format(datetime.now().ctime(),
                                                    props.correlation_id,
                                                    str(final_response))
                f.write(new_line)
            else:
                f.write(line)
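
The receiver in Example #3 expects the message body to be a single '#'-separated argument string and reports the result under the request's correlation id. A hedged sketch of a matching sender, assuming a RabbitMQ queue named 'module_queue' and a plain pika BlockingConnection (both assumptions; the real sender lives in the API code), could look like this:

import pika

def send_to_receiver(arguments: list, correlation_id: str, host: str = '127.0.0.1'):
    # Hypothetical sender: only the '#'-separated body format and the use of a
    # correlation id come from the receiver above; queue name and host are assumed.
    connection = pika.BlockingConnection(pika.ConnectionParameters(host=host))
    channel = connection.channel()
    channel.queue_declare(queue='module_queue')
    channel.basic_publish(
        exchange='',
        routing_key='module_queue',
        properties=pika.BasicProperties(correlation_id=correlation_id),
        body='#'.join(arguments))
    connection.close()
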
Example #4
def main(scriptConf=None):
    if scriptConf is None:
        scriptConf = ScriptConfig()
    args = scriptConf.args
    log_directory = scriptConf.log_directory
    is_uwsgi = scriptConf.is_uwsgi
    yang_models = scriptConf.yang_models
    temp_dir = scriptConf.temp_dir
    cache_dir = scriptConf.cache_dir
    json_ytree = scriptConf.json_ytree
    global LOGGER
    LOGGER = log.get_logger('populate', '{}/parseAndPopulate.log'.format(log_directory))

    separator = ':'
    suffix = args.api_port
    if is_uwsgi == 'True':
        separator = '/'
        suffix = 'api'
    yangcatalog_api_prefix = '{}://{}{}{}/'.format(args.api_protocol, args.api_ip, separator, suffix)
    confdService = ConfdService()
    redisConnection = RedisConnection()
    LOGGER.info('Starting the populate script')
    start = time.time()
    if args.api:
        json_dir = args.dir
    else:
        json_dir = create_dir_name(temp_dir)
        os.makedirs(json_dir)
    LOGGER.info('Calling runCapabilities script')
    try:
        runCapabilities = import_module('parseAndPopulate.runCapabilities')
        script_conf = configure_runCapabilities(runCapabilities, args, json_dir)
        runCapabilities.main(scriptConf=script_conf)
    except Exception as e:
        LOGGER.exception('runCapabilities error:\n{}'.format(e))
        raise e

    body_to_send = {}
    if args.notify_indexing:
        LOGGER.info('Sending files for indexing')
        body_to_send = prepare_for_es_indexing(yangcatalog_api_prefix, os.path.join(json_dir, 'prepare.json'),
                                               LOGGER, args.save_file_dir, force_indexing=args.force_indexing)

    LOGGER.info('Populating yang catalog with data. Starting to add modules')
    with open(os.path.join(json_dir, 'prepare.json')) as data_file:
        data = data_file.read()
    modules = json.loads(data).get('module', [])
    errors = confdService.patch_modules(modules)
    redisConnection.populate_modules(modules)

    # Add vendor implementation data if any capability files were parsed
    if os.path.exists(os.path.join(json_dir, 'normal.json')):
        LOGGER.info('Starting to add vendors')
        with open(os.path.join(json_dir, 'normal.json')) as data:
            try:
                vendors = json.loads(data.read())['vendors']['vendor']
            except KeyError as e:
                LOGGER.error('No files were parsed. This probably means the directory is missing capability xml files')
                raise e
        errors = errors or confdService.patch_vendors(vendors)
        redisConnection.populate_implementation(vendors)
    if body_to_send:
        LOGGER.info('Sending files for indexing')
        send_for_es_indexing(body_to_send, LOGGER, scriptConf.changes_cache_path, scriptConf.delete_cache_path,
                             scriptConf.lock_file)
    if modules:
        process_reload_cache = multiprocessing.Process(target=reload_cache_in_parallel,
                                                       args=(args.credentials, yangcatalog_api_prefix,))
        process_reload_cache.start()
        LOGGER.info('Running ModulesComplicatedAlgorithms from populate.py script')
        recursion_limit = sys.getrecursionlimit()
        sys.setrecursionlimit(50000)
        complicatedAlgorithms = ModulesComplicatedAlgorithms(log_directory, yangcatalog_api_prefix,
                                                             args.credentials, args.save_file_dir, json_dir, None,
                                                             yang_models, temp_dir, json_ytree)
        complicatedAlgorithms.parse_non_requests()
        LOGGER.info('Waiting for cache reload to finish')
        process_reload_cache.join()
        complicatedAlgorithms.parse_requests()
        sys.setrecursionlimit(recursion_limit)
        LOGGER.info('Populating with new data of complicated algorithms')
        complicatedAlgorithms.populate()
        end = time.time()
        LOGGER.info('Populate took {} seconds with the main and complicated algorithm'.format(int(end - start)))

        # Keep new hashes only if the ConfD was patched successfully
        if not errors:
            path = os.path.join(json_dir, 'temp_hashes.json')
            fileHasher = FileHasher('backend_files_modification_hashes', cache_dir, not args.force_parsing, log_directory)
            updated_hashes = fileHasher.load_hashed_files_list(path)
            if updated_hashes:
                fileHasher.merge_and_dump_hashed_files_list(updated_hashes)

    LOGGER.info('Populate script finished successfully')
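
All four examples drive ModulesComplicatedAlgorithms through the same basic sequence, even though the constructor signature differs between versions of the code. A condensed sketch of that shared pattern, with the constructor arguments abbreviated and the surrounding names reused from Example #4, is:

# Condensed usage pattern distilled from the examples above; the exact
# constructor arguments vary between the snippets.
complicatedAlgorithms = ModulesComplicatedAlgorithms(log_directory, yangcatalog_api_prefix,
                                                     args.credentials, args.save_file_dir, json_dir,
                                                     None, yang_models, temp_dir, json_ytree)
complicatedAlgorithms.parse_non_requests()  # work that needs no API requests (skipped in Example #1)
complicatedAlgorithms.parse_requests()      # work that queries the catalog API, e.g. resolving dependents
complicatedAlgorithms.populate()            # push the updated modules back to the catalog
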