Ejemplo n.º 1
0
    def __init__(self):
        """Load fake images, image members and metadef namespaces from the
        JSON files produced by generate_load_data.
        """
        # Load Images from file
        self._images = []
        with open(generate_load_data.IMAGES_FILE, "r+b") as file:
            image_data = jsonutils.load(file)
        for image in image_data:
            fake_image = FakeImage(**image)
            self._images.append(fake_image)
        self.images = FakeImages(self._images)

        # Load Images members from file
        self._images_members_dict = dict()
        self._image_members_list = []
        with open(generate_load_data.IMAGE_MEMBERS_FILE, "r+b") as file:
            image_members_data = jsonutils.load(file)
        for image_id, image_members in image_members_data.items():
            # Collect this image's members in their own list. Previously the
            # single shared list was stored for every image_id, so each key
            # ended up mapped to the members of ALL images seen so far.
            members = []
            for image_member in image_members:
                fake_image_member = FakeImageMember(**image_member)
                members.append(fake_image_member)
                # keep the flat list of every member as before
                self._image_members_list.append(fake_image_member)
            self._images_members_dict[image_id] = members
        self.image_members = FakeImageMembers(self._images_members_dict)

        # Load Metadef namespaces from file
        self._metadefs_namespace = []
        with open(generate_load_data.METADEFS_FILE, "r+b") as file:
            metadefs_namespace_data = jsonutils.load(file)
        for metadef_namespace in metadefs_namespace_data:
            fake_namespace = FakeNamespace(**metadef_namespace)
            self._metadefs_namespace.append(fake_namespace)
        self.metadefs_namespace = FakeNamespaces(self._metadefs_namespace)
Ejemplo n.º 2
0
    def __init__(self):
        """Load fake images, image members and metadef namespaces from the
        JSON files produced by generate_load_data.
        """
        # Load Images from file
        self._images = []
        with open(generate_load_data.IMAGES_FILE, "r+b") as file:
            image_data = jsonutils.load(file)
        for image in image_data:
            fake_image = FakeImage(**image)
            self._images.append(fake_image)
        self.images = FakeImages(self._images)

        # Load Images members from file
        self._images_members_dict = dict()
        self._image_members_list = []
        with open(generate_load_data.IMAGE_MEMBERS_FILE, "r+b") as file:
            image_members_data = jsonutils.load(file)
        for image_id, image_members in image_members_data.items():
            # Collect this image's members in their own list. Previously the
            # single shared list was stored for every image_id, so each key
            # ended up mapped to the members of ALL images seen so far.
            members = []
            for image_member in image_members:
                fake_image_member = FakeImageMember(**image_member)
                members.append(fake_image_member)
                # keep the flat list of every member as before
                self._image_members_list.append(fake_image_member)
            self._images_members_dict[image_id] = members
        self.image_members = FakeImageMembers(self._images_members_dict)

        # Load Metadef namespaces from file
        self._metadefs_namespace = []
        with open(generate_load_data.METADEFS_FILE, "r+b") as file:
            metadefs_namespace_data = jsonutils.load(file)
        for metadef_namespace in metadefs_namespace_data:
            fake_namespace = FakeNamespace(**metadef_namespace)
            self._metadefs_namespace.append(fake_namespace)
        self.metadefs_namespace = FakeNamespaces(self._metadefs_namespace)
Ejemplo n.º 3
0
    def check_scale(self):
        """Apply the scaling operations from the test case (or from
        scale.json in the template directory) and wait for the cluster
        to settle."""
        scale_ops = self.testcase.get('scaling', [])
        if not scale_ops:
            scale_path = os.path.join(self.template_path, 'scale.json')
            if os.path.exists(scale_path):
                with open(scale_path) as data:
                    scale_ops = json.load(data)

        body = {}
        for op in scale_ops:
            operation = op['operation']
            if operation == 'add':
                body.setdefault('add_node_groups', []).append({
                    'node_group_template_id':
                    self.ng_id_map[op['node_group']],
                    'count': op['size'],
                    'name': utils.rand_name(op['node_group']),
                })
            if operation == 'resize':
                body.setdefault('resize_node_groups', []).append({
                    'name': self.ng_name_map[op['node_group']],
                    'count': op['size'],
                })

        if body:
            self.sahara.scale_cluster(self.cluster_id, body)
            self._poll_cluster_status(self.cluster_id)
Ejemplo n.º 4
0
    def _load_zones(self):
        """Read the configured zone definition file and build the
        host -> port -> zone mapping.

        :returns: dict of {host: {port: {zone: (subnet, vlan, used)}}};
            empty when no zone file is configured.
        """
        zonefile = cfg.CONF.ml2_snabb.zone_definition_file
        networks = {}
        if zonefile != '':
            # Use a context manager so the file handle is always closed;
            # the original leaked the object returned by open().
            with open(zonefile) as fp:
                zonelines = jsonutils.load(fp)
            for entry in zonelines:
                host = entry["host"].strip()
                port = entry["port"].strip()
                zone = int(entry["zone"])
                vlan = int(entry["vlan"])
                subnet = netaddr.IPNetwork(entry["subnet"])
                # addresses already in use within this zone
                used = [IPAddress(u) for u in entry["used"]]
                networks.setdefault(host, {})
                networks[host].setdefault(port, {})
                networks[host][port][zone] = (subnet, vlan, used)
                LOG.debug(
                    "Loaded zone host:%s port:%s "
                    "zone:%s subnet:%s vlan:%s", host, port, zone, subnet,
                    vlan)

        return networks
Ejemplo n.º 5
0
 def _load_file(self, handle):
     """Return the decoded JSON contents of *handle*.

     On a decode failure the error is logged and an empty dict is
     returned. Broken out for testing.
     """
     try:
         options = jsonutils.load(handle)
     except ValueError as e:
         LOG.exception(_LE("Could not decode scheduler options: '%s'"), e)
         options = {}
     return options
Ejemplo n.º 6
0
    def _create_node_group_templates(self):
        """Create a node group template for every definition in the test
        case (or every node_group_template_*.json under the template
        directory) and return a mapping of template name -> created id."""
        if self.network['type'] == 'neutron':
            floating_ip_pool = self.neutron.get_network_id(
                self.network['public_network'])
        elif not self.network['auto_assignment_floating_ip']:
            floating_ip_pool = self.network['public_network']
        else:
            floating_ip_pool = None

        if self.testcase.get('node_group_templates'):
            node_groups = list(self.testcase['node_group_templates'])
        else:
            node_groups = []
            pattern = os.path.join(self.template_path,
                                   'node_group_template_*.json')
            for template_file in glob.glob(pattern):
                with open(template_file) as data:
                    node_groups.append(json.load(data))

        ng_id_map = {}
        for ng in node_groups:
            kwargs = dict(ng)
            kwargs.update(self.plugin_opts)
            kwargs['name'] = utils.rand_name(kwargs['name'])
            kwargs['floating_ip_pool'] = floating_ip_pool
            ng_id_map[ng['name']] = self.__create_node_group_template(**kwargs)

        return ng_id_map
Ejemplo n.º 7
0
def run():
    """CNI entry point: parse the CNI config from stdin, initialize
    configuration, logging and the os-vif registry, then execute the
    daemonized runner under a watchdog alarm."""
    conf_dict = jsonutils.load(sys.stdin.buffer)
    cni_conf = utils.CNIConfig(conf_dict)
    if 'kuryr_conf' in conf_dict:
        args = ['--config-file', cni_conf.kuryr_conf]
    else:
        args = []

    try:
        if cni_conf.debug:
            args.append('-d')
    except AttributeError:
        # no "debug" attribute in the config; nothing to enable
        pass
    config.init(args)
    config.setup_logging()

    # Initialize o.vo registry.
    k_objects.register_locally_defined_vifs()
    os_vif.initialize()

    runner = cni_api.CNIDaemonizedRunner()

    def _timeout(signum, frame):
        # Report the timeout over stdout (the CNI protocol channel) and die.
        runner._write_dict(sys.stdout, {
            'msg': 'timeout',
            'code': k_const.CNI_TIMEOUT_CODE,
        })
        LOG.debug('timed out')
        sys.exit(1)

    signal.signal(signal.SIGALRM, _timeout)
    signal.alarm(_CNI_TIMEOUT)
    status = runner.run(os.environ, cni_conf, sys.stdout)
    LOG.debug("Exiting with status %s", status)
    if status:
        sys.exit(status)
Ejemplo n.º 8
0
    def _create_cluster_template(self):
        """Create a cluster template from the test case (or from
        cluster_template.json in the template directory) and return the
        created template's id."""
        self.ng_name_map = {}
        if self.testcase.get('cluster_template'):
            template = self.testcase['cluster_template']
        else:
            template_path = os.path.join(self.template_path,
                                         'cluster_template.json')
            with open(template_path) as data:
                template = json.load(data)

        kwargs = dict(template)
        # Expand the name -> count mapping into full node group entries
        # with randomized names.
        ngs = kwargs.pop('node_group_templates')
        kwargs['node_groups'] = []
        for ng, count in ngs.items():
            ng_name = utils.rand_name(ng)
            self.ng_name_map[ng] = ng_name
            kwargs['node_groups'].append({
                'name': ng_name,
                'node_group_template_id': self.ng_id_map[ng],
                'count': count,
            })

        kwargs.update(self.plugin_opts)
        kwargs['name'] = utils.rand_name(kwargs['name'])
        if self.network['type'] == 'neutron':
            kwargs['net_id'] = self.neutron.get_network_id(
                self.network['private_network'])

        return self.__create_cluster_template(**kwargs)
Ejemplo n.º 9
0
 def read_rules(path):
     """Parse and return the JSON rules file at *path*.

     Exits the program with an explanatory message if the file is not
     valid JSON.
     """
     try:
         with open(path) as rules_file:
             return jsonutils.load(rules_file)
     except ValueError as err:
         msg = _('Error while parsing rules '
                 '%(path)s: %(err)s') % {'path': path, 'err': err}
         raise SystemExit(msg)
Ejemplo n.º 10
0
    def test_playbook_persistence(self):
        """The playbook id recorded in ara.json matches the DB row."""
        r_playbook = m.Playbook.query.first()
        tmpfile = os.path.join(self.app.config['ARA_TMP_DIR'], 'ara.json')

        with open(tmpfile, 'rb') as json_file:
            data = jsonutils.load(json_file)
        self.assertEqual(r_playbook.id, data['playbook']['id'])
Ejemplo n.º 11
0
 def _assert_expect_json(self, json_data):
     """Assert that *json_data* equals the canned expected JSON fixture."""
     data_dir = os.path.dirname(os.path.abspath(__file__))
     expected_file_name = os.path.join(
         data_dir, 'subunit_describe_calls_data',
         'calls_subunit_expected.json')
     with open(expected_file_name, "rb") as read_file:
         expected_result = json.load(read_file)
     self.assertDictEqual(expected_result, json_data)
Ejemplo n.º 12
0
    def _create_cluster_template(self):
        """Create a cluster template from the test case (or from
        cluster_template.json in the template directory) and return the
        created template's id."""
        self.ng_name_map = {}
        if self.testcase.get('cluster_template'):
            template = self.testcase['cluster_template']
        else:
            template_path = os.path.join(self.template_path,
                                         'cluster_template.json')
            with open(template_path) as data:
                template = json.load(data)

        kwargs = dict(template)
        # Expand the name -> count mapping into full node group entries
        # with randomized names.
        ngs = kwargs.pop('node_group_templates')
        kwargs['node_groups'] = []
        for ng, count in ngs.items():
            ng_name = utils.rand_name(ng)
            self.ng_name_map[ng] = ng_name
            kwargs['node_groups'].append({
                'name': ng_name,
                'node_group_template_id': self.ng_id_map[ng],
                'count': count,
            })

        kwargs.update(self.plugin_opts)
        kwargs['name'] = utils.rand_name(kwargs['name'])
        if self.network['type'] == 'neutron':
            kwargs['net_id'] = self.neutron.get_network_id(
                self.network['private_network'])

        return self.__create_cluster_template(**kwargs)
Ejemplo n.º 13
0
def load_json(input_string):
    """Return parsed JSON from *input_string*.

    The argument is treated as a file name first; if it cannot be
    opened it is parsed directly as a JSON document.
    """
    try:
        # binary mode is needed due to bug/1515231
        with open(input_string, 'r+b') as fh:
            doc = jsonutils.load(fh)
    except IOError:
        doc = jsonutils.loads(input_string)
    return doc
Ejemplo n.º 14
0
    def _create_cluster_template(self):
        """Create a cluster template from the test case (or from
        cluster_template.json in the template directory) and return the
        created template's id."""
        self.ng_name_map = {}
        if self.testcase.get("cluster_template"):
            template = self.testcase["cluster_template"]
        else:
            template_path = os.path.join(self.template_path, "cluster_template.json")
            with open(template_path) as data:
                template = json.load(data)

        kwargs = dict(template)
        # Expand the name -> count mapping into full node group entries
        # with randomized names.
        ngs = kwargs.pop("node_group_templates")
        kwargs["node_groups"] = []
        for ng, count in ngs.items():
            ng_name = utils.rand_name(ng)
            self.ng_name_map[ng] = ng_name
            kwargs["node_groups"].append({
                "name": ng_name,
                "node_group_template_id": self.ng_id_map[ng],
                "count": count,
            })

        kwargs.update(self.plugin_opts)
        kwargs["name"] = utils.rand_name(kwargs["name"])
        if self.network["type"] == "neutron":
            kwargs["net_id"] = self.neutron.get_network_id(self.network["private_network"])

        return self.__create_cluster_template(**kwargs)
Ejemplo n.º 15
0
    def __init__(self):
        """Load access rules from the configured JSON rules file.

        In permissive mode nothing is loaded. A missing file is tolerated
        (a warning is logged and rules stay unavailable); a malformed file
        or a rule missing 'method'/'path' raises
        AccessRulesConfigFileError.
        """
        super(AccessRulesConfig, self).__init__()
        if CONF.access_rules_config.permissive:
            return
        access_rules_file = CONF.access_rules_config.rules_file
        self.access_rules = {}
        self.access_rules_json = {}
        try:
            with open(access_rules_file, "rb") as f:
                self.access_rules_json = jsonutils.load(f)
        except IOError:
            LOG.warning('No config file found for access rules, application'
                        ' credential access rules will be unavailable.')
            return
        except ValueError as e:
            raise exception.AccessRulesConfigFileError(error=e)

        # Index the raw rules by service and HTTP method for fast lookup.
        for service, rules in self.access_rules_json.items():
            self.access_rules[service] = {}
            for rule in rules:
                try:
                    method_rules = self.access_rules[service].setdefault(
                        rule['method'], [])
                    method_rules.append({'path': rule['path']})
                except KeyError as e:
                    raise exception.AccessRulesConfigFileError(error=e)
Ejemplo n.º 16
0
def action(ns):
    """Run the stress tests selected by the parsed CLI namespace *ns*.

    :param ns: argparse namespace with tests, all, type, call_inherited,
        serial, duration, number and stop attributes.
    :returns: the last non-zero result code, or 0 on success.
    """
    result = 0
    if not ns.all:
        # Close the tests file deterministically; the original leaked the
        # handle returned by open().
        with open(ns.tests, 'r') as tests_file:
            tests = json.load(tests_file)
    else:
        tests = discover_stress_tests(filter_attr=ns.type,
                                      call_inherited=ns.call_inherited)

    if ns.serial:
        # Duration is total time
        duration = ns.duration / len(tests)
        for test in tests:
            step_result = driver.stress_openstack([test],
                                                  duration,
                                                  ns.number,
                                                  ns.stop)
            # NOTE(mkoderer): we just save the last result code
            if step_result != 0:
                result = step_result
                if ns.stop:
                    return result
    else:
        result = driver.stress_openstack(tests,
                                         ns.duration,
                                         ns.number,
                                         ns.stop)
    return result
Ejemplo n.º 17
0
 def _load_file(self, handle):
     """Return the decoded JSON contents of *handle*.

     On a decode failure the error is logged and an empty dict is
     returned. Broken out for testing.
     """
     try:
         options = jsonutils.load(handle)
     except ValueError:
         LOG.exception(_LE("Could not decode scheduler options"))
         options = {}
     return options
Ejemplo n.º 18
0
def main():
    """Parse CLI arguments and run the selected stress tests.

    :returns: the last non-zero result code, or 0 on success.
    """
    ns = parser.parse_args()
    result = 0
    if not ns.all:
        # Close the tests file deterministically; the original leaked the
        # handle returned by open().
        with open(ns.tests, 'r') as tests_file:
            tests = json.load(tests_file)
    else:
        tests = discover_stress_tests(filter_attr=ns.type,
                                      call_inherited=ns.call_inherited)

    if ns.serial:
        # Duration is total time
        duration = ns.duration / len(tests)
        for test in tests:
            step_result = driver.stress_openstack([test],
                                                  duration,
                                                  ns.number,
                                                  ns.stop)
            # NOTE(mkoderer): we just save the last result code
            if step_result != 0:
                result = step_result
                if ns.stop:
                    return result
    else:
        result = driver.stress_openstack(tests,
                                         ns.duration,
                                         ns.number,
                                         ns.stop)
    return result
Ejemplo n.º 19
0
    def package_specs(self):
        """Return a generator yielding package specifications, i.e. dicts
        with 'Name' and 'Version' fields.

        :raises ValueError: if the bundle is neither valid JSON nor YAML.
        """
        self._file.seek(0)
        bundle = None
        try:
            bundle = jsonutils.load(self._file)
        except ValueError:
            # Not JSON; try YAML below.
            pass
        if bundle is None:
            # jsonutils.load consumed the stream before failing, so rewind;
            # otherwise yaml would parse an empty stream and return None,
            # incorrectly rejecting valid YAML bundles.
            self._file.seek(0)
            try:
                bundle = yaml.safe_load(self._file)
            except yaml.error.YAMLError:
                pass

        if bundle is None:
            raise ValueError("Can't parse bundle contents")

        if 'Packages' not in bundle:
            return

        for package in bundle['Packages']:
            if 'Name' not in package:
                continue
            yield package
Ejemplo n.º 20
0
def main():
    """CNI entry point for zun: parse the CNI config from stdin, set up
    configuration and the os-vif registry, then execute the daemonized
    runner under a watchdog alarm."""
    conf_dict = jsonutils.load(sys.stdin.buffer)
    cni_conf = utils.CNIConfig(conf_dict)
    if 'zun_conf' in conf_dict:
        args = ['--config-file', cni_conf.zun_conf]
    else:
        args = []

    try:
        if cni_conf.debug:
            args.append('-d')
    except AttributeError:
        # no "debug" attribute in the config; nothing to enable
        pass
    config.init(args)
    if os.environ.get('CNI_COMMAND') == 'VERSION':
        CONF.set_default('use_stderr', True)

    # Initialize o.vo registry.
    os_vif.initialize()

    runner = cni_api.CNIDaemonizedRunner()

    def _timeout(signum, frame):
        # Report the timeout over stdout (the CNI protocol channel) and die.
        runner._write_dict(sys.stdout, {
            'msg': 'timeout',
            'code': consts.CNI_TIMEOUT_CODE,
        })
        LOG.debug('timed out')
        sys.exit(1)

    signal.signal(signal.SIGALRM, _timeout)
    signal.alarm(_CNI_TIMEOUT)
    status = runner.run(os.environ, cni_conf, sys.stdout)
    LOG.debug("Exiting with status %s", status)
    if status:
        sys.exit(status)
Ejemplo n.º 21
0
    def __init__(self):
        """Load access rules from the configured JSON rules file.

        In permissive mode nothing is loaded. A missing file is tolerated
        (a warning is logged and rules stay unavailable); a malformed file
        or a rule missing 'method'/'path' raises
        AccessRulesConfigFileError.
        """
        super(AccessRulesConfig, self).__init__()
        if CONF.access_rules_config.permissive:
            return
        access_rules_file = CONF.access_rules_config.rules_file
        self.access_rules = {}
        self.access_rules_json = {}
        try:
            with open(access_rules_file, "rb") as f:
                self.access_rules_json = jsonutils.load(f)
        except IOError:
            LOG.warning('No config file found for access rules, application'
                        ' credential access rules will be unavailable.')
            return
        except ValueError as e:
            raise exception.AccessRulesConfigFileError(error=e)

        # Index the raw rules by service and HTTP method for fast lookup.
        for service, rules in self.access_rules_json.items():
            self.access_rules[service] = {}
            for rule in rules:
                try:
                    method_rules = self.access_rules[service].setdefault(
                        rule['method'], [])
                    method_rules.append({'path': rule['path']})
                except KeyError as e:
                    raise exception.AccessRulesConfigFileError(error=e)
Ejemplo n.º 22
0
Archivo: base.py Proyecto: lhcxx/sahara
    def _create_node_group_templates(self):
        """Create a node group template for every definition in the test
        case (or every node_group_template_*.json under the template
        directory) and return a mapping of template name -> created id."""
        if self.network['type'] == 'neutron':
            floating_ip_pool = self.neutron.get_network_id(
                self.network['public_network'])
        elif not self.network['auto_assignment_floating_ip']:
            floating_ip_pool = self.network['public_network']
        else:
            floating_ip_pool = None

        if self.testcase.get('node_group_templates'):
            node_groups = list(self.testcase['node_group_templates'])
        else:
            node_groups = []
            pattern = os.path.join(self.template_path,
                                   'node_group_template_*.json')
            for template_file in glob.glob(pattern):
                with open(template_file) as data:
                    node_groups.append(json.load(data))

        ng_id_map = {}
        for ng in node_groups:
            kwargs = dict(ng)
            kwargs.update(self.plugin_opts)
            kwargs['name'] = utils.rand_name(kwargs['name'])
            kwargs['floating_ip_pool'] = floating_ip_pool
            ng_id_map[ng['name']] = self.__create_node_group_template(**kwargs)

        return ng_id_map
Ejemplo n.º 23
0
def _create_dashboard(ip):
    """POST every '*dashboard.json' under the repo dashboard directory to
    the Grafana HTTP API running on *ip*."""
    url = 'http://*****:*****@{}:{}/api/dashboards/db'.format(ip, consts.GRAFANA_PORT)
    pattern = os.path.join(consts.REPOS_DIR, 'dashboard', '*dashboard.json')

    for dashboard_file in sorted(glob.iglob(pattern)):
        with open(dashboard_file) as f:
            payload = jsonutils.load(f)
        HttpClient().post(url, payload)
Ejemplo n.º 24
0
def set_repos(nodes):
    """Assign a fresh copy of the Ubuntu repo fixture to every node.

    :param nodes: mapping of node_id -> node data.
    """
    # NOTE(review): REPOS is declared global but never assigned in this
    # function — looks like dead code or a missed `REPOS = repos`; confirm.
    global REPOS
    fixture = os.path.join(os.path.dirname(__file__),
                           'fixtures/repos_ubuntu.json')
    with open(fixture) as fp:
        repos = jsonutils.load(fp)

    # deepcopy so later per-node mutations don't leak between nodes
    for node_id in nodes:
        set_repos_for_node(node_id, deepcopy(repos))
Ejemplo n.º 25
0
 def read_rules(self, path):
     """Load the JSON rules file at *path* into self.rules, remembering
     the path in self.rules_pathname.

     Exits the program with an explanatory message if the file is not
     valid JSON.
     """
     self.rules_pathname = path
     try:
         with open(path) as rules_file:
             self.rules = jsonutils.load(rules_file)
     except ValueError as err:
         msg = _('Error while parsing rules '
                 '%(path)s: %(err)s') % {'path': path, 'err': err}
         raise SystemExit(msg)
def read_jsonfile(name):
    """Return the parsed JSON contents of file *name*, or {} on any error.

    Errors are logged and deliberately swallowed (best-effort read).
    """
    retval = {}
    try:
        with open(name, "r") as f:
            retval = jsonutils.load(f)
    except Exception as e:
        # warn() is deprecated in favor of warning(); pass the argument
        # lazily so formatting is skipped when the level is disabled.
        LOG.warning("Exception in reading file: %s", e)
    return retval
Ejemplo n.º 27
0
    def load_message(filename):
        """Load and return the JSON message stored in the data/ folder
        next to this module."""
        data_folder = os.path.join(
            os.path.abspath(os.path.dirname(__file__)), "data")

        with open(os.path.join(data_folder, filename), 'rb') as json_file:
            return jsonutils.load(json_file)
def read_jsonfile(name):
    """Return the parsed JSON contents of file *name*, or {} on any error.

    Errors are logged and deliberately swallowed (best-effort read).
    """
    retval = {}
    try:
        with open(name, "r") as f:
            retval = jsonutils.load(f)
    except Exception as e:
        # warn() is deprecated in favor of warning(); pass the argument
        # lazily so formatting is skipped when the level is disabled.
        LOG.warning("Exception in reading file: %s", e)
    return retval
 def _read_neutron_db_data(self):
     """Read the dummy neutron router DB fixture into
     self.router_db_info (helper for cfg_syncer tests)."""
     fixture = (base.ROOTDIR +
                '/unit/cisco/etc/cfg_syncer/neutron_router_db.json')
     with open(fixture, 'r') as fp:
         self.router_db_info = jsonutils.load(fp)
Ejemplo n.º 30
0
def load_doc_from_json_file(fname, debug=False):
    """Load and return the JSON document stored in file *fname*.

    :param fname: path to the JSON file.
    :param debug: when True, print a confirmation after a successful load.
    :raises Exception: if the file cannot be parsed; the original error is
        chained as the cause so the root traceback is preserved.
    """
    with open(fname, 'rb') as fd:
        try:
            doc = json.load(fd)
        except Exception as e:
            # chain the cause instead of discarding it
            raise Exception("Unable to load conf file. {0}".format(e)) from e
        if debug:
            print("File {0} loaded: ".format(fname))
        return doc
Ejemplo n.º 31
0
def load_doc_from_json_file(fname, debug=False):
    """Load and return the JSON document stored in file *fname*.

    :param fname: path to the JSON file.
    :param debug: when True, print a confirmation after a successful load.
    :raises Exception: if the file cannot be parsed.
    """
    with open(fname, 'rb') as fd:
        try:
            doc = json.load(fd)
        except Exception as e:
            msg = "Unable to load conf file. {0}".format(e)
            raise Exception(msg)
        if debug:
            print("File {0} loaded: ".format(fname))
        return doc
Ejemplo n.º 32
0
    def _get_config_provider(self):
        """Lazily build and cache the HDP 1.3.2 ConfigurationProvider."""
        if self.config_provider is None:
            resource = pkg.resource_stream(
                version.version_info.package,
                'plugins/hdp/versions/version_1_3_2/resources/'
                'ambari-config-resource.json')
            self.config_provider = cfgprov.ConfigurationProvider(
                json.load(resource), hadoop_version='1.3.2')

        return self.config_provider
Ejemplo n.º 33
0
    def _modify_policy_file(self, rules):
        """Merge *rules* into the on-disk policy file, then pause so the
        service has time to pick up the change."""
        with open(self.policy_file, 'r+b') as fp:
            policy = jsonutils.load(fp)

        policy.update(rules)

        with open(self.policy_file, 'w') as fp:
            jsonutils.dump(policy, fp)

        # give the service time to notice the rewritten policy
        time.sleep(2)
    def _read_asr_running_cfg(self, file_name='asr_running_cfg.json'):
        """Read a sample ASR running-cfg file (JSON format) and return
        its parsed contents (helper for cfg_syncer tests)."""
        rel_path = '/unit/cisco/etc/cfg_syncer/%s' % (file_name)
        with open(base.ROOTDIR + rel_path, 'r') as fp:
            return jsonutils.load(fp)
Ejemplo n.º 35
0
    def _modify_policy_file(self, rules):
        """Merge *rules* into the on-disk policy file, then pause so the
        service has time to pick up the change."""
        with open(self.policy_file, 'r+b') as fp:
            policy = jsonutils.load(fp)

        policy.update(rules)

        with open(self.policy_file, 'w') as fp:
            jsonutils.dump(policy, fp)

        # give the service time to notice the rewritten policy
        time.sleep(2)
Ejemplo n.º 36
0
def run():
    """CNI entry point (Py2/Py3 compatible): parse the CNI config from
    stdin, set up config/logging/os-vif, choose the daemonized or
    standalone runner and execute it under a watchdog alarm."""
    stdin = sys.stdin.buffer if six.PY3 else sys.stdin
    d = jsonutils.load(stdin)
    cni_conf = utils.CNIConfig(d)
    args = ['--config-file', cni_conf.kuryr_conf]

    try:
        if cni_conf.debug:
            args.append('-d')
    except AttributeError:
        # no "debug" attribute in the config; nothing to enable
        pass
    config.init(args)
    config.setup_logging()

    # Initialize o.vo registry.
    k_objects.register_locally_defined_vifs()
    os_vif.initialize()

    if CONF.cni_daemon.daemon_enabled:
        runner = cni_api.CNIDaemonizedRunner()
    else:
        versionutils.deprecation_warning(
            'Deploying kuryr-kubernetes without kuryr-daemon service', 'R')
        runner = cni_api.CNIStandaloneRunner(k8s_cni.K8sCNIPlugin())
    LOG.info("Using '%s' ", runner.__class__.__name__)

    def _timeout(signum, frame):
        # Report the timeout over stdout (the CNI protocol channel) and die.
        runner._write_dict(sys.stdout, {
            'msg': 'timeout',
            'code': k_const.CNI_TIMEOUT_CODE,
        })
        LOG.debug('timed out')
        sys.exit(1)

    signal.signal(signal.SIGALRM, _timeout)
    signal.alarm(_CNI_TIMEOUT)
    status = runner.run(os.environ, cni_conf, sys.stdout)
    LOG.debug("Exiting with status %s", status)
    if status:
        sys.exit(status)
def _create_dashboard(ip, port, path):
    """Read the dashboard JSON file at *path* and POST it to the Grafana
    API running at ip:port; failures are logged and re-raised."""
    url = 'http://*****:*****@{}:{}/api/dashboards/db'.format(ip, port)
    logger.info("Fetched IP for dashboard creation!")
    with open(path) as dashboard_file:
        dashboard = jsonutils.load(dashboard_file)
    try:
        post(url, {"dashboard": dashboard})
        logger.info("Trying to post dashboard json!")
    except Exception:
        logger.info("Create dashboard failed")
        raise
Ejemplo n.º 38
0
    def _prepare_policy(self):
        """Rewrite every policy action to require self.role and point the
        test at the rewritten policy file."""
        # Close the source policy file deterministically; the original
        # leaked the handle returned by open().
        with open(CONF.policy_file) as f:
            policy = jsonutils.load(f)

        # Convert all actions to require specified role. Iterating the
        # keys works on both Py2 and Py3 (iteritems() is Py2-only) and the
        # unused rule value is dropped.
        for action in policy:
            policy[action] = 'role:%s' % self.role

        self.policy_dir = self.useFixture(fixtures.TempDir())
        self.policy_file = os.path.join(self.policy_dir.path, 'policy.json')
        with open(self.policy_file, 'w') as f:
            jsonutils.dump(policy, f)
Ejemplo n.º 39
0
    def _prepare_policy(self):
        """Rewrite every policy action to require self.role and point the
        test at the rewritten policy file."""
        # Close the source policy file deterministically; the original
        # leaked the handle returned by open().
        with open(CONF.oslo_policy.policy_file) as f:
            policy = jsonutils.load(f)

        # Convert all actions to require specified role
        for action in policy:
            policy[action] = "role:%s" % self.role

        self.policy_dir = self.useFixture(fixtures.TempDir())
        self.policy_file = os.path.join(self.policy_dir.path, "policy.json")
        with open(self.policy_file, "w") as f:
            jsonutils.dump(policy, f)
Ejemplo n.º 40
0
    def _get_config_provider(self):
        """Lazily build and cache the HDP 1.3.2 ConfigurationProvider."""
        if self.config_provider is None:
            resource = pkg.resource_stream(
                version.version_info.package,
                'plugins/hdp/versions/version_1_3_2/resources/'
                'ambari-config-resource.json')
            self.config_provider = cfgprov.ConfigurationProvider(
                json.load(resource), hadoop_version='1.3.2')

        return self.config_provider
Ejemplo n.º 41
0
def doc_from_json_file(path_to_file):
    """Build a json from a file in the file system

    :param path_to_file: path to file
    :return: in memory file in json format
    :raises Exception: if the file cannot be parsed; the original error is
        logged and chained as the cause so the root traceback is preserved.
    """
    with open(path_to_file, 'rb') as fd:
        try:
            return json.load(fd)
        except Exception as err:
            logging.error(err)
            # chain the cause instead of discarding it
            raise Exception(
                'Unable to load conf file. {0}'.format(err)) from err
Ejemplo n.º 42
0
    def _gen_sut_info_dict(self, sut_dir):
        """Return {filename: parsed JSON} for every file directly under
        *sut_dir*; empty dict when it is not a directory."""
        sut_info = {}

        if os.path.isdir(sut_dir):
            root, _, files = next(os.walk(sut_dir))
            for filename in files:
                with open(os.path.join(root, filename)) as f:
                    sut_info[filename] = jsonutils.load(f)

        return sut_info
Ejemplo n.º 43
0
    def _prepare_policy(self):
        """Rewrite every policy action to require self.role and point the
        test at the rewritten policy file."""
        # Close the source policy file deterministically; the original
        # leaked the handle returned by open().
        with open(CONF.policy_file) as f:
            policy = jsonutils.load(f)

        # Convert all actions to require specified role. Iterating the
        # keys works on both Py2 and Py3 (iteritems() is Py2-only) and the
        # unused rule value is dropped.
        for action in policy:
            policy[action] = 'role:%s' % self.role

        self.policy_dir = self.useFixture(fixtures.TempDir())
        self.policy_file = os.path.join(self.policy_dir.path, 'policy.json')
        with open(self.policy_file, 'w') as f:
            jsonutils.dump(policy, f)
Ejemplo n.º 44
0
def read_previous_results():
    """Read results of previous run.

    :return: dictionary of results if exist
    """
    try:
        path = settings.LOAD_TESTS_PATHS['load_previous_tests_results']
        with open(path, 'r') as results_file:
            return jsonutils.load(results_file)
    except (IOError, ValueError):
        return {}
Ejemplo n.º 45
0
    def test_load(self):
        """jsonutils.load decodes byte streams in the given encoding into
        unicode keys and values."""
        jsontext = u'{"a": "\u0442\u044d\u0441\u0442"}'
        expected = {u'a': u'\u0442\u044d\u0441\u0442'}

        for encoding in ('utf-8', 'cp1251'):
            stream = six.BytesIO(jsontext.encode(encoding))
            decoded = jsonutils.load(stream, encoding=encoding)
            self.assertEqual(expected, decoded)
            for key, val in decoded.items():
                self.assertIsInstance(key, six.text_type)
                self.assertIsInstance(val, six.text_type)
Ejemplo n.º 46
0
    def _create_dashboard(self, ip):
        """POST every '*dashboard.json' under the repo dashboard directory
        to the Grafana API on *ip*; failures are logged and re-raised."""
        url = 'http://*****:*****@{}:{}/api/dashboards/db'.format(ip, 3000)
        pattern = os.path.join(consts.REPOS_DIR, 'dashboard',
                               '*dashboard.json')

        for dashboard_file in sorted(glob.iglob(pattern)):
            with open(dashboard_file) as f:
                payload = jsonutils.load(f)
            try:
                HttpClient().post(url, payload)
            except Exception:
                LOG.exception('Create dashboard %s failed', dashboard_file)
                raise
Ejemplo n.º 47
0
    def _create_dashboard(self, ip):
        """POST every opnfv_yardstick_tc*.json dashboard to the Grafana
        API on *ip*; failures are logged and re-raised."""
        url = 'http://*****:*****@{}:{}/api/dashboards/db'.format(ip, consts.GRAFANA_PORT)
        pattern = os.path.join(consts.REPOS_DIR, 'dashboard',
                               'opnfv_yardstick_tc*.json')

        for dashboard_file in sorted(glob.iglob(pattern)):
            with open(dashboard_file) as f:
                payload = jsonutils.load(f)
            try:
                HttpClient().post(url, {"dashboard": payload})
            except Exception:
                LOG.exception('Create dashboard %s failed', dashboard_file)
                raise
Ejemplo n.º 48
0
    def test_load(self):
        """jsonutils.load decodes byte streams in the given encoding into
        unicode keys and values."""
        jsontext = u'{"a": "\u0442\u044d\u0441\u0442"}'
        expected = {u"a": u"\u0442\u044d\u0441\u0442"}

        for encoding in ("utf-8", "cp1251"):
            stream = six.BytesIO(jsontext.encode(encoding))
            decoded = jsonutils.load(stream, encoding=encoding)
            self.assertEqual(expected, decoded)
            for key, val in decoded.items():
                self.assertIsInstance(key, six.text_type)
                self.assertIsInstance(val, six.text_type)
Ejemplo n.º 49
0
def read_previous_results():
    """Read results of previous run.

    :return: dictionary of results if exist
    """
    try:
        path = settings.LOAD_TESTS_PATHS['load_previous_tests_results']
        with open(path, 'r') as results_file:
            return jsonutils.load(results_file)
    except (IOError, ValueError):
        return {}
Ejemplo n.º 50
0
def smart_config(conf):
    """Provision etcd roles, users and ACLs from a JSON topology file.

    :param conf: path to a JSON file with 'compute' and 'network' mappings,
        where each entry holds 'role', 'username' and 'password' keys.

    Compute roles get read access to their node subtree and readwrite on
    their state subtree; network roles get the broader nodes/state trees.
    Optionally enables etcd authentication after a confirmation prompt.
    """
    # Use a context manager so the config file is closed deterministically
    # (the original `jsonutils.load(open(conf))` leaked the file handle).
    with open(conf) as conf_file:
        json_conf = jsonutils.load(conf_file)

    compute_nodes = json_conf['compute']
    network_controllers = json_conf['network']

    for c in compute_nodes:
        print("[+]\tFound Compute {}".format(c))

    for c in network_controllers:
        print("[+]\tFound Network {}".format(c))

    # Create a role and user per compute node.
    for compute_node, param in compute_nodes.items():
        rolename = param['role']
        username = param['username']
        password = param['password']
        create_role(rolename)
        create_user(username, password)
        set_user_role(username, rolename)
        print("[+] creating user '{}', role '{}'".format(
            username, rolename))
        # PERMISSIONS: a compute only reads its own node config and
        # writes its own state.
        set_role_permission(
            rolename,
            "/networking-vpp/nodes/{}/*".format(compute_node), "read")
        set_role_permission(
            rolename,
            "/networking-vpp/state/{}/*".format(compute_node), "readwrite")

    # Create a role and user per network controller.
    for network_controller, param in network_controllers.items():
        rolename = param['role']
        username = param['username']
        password = param['password']
        create_role(rolename)
        create_user(username, password)
        print("[+] creating user '{}', role '{}'".format(
            username, rolename))

        set_user_role(username, rolename)
        # PERMISSION: controllers manage all nodes but only read state.
        set_role_permission(
            rolename,
            "/networking-vpp/nodes/*", "readwrite")
        set_role_permission(
            rolename,
            "/networking-vpp/state/*", "read")

    if click.confirm('Enable ETCD authentication ?'):
        print("[*] Enabling ETCD authentication")
        enable_authentication()
Ejemplo n.º 51
0
 def _load_json(self):
     """Load the saved-state JSON file into ``self.json_data``.

     Exits the process (via ``sys.exit``) if the file is missing or
     cannot be parsed.
     """
     try:
         # ``with`` guarantees the file is closed even when json.load
         # raises (the original skipped close() on a parse error).
         with open(SAVED_STATE_JSON) as json_file:
             self.json_data = json.load(json_file)
     except IOError as ex:
         # Lazy %-args instead of eager % formatting in log calls.
         LOG.exception("Failed loading saved state, please be sure you"
                       " have first run cleanup with --init-saved-state "
                       "flag prior to running tempest. Exception: %s", ex)
         sys.exit(ex)
     except Exception as ex:
         LOG.exception("Exception parsing saved state json : %s", ex)
         sys.exit(ex)
Ejemplo n.º 52
0
    def _load_json(self, saved_state_json=SAVED_STATE_JSON):
        """Populate ``self.json_data`` from the saved-state JSON file.

        Exits the process if the file is missing or cannot be parsed.
        """
        try:
            with open(saved_state_json, 'rb') as state_file:
                self.json_data = json.load(state_file)

        except IOError as ex:
            LOG.exception("Failed loading saved state, please be sure you"
                          " have first run cleanup with --init-saved-state "
                          "flag prior to running tempest. Exception: %s", ex)
            sys.exit(ex)
        except Exception as ex:
            LOG.exception("Exception parsing saved state json : %s", ex)
            sys.exit(ex)
Ejemplo n.º 53
0
def _resolve_ref(ref, base_path):
    """Load the JSON document a '$ref' points to, resolving nested refs.

    Only whole-file refs are supported; a '#'-fragment JSON path raises
    NotImplementedError.
    """
    file_path, _, json_path = ref.partition('#')
    if json_path:
        raise NotImplementedError('JSON refs with JSON path after the "#" is '
                                  'not yet supported')

    path = os.path.join(base_path, file_path)
    # binary mode is needed due to bug/1515231
    with open(path, 'r+b') as ref_file:
        document = jsonutils.load(ref_file)
    # Nested refs are relative to the referenced file's directory.
    return resolve_refs(document, os.path.dirname(path))
Ejemplo n.º 54
0
    def body_dict(self):
        """Return the request body as a dictionary, deserialized per the
        Content-Type header.

        Kept as a method (rather than calling pecan's ``request.json()``
        directly) to ease adding XML support in the future.
        """
        # Guard clause: reject anything that isn't a JSON content type.
        if self.content_type not in JSON_TYPES:
            raise exceptions.UnsupportedContentType("Content-type must be application/json")
        try:
            return jsonutils.load(self.body_file)
        except ValueError as valueError:
            raise exceptions.InvalidJson(six.text_type(valueError))
Ejemplo n.º 55
0
def main(actions=None):
    """Parse the input data and run each requested manager action.

    Input comes from CONF.input_data (inline JSON) when set, otherwise
    from the CONF.input_data_file path. Any exception is routed to
    handle_exception.
    """
    try:
        if CONF.input_data:
            data = json.loads(CONF.input_data)
        else:
            with open(CONF.input_data_file) as input_file:
                data = json.load(input_file)
        LOG.debug('Input data: %s', data)

        mgr = manager.Manager(data)
        for action in (actions or []):
            getattr(mgr, action)()
    except Exception as exc:
        handle_exception(exc)
    def _read_current_data(self):
        """Append one report row summing 'expect_time' per flattened test key.

        Reads CURRENT_RUN_DATA_PATH, flattens it, registers any new column
        names, and appends a {'date': ..., <key>: <total>, ...} row to
        ``self.report_data``.
        """
        with open(self.CURRENT_RUN_DATA_PATH) as data_file:
            data = jsonutils.load(data_file)

        # NOTE(review): assumes _flatten_json_dictionary yields
        # (key, dict-of-entries) pairs — confirm against the helper.
        tests = self._flatten_json_dictionary(data)

        # Timezone-aware timestamp so rows are comparable across hosts.
        now = datetime.now(tz=pytz.utc)

        current_test_data = dict(date=now.isoformat())

        for key, test in tests:
            if key not in self.column_names:
                self.column_names.append(key)

            # Sum over the values directly; no need to materialize a list
            # of (name, payload) pairs just to index [1] of each.
            current_test_data[key] = sum(
                entry['expect_time'] for entry in test.values()
            )

        self.report_data.append(current_test_data)
Ejemplo n.º 57
0
    def check_scale(self):
        """Scale the cluster per the testcase (or scale.json) and validate.

        Scaling operations come from ``self.testcase['scaling']`` when
        present, otherwise from a ``scale.json`` file next to the template.
        Builds a sahara scale request ('add'/'resize' ops), applies it,
        waits for the cluster, then validates the resulting node counts.
        """
        scale_ops = []
        # Snapshot node groups before scaling for the final validation.
        ng_before_scale = self.sahara.get_cluster(self.cluster_id).node_groups
        if self.testcase.get('scaling'):
            scale_ops = self.testcase['scaling']
        else:
            scale_path = os.path.join(self.template_path, 'scale.json')
            if os.path.exists(scale_path):
                with open(scale_path) as data:
                    scale_ops = json.load(data)

        body = {}
        for op in scale_ops:
            node_scale = op['node_group']
            if op['operation'] == 'add':
                # 'add' appends a brand-new node group built from a template.
                if 'add_node_groups' not in body:
                    body['add_node_groups'] = []
                body['add_node_groups'].append({
                    'node_group_template_id':
                    self.ng_id_map.get(node_scale,
                                       self.sahara.get_node_group_template_id(
                                           node_scale)),
                    'count': op['size'],
                    'name': utils.rand_name(node_scale)
                })
            if op['operation'] == 'resize':
                # 'resize' changes the count of an existing node group.
                if 'resize_node_groups' not in body:
                    body['resize_node_groups'] = []
                body['resize_node_groups'].append({
                    'name': self.ng_name_map.get(
                        node_scale,
                        self.sahara.get_node_group_template_id(node_scale)),
                    'count': op['size']
                })

        # Only issue the API call if at least one operation was collected.
        if body:
            self.sahara.scale_cluster(self.cluster_id, body)
            self._poll_cluster_status(self.cluster_id)
            ng_after_scale = self.sahara.get_cluster(
                self.cluster_id).node_groups
            self._validate_scaling(ng_after_scale,
                                   self._get_expected_count_of_nodes(
                                       ng_before_scale, body))