Example No. 1
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.data_processing

        cluster = utils.get_resource(
            client.clusters, parsed_args.cluster)

        if parsed_args.json:
            blob = osc_utils.read_blob_file_contents(parsed_args.json)
            try:
                template = json.loads(blob)
            except ValueError as e:
                raise exceptions.CommandError(
                    'An error occurred when reading '
                    'template from file %s: %s' % (parsed_args.json, e))

            data = client.clusters.scale(cluster.id, template).to_dict()
        else:
            scale_object = {
                "add_node_groups": [],
                "resize_node_groups": []
            }
            scale_node_groups = dict(
                map(lambda x: x.split(':', 1), parsed_args.node_groups))
            cluster_node_groups = [ng['name'] for ng in cluster.node_groups]
            for name, count in scale_node_groups.items():
                ng = utils.get_resource(client.node_group_templates, name)
                if ng.name in cluster_node_groups:
                    scale_object["resize_node_groups"].append({
                        "name": ng.name,
                        "count": int(count)
                    })
                else:
                    scale_object["add_node_groups"].append({
                        "node_group_template_id": ng.id,
                        "name": ng.name,
                        "count": int(count)
                    })
            if not scale_object['add_node_groups']:
                del scale_object['add_node_groups']
            if not scale_object['resize_node_groups']:
                del scale_object['resize_node_groups']

            data = client.clusters.scale(cluster.id, scale_object).cluster

        if parsed_args.wait:
            if not osc_utils.wait_for_status(
                    client.clusters.get, data['id']):
                self.log.error(
                    'Error occurred during cluster scaling: %s',
                    cluster.id)
            data = client.clusters.get(cluster.id).to_dict()

        _format_cluster_output(data)
        data = utils.prepare_data(data, CLUSTER_FIELDS)

        return self.dict2columns(data)
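For reference, a sketch of the request body the non-JSON branch above builds; the node group names, counts, and the template id below are made up for illustration.

# Hypothetical scale_object produced for --node-groups new-workers:2 old-workers:5,
# assuming "old-workers" already exists in the cluster and "new-workers" does not
# (the template id is a placeholder):
scale_object = {
    "add_node_groups": [
        {"node_group_template_id": "<ngt-id>", "name": "new-workers", "count": 2},
    ],
    "resize_node_groups": [
        {"name": "old-workers", "count": 5},
    ],
}
# This dict is then passed to client.clusters.scale(cluster.id, scale_object).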
Example No. 2
    def test_get_resource(self):
        manager = mock.Mock()

        # check case when resource id is passed
        uuid = '82065b4d-2c79-420d-adc3-310de275e922'
        utils.get_resource(manager, uuid)
        manager.get.assert_called_once_with(uuid)

        # check case when resource name is passed
        utils.get_resource(manager, 'name')
        manager.find_unique.assert_called_once_with(name='name')
Example No. 3
    def test_get_resource(self):
        manager = mock.Mock()

        # check case when resource id is passed
        uuid = '82065b4d-2c79-420d-adc3-310de275e922'
        utils.get_resource(manager, uuid)
        manager.get.assert_called_once_with(uuid)

        # check case when resource name is passed
        utils.get_resource(manager, 'name')
        manager.find_unique.assert_called_once_with(name='name')
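The two tests above only pin down the dispatch behaviour; a minimal sketch of a get_resource helper consistent with them (an assumption for illustration, not the actual saharaclient implementation) could look like this:

from oslo_utils import uuidutils


def get_resource(manager, name_or_id, **kwargs):
    # UUID-like input is treated as an id and fetched directly...
    if uuidutils.is_uuid_like(name_or_id):
        return manager.get(name_or_id, **kwargs)
    # ...anything else is treated as a name and resolved to a single match.
    return manager.find_unique(name=name_or_id, **kwargs)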
Example No. 4
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.data_processing
        for ds in parsed_args.data_source:
            data_source_id = utils.get_resource(
                client.data_sources, ds).id
            client.data_sources.delete(data_source_id)
Example No. 5
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.data_processing
        for ct in parsed_args.cluster_template:
            ct_id = utils.get_resource(
                client.cluster_templates, ct).id
            client.cluster_templates.delete(ct_id)
Example No. 6
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.data_processing
        for jt in parsed_args.job_template:
            jt_id = utils.get_resource(
                client.jobs, jt).id
            client.jobs.delete(jt_id)
Example No. 7
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.data_processing
        for ngt in parsed_args.node_group_template:
            ngt_id = utils.get_resource(
                client.node_group_templates, ngt).id
            client.node_group_templates.delete(ngt_id)
Example No. 8
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.data_processing

        data = utils.get_resource(
            client.data_sources, parsed_args.data_source).to_dict()
        data = utils.prepare_data(data, DATA_SOURCE_FIELDS)

        return self.dict2columns(data)
Example No. 9
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.data_processing

        data = utils.get_resource(client.data_sources,
                                  parsed_args.data_source).to_dict()
        data = utils.prepare_data(data, DATA_SOURCE_FIELDS)

        return self.dict2columns(data)
Example No. 10
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)", parsed_args)
        client = self.app.client_manager.data_processing

        data = utils.get_resource(client.job_binaries,
                                  parsed_args.job_binary).to_dict()

        data = utils.prepare_data(data, JOB_BINARY_FIELDS)

        return self.dict2columns(data)
Example No. 11
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.data_processing

        data = utils.get_resource(
            client.job_binaries, parsed_args.job_binary).to_dict()

        data = utils.prepare_data(data, JOB_BINARY_FIELDS)

        return self.dict2columns(data)
Example No. 12
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)", parsed_args)
        client = self.app.client_manager.data_processing

        data = utils.get_resource(client.images, parsed_args.image).to_dict()
        data['tags'] = osc_utils.format_list(data['tags'])

        data = utils.prepare_data(data, IMAGE_FIELDS)

        return self.dict2columns(data)
Example No. 13
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.data_processing

        data = utils.get_resource(
            client.images, parsed_args.image).to_dict()
        data['tags'] = osc_utils.format_list(data['tags'])

        data = utils.prepare_data(data, IMAGE_FIELDS)

        return self.dict2columns(data)
Example No. 14
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)", parsed_args)
        client = self.app.client_manager.data_processing

        data = utils.get_resource(client.cluster_templates,
                                  parsed_args.cluster_template).to_dict()

        _format_ct_output(data)
        data = utils.prepare_data(data, CT_FIELDS)

        return self.dict2columns(data)
Example No. 15
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.data_processing

        data = utils.get_resource(
            client.cluster_templates, parsed_args.cluster_template).to_dict()

        _format_ct_output(data)
        data = utils.prepare_data(data, CT_FIELDS)

        return self.dict2columns(data)
Example No. 16
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.data_processing

        data = utils.get_resource(client.jobs,
                                  parsed_args.job_template).to_dict()

        _format_job_template_output(data)
        data = utils.prepare_data(data, JOB_TEMPLATE_FIELDS)

        return self.dict2columns(data)
Example No. 17
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.data_processing

        data = utils.get_resource(client.data_sources,
                                  parsed_args.data_source).to_dict()

        fields = ['name', 'id', 'type', 'url', 'description']
        data = utils.prepare_data(data, fields)

        return self.dict2columns(data)
Example No. 18
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.data_processing

        data = utils.get_resource(
            client.images, parsed_args.image).to_dict()
        data['tags'] = osc_utils.format_list(data['tags'])

        fields = ['name', 'id', 'username', 'tags', 'status', 'description']
        data = utils.prepare_data(data, fields)

        return self.dict2columns(data)
Example No. 19
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.data_processing

        if parsed_args.json:
            blob = osc_utils.read_blob_file_contents(parsed_args.json)
            try:
                template = jsonutils.loads(blob)
            except ValueError as e:
                raise exceptions.CommandError(
                    'An error occurred when reading '
                    'template from file %s: %s' % (parsed_args.json, e))
            data = client.jobs.create(**template).to_dict()
        else:
            if parsed_args.interface:
                # the interface definition comes from its own file
                blob = osc_utils.read_blob_file_contents(
                    parsed_args.interface)
                try:
                    parsed_args.interface = jsonutils.loads(blob)
                except ValueError as e:
                    raise exceptions.CommandError(
                        'An error occurred when reading '
                        'interface from file %s: %s' %
                        (parsed_args.interface, e))

            mains_ids = [utils.get_resource(client.job_binaries, m).id for m
                         in parsed_args.mains] if parsed_args.mains else None
            libs_ids = [utils.get_resource(client.job_binaries, m).id for m
                        in parsed_args.libs] if parsed_args.libs else None

            data = client.jobs.create(
                name=parsed_args.name, type=parsed_args.type, mains=mains_ids,
                libs=libs_ids, description=parsed_args.description,
                interface=parsed_args.interface, is_public=parsed_args.public,
                is_protected=parsed_args.protected).to_dict()

        _format_job_template_output(data)
        data = utils.prepare_data(data, JOB_TEMPLATE_FIELDS)

        return self.dict2columns(data)
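The --json branch above forwards the decoded file verbatim as keyword arguments to client.jobs.create(), so a template file mirrors the keyword arguments used in the non-JSON branch. A hypothetical example follows; the field values are made up and this is not a documented schema.

# Decoded contents of a hypothetical file passed via --json:
template = {
    "name": "my-pig-job",
    "type": "Pig",
    "mains": ["<main-job-binary-id>"],
    "libs": [],
    "description": "example job template",
    "is_public": False,
    "is_protected": False,
}
# client.jobs.create(**template) then receives these as keyword arguments.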
Example No. 20
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.data_processing

        image = utils.get_resource(client.images, parsed_args.image)
        parsed_args.tags.extend(image.tags)
        data = client.images.update_tags(image.id, list(set(
            parsed_args.tags))).to_dict()

        data['tags'] = osc_utils.format_list(data['tags'])

        data = utils.prepare_data(data, IMAGE_FIELDS)

        return self.dict2columns(data)
Example No. 21
    def test_get_resource_id(self):
        class TestResource(object):
            def __init__(self, id):
                self.id = id

        class TestManager(object):

            resource_class = TestResource

            def get(self, id):
                if id == 'id':
                    return TestResource('from_id')
                else:
                    raise

            def find(self, name):
                if name == 'name':
                    return [TestResource('from_name')]
                if name == 'null':
                    return []
                if name == 'mult':
                    return [TestResource('1'), TestResource('2')]

        # check case when resource id is passed
        self.assertEqual('from_id', utils.get_resource(TestManager(), 'id').id)

        # check case when resource name is passed
        self.assertEqual('from_name',
                         utils.get_resource(TestManager(), 'name').id)

        # check that error is raised when resource doesn't exists
        self.assertRaises(exceptions.CommandError, utils.get_resource,
                          TestManager(), 'null')

        # check that error is raised when multiple resources choice
        self.assertRaises(exceptions.CommandError, utils.get_resource,
                          TestManager(), 'mult')
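Unlike the mock-based test earlier, this test implies a variant of get_resource that first tries a lookup by id and then falls back to manager.find(), raising CommandError for zero or multiple matches. A hedged sketch consistent with this test follows (an assumption, not the library's code; the exceptions import path is also an assumption):

from osc_lib import exceptions


def get_resource(manager, name_or_id):
    try:
        # first assume an id was passed
        return manager.get(name_or_id)
    except Exception:
        # otherwise fall back to a lookup by name
        resources = manager.find(name=name_or_id)
        if not resources:
            raise exceptions.CommandError(
                '%s "%s" not found' %
                (manager.resource_class.__name__, name_or_id))
        if len(resources) > 1:
            raise exceptions.CommandError(
                'Several %s matches found for "%s"' %
                (manager.resource_class.__name__, name_or_id))
        return resources[0]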
Example No. 22
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.data_processing
        clusters = []
        for cluster in parsed_args.cluster:
            cluster_id = utils.get_resource(
                client.clusters, cluster).id
            client.clusters.delete(cluster_id)
            clusters.append(cluster_id)
        if parsed_args.wait:
            for cluster_id in clusters:
                if not utils.wait_for_delete(client.clusters, cluster_id):
                    self.log.error(
                        'Error occurred during cluster deleting: %s',
                        cluster_id)
Example No. 23
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.data_processing

        data = utils.get_resource(client.clusters,
                                  parsed_args.cluster).to_dict()

        _format_cluster_output(data)
        fields = []
        if parsed_args.verification:
            ver_data, fields = _prepare_health_checks(data)
            data.update(ver_data)
        fields.extend(CLUSTER_FIELDS)
        data = utils.prepare_data(data, fields)

        return self.dict2columns(data)
Example No. 24
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)", parsed_args)
        client = self.app.client_manager.data_processing
        for jb in parsed_args.job_binary:
            jb = utils.get_resource(client.job_binaries, jb)
            if jb.url.startswith("internal-db"):
                jbi_id = jb.url.replace('internal-db://', '')
                try:
                    client.job_binary_internals.delete(jbi_id)
                except base.APIException as ex:
                    # check whether the job binary internal was already
                    # deleted for some reason
                    if not ex.error_code == '404':
                        raise
            client.job_binaries.delete(jb.id)
            sys.stdout.write('Job binary "{jb}" has been removed '
                             'successfully.\n'.format(jb=jb))
Example No. 25
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)", parsed_args)
        client = self.app.client_manager.data_processing

        image = utils.get_resource(client.images, parsed_args.image)

        if parsed_args.all:
            data = client.images.update_tags(image.id, []).to_dict()
        else:
            parsed_args.tags = parsed_args.tags or []
            new_tags = list(set(image.tags) - set(parsed_args.tags))
            data = client.images.update_tags(image.id, new_tags).to_dict()

        data['tags'] = osc_utils.format_list(data['tags'])

        data = utils.prepare_data(data, IMAGE_FIELDS)

        return self.dict2columns(data)
Example No. 26
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.data_processing
        for jb in parsed_args.job_binary:
            jb = utils.get_resource(client.job_binaries, jb)
            if jb.url.startswith("internal-db"):
                jbi_id = jb.url.replace('internal-db://', '')
                try:
                    client.job_binary_internals.delete(jbi_id)
                except base.APIException as ex:
                    # check whether the job binary internal was already
                    # deleted for some reason
                    if not ex.error_code == '404':
                        raise
            client.job_binaries.delete(jb.id)
            sys.stdout.write(
                'Job binary "{jb}" has been removed '
                'successfully.\n'.format(jb=jb))
Example No. 27
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.data_processing

        image = utils.get_resource(client.images, parsed_args.image)

        if parsed_args.all:
            data = client.images.update_tags(image.id, []).to_dict()
        else:
            parsed_args.tags = parsed_args.tags or []
            new_tags = list(set(image.tags) - set(parsed_args.tags))
            data = client.images.update_tags(image.id, new_tags).to_dict()

        data['tags'] = osc_utils.format_list(data['tags'])

        data = utils.prepare_data(data, IMAGE_FIELDS)

        return self.dict2columns(data)
Example No. 28
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.data_processing

        cluster_id = utils.get_resource(
            client.clusters, parsed_args.cluster).id

        data = client.clusters.update(
            cluster_id,
            name=parsed_args.name,
            description=parsed_args.description,
            is_public=parsed_args.is_public,
            is_protected=parsed_args.is_protected
        ).cluster

        _format_cluster_output(data)
        data = utils.prepare_data(data, CLUSTER_FIELDS)

        return self.dict2columns(data)
Example No. 29
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.data_processing

        update_fields = utils.create_dict_from_kwargs(
            name=parsed_args.name,
            description=parsed_args.description,
            data_source_type=parsed_args.type, url=parsed_args.url,
            credential_user=parsed_args.username,
            credential_pass=parsed_args.password,
            is_public=parsed_args.is_public,
            is_protected=parsed_args.is_protected)

        ds_id = utils.get_resource(
            client.data_sources, parsed_args.data_source).id
        data = client.data_sources.update(ds_id, update_fields).data_source
        data = utils.prepare_data(data, DATA_SOURCE_FIELDS)

        return self.dict2columns(data)
Example No. 30
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.data_processing

        jt_id = utils.get_resource(
            client.jobs, parsed_args.job_template).id

        data = client.jobs.update(
            jt_id,
            name=parsed_args.name,
            description=parsed_args.description,
            is_public=parsed_args.is_public,
            is_protected=parsed_args.is_protected
        ).job

        _format_job_template_output(data)
        data = utils.prepare_data(data, JOB_TEMPLATE_FIELDS)

        return self.dict2columns(data)
Example No. 31
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.data_processing

        kwargs = {}
        if parsed_args.show_progress or parsed_args.full_dump_events:
            kwargs['show_progress'] = True
        data = utils.get_resource(client.clusters, parsed_args.cluster,
                                  **kwargs).to_dict()
        provision_steps = data.get('provision_progress', [])
        provision_steps = utils.created_at_sorted(provision_steps)

        if parsed_args.full_dump_events:
            file_name = utils.random_name('event-logs')
            # making full dump
            with open(file_name, 'w') as file:
                jsonutils.dump(provision_steps, file, indent=4)
            sys.stdout.write('Event log dump saved to file: %s\n' % file_name)

        _format_cluster_output(data)
        fields = []
        if parsed_args.verification:
            ver_data, fields = _prepare_health_checks(data)
            data.update(ver_data)
        fields.extend(CLUSTER_FIELDS)

        data = self.dict2columns(utils.prepare_data(data, fields))

        if parsed_args.show_progress:
            output_steps = []
            for step in provision_steps:
                st_name, st_type = step['step_name'], step['step_type']
                description = "%s: %s" % (st_type, st_name)
                if step['successful'] is None:
                    progress = "Step in progress"
                elif step['successful']:
                    progress = "Step completed successfully"
                else:
                    progress = 'Step has failed events'
                output_steps += [(description, progress)]
            data = utils.extend_columns(data, output_steps)

        return data
Example No. 32
def _configure_node_groups(node_groups, client):
    node_groups_list = dict(
        map(lambda x: x.split(':', 1), node_groups))

    node_groups = []
    plugins_versions = set()

    for name, count in node_groups_list.items():
        ng = utils.get_resource(client.node_group_templates, name)
        node_groups.append({'name': ng.name,
                            'count': int(count),
                            'node_group_template_id': ng.id})
        plugins_versions.add((ng.plugin_name, ng.hadoop_version))

    if len(plugins_versions) != 1:
        raise exceptions.CommandError('Node groups with the same plugins '
                                      'and versions must be specified')

    plugin, version = plugins_versions.pop()
    return plugin, version, node_groups
Example No. 33
def _configure_node_groups(node_groups, client):
    node_groups_list = dict(
        map(lambda x: x.split(':', 1), node_groups))

    node_groups = []
    plugins_versions = set()

    for name, count in node_groups_list.items():
        ng = utils.get_resource(client.node_group_templates, name)
        node_groups.append({'name': ng.name,
                            'count': int(count),
                            'node_group_template_id': ng.id})
        plugins_versions.add((ng.plugin_name, ng.hadoop_version))

    if len(plugins_versions) != 1:
        raise exceptions.CommandError('Node groups with the same plugins '
                                      'and versions must be specified')

    plugin, plugin_version = plugins_versions.pop()
    return plugin, plugin_version, node_groups
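A quick illustration of the "name:count" argument format both helpers above expect (the values are made up):

# --node-groups is a list of "name:count" strings on the command line:
node_groups = ['workers:3', 'master:1']
node_groups_list = dict(map(lambda x: x.split(':', 1), node_groups))
print(node_groups_list)  # {'workers': '3', 'master': '1'}
# Each count is still a string here and is later cast with int(count).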
Example No. 34
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.data_processing

        if parsed_args.show:
            data = utils.get_resource(client.clusters,
                                      parsed_args.cluster).to_dict()
            ver_data, ver_fields = _prepare_health_checks(data)
            data = utils.prepare_data(ver_data, ver_fields)
            return self.dict2columns(data)
        else:
            cluster_id = utils.get_resource_id(client.clusters,
                                               parsed_args.cluster)
            client.clusters.verification_update(cluster_id, parsed_args.status)
            if parsed_args.status == 'START':
                print_status = 'started'
            sys.stdout.write(
                'Cluster "{cluster}" health verification has been '
                '{status}.'.format(cluster=parsed_args.cluster,
                                   status=print_status))

            return {}, {}
Example No. 35
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.data_processing
        data_source_id = utils.get_resource(client.data_sources,
                                            parsed_args.data_source).id
        client.data_sources.delete(data_source_id)
Example No. 36
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.data_processing

        ngt_id = utils.get_resource(
            client.node_group_templates, parsed_args.node_group_template).id

        if parsed_args.json:
            blob = osc_utils.read_blob_file_contents(parsed_args.json)
            try:
                template = json.loads(blob)
            except ValueError as e:
                raise exceptions.CommandError(
                    'An error occurred when reading '
                    'template from file %s: %s' % (parsed_args.json, e))
            data = client.node_group_templates.update(
                ngt_id, **template).to_dict()
        else:
            configs = None
            if parsed_args.configs:
                blob = osc_utils.read_blob_file_contents(parsed_args.configs)
                try:
                    configs = json.loads(blob)
                except ValueError as e:
                    raise exceptions.CommandError(
                        'An error occurred when reading '
                        'configs from file %s: %s' % (parsed_args.configs, e))

            shares = None
            if parsed_args.shares:
                blob = osc_utils.read_blob_file_contents(parsed_args.shares)
                try:
                    shares = json.loads(blob)
                except ValueError as e:
                    raise exceptions.CommandError(
                        'An error occurred when reading '
                        'shares from file %s: %s' % (parsed_args.shares, e))

            data = client.node_group_templates.update(
                ngt_id,
                name=parsed_args.name,
                plugin_name=parsed_args.plugin,
                hadoop_version=parsed_args.version,
                flavor_id=parsed_args.flavor,
                description=parsed_args.description,
                volumes_per_node=parsed_args.volumes_per_node,
                volumes_size=parsed_args.volumes_size,
                node_processes=parsed_args.processes,
                floating_ip_pool=parsed_args.floating_ip_pool,
                security_groups=parsed_args.security_groups,
                auto_security_group=parsed_args.use_auto_security_group,
                availability_zone=parsed_args.availability_zone,
                volume_type=parsed_args.volumes_type,
                is_proxy_gateway=parsed_args.is_proxy_gateway,
                volume_local_to_instance=parsed_args.volume_locality,
                use_autoconfig=parsed_args.use_autoconfig,
                is_public=parsed_args.is_public,
                is_protected=parsed_args.is_protected,
                node_configs=configs,
                shares=shares,
                volumes_availability_zone=parsed_args.volumes_availability_zone
            ).to_dict()

        _format_ngt_output(data)
        data = utils.prepare_data(data, NGT_FIELDS)

        return self.dict2columns(data)
Example No. 37
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.data_processing
        for image in parsed_args.image:
            image_id = utils.get_resource(client.images, image).id
            client.images.unregister_image(image_id)
Example No. 38
def _get_plugin_version(cluster_template, client):
    ct = utils.get_resource(client.cluster_templates, cluster_template)
    return ct.plugin_name, ct.hadoop_version, ct.id
Example No. 39
def _get_plugin_version(cluster_template, client):
    ct = utils.get_resource(client.cluster_templates, cluster_template)
    return ct.plugin_name, ct.hadoop_version, ct.id
Example No. 40
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.data_processing

        ct_id = utils.get_resource(
            client.cluster_templates, parsed_args.cluster_template).id

        if parsed_args.json:
            blob = osc_utils.read_blob_file_contents(parsed_args.json)
            try:
                template = json.loads(blob)
            except ValueError as e:
                raise exceptions.CommandError(
                    'An error occurred when reading '
                    'template from file %s: %s' % (parsed_args.json, e))
            data = client.cluster_templates.update(
                ct_id, **template).to_dict()
        else:
            plugin, version, node_groups = None, None, None
            if parsed_args.node_groups:
                plugin, version, node_groups = _configure_node_groups(
                    parsed_args.node_groups, client)

            configs = None
            if parsed_args.configs:
                blob = osc_utils.read_blob_file_contents(parsed_args.configs)
                try:
                    configs = json.loads(blob)
                except ValueError as e:
                    raise exceptions.CommandError(
                        'An error occurred when reading '
                        'configs from file %s: %s' % (parsed_args.configs, e))

            shares = None
            if parsed_args.shares:
                blob = osc_utils.read_blob_file_contents(parsed_args.shares)
                try:
                    shares = json.loads(blob)
                except ValueError as e:
                    raise exceptions.CommandError(
                        'An error occurred when reading '
                        'shares from file %s: %s' % (parsed_args.shares, e))

            data = client.cluster_templates.update(
                ct_id,
                name=parsed_args.name,
                plugin_name=plugin,
                hadoop_version=version,
                description=parsed_args.description,
                node_groups=node_groups,
                use_autoconfig=parsed_args.use_autoconfig,
                cluster_configs=configs,
                shares=shares,
                is_public=parsed_args.is_public,
                is_protected=parsed_args.is_protected
            ).to_dict()

        _format_ct_output(data)
        data = utils.prepare_data(data, CT_FIELDS)

        return self.dict2columns(data)
Example No. 41
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.data_processing
        network_client = self.app.client_manager.network

        if parsed_args.json:
            blob = osc_utils.read_blob_file_contents(parsed_args.json)
            try:
                template = json.loads(blob)
            except ValueError as e:
                raise exceptions.CommandError(
                    'An error occurred when reading '
                    'template from file %s: %s' % (parsed_args.json, e))

            if 'neutron_management_network' in template:
                template['net_id'] = template.pop('neutron_management_network')

            if 'count' in template:
                parsed_args.count = template['count']

            data = client.clusters.create(**template).to_dict()
        else:
            if not parsed_args.name or not parsed_args.cluster_template \
                    or not parsed_args.image:
                raise exceptions.CommandError(
                    'At least --name , --cluster-template, --image arguments '
                    'should be specified or json template should be provided '
                    'with --json argument')

            plugin, version, template_id = _get_plugin_version(
                parsed_args.cluster_template, client)

            image_id = utils.get_resource(client.images, parsed_args.image).id

            net_id = (network_client.api.find_attr(
                'networks', parsed_args.neutron_network)['id'] if
                parsed_args.neutron_network else None)

            data = client.clusters.create(
                name=parsed_args.name,
                plugin_name=plugin,
                hadoop_version=version,
                cluster_template_id=template_id,
                default_image_id=image_id,
                description=parsed_args.description,
                is_transient=parsed_args.transient,
                user_keypair_id=parsed_args.user_keypair,
                net_id=net_id,
                count=parsed_args.count,
                is_public=parsed_args.public,
                is_protected=parsed_args.protected
            ).to_dict()
        if parsed_args.count and parsed_args.count > 1:
            clusters = [
                utils.get_resource(client.clusters, id)
                for id in data['clusters']]

            if parsed_args.wait:
                for cluster in clusters:
                    if not osc_utils.wait_for_status(
                            client.clusters.get, cluster.id):
                        self.log.error(
                            'Error occurred during cluster creation: %s',
                            data['id'])

            data = {}
            for cluster in clusters:
                data[cluster.name] = cluster.id

        else:
            if parsed_args.wait:
                if not osc_utils.wait_for_status(
                        client.clusters.get, data['id']):
                    self.log.error(
                        'Error occurred during cluster creation: %s',
                        data['id'])
                data = client.clusters.get(data['id']).to_dict()
            _format_cluster_output(data)
            data = utils.prepare_data(data, CLUSTER_FIELDS)

        return self.dict2columns(data)
Example No. 42
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.data_processing
        network_client = self.app.client_manager.network

        if parsed_args.json:
            blob = osc_utils.read_blob_file_contents(parsed_args.json)
            try:
                template = json.loads(blob)
            except ValueError as e:
                raise exceptions.CommandError('An error occurred when reading '
                                              'template from file %s: %s' %
                                              (parsed_args.json, e))

            if 'neutron_management_network' in template:
                template['net_id'] = template.pop('neutron_management_network')

            if 'count' in template:
                parsed_args.count = template['count']

            data = client.clusters.create(**template).to_dict()
        else:
            if not parsed_args.name or not parsed_args.cluster_template \
                    or not parsed_args.image:
                raise exceptions.CommandError(
                    'At least --name , --cluster-template, --image arguments '
                    'should be specified or json template should be provided '
                    'with --json argument')

            plugin, plugin_version, template_id = _get_plugin_version(
                parsed_args.cluster_template, client)

            image_id = utils.get_resource_id(client.images, parsed_args.image)

            net_id = (network_client.find_network(parsed_args.neutron_network,
                                                  ignore_missing=False).id
                      if parsed_args.neutron_network else None)

            data = client.clusters.create(
                name=parsed_args.name,
                plugin_name=plugin,
                hadoop_version=plugin_version,
                cluster_template_id=template_id,
                default_image_id=image_id,
                description=parsed_args.description,
                is_transient=parsed_args.transient,
                user_keypair_id=parsed_args.user_keypair,
                net_id=net_id,
                count=parsed_args.count,
                is_public=parsed_args.public,
                is_protected=parsed_args.protected).to_dict()
        if parsed_args.count and parsed_args.count > 1:
            clusters = [
                utils.get_resource(client.clusters, id)
                for id in data['clusters']
            ]

            if parsed_args.wait:
                for cluster in clusters:
                    if not osc_utils.wait_for_status(client.clusters.get,
                                                     cluster.id):
                        self.log.error(
                            'Error occurred during cluster creation: %s' %
                            data['id'])

            data = {}
            for cluster in clusters:
                data[cluster.name] = cluster.id

        else:
            if parsed_args.wait:
                if not osc_utils.wait_for_status(client.clusters.get,
                                                 data['id']):
                    self.log.error(
                        'Error occurred during cluster creation: %s' %
                        data['id'])
                data = client.clusters.get(data['id']).to_dict()
            _format_cluster_output(data)
            data = utils.prepare_data(data, CLUSTER_FIELDS)

        return self.dict2columns(data)
Example No. 43
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.data_processing

        cluster = utils.get_resource(client.clusters, parsed_args.cluster)

        if parsed_args.json:
            blob = osc_utils.read_blob_file_contents(parsed_args.json)
            try:
                template = json.loads(blob)
            except ValueError as e:
                raise exceptions.CommandError('An error occurred when reading '
                                              'template from file %s: %s' %
                                              (parsed_args.json, e))

            data = client.clusters.scale(cluster.id, template).to_dict()
        else:
            scale_object = {"add_node_groups": [], "resize_node_groups": []}
            scale_node_groups = dict(
                map(lambda x: x.split(':', 1), parsed_args.instances))
            cluster_ng_map = {
                ng['node_group_template_id']: ng['name']
                for ng in cluster.node_groups
            }
            for name, count in scale_node_groups.items():
                ngt = utils.get_resource(client.node_group_templates, name)
                if ngt.id in cluster_ng_map:
                    scale_object["resize_node_groups"].append({
                        "name": cluster_ng_map[ngt.id],
                        "count": int(count)
                    })
                else:
                    scale_object["add_node_groups"].append({
                        "node_group_template_id": ngt.id,
                        "name": ngt.name,
                        "count": int(count)
                    })
            if not scale_object['add_node_groups']:
                del scale_object['add_node_groups']
            if not scale_object['resize_node_groups']:
                del scale_object['resize_node_groups']

            data = client.clusters.scale(cluster.id, scale_object).cluster

        sys.stdout.write(
            'Cluster "{cluster}" scaling has been started.\n'.format(
                cluster=parsed_args.cluster))
        if parsed_args.wait:
            if not osc_utils.wait_for_status(client.clusters.get, data['id']):
                self.log.error('Error occurred during cluster scaling: %s' %
                               cluster.id)
            data = client.clusters.get(cluster.id).to_dict()

        _format_cluster_output(data)
        data = utils.prepare_data(data, CLUSTER_FIELDS)

        return self.dict2columns(data)