def test_wait_for_status_error(self, mock_sleep):
    # Tests that we fail if the resource is status=error
    broken = mock.MagicMock(status="ERROR")
    fetch = mock.Mock(return_value=broken)
    resource_id = str(uuid.uuid4())
    result = utils.wait_for_status(fetch, resource_id)
    self.assertFalse(result)
    # Terminal state on the first poll, so no sleeping should occur.
    mock_sleep.assert_not_called()
def take_action(self, parsed_args):
    """Resize a server, or confirm/revert an outstanding resize.

    Exactly one of --flavor, --verify or --revert is acted upon.
    """
    self.log.debug('take_action(%s)', parsed_args)
    compute_client = self.app.client_manager.compute
    server = utils.find_resource(
        compute_client.servers,
        parsed_args.server,
    )
    if parsed_args.flavor:
        flavor = utils.find_resource(
            compute_client.flavors,
            parsed_args.flavor,
        )
        server.resize(flavor)
        if parsed_args.wait:
            # A resize parks the server in VERIFY_RESIZE until it is
            # confirmed, so that state also counts as success here.
            if utils.wait_for_status(
                compute_client.servers.get,
                server.id,
                success_status=['active', 'verify_resize'],
                callback=_show_progress,
            ):
                sys.stdout.write('Complete\n')
            else:
                sys.stdout.write('\nError resizing server')
                raise SystemExit
    elif parsed_args.verify:
        server.confirm_resize()
    elif parsed_args.revert:
        server.revert_resize()
def test_wait_for_status_ok(self, mock_sleep):
    # Tests the normal flow that the resource is status=active
    healthy = mock.MagicMock(status="ACTIVE")
    fetch = mock.Mock(return_value=healthy)
    resource_id = str(uuid.uuid4())
    result = utils.wait_for_status(fetch, resource_id)
    self.assertTrue(result)
    # Success is seen immediately, so no sleep between polls.
    mock_sleep.assert_not_called()
def take_action(self, parsed_args):
    """Migrate a server (live when --live is given), optionally waiting."""
    self.log.debug('take_action(%s)', parsed_args)
    compute_client = self.app.client_manager.compute
    server = utils.find_resource(
        compute_client.servers,
        parsed_args.server,
    )
    if parsed_args.live:
        # --live carries the destination host name.
        server.live_migrate(
            parsed_args.live,
            parsed_args.shared_migration,
            parsed_args.disk_overcommit,
        )
    else:
        server.migrate()
    if parsed_args.wait:
        if utils.wait_for_status(
            compute_client.servers.get,
            server.id,
            callback=_show_progress,
        ):
            sys.stdout.write(_('Complete\n'))
        else:
            sys.stdout.write(_('\nError migrating server'))
            raise SystemExit
def test_wait_for_status_ok__with_overrides(self, mock_sleep):
    # Tests the normal flow that the resource is status=complete
    finished = mock.MagicMock(my_status="COMPLETE")
    fetch = mock.Mock(return_value=finished)
    resource_id = str(uuid.uuid4())
    outcome = utils.wait_for_status(
        fetch,
        resource_id,
        status_field="my_status",
        success_status=["complete"],
    )
    self.assertTrue(outcome)
    mock_sleep.assert_not_called()
def _stack_action(parsed_args, heat_client, action, good_status, bad_status):
    """Apply *action* to every stack named in parsed_args.stack.

    Optionally waits for each stack to reach *good_status* (failing on
    *bad_status*), then returns a (columns, rows) summary table.
    Raises exc.CommandError for unknown stacks or failed waits.
    """
    rows = []
    for stack in parsed_args.stack:
        try:
            action(stack)
        except heat_exc.HTTPNotFound:
            msg = _('Stack not found: %s') % stack
            raise exc.CommandError(msg)
        if parsed_args.wait:
            if not utils.wait_for_status(heat_client.stacks.get, stack,
                                         status_field='stack_status',
                                         success_status=good_status,
                                         error_status=bad_status):
                err = _("Error waiting for status from stack %s") % stack
                raise exc.CommandError(err)
        # Re-fetch for up-to-date status after the action/wait.
        data = heat_client.stacks.get(stack)
        columns = [
            'ID',
            'Stack Name',
            'Stack Status',
            'Creation Time',
            'Updated Time'
        ]
        rows += [utils.get_dict_properties(data.to_dict(), columns)]
    # NOTE(review): `columns` is only bound inside the loop; an empty
    # parsed_args.stack list would raise NameError here — confirm the
    # CLI guarantees at least one stack argument.
    return (columns, rows)
def test_wait_for_status_error_with_overrides(self, mock_sleep):
    # Tests that we fail if the resource is my_status=failed
    failed = mock.MagicMock(my_status="FAILED")
    fetch = mock.Mock(return_value=failed)
    resource_id = str(uuid.uuid4())
    outcome = utils.wait_for_status(
        fetch,
        resource_id,
        status_field="my_status",
        error_status=["failed"],
    )
    self.assertFalse(outcome)
    mock_sleep.assert_not_called()
def take_action(self, parsed_args):
    """Migrate a server (live when --live is given), optionally waiting."""
    self.log.debug('take_action(%s)', parsed_args)
    compute_client = self.app.client_manager.compute
    server = utils.find_resource(
        compute_client.servers,
        parsed_args.server,
    )
    if parsed_args.live:
        # --live carries the destination host name.
        server.live_migrate(
            parsed_args.live,
            parsed_args.shared_migration,
            parsed_args.disk_overcommit,
        )
    else:
        server.migrate()
    if parsed_args.wait:
        if utils.wait_for_status(
            compute_client.servers.get,
            server.id,
            callback=_show_progress,
        ):
            sys.stdout.write('Complete\n')
        else:
            sys.stdout.write('\nError migrating server')
            raise SystemExit
def test_wait_for_status_error(self, mock_sleep):
    # Tests that we fail if the resource is status=error
    errored = mock.MagicMock(status='ERROR')
    fetch = mock.Mock(return_value=errored)
    resource_id = str(uuid.uuid4())
    self.assertFalse(utils.wait_for_status(fetch, resource_id))
    # Failure is detected on the very first poll — no sleep expected.
    mock_sleep.assert_not_called()
def test_wait_for_status_ok(self, mock_sleep):
    # Tests the normal flow that the resource is status=active
    active = mock.MagicMock(status='ACTIVE')
    fetch = mock.Mock(return_value=active)
    resource_id = str(uuid.uuid4())
    self.assertTrue(utils.wait_for_status(fetch, resource_id))
    mock_sleep.assert_not_called()
def take_action(self, parsed_args):
    """Resize a server, or confirm/revert an outstanding resize.

    Exactly one of --flavor, --confirm or --revert is acted upon.
    """
    self.log.debug('take_action(%s)', parsed_args)
    compute_client = self.app.client_manager.compute
    server = utils.find_resource(
        compute_client.servers,
        parsed_args.server,
    )
    if parsed_args.flavor:
        flavor = utils.find_resource(
            compute_client.flavors,
            parsed_args.flavor,
        )
        compute_client.servers.resize(server, flavor)
        if parsed_args.wait:
            # A resize parks the server in VERIFY_RESIZE until it is
            # confirmed, so that state also counts as success here.
            if utils.wait_for_status(
                compute_client.servers.get,
                server.id,
                success_status=['active', 'verify_resize'],
                callback=_show_progress,
            ):
                sys.stdout.write(_('Complete\n'))
            else:
                sys.stdout.write(_('\nError resizing server'))
                raise SystemExit
    elif parsed_args.confirm:
        compute_client.servers.confirm_resize(server)
    elif parsed_args.revert:
        compute_client.servers.revert_resize(server)
def take_action(self, parsed_args):
    """Create a stack by adopting existing resources from an adopt file."""
    self.log.debug('take_action(%s)', parsed_args)
    client = self.app.client_manager.orchestration

    env_files, env = (
        template_utils.process_multiple_environments_and_files(
            env_paths=parsed_args.environment))

    # The adopt data is read from a local path normalised to a URL.
    adopt_url = heat_utils.normalise_file_path_to_url(
        parsed_args.adopt_file)
    adopt_data = request.urlopen(adopt_url).read().decode('utf-8')

    fields = {
        'stack_name': parsed_args.name,
        'disable_rollback': not parsed_args.enable_rollback,
        'adopt_stack_data': adopt_data,
        'parameters': heat_utils.format_parameters(parsed_args.parameter),
        'files': dict(list(env_files.items())),
        'environment': env,
        'timeout': parsed_args.timeout
    }

    stack = client.stacks.create(**fields)['stack']

    if parsed_args.wait:
        if not utils.wait_for_status(client.stacks.get, parsed_args.name,
                                     status_field='stack_status',
                                     success_status='create_complete',
                                     error_status=['create_failed']):
            msg = _('Stack %s failed to create.') % parsed_args.name
            raise exc.CommandError(msg)

    return _show_stack(client, stack['id'], format='table', short=True)
def test_wait_for_status_error_with_overrides(self, mock_sleep):
    # Tests that we fail if the resource is my_status=failed
    failed = mock.MagicMock(my_status='FAILED')
    fetch = mock.Mock(return_value=failed)
    resource_id = str(uuid.uuid4())
    outcome = utils.wait_for_status(
        fetch,
        resource_id,
        status_field='my_status',
        error_status=['failed'],
    )
    self.assertFalse(outcome)
    self.assertFalse(mock_sleep.called)
def test_wait_for_status_ok__with_overrides(self, mock_sleep):
    # Tests the normal flow that the resource is status=complete
    finished = mock.MagicMock(my_status='COMPLETE')
    fetch = mock.Mock(return_value=finished)
    resource_id = str(uuid.uuid4())
    outcome = utils.wait_for_status(
        fetch,
        resource_id,
        status_field='my_status',
        success_status=['complete'],
    )
    self.assertTrue(outcome)
    self.assertFalse(mock_sleep.called)
def test_wait_for_status_error_with_overrides(self, mock_sleep):
    # Tests that we fail if the resource is my_status=failed
    bad_resource = mock.MagicMock(my_status='FAILED')
    getter = mock.Mock(return_value=bad_resource)
    resource_id = str(uuid.uuid4())
    self.assertFalse(
        utils.wait_for_status(getter, resource_id,
                              status_field='my_status',
                              error_status=['failed']))
    mock_sleep.assert_not_called()
def test_wait_for_status_ok__with_overrides(self, mock_sleep):
    # Tests the normal flow that the resource is status=complete
    done_resource = mock.MagicMock(my_status='COMPLETE')
    getter = mock.Mock(return_value=done_resource)
    resource_id = str(uuid.uuid4())
    self.assertTrue(
        utils.wait_for_status(getter, resource_id,
                              status_field='my_status',
                              success_status=['complete']))
    mock_sleep.assert_not_called()
def take_action(self, parsed_args):
    """Scale a cluster from a JSON template or from --node-groups pairs."""
    self.log.debug("take_action(%s)" % parsed_args)
    client = self.app.client_manager.data_processing
    cluster = utils.get_resource(
        client.clusters, parsed_args.cluster)

    if parsed_args.json:
        blob = osc_utils.read_blob_file_contents(parsed_args.json)
        try:
            template = json.loads(blob)
        except ValueError as e:
            raise exceptions.CommandError(
                'An error occurred when reading '
                'template from file %s: %s' % (parsed_args.json, e))
        data = client.clusters.scale(cluster.id, template).to_dict()
    else:
        scale_object = {
            "add_node_groups": [],
            "resize_node_groups": []
        }
        # Each --node-groups entry is "<template-name>:<count>".
        scale_node_groups = dict(
            map(lambda x: x.split(':', 1), parsed_args.node_groups))
        cluster_node_groups = [ng['name'] for ng in cluster.node_groups]
        for name, count in scale_node_groups.items():
            ng = utils.get_resource(client.node_group_templates, name)
            if ng.name in cluster_node_groups:
                # Node group already in the cluster: resize it.
                scale_object["resize_node_groups"].append({
                    "name": ng.name,
                    "count": int(count)
                })
            else:
                # New node group: add it to the cluster.
                scale_object["add_node_groups"].append({
                    "node_group_template_id": ng.id,
                    "name": ng.name,
                    "count": int(count)
                })
        # Drop empty sections so the request only carries real changes.
        if not scale_object['add_node_groups']:
            del scale_object['add_node_groups']
        if not scale_object['resize_node_groups']:
            del scale_object['resize_node_groups']
        data = client.clusters.scale(cluster.id, scale_object).cluster

    if parsed_args.wait:
        if not osc_utils.wait_for_status(
                client.clusters.get, data['id']):
            self.log.error(
                'Error occurred during cluster scaling: %s',
                cluster.id)

    # Re-fetch for the final state before formatting the output.
    data = client.clusters.get(cluster.id).to_dict()
    _format_cluster_output(data)
    data = utils.prepare_data(data, CLUSTER_FIELDS)

    return self.dict2columns(data)
def take_action(self, parsed_args):
    """Create a backup image of a server and show the image details."""
    compute_client = self.app.client_manager.compute
    server = utils.find_resource(
        compute_client.servers,
        parsed_args.server,
    )

    # Set sane defaults as this API wants all mouths to be fed
    if parsed_args.name is None:
        backup_name = server.name
    else:
        backup_name = parsed_args.name
    if parsed_args.type is None:
        backup_type = ""
    else:
        backup_type = parsed_args.type
    if parsed_args.rotate is None:
        backup_rotation = 1
    else:
        backup_rotation = parsed_args.rotate

    compute_client.servers.backup(
        server.id,
        backup_name,
        backup_type,
        backup_rotation,
    )

    # The backup shows up as an image named after the backup.
    image_client = self.app.client_manager.image
    image = utils.find_resource(
        image_client.images,
        backup_name,
    )

    if parsed_args.wait:
        if utils.wait_for_status(
            image_client.images.get,
            image.id,
            callback=_show_progress,
        ):
            sys.stdout.write('\n')
        else:
            msg = _('Error creating server backup: %s') % parsed_args.name
            raise exceptions.CommandError(msg)

    if self.app.client_manager._api_version['image'] == '1':
        info = {}
        info.update(image._info)
        info['properties'] = utils.format_dict(info.get('properties', {}))
    else:
        # Get the right image module to format the output
        image_module = importutils.import_module(self.IMAGE_API_VERSIONS[
            self.app.client_manager._api_version['image']])
        info = image_module._format_image(image)

    return zip(*sorted(six.iteritems(info)))
def take_action(self, parsed_args):
    """Reboot a server, optionally blocking until the reboot finishes."""
    compute_client = self.app.client_manager.compute
    server = utils.find_resource(compute_client.servers,
                                 parsed_args.server)
    server.reboot(parsed_args.reboot_type)
    if not parsed_args.wait:
        return
    rebooted = utils.wait_for_status(compute_client.servers.get,
                                     server.id,
                                     callback=_show_progress)
    if rebooted:
        sys.stdout.write(_("\nReboot complete\n"))
    else:
        sys.stdout.write(_("\nError rebooting server\n"))
        raise SystemExit
def launch_instance(num):
    """Boot *num* new autoscaling-group instances and wait for each.

    Creates the servers with sequentially numbered names (advancing the
    module-level ``index``), waits for each to reach a success status,
    and returns the list of server objects — refreshed for those that
    started successfully.
    """
    num = int(num)
    global index
    asg_flavor = nova_client.flavors.find(name=ASG_FLAVOR)
    asg_image = nova_client.images.find(name=ASG_IMAGE)
    net = nova_client.networks.find(label='private')
    ret = []
    for i in range(num):
        ret.append(nova_client.servers.create(
            ASG_NAME + str(index),
            flavor=asg_flavor.id,
            image=asg_image.id,
            nics=[{'net-id': net.id}]))
        index = index + 1
    for i in range(num):
        if not utils.wait_for_status(nova_client.servers.get, ret[i].id):
            # Bug fix: the original used a Python 2 print statement,
            # which is a SyntaxError on Python 3.
            print("failed to start new instance " + str(i))
        else:
            # Refresh the record so callers see current server state.
            ret[i] = nova_client.servers.find(id=ret[i].id)
    return ret
def take_action(self, parsed_args):
    """Create a stack from a template, optionally waiting for completion."""
    self.log.debug('take_action(%s)', parsed_args)
    client = self.app.client_manager.orchestration

    tpl_files, template = template_utils.process_template_path(
        parsed_args.template,
        object_request=_authenticated_fetcher(client))

    env_files, env = (
        template_utils.process_multiple_environments_and_files(
            env_paths=parsed_args.environment))

    parameters = heat_utils.format_all_parameters(
        parsed_args.parameter,
        parsed_args.parameter_file,
        parsed_args.template)

    if parsed_args.pre_create:
        # Register pre-create hooks in the environment.
        template_utils.hooks_to_env(env, parsed_args.pre_create,
                                    'pre-create')

    fields = {
        'stack_name': parsed_args.name,
        'disable_rollback': not parsed_args.enable_rollback,
        'parameters': parameters,
        'template': template,
        'files': dict(list(tpl_files.items()) + list(env_files.items())),
        'environment': env
    }

    if parsed_args.tags:
        fields['tags'] = parsed_args.tags
    if parsed_args.timeout:
        fields['timeout_mins'] = parsed_args.timeout

    stack = client.stacks.create(**fields)['stack']

    if parsed_args.wait:
        if not utils.wait_for_status(client.stacks.get, parsed_args.name,
                                     status_field='stack_status',
                                     success_status='create_complete',
                                     error_status='create_failed'):
            msg = _('Stack %s failed to create.') % parsed_args.name
            raise exc.CommandError(msg)

    return _show_stack(client, stack['id'], format='table', short=True)
def take_action(self, parsed_args):
    """Reboot a server, optionally blocking until the reboot finishes."""
    self.log.debug('take_action(%s)', parsed_args)
    compute_client = self.app.client_manager.compute
    server = utils.find_resource(compute_client.servers,
                                 parsed_args.server)
    server.reboot(parsed_args.reboot_type)
    if not parsed_args.wait:
        return
    rebooted = utils.wait_for_status(compute_client.servers.get,
                                     server.id,
                                     callback=_show_progress)
    if rebooted:
        sys.stdout.write(_('\nReboot complete\n'))
    else:
        sys.stdout.write(_('\nError rebooting server\n'))
        raise SystemExit
def take_action(self, parsed_args):
    """Rebuild a server, defaulting to its current image, and show it."""
    compute_client = self.app.client_manager.compute
    server = utils.find_resource(compute_client.servers,
                                 parsed_args.server)

    # If parsed_args.image is not set, default to the currently used one.
    image_id = parsed_args.image or server._info.get("image", {}).get("id")
    image = utils.find_resource(compute_client.images, image_id)

    server = server.rebuild(image, parsed_args.password)
    if parsed_args.wait:
        if utils.wait_for_status(compute_client.servers.get,
                                 server.id,
                                 callback=_show_progress):
            sys.stdout.write(_("\nComplete\n"))
        else:
            sys.stdout.write(_("\nError rebuilding server"))
            raise SystemExit

    details = _prep_server_detail(compute_client, server)
    return zip(*sorted(six.iteritems(details)))
def take_action(self, parsed_args):
    """Create a snapshot image of a server and show the image details."""
    self.log.debug('take_action(%s)', parsed_args)
    compute_client = self.app.client_manager.compute
    image_client = self.app.client_manager.image
    server = utils.find_resource(
        compute_client.servers,
        parsed_args.server,
    )

    # Default the snapshot name to the server's own name.
    if parsed_args.name:
        name = parsed_args.name
    else:
        name = server.name

    image_id = compute_client.servers.create_image(
        server,
        name,
    )

    if parsed_args.wait:
        if utils.wait_for_status(
            image_client.images.get,
            image_id,
            callback=_show_progress,
        ):
            sys.stdout.write('\n')
        else:
            # Bug fix: the option is parsed as `name`; the nonexistent
            # `parsed_args.image_name` raised AttributeError on this
            # error path.
            self.log.error(
                'Error creating server snapshot: %s',
                parsed_args.name,
            )
            sys.stdout.write('\nError creating server snapshot')
            raise SystemExit

    image = utils.find_resource(
        image_client.images,
        image_id,
    )

    info = {}
    info.update(image._info)
    return zip(*sorted(six.iteritems(info)))
def take_action(self, parsed_args):
    """Resize a server, or confirm/revert an outstanding resize.

    Exactly one of --flavor, --confirm or --revert is acted upon.
    """
    compute_client = self.app.client_manager.compute
    server = utils.find_resource(compute_client.servers,
                                 parsed_args.server)
    if parsed_args.flavor:
        flavor = utils.find_resource(compute_client.flavors,
                                     parsed_args.flavor)
        compute_client.servers.resize(server, flavor)
        if parsed_args.wait:
            # A resize parks the server in VERIFY_RESIZE until it is
            # confirmed, so that state also counts as success here.
            if utils.wait_for_status(
                compute_client.servers.get,
                server.id,
                success_status=["active", "verify_resize"],
                callback=_show_progress,
            ):
                sys.stdout.write(_("Complete\n"))
            else:
                sys.stdout.write(_("\nError resizing server"))
                raise SystemExit
    elif parsed_args.confirm:
        compute_client.servers.confirm_resize(server)
    elif parsed_args.revert:
        compute_client.servers.revert_resize(server)
def take_action(self, parsed_args):
    """Create a snapshot image of a server and show the image details."""
    compute_client = self.app.client_manager.compute
    image_client = self.app.client_manager.image
    server = utils.find_resource(compute_client.servers,
                                 parsed_args.server)

    # Default the snapshot name to the server's own name.
    if parsed_args.name:
        name = parsed_args.name
    else:
        name = server.name

    image_id = compute_client.servers.create_image(server, name)

    if parsed_args.wait:
        if utils.wait_for_status(image_client.images.get,
                                 image_id,
                                 callback=_show_progress):
            sys.stdout.write("\n")
        else:
            # Bug fix: the option is parsed as `name`; the nonexistent
            # `parsed_args.image_name` raised AttributeError on this
            # error path.
            self.log.error(_("Error creating server snapshot: %s"),
                           parsed_args.name)
            sys.stdout.write(_("\nError creating server snapshot"))
            raise SystemExit

    image = utils.find_resource(image_client.images, image_id)
    return zip(*sorted(six.iteritems(image._info)))
def take_action(self, parsed_args):
    """Rebuild a server from the given image and show its details."""
    self.log.debug('take_action(%s)', parsed_args)
    compute_client = self.app.client_manager.compute

    # Lookup parsed_args.image
    image = utils.find_resource(compute_client.images, parsed_args.image)

    server = utils.find_resource(
        compute_client.servers, parsed_args.server)

    server = server.rebuild(image, parsed_args.password)
    if parsed_args.wait:
        if utils.wait_for_status(
            compute_client.servers.get,
            server.id,
            callback=_show_progress,
        ):
            sys.stdout.write('\nComplete\n')
        else:
            sys.stdout.write('\nError rebuilding server')
            raise SystemExit

    details = _prep_server_detail(compute_client, server)
    return zip(*sorted(six.iteritems(details)))
def take_action(self, parsed_args):
    """Rebuild a server from the given image and show its details."""
    self.log.debug('take_action(%s)', parsed_args)
    compute_client = self.app.client_manager.compute

    # Lookup parsed_args.image
    image = utils.find_resource(compute_client.images, parsed_args.image)

    server = utils.find_resource(compute_client.servers,
                                 parsed_args.server)

    server = server.rebuild(image, parsed_args.password)
    if parsed_args.wait:
        if utils.wait_for_status(
            compute_client.servers.get,
            server.id,
            callback=_show_progress,
        ):
            sys.stdout.write(_('\nComplete\n'))
        else:
            sys.stdout.write(_('\nError rebuilding server'))
            raise SystemExit

    details = _prep_server_detail(compute_client, server)
    return zip(*sorted(six.iteritems(details)))
def take_action(self, parsed_args):
    """Boot a new server and show its details.

    Handles image/volume lookup, injected files, user data, block-device
    mappings, NICs (nova-network or neutron), scheduler hints and
    config-drive, then optionally waits for the server to become active.
    """
    self.log.debug('take_action(%s)', parsed_args)
    compute_client = self.app.client_manager.compute
    volume_client = self.app.client_manager.volume

    # Lookup parsed_args.image
    image = None
    if parsed_args.image:
        image = utils.find_resource(
            compute_client.images,
            parsed_args.image,
        )

    # Lookup parsed_args.volume
    volume = None
    if parsed_args.volume:
        volume = utils.find_resource(
            volume_client.volumes,
            parsed_args.volume,
        ).id

    # Lookup parsed_args.flavor
    flavor = utils.find_resource(compute_client.flavors,
                                 parsed_args.flavor)

    boot_args = [parsed_args.server_name, image, flavor]

    # Files to inject into the new server, keyed by destination path.
    files = {}
    for f in parsed_args.file:
        dst, src = f.split('=', 1)
        try:
            files[dst] = io.open(src, 'rb')
        except IOError as e:
            raise exceptions.CommandError("Can't open '%s': %s" % (src, e))

    if parsed_args.min > parsed_args.max:
        msg = _("min instances should be <= max instances")
        raise exceptions.CommandError(msg)
    if parsed_args.min < 1:
        msg = _("min instances should be > 0")
        raise exceptions.CommandError(msg)
    if parsed_args.max < 1:
        msg = _("max instances should be > 0")
        raise exceptions.CommandError(msg)

    userdata = None
    if parsed_args.user_data:
        try:
            userdata = io.open(parsed_args.user_data)
        except IOError as e:
            msg = "Can't open '%s': %s"
            raise exceptions.CommandError(msg % (parsed_args.user_data, e))

    block_device_mapping = {}
    if volume:
        # When booting from volume, for now assume no other mappings
        # This device value is likely KVM-specific
        block_device_mapping = {'vda': volume}
    else:
        for dev_map in parsed_args.block_device_mapping:
            dev_key, dev_vol = dev_map.split('=', 1)
            block_volume = None
            if dev_vol:
                block_volume = utils.find_resource(
                    volume_client.volumes,
                    dev_vol,
                ).id
            block_device_mapping.update({dev_key: block_volume})

    nics = []
    if parsed_args.nic:
        neutron_enabled = self._is_neutron_enabled()
        for nic_str in parsed_args.nic:
            nic_info = {
                "net-id": "",
                "v4-fixed-ip": "",
                "v6-fixed-ip": "",
                "port-id": ""
            }
            nic_info.update(dict(kv_str.split("=", 1)
                                 for kv_str in nic_str.split(",")))
            if bool(nic_info["net-id"]) == bool(nic_info["port-id"]):
                msg = _("either net-id or port-id should be specified "
                        "but not both")
                raise exceptions.CommandError(msg)
            if neutron_enabled:
                network_client = self.app.client_manager.network
                if nic_info["net-id"]:
                    nic_info["net-id"] = common.find(network_client,
                                                     'network',
                                                     'networks',
                                                     nic_info["net-id"])
                if nic_info["port-id"]:
                    nic_info["port-id"] = common.find(network_client,
                                                      'port',
                                                      'ports',
                                                      nic_info["port-id"])
            else:
                if nic_info["net-id"]:
                    nic_info["net-id"] = utils.find_resource(
                        compute_client.networks,
                        nic_info["net-id"]).id
                if nic_info["port-id"]:
                    msg = _("can't create server with port specified "
                            "since neutron not enabled")
                    raise exceptions.CommandError(msg)
            nics.append(nic_info)

    hints = {}
    for hint in parsed_args.hint:
        key, _sep, value = hint.partition('=')
        # NOTE(vish): multiple copies of the same hint will
        #             result in a list of values
        if key in hints:
            if isinstance(hints[key], six.string_types):
                hints[key] = [hints[key]]
            hints[key] += [value]
        else:
            hints[key] = value

    # What does a non-boolean value for config-drive do?
    # --config-drive argument is either a volume id or
    # 'True' (or '1') to use an ephemeral volume
    if str(parsed_args.config_drive).lower() in ("true", "1"):
        config_drive = True
    elif str(parsed_args.config_drive).lower() in ("false", "0",
                                                   "", "none"):
        config_drive = None
    else:
        config_drive = parsed_args.config_drive

    boot_kwargs = dict(meta=parsed_args.property,
                       files=files,
                       reservation_id=None,
                       min_count=parsed_args.min,
                       max_count=parsed_args.max,
                       security_groups=parsed_args.security_group,
                       userdata=userdata,
                       key_name=parsed_args.key_name,
                       availability_zone=parsed_args.availability_zone,
                       block_device_mapping=block_device_mapping,
                       nics=nics,
                       scheduler_hints=hints,
                       config_drive=config_drive)

    self.log.debug('boot_args: %s', boot_args)
    self.log.debug('boot_kwargs: %s', boot_kwargs)

    # Wrap the call to catch exceptions in order to close files
    try:
        server = compute_client.servers.create(*boot_args, **boot_kwargs)
    finally:
        # Clean up open files - make sure they are not strings.
        # Bug fix: iterate the dict's values; iterating `files`
        # directly yields the destination-path keys (strings), so the
        # opened file objects were never closed.
        for f in files.values():
            if hasattr(f, 'close'):
                f.close()
        if hasattr(userdata, 'close'):
            userdata.close()

    if parsed_args.wait:
        if utils.wait_for_status(
            compute_client.servers.get,
            server.id,
            callback=_show_progress,
        ):
            sys.stdout.write('\n')
        else:
            self.log.error(_('Error creating server: %s'),
                           parsed_args.server_name)
            sys.stdout.write(_('\nError creating server'))
            raise SystemExit

    details = _prep_server_detail(compute_client, server)
    return zip(*sorted(six.iteritems(details)))
def take_action(self, parsed_args):
    """Create (or, with --dry-run, preview) a stack from a template."""
    self.log.debug('take_action(%s)', parsed_args)
    client = self.app.client_manager.orchestration
    tpl_files, template = template_utils.process_template_path(
        parsed_args.template,
        object_request=_authenticated_fetcher(client))

    env_files, env = (
        template_utils.process_multiple_environments_and_files(
            env_paths=parsed_args.environment))

    parameters = heat_utils.format_all_parameters(
        parsed_args.parameter,
        parsed_args.parameter_file,
        parsed_args.template)

    if parsed_args.pre_create:
        # Register pre-create hooks in the environment.
        template_utils.hooks_to_env(env, parsed_args.pre_create,
                                    'pre-create')

    fields = {
        'stack_name': parsed_args.name,
        'disable_rollback': not parsed_args.enable_rollback,
        'parameters': parameters,
        'template': template,
        'files': dict(list(tpl_files.items()) + list(env_files.items())),
        'environment': env
    }

    if parsed_args.tags:
        fields['tags'] = parsed_args.tags
    if parsed_args.timeout:
        fields['timeout_mins'] = parsed_args.timeout

    if parsed_args.dry_run:
        # Preview only: render the would-be stack instead of creating it.
        stack = client.stacks.preview(**fields)

        formatters = {
            'description': heat_utils.text_wrap_formatter,
            'template_description': heat_utils.text_wrap_formatter,
            'stack_status_reason': heat_utils.text_wrap_formatter,
            'parameters': heat_utils.json_formatter,
            'outputs': heat_utils.json_formatter,
            'resources': heat_utils.json_formatter,
            'links': heat_utils.link_formatter,
        }

        columns = []
        for key in stack.to_dict():
            columns.append(key)
        columns.sort()

        return (
            columns,
            utils.get_item_properties(stack, columns,
                                      formatters=formatters)
        )

    stack = client.stacks.create(**fields)['stack']
    if parsed_args.wait:
        if not utils.wait_for_status(client.stacks.get, parsed_args.name,
                                     status_field='stack_status',
                                     success_status='create_complete',
                                     error_status='create_failed'):
            msg = _('Stack %s failed to create.') % parsed_args.name
            raise exc.CommandError(msg)

    return _show_stack(client, stack['id'], format='table', short=True)
def take_action(self, parsed_args):
    """Update an existing stack, optionally previewing or waiting."""
    self.log.debug('take_action(%s)', parsed_args)
    client = self.app.client_manager.orchestration

    tpl_files, template = template_utils.process_template_path(
        parsed_args.template,
        object_request=_authenticated_fetcher(client),
        existing=parsed_args.existing)

    env_files, env = (
        template_utils.process_multiple_environments_and_files(
            env_paths=parsed_args.environment))

    parameters = heat_utils.format_all_parameters(
        parsed_args.parameter,
        parsed_args.parameter_file,
        parsed_args.template)

    if parsed_args.pre_update:
        # Register pre-update hooks in the environment.
        template_utils.hooks_to_env(env, parsed_args.pre_update,
                                    'pre-update')

    fields = {
        'stack_id': parsed_args.stack,
        'parameters': parameters,
        'existing': parsed_args.existing,
        'template': template,
        'files': dict(list(tpl_files.items()) + list(env_files.items())),
        'environment': env
    }

    if parsed_args.tags:
        fields['tags'] = parsed_args.tags
    if parsed_args.timeout:
        fields['timeout_mins'] = parsed_args.timeout
    if parsed_args.clear_parameter:
        fields['clear_parameters'] = list(parsed_args.clear_parameter)

    if parsed_args.rollback:
        # --rollback accepts enabled/disabled/keep; 'keep' leaves the
        # stack's existing rollback setting untouched.
        rollback = parsed_args.rollback.strip().lower()
        if rollback not in ('enabled', 'disabled', 'keep'):
            msg = _('--rollback invalid value: %s') % parsed_args.rollback
            raise exc.CommandError(msg)
        if rollback != 'keep':
            fields['disable_rollback'] = rollback == 'disabled'

    if parsed_args.dry_run:
        # Preview the changes without applying them.
        changes = client.stacks.preview_update(**fields)

        fields = ['state', 'resource_name', 'resource_type',
                  'resource_identity']

        columns = sorted(changes.get("resource_changes", {}).keys())
        data = [heat_utils.json_formatter(changes["resource_changes"][key])
                for key in columns]

        return columns, data

    client.stacks.update(**fields)

    if parsed_args.wait:
        if not utils.wait_for_status(client.stacks.get, parsed_args.stack,
                                     status_field='stack_status',
                                     success_status='update_complete',
                                     error_status='update_failed'):
            msg = _('Stack %s failed to update.') % parsed_args.stack
            raise exc.CommandError(msg)

    return _show_stack(client, parsed_args.stack, format='table',
                       short=True)
def take_action(self, parsed_args):
    """Scale a cluster from a JSON template or from --instances pairs."""
    self.log.debug("take_action(%s)" % parsed_args)
    client = self.app.client_manager.data_processing

    cluster = utils.get_resource(client.clusters, parsed_args.cluster)

    if parsed_args.json:
        blob = osc_utils.read_blob_file_contents(parsed_args.json)
        try:
            template = json.loads(blob)
        except ValueError as e:
            raise exceptions.CommandError('An error occurred when reading '
                                          'template from file %s: %s' %
                                          (parsed_args.json, e))
        data = client.clusters.scale(cluster.id, template).to_dict()
    else:
        scale_object = {"add_node_groups": [], "resize_node_groups": []}

        # Each --instances entry is "<node-group-template>:<count>".
        scale_node_groups = dict(
            map(lambda x: x.split(':', 1), parsed_args.instances))
        # Map the cluster's node-group-template ids to node-group names.
        cluster_ng_map = {
            ng['node_group_template_id']: ng['name']
            for ng in cluster.node_groups
        }
        for name, count in scale_node_groups.items():
            ngt = utils.get_resource(client.node_group_templates, name)
            if ngt.id in cluster_ng_map:
                # Template already used by the cluster: resize that group.
                scale_object["resize_node_groups"].append({
                    "name": cluster_ng_map[ngt.id],
                    "count": int(count)
                })
            else:
                # New template: add it as a new node group.
                scale_object["add_node_groups"].append({
                    "node_group_template_id": ngt.id,
                    "name": ngt.name,
                    "count": int(count)
                })
        # Drop empty sections so the request only carries real changes.
        if not scale_object['add_node_groups']:
            del scale_object['add_node_groups']
        if not scale_object['resize_node_groups']:
            del scale_object['resize_node_groups']

        data = client.clusters.scale(cluster.id, scale_object).cluster

    sys.stdout.write(
        'Cluster "{cluster}" scaling has been started.\n'.format(
            cluster=parsed_args.cluster))
    if parsed_args.wait:
        if not osc_utils.wait_for_status(client.clusters.get, data['id']):
            self.log.error('Error occurred during cluster scaling: %s' %
                           cluster.id)

    # Re-fetch for the final state before formatting the output.
    data = client.clusters.get(cluster.id).to_dict()
    _format_cluster_output(data)
    data = utils.prepare_data(data, CLUSTER_FIELDS)

    return self.dict2columns(data)
def take_action(self, parsed_args):
    """Boot a new server and show its details.

    Handles image/volume lookup, injected files, user data, block-device
    mappings, NICs (nova-network or neutron), scheduler hints and
    config-drive, then optionally waits for the server to become active.
    """
    self.log.debug('take_action(%s)', parsed_args)
    compute_client = self.app.client_manager.compute
    volume_client = self.app.client_manager.volume

    # Lookup parsed_args.image
    image = None
    if parsed_args.image:
        image = utils.find_resource(
            compute_client.images,
            parsed_args.image,
        )

    # Lookup parsed_args.volume
    volume = None
    if parsed_args.volume:
        volume = utils.find_resource(
            volume_client.volumes,
            parsed_args.volume,
        ).id

    # Lookup parsed_args.flavor
    flavor = utils.find_resource(compute_client.flavors,
                                 parsed_args.flavor)

    boot_args = [parsed_args.server_name, image, flavor]

    # Files to inject into the new server, keyed by destination path.
    files = {}
    for f in parsed_args.file:
        dst, src = f.split('=', 1)
        try:
            files[dst] = io.open(src, 'rb')
        except IOError as e:
            raise exceptions.CommandError("Can't open '%s': %s" % (src, e))

    if parsed_args.min > parsed_args.max:
        msg = _("min instances should be <= max instances")
        raise exceptions.CommandError(msg)
    if parsed_args.min < 1:
        msg = _("min instances should be > 0")
        raise exceptions.CommandError(msg)
    if parsed_args.max < 1:
        msg = _("max instances should be > 0")
        raise exceptions.CommandError(msg)

    userdata = None
    if parsed_args.user_data:
        try:
            userdata = io.open(parsed_args.user_data)
        except IOError as e:
            msg = "Can't open '%s': %s"
            raise exceptions.CommandError(msg % (parsed_args.user_data, e))

    block_device_mapping = {}
    if volume:
        # When booting from volume, for now assume no other mappings
        # This device value is likely KVM-specific
        block_device_mapping = {'vda': volume}
    else:
        for dev_map in parsed_args.block_device_mapping:
            dev_key, dev_vol = dev_map.split('=', 1)
            block_volume = None
            if dev_vol:
                block_volume = utils.find_resource(
                    volume_client.volumes,
                    dev_vol,
                ).id
            block_device_mapping.update({dev_key: block_volume})

    nics = []
    if parsed_args.nic:
        neutron_enabled = self._is_neutron_enabled()
        for nic_str in parsed_args.nic:
            nic_info = {"net-id": "", "v4-fixed-ip": "",
                        "v6-fixed-ip": "", "port-id": ""}
            nic_info.update(dict(kv_str.split("=", 1)
                                 for kv_str in nic_str.split(",")))
            if bool(nic_info["net-id"]) == bool(nic_info["port-id"]):
                msg = _("either net-id or port-id should be specified "
                        "but not both")
                raise exceptions.CommandError(msg)
            if neutron_enabled:
                network_client = self.app.client_manager.network
                if nic_info["net-id"]:
                    nic_info["net-id"] = common.find(network_client,
                                                     'network',
                                                     'networks',
                                                     nic_info["net-id"])
                if nic_info["port-id"]:
                    nic_info["port-id"] = common.find(network_client,
                                                      'port',
                                                      'ports',
                                                      nic_info["port-id"])
            else:
                if nic_info["net-id"]:
                    nic_info["net-id"] = utils.find_resource(
                        compute_client.networks,
                        nic_info["net-id"]
                    ).id
                if nic_info["port-id"]:
                    msg = _("can't create server with port specified "
                            "since neutron not enabled")
                    raise exceptions.CommandError(msg)
            nics.append(nic_info)

    hints = {}
    for hint in parsed_args.hint:
        key, _sep, value = hint.partition('=')
        # NOTE(vish): multiple copies of the same hint will
        #             result in a list of values
        if key in hints:
            if isinstance(hints[key], six.string_types):
                hints[key] = [hints[key]]
            hints[key] += [value]
        else:
            hints[key] = value

    # What does a non-boolean value for config-drive do?
    # --config-drive argument is either a volume id or
    # 'True' (or '1') to use an ephemeral volume
    if str(parsed_args.config_drive).lower() in ("true", "1"):
        config_drive = True
    elif str(parsed_args.config_drive).lower() in ("false", "0",
                                                   "", "none"):
        config_drive = None
    else:
        config_drive = parsed_args.config_drive

    boot_kwargs = dict(
        meta=parsed_args.property,
        files=files,
        reservation_id=None,
        min_count=parsed_args.min,
        max_count=parsed_args.max,
        security_groups=parsed_args.security_group,
        userdata=userdata,
        key_name=parsed_args.key_name,
        availability_zone=parsed_args.availability_zone,
        block_device_mapping=block_device_mapping,
        nics=nics,
        scheduler_hints=hints,
        config_drive=config_drive)

    self.log.debug('boot_args: %s', boot_args)
    self.log.debug('boot_kwargs: %s', boot_kwargs)

    # Wrap the call to catch exceptions in order to close files
    try:
        server = compute_client.servers.create(*boot_args, **boot_kwargs)
    finally:
        # Clean up open files - make sure they are not strings
        # NOTE(review): iterating `files` yields its destination-path
        # keys (strings), so these file objects are never closed —
        # this should likely iterate files.values(); confirm and fix.
        for f in files:
            if hasattr(f, 'close'):
                f.close()
        if hasattr(userdata, 'close'):
            userdata.close()

    if parsed_args.wait:
        if utils.wait_for_status(
            compute_client.servers.get,
            server.id,
            callback=_show_progress,
        ):
            sys.stdout.write('\n')
        else:
            self.log.error(_('Error creating server: %s'),
                           parsed_args.server_name)
            sys.stdout.write(_('\nError creating server'))
            raise SystemExit

    details = _prep_server_detail(compute_client, server)
    return zip(*sorted(six.iteritems(details)))
def take_action(self, parsed_args):
    """Create a data-processing cluster and show its details.

    The cluster is either described entirely by a JSON template
    (``--json``) or assembled from individual arguments.  With
    ``--wait`` the command blocks until the cluster(s) become active.

    :param parsed_args: argparse namespace produced by get_parser()
    :returns: (columns, data) tuple for cliff's ShowOne rendering
    :raises exceptions.CommandError: on an unreadable/invalid JSON
        template or when required arguments are missing
    """
    # Lazy %-style logging args: the message is only rendered when
    # debug logging is actually enabled.
    self.log.debug("take_action(%s)", parsed_args)
    client = self.app.client_manager.data_processing
    network_client = self.app.client_manager.network

    if parsed_args.json:
        # Whole cluster definition comes from a JSON template file.
        blob = osc_utils.read_blob_file_contents(parsed_args.json)
        try:
            template = json.loads(blob)
        except ValueError as e:
            raise exceptions.CommandError(
                'An error occurred when reading '
                'template from file %s: %s' % (parsed_args.json, e))

        # The REST API expects 'net_id', not the CLI-facing
        # 'neutron_management_network' key.
        if 'neutron_management_network' in template:
            template['net_id'] = template.pop('neutron_management_network')

        if 'count' in template:
            parsed_args.count = template['count']

        data = client.clusters.create(**template).to_dict()
    else:
        if not parsed_args.name or not parsed_args.cluster_template \
                or not parsed_args.image:
            raise exceptions.CommandError(
                'At least --name , --cluster-template, --image arguments '
                'should be specified or json template should be provided '
                'with --json argument')

        plugin, version, template_id = _get_plugin_version(
            parsed_args.cluster_template, client)

        image_id = utils.get_resource(client.images, parsed_args.image).id

        net_id = (network_client.api.find_attr(
            'networks', parsed_args.neutron_network)['id']
            if parsed_args.neutron_network else None)

        data = client.clusters.create(
            name=parsed_args.name,
            plugin_name=plugin,
            hadoop_version=version,
            cluster_template_id=template_id,
            default_image_id=image_id,
            description=parsed_args.description,
            is_transient=parsed_args.transient,
            user_keypair_id=parsed_args.user_keypair,
            net_id=net_id,
            count=parsed_args.count,
            is_public=parsed_args.public,
            is_protected=parsed_args.protected
        ).to_dict()

    if parsed_args.count and parsed_args.count > 1:
        # A multi-cluster request returns the ids of the created
        # clusters; fetch each one.  (Loop variable renamed so the
        # builtin ``id`` is not shadowed.)
        clusters = [
            utils.get_resource(client.clusters, cluster_id)
            for cluster_id in data['clusters']]

        if parsed_args.wait:
            for cluster in clusters:
                if not osc_utils.wait_for_status(
                        client.clusters.get, cluster.id):
                    self.log.error(
                        'Error occurred during cluster creation: %s',
                        data['id'])

        # For multiple clusters the output is a name -> id mapping.
        data = {}
        for cluster in clusters:
            data[cluster.name] = cluster.id
    else:
        if parsed_args.wait:
            if not osc_utils.wait_for_status(
                    client.clusters.get, data['id']):
                self.log.error(
                    'Error occurred during cluster creation: %s',
                    data['id'])
            # Refresh: pick up the post-wait state of the cluster.
            data = client.clusters.get(data['id']).to_dict()
        _format_cluster_output(data)
        data = utils.prepare_data(data, CLUSTER_FIELDS)

    return self.dict2columns(data)
def take_action(self, parsed_args):
    """Create (boot) a new server and show its details.

    With ``--wait`` the command blocks until the server reaches an
    active state, showing build progress on the way.

    :param parsed_args: argparse namespace produced by get_parser()
    :returns: (columns, data) tuple describing the new server
    :raises exceptions.CommandError: on unreadable injected files /
        user-data, or invalid min/max instance counts
    :raises SystemExit: if the server ends up in an error state
    """
    self.log.debug('take_action(%s)', parsed_args)

    compute_client = self.app.client_manager.compute

    # Lookup parsed_args.image
    image = utils.find_resource(compute_client.images,
                                parsed_args.image)

    # Lookup parsed_args.flavor
    flavor = utils.find_resource(compute_client.flavors,
                                 parsed_args.flavor)

    boot_args = [parsed_args.server_name, image, flavor]

    # Files to inject into the instance: "dest=src" pairs.
    files = {}
    for f in parsed_args.file:
        dst, src = f.split('=', 1)
        try:
            files[dst] = open(src)
        except IOError as e:
            raise exceptions.CommandError("Can't open '%s': %s" % (src, e))

    if parsed_args.min > parsed_args.max:
        msg = "min instances should be <= max instances"
        raise exceptions.CommandError(msg)
    if parsed_args.min < 1:
        msg = "min instances should be > 0"
        raise exceptions.CommandError(msg)
    if parsed_args.max < 1:
        msg = "max instances should be > 0"
        raise exceptions.CommandError(msg)

    userdata = None
    if parsed_args.user_data:
        try:
            userdata = open(parsed_args.user_data)
        except IOError as e:
            msg = "Can't open '%s': %s"
            raise exceptions.CommandError(msg % (parsed_args.user_data, e))

    block_device_mapping = dict(
        v.split('=', 1) for v in parsed_args.block_device_mapping)

    nics = []
    for nic_str in parsed_args.nic:
        nic_info = {"net-id": "", "v4-fixed-ip": ""}
        nic_info.update(dict(kv_str.split("=", 1)
                             for kv_str in nic_str.split(",")))
        nics.append(nic_info)

    hints = {}
    for hint in parsed_args.hint:
        key, _sep, value = hint.partition('=')
        # NOTE(vish): multiple copies of the same hint will
        #             result in a list of values
        if key in hints:
            if isinstance(hints[key], six.string_types):
                hints[key] = [hints[key]]
            hints[key] += [value]
        else:
            hints[key] = value

    # What does a non-boolean value for config-drive do?
    # --config-drive argument is either a volume id or
    # 'True' (or '1') to use an ephemeral volume
    if str(parsed_args.config_drive).lower() in ("true", "1"):
        config_drive = True
    elif str(parsed_args.config_drive).lower() in ("false", "0",
                                                   "", "none"):
        config_drive = None
    else:
        config_drive = parsed_args.config_drive

    boot_kwargs = dict(meta=parsed_args.property,
                       files=files,
                       reservation_id=None,
                       min_count=parsed_args.min,
                       max_count=parsed_args.max,
                       security_groups=parsed_args.security_group,
                       userdata=userdata,
                       key_name=parsed_args.key_name,
                       availability_zone=parsed_args.availability_zone,
                       block_device_mapping=block_device_mapping,
                       nics=nics,
                       scheduler_hints=hints,
                       config_drive=config_drive)

    self.log.debug('boot_args: %s', boot_args)
    self.log.debug('boot_kwargs: %s', boot_kwargs)

    # Wrap the call so injected files and user-data are always
    # closed, even when create() raises (fixes a file-handle leak).
    try:
        server = compute_client.servers.create(*boot_args, **boot_kwargs)
    finally:
        # Clean up open files - guard with hasattr in case a value
        # is not a real file object.
        for f in files.values():
            if hasattr(f, 'close'):
                f.close()
        if hasattr(userdata, 'close'):
            userdata.close()

    if parsed_args.wait:
        if utils.wait_for_status(
            compute_client.servers.get,
            server.id,
            callback=_show_progress,
        ):
            sys.stdout.write('\n')
        else:
            self.log.error('Error creating server: %s',
                           parsed_args.server_name)
            sys.stdout.write('\nError creating server')
            raise SystemExit

    details = _prep_server_detail(compute_client, server)
    return zip(*sorted(six.iteritems(details)))
def take_action(self, parsed_args):
    """Create (boot) a new server and show its details.

    With ``--wait`` the command blocks until the server reaches an
    active state, showing build progress on the way.

    :param parsed_args: argparse namespace produced by get_parser()
    :returns: (columns, data) tuple describing the new server
    :raises exceptions.CommandError: on unreadable injected files /
        user-data, or invalid min/max instance counts
    :raises SystemExit: if the server ends up in an error state
    """
    self.log.debug('take_action(%s)', parsed_args)

    compute_client = self.app.client_manager.compute

    # Lookup parsed_args.image
    image = utils.find_resource(compute_client.images,
                                parsed_args.image)

    # Lookup parsed_args.flavor
    flavor = utils.find_resource(compute_client.flavors,
                                 parsed_args.flavor)

    boot_args = [parsed_args.server_name, image, flavor]

    # Files to inject into the instance: "dest=src" pairs.
    files = {}
    for f in parsed_args.file:
        dst, src = f.split('=', 1)
        try:
            files[dst] = open(src)
        except IOError as e:
            raise exceptions.CommandError("Can't open '%s': %s" % (src, e))

    if parsed_args.min > parsed_args.max:
        raise exceptions.CommandError("min instances should be <= "
                                      "max instances")
    if parsed_args.min < 1:
        raise exceptions.CommandError("min instances should be > 0")
    if parsed_args.max < 1:
        raise exceptions.CommandError("max instances should be > 0")

    userdata = None
    if parsed_args.user_data:
        try:
            userdata = open(parsed_args.user_data)
        except IOError as e:
            raise exceptions.CommandError("Can't open '%s': %s" %
                                          (parsed_args.user_data, e))

    block_device_mapping = dict(v.split('=', 1)
                                for v in parsed_args.block_device_mapping)

    nics = []
    for nic_str in parsed_args.nic:
        nic_info = {"net-id": "", "v4-fixed-ip": ""}
        nic_info.update(dict(kv_str.split("=", 1)
                             for kv_str in nic_str.split(",")))
        nics.append(nic_info)

    hints = {}
    for hint in parsed_args.hint:
        key, _sep, value = hint.partition('=')
        # NOTE(vish): multiple copies of the same hint will
        #             result in a list of values
        if key in hints:
            if isinstance(hints[key], six.string_types):
                hints[key] = [hints[key]]
            hints[key] += [value]
        else:
            hints[key] = value

    # What does a non-boolean value for config-drive do?
    # --config-drive argument is either a volume id or
    # 'True' (or '1') to use an ephemeral volume
    if str(parsed_args.config_drive).lower() in ("true", "1"):
        config_drive = True
    elif str(parsed_args.config_drive).lower() in ("false", "0",
                                                   "", "none"):
        config_drive = None
    else:
        config_drive = parsed_args.config_drive

    boot_kwargs = dict(
        meta=parsed_args.property,
        files=files,
        reservation_id=None,
        min_count=parsed_args.min,
        max_count=parsed_args.max,
        security_groups=parsed_args.security_group,
        userdata=userdata,
        key_name=parsed_args.key_name,
        availability_zone=parsed_args.availability_zone,
        block_device_mapping=block_device_mapping,
        nics=nics,
        scheduler_hints=hints,
        config_drive=config_drive)

    self.log.debug('boot_args: %s', boot_args)
    self.log.debug('boot_kwargs: %s', boot_kwargs)

    # Wrap the call so injected files and user-data are always
    # closed, even when create() raises (fixes a file-handle leak).
    try:
        server = compute_client.servers.create(*boot_args, **boot_kwargs)
    finally:
        # Clean up open files - guard with hasattr in case a value
        # is not a real file object.
        for f in files.values():
            if hasattr(f, 'close'):
                f.close()
        if hasattr(userdata, 'close'):
            userdata.close()

    if parsed_args.wait:
        if utils.wait_for_status(
            compute_client.servers.get,
            server.id,
            callback=_show_progress,
        ):
            sys.stdout.write('\n')
        else:
            self.log.error('Error creating server: %s',
                           parsed_args.server_name)
            sys.stdout.write('\nError creating server')
            raise SystemExit

    details = _prep_server_detail(compute_client, server)
    return zip(*sorted(six.iteritems(details)))
def take_action(self, parsed_args):
    """Create a data-processing cluster and show its details.

    The cluster is either described entirely by a JSON template
    (``--json``) or assembled from individual arguments.  With
    ``--wait`` the command blocks until the cluster(s) become active.

    :param parsed_args: argparse namespace produced by get_parser()
    :returns: (columns, data) tuple for cliff's ShowOne rendering
    :raises exceptions.CommandError: on an unreadable/invalid JSON
        template or when required arguments are missing
    """
    # Lazy %-style logging args: the message is only rendered when
    # the corresponding log level is actually enabled.
    self.log.debug("take_action(%s)", parsed_args)
    client = self.app.client_manager.data_processing
    network_client = self.app.client_manager.network

    if parsed_args.json:
        # Whole cluster definition comes from a JSON template file.
        blob = osc_utils.read_blob_file_contents(parsed_args.json)
        try:
            template = json.loads(blob)
        except ValueError as e:
            raise exceptions.CommandError('An error occurred when reading '
                                          'template from file %s: %s' %
                                          (parsed_args.json, e))

        # The REST API expects 'net_id', not the CLI-facing
        # 'neutron_management_network' key.
        if 'neutron_management_network' in template:
            template['net_id'] = template.pop('neutron_management_network')

        if 'count' in template:
            parsed_args.count = template['count']

        data = client.clusters.create(**template).to_dict()
    else:
        if not parsed_args.name or not parsed_args.cluster_template \
                or not parsed_args.image:
            raise exceptions.CommandError(
                'At least --name , --cluster-template, --image arguments '
                'should be specified or json template should be provided '
                'with --json argument')

        plugin, plugin_version, template_id = _get_plugin_version(
            parsed_args.cluster_template, client)

        image_id = utils.get_resource_id(client.images, parsed_args.image)

        net_id = (network_client.find_network(
            parsed_args.neutron_network, ignore_missing=False).id
            if parsed_args.neutron_network else None)

        data = client.clusters.create(
            name=parsed_args.name,
            plugin_name=plugin,
            hadoop_version=plugin_version,
            cluster_template_id=template_id,
            default_image_id=image_id,
            description=parsed_args.description,
            is_transient=parsed_args.transient,
            user_keypair_id=parsed_args.user_keypair,
            net_id=net_id,
            count=parsed_args.count,
            is_public=parsed_args.public,
            is_protected=parsed_args.protected).to_dict()

    if parsed_args.count and parsed_args.count > 1:
        # A multi-cluster request returns the ids of the created
        # clusters; fetch each one.  (Loop variable renamed so the
        # builtin ``id`` is not shadowed.)
        clusters = [
            utils.get_resource(client.clusters, cluster_id)
            for cluster_id in data['clusters']]

        if parsed_args.wait:
            for cluster in clusters:
                if not osc_utils.wait_for_status(client.clusters.get,
                                                 cluster.id):
                    self.log.error(
                        'Error occurred during cluster creation: %s',
                        data['id'])

        # For multiple clusters the output is a name -> id mapping.
        data = {}
        for cluster in clusters:
            data[cluster.name] = cluster.id
    else:
        if parsed_args.wait:
            if not osc_utils.wait_for_status(client.clusters.get,
                                             data['id']):
                self.log.error(
                    'Error occurred during cluster creation: %s',
                    data['id'])
            # Refresh: pick up the post-wait state of the cluster.
            data = client.clusters.get(data['id']).to_dict()
        _format_cluster_output(data)
        data = utils.prepare_data(data, CLUSTER_FIELDS)

    return self.dict2columns(data)