def rax_asg(module, cooldown=300, disk_config=None, files=None, flavor=None,
            image=None, key_name=None, loadbalancers=None, meta=None,
            min_entities=0, max_entities=0, name=None, networks=None,
            server_name=None, state='present', user_data=None,
            config_drive=False, wait=True, wait_timeout=300):
    """Ensure a Rackspace autoscale scaling group is present or absent.

    When ``state == 'present'`` the group named *name* is created if missing,
    otherwise its group settings (cooldown, min/max entities) and launch
    configuration are updated in place to match the arguments.  When
    ``state != 'present'`` the group is deleted if it exists.  The function
    never returns normally: it always exits through ``module.exit_json`` or
    ``module.fail_json``.
    """
    files = {} if files is None else files
    loadbalancers = [] if loadbalancers is None else loadbalancers
    meta = {} if meta is None else meta
    networks = [] if networks is None else networks

    changed = False

    au = pyrax.autoscale
    if not au:
        module.fail_json(msg='Failed to instantiate clients. This '
                             'typically indicates an invalid region or an '
                             'incorrectly capitalized region name.')

    # Rackspace delivers user_data via the config drive, so force it on.
    if user_data:
        config_drive = True

    # user_data may be a path to a local file; if so, inline its contents.
    if user_data and os.path.isfile(user_data):
        try:
            # Use a context manager so the handle is closed even if read fails
            with open(user_data) as f:
                user_data = f.read()
        except Exception:
            module.fail_json(msg='Failed to load %s' % user_data)

    if state == 'present':
        # Normalize and ensure all metadata values are strings
        if meta:
            for k, v in meta.items():
                if isinstance(v, list):
                    meta[k] = ','.join(['%s' % i for i in v])
                elif isinstance(v, dict):
                    meta[k] = json.dumps(v)
                elif not isinstance(v, string_types):
                    meta[k] = '%s' % v

        if image:
            image = rax_find_image(module, pyrax, image)

        nics = []
        if networks:
            for network in networks:
                nics.extend(rax_find_network(module, pyrax, network))

            for nic in nics:
                # pyrax is currently returning net-id, but we need uuid
                # this check makes this forward compatible for a time when
                # pyrax uses uuid instead
                if nic.get('net-id'):
                    nic.update(uuid=nic['net-id'])
                    del nic['net-id']

        # Handle the file contents: files maps remote path -> local path
        personality = []
        if files:
            for rpath in files.keys():
                lpath = os.path.expanduser(files[rpath])
                try:
                    # Context manager avoids leaking the handle on error
                    with open(lpath, 'r') as f:
                        personality.append({
                            'path': rpath,
                            'contents': f.read()
                        })
                except Exception:
                    module.fail_json(msg='Failed to load %s' % lpath)

        # Validate load balancer id/port pairs before passing to pyrax
        lbs = []
        if loadbalancers:
            for lb in loadbalancers:
                try:
                    lb_id = int(lb.get('id'))
                except (ValueError, TypeError):
                    module.fail_json(msg='Load balancer ID is not an integer: '
                                         '%s' % lb.get('id'))
                try:
                    port = int(lb.get('port'))
                except (ValueError, TypeError):
                    module.fail_json(msg='Load balancer port is not an '
                                         'integer: %s' % lb.get('port'))
                if not lb_id or not port:
                    continue
                lbs.append((lb_id, port))

        try:
            sg = au.find(name=name)
        except pyrax.exceptions.NoUniqueMatch as e:
            # str(e) works on both Python 2 and 3; e.message is py2-only
            module.fail_json(msg='%s' % e)
        except pyrax.exceptions.NotFound:
            try:
                sg = au.create(name, cooldown=cooldown,
                               min_entities=min_entities,
                               max_entities=max_entities,
                               launch_config_type='launch_server',
                               server_name=server_name, image=image,
                               flavor=flavor, disk_config=disk_config,
                               metadata=meta, personality=personality,
                               networks=nics, load_balancers=lbs,
                               key_name=key_name, config_drive=config_drive,
                               user_data=user_data)
                changed = True
            except Exception as e:
                module.fail_json(msg='%s' % e)

        if not changed:
            # Scaling Group Updates
            group_args = {}
            if cooldown != sg.cooldown:
                group_args['cooldown'] = cooldown

            if min_entities != sg.min_entities:
                group_args['min_entities'] = min_entities

            if max_entities != sg.max_entities:
                group_args['max_entities'] = max_entities

            if group_args:
                changed = True
                sg.update(**group_args)

            # Launch Configuration Updates
            lc = sg.get_launch_config()

            lc_args = {}
            if server_name != lc.get('name'):
                lc_args['server_name'] = server_name

            if image != lc.get('image'):
                lc_args['image'] = image

            if flavor != lc.get('flavor'):
                lc_args['flavor'] = flavor

            disk_config = disk_config or 'AUTO'
            if ((disk_config or lc.get('disk_config')) and
                    disk_config != lc.get('disk_config', 'AUTO')):
                lc_args['disk_config'] = disk_config

            if (meta or lc.get('meta')) and meta != lc.get('metadata'):
                lc_args['metadata'] = meta

            # The API stores personality contents base64-encoded, so encode
            # ours before comparing for idempotency.
            test_personality = []
            for p in personality:
                test_personality.append({
                    'path': p['path'],
                    'contents': base64.b64encode(p['contents'])
                })

            if ((test_personality or lc.get('personality')) and
                    test_personality != lc.get('personality')):
                lc_args['personality'] = personality

            if nics != lc.get('networks'):
                lc_args['networks'] = nics

            if lbs != lc.get('load_balancers'):
                # Work around for https://github.com/rackspace/pyrax/pull/393
                lc_args['load_balancers'] = sg.manager._resolve_lbs(lbs)

            if key_name != lc.get('key_name'):
                lc_args['key_name'] = key_name

            if config_drive != lc.get('config_drive', False):
                lc_args['config_drive'] = config_drive

            if (user_data and
                    base64.b64encode(user_data) != lc.get('user_data')):
                lc_args['user_data'] = user_data

            if lc_args:
                # Work around for https://github.com/rackspace/pyrax/pull/389
                if 'flavor' not in lc_args:
                    lc_args['flavor'] = lc.get('flavor')
                changed = True
                sg.update_launch_config(**lc_args)

        # Refresh the group so the returned facts are current
        sg.get()

        if wait:
            end_time = time.time() + wait_timeout
            infinite = wait_timeout == 0
            while infinite or time.time() < end_time:
                # Renamed from 'state' to avoid shadowing the state parameter
                sg_state = sg.get_state()
                if sg_state["pending_capacity"] == 0:
                    break

                time.sleep(5)

        module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg))

    else:
        # state != 'present': delete the group if it exists
        try:
            sg = au.find(name=name)
            sg.delete()
            changed = True
        except pyrax.exceptions.NotFound:
            # Already absent: nothing to do, report unchanged
            sg = {}
        except Exception as e:
            module.fail_json(msg='%s' % e)

        module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg))
def rax_asg(module, cooldown=300, disk_config=None, files=None, flavor=None,
            image=None, key_name=None, loadbalancers=None, meta=None,
            min_entities=0, max_entities=0, name=None, networks=None,
            server_name=None, state='present', user_data=None,
            config_drive=False, wait=True, wait_timeout=300):
    """Create, update, or delete a Rackspace autoscale scaling group.

    With ``state == 'present'`` the group *name* is created when missing;
    otherwise its cooldown/min/max settings and launch configuration are
    reconciled against the supplied arguments.  Any other *state* deletes the
    group if found.  Control always leaves via ``module.exit_json`` or
    ``module.fail_json``.
    """
    files = {} if files is None else files
    loadbalancers = [] if loadbalancers is None else loadbalancers
    meta = {} if meta is None else meta
    networks = [] if networks is None else networks

    changed = False

    au = pyrax.autoscale
    if not au:
        module.fail_json(msg='Failed to instantiate clients. This '
                             'typically indicates an invalid region or an '
                             'incorrectly capitalized region name.')

    # user_data is delivered via the config drive, so enable it implicitly.
    if user_data:
        config_drive = True

    # Accept a local file path for user_data and inline its contents.
    if user_data and os.path.isfile(user_data):
        try:
            # 'with' guarantees the handle is closed even on a read error
            with open(user_data) as f:
                user_data = f.read()
        except Exception:
            module.fail_json(msg='Failed to load %s' % user_data)

    if state == 'present':
        # Normalize and ensure all metadata values are strings
        if meta:
            for k, v in meta.items():
                if isinstance(v, list):
                    meta[k] = ','.join(['%s' % i for i in v])
                elif isinstance(v, dict):
                    meta[k] = json.dumps(v)
                elif not isinstance(v, string_types):
                    meta[k] = '%s' % v

        if image:
            image = rax_find_image(module, pyrax, image)

        nics = []
        if networks:
            for network in networks:
                nics.extend(rax_find_network(module, pyrax, network))

            for nic in nics:
                # pyrax is currently returning net-id, but we need uuid
                # this check makes this forward compatible for a time when
                # pyrax uses uuid instead
                if nic.get('net-id'):
                    nic.update(uuid=nic['net-id'])
                    del nic['net-id']

        # Handle the file contents: files maps remote path -> local path
        personality = []
        if files:
            for rpath in files.keys():
                lpath = os.path.expanduser(files[rpath])
                try:
                    # Context manager prevents a leaked handle on failure
                    with open(lpath, 'r') as f:
                        personality.append({
                            'path': rpath,
                            'contents': f.read()
                        })
                except Exception:
                    module.fail_json(msg='Failed to load %s' % lpath)

        # Coerce and validate load balancer (id, port) pairs
        lbs = []
        if loadbalancers:
            for lb in loadbalancers:
                try:
                    lb_id = int(lb.get('id'))
                except (ValueError, TypeError):
                    module.fail_json(msg='Load balancer ID is not an integer: '
                                         '%s' % lb.get('id'))
                try:
                    port = int(lb.get('port'))
                except (ValueError, TypeError):
                    module.fail_json(msg='Load balancer port is not an '
                                         'integer: %s' % lb.get('port'))
                if not lb_id or not port:
                    continue
                lbs.append((lb_id, port))

        try:
            sg = au.find(name=name)
        except pyrax.exceptions.NoUniqueMatch as e:
            # '%s' % e is portable; e.message only exists on Python 2
            module.fail_json(msg='%s' % e)
        except pyrax.exceptions.NotFound:
            try:
                sg = au.create(name, cooldown=cooldown,
                               min_entities=min_entities,
                               max_entities=max_entities,
                               launch_config_type='launch_server',
                               server_name=server_name, image=image,
                               flavor=flavor, disk_config=disk_config,
                               metadata=meta, personality=personality,
                               networks=nics, load_balancers=lbs,
                               key_name=key_name, config_drive=config_drive,
                               user_data=user_data)
                changed = True
            except Exception as e:
                module.fail_json(msg='%s' % e)

        if not changed:
            # Scaling Group Updates
            group_args = {}
            if cooldown != sg.cooldown:
                group_args['cooldown'] = cooldown

            if min_entities != sg.min_entities:
                group_args['min_entities'] = min_entities

            if max_entities != sg.max_entities:
                group_args['max_entities'] = max_entities

            if group_args:
                changed = True
                sg.update(**group_args)

            # Launch Configuration Updates
            lc = sg.get_launch_config()

            lc_args = {}
            if server_name != lc.get('name'):
                lc_args['server_name'] = server_name

            if image != lc.get('image'):
                lc_args['image'] = image

            if flavor != lc.get('flavor'):
                lc_args['flavor'] = flavor

            disk_config = disk_config or 'AUTO'
            if ((disk_config or lc.get('disk_config')) and
                    disk_config != lc.get('disk_config', 'AUTO')):
                lc_args['disk_config'] = disk_config

            if (meta or lc.get('meta')) and meta != lc.get('metadata'):
                lc_args['metadata'] = meta

            # Compare against the API's base64-encoded personality entries
            test_personality = []
            for p in personality:
                test_personality.append({
                    'path': p['path'],
                    'contents': base64.b64encode(p['contents'])
                })

            if ((test_personality or lc.get('personality')) and
                    test_personality != lc.get('personality')):
                lc_args['personality'] = personality

            if nics != lc.get('networks'):
                lc_args['networks'] = nics

            if lbs != lc.get('load_balancers'):
                # Work around for https://github.com/rackspace/pyrax/pull/393
                lc_args['load_balancers'] = sg.manager._resolve_lbs(lbs)

            if key_name != lc.get('key_name'):
                lc_args['key_name'] = key_name

            if config_drive != lc.get('config_drive', False):
                lc_args['config_drive'] = config_drive

            if (user_data and
                    base64.b64encode(user_data) != lc.get('user_data')):
                lc_args['user_data'] = user_data

            if lc_args:
                # Work around for https://github.com/rackspace/pyrax/pull/389
                if 'flavor' not in lc_args:
                    lc_args['flavor'] = lc.get('flavor')
                changed = True
                sg.update_launch_config(**lc_args)

        # Refresh so the facts returned below are current
        sg.get()

        if wait:
            end_time = time.time() + wait_timeout
            infinite = wait_timeout == 0
            while infinite or time.time() < end_time:
                # Avoid shadowing the 'state' parameter
                sg_state = sg.get_state()
                if sg_state["pending_capacity"] == 0:
                    break

                time.sleep(5)

        module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg))

    else:
        # Deletion path: absent group is treated as unchanged
        try:
            sg = au.find(name=name)
            sg.delete()
            changed = True
        except pyrax.exceptions.NotFound:
            sg = {}
        except Exception as e:
            module.fail_json(msg='%s' % e)

        module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg))
def cloudservers(module, state=None, name=None, flavor=None, image=None,
                 meta=None, key_name=None, files=None, wait=True,
                 wait_timeout=300, disk_config=None, count=1, group=None,
                 instance_ids=None, exact_count=False, networks=None,
                 count_offset=0, auto_increment=False, extra_create_args=None,
                 user_data=None, config_drive=False, boot_from_volume=False,
                 boot_volume=None, boot_volume_size=None,
                 boot_volume_terminate=False):
    """Ensure a desired set of Rackspace cloud servers exists or is removed.

    ``state='present'`` creates servers (optionally to an exact count within a
    metadata *group*, with printf-style auto-incremented names); any other
    handling deletes matched servers.  Control always leaves via the
    ``create``/``delete`` helpers or ``module.exit_json``/``fail_json``.
    """
    meta = {} if meta is None else meta
    files = {} if files is None else files
    # Preserve whether the caller explicitly supplied instance IDs before
    # normalizing to a list; the 'absent' logic below depends on it.
    explicit_instance_ids = instance_ids is not None
    instance_ids = [] if instance_ids is None else instance_ids
    networks = [] if networks is None else networks
    extra_create_args = {} if extra_create_args is None else extra_create_args

    cs = pyrax.cloudservers
    cnw = pyrax.cloud_networks
    if not cnw:
        module.fail_json(msg='Failed to instantiate client. This '
                             'typically indicates an invalid region or an '
                             'incorrectly capitalized region name.')

    # BUGFIX: instance_ids is normalized above, so the original
    # 'instance_ids is None' test was always False and state=absent without
    # explicit IDs skipped validation and matching entirely.
    if state == 'present' or (state == 'absent' and not explicit_instance_ids):
        if not boot_from_volume and not boot_volume and not image:
            module.fail_json(msg='image is required for the "rax" module')

        for arg, value in dict(name=name, flavor=flavor).items():
            if not value:
                module.fail_json(msg='%s is required for the "rax" module' %
                                     arg)

        if boot_from_volume and not image and not boot_volume:
            module.fail_json(msg='image or boot_volume are required for the '
                                 '"rax" with boot_from_volume')

        if boot_from_volume and image and not boot_volume_size:
            module.fail_json(msg='boot_volume_size is required for the "rax" '
                                 'module with boot_from_volume and image')

        if boot_from_volume and image and boot_volume:
            image = None

    servers = []

    # Add the group meta key
    if group and 'group' not in meta:
        meta['group'] = group
    elif 'group' in meta and group is None:
        group = meta['group']

    # Normalize and ensure all metadata values are strings
    for k, v in meta.items():
        if isinstance(v, list):
            meta[k] = ','.join(['%s' % i for i in v])
        elif isinstance(v, dict):
            meta[k] = json.dumps(v)
        elif not isinstance(v, string_types):
            meta[k] = '%s' % v

    # When using state=absent with group, the absent block won't match the
    # names properly. Use the exact_count functionality to decrease the count
    # to the desired level
    was_absent = False
    if group is not None and state == 'absent':
        exact_count = True
        state = 'present'
        was_absent = True

    if image:
        image = rax_find_image(module, pyrax, image)

    nics = []
    if networks:
        for network in networks:
            nics.extend(rax_find_network(module, pyrax, network))

    # act on the state
    if state == 'present':
        # Idempotent ensurance of a specific count of servers
        if exact_count is not False:
            # See if we can find servers that match our options
            if group is None:
                module.fail_json(msg='"group" must be provided when using '
                                     '"exact_count"')

            if auto_increment:
                numbers = set()

                # See if the name is a printf like string, if not append
                # %d to the end
                try:
                    name % 0
                except TypeError as e:
                    # str(e) is portable; e.message only exists on Python 2
                    if str(e).startswith('not all'):
                        name = '%s%%d' % name
                    else:
                        module.fail_json(msg='%s' % e)

                # regex pattern to match printf formatting
                pattern = re.sub(r'%\d*[sd]', r'(\d+)', name)
                for server in cs.servers.list():
                    # Ignore DELETED servers
                    if server.status == 'DELETED':
                        continue
                    if server.metadata.get('group') == group:
                        servers.append(server)
                    match = re.search(pattern, server.name)
                    if match:
                        number = int(match.group(1))
                        numbers.add(number)

                number_range = xrange(count_offset, count_offset + count)
                available_numbers = list(set(number_range)
                                         .difference(numbers))
            else:  # Not auto incrementing
                for server in cs.servers.list():
                    # Ignore DELETED servers
                    if server.status == 'DELETED':
                        continue
                    if server.metadata.get('group') == group:
                        servers.append(server)
                # available_numbers not needed here, we inspect auto_increment
                # again later

            # If state was absent but the count was changed,
            # assume we only wanted to remove that number of instances
            if was_absent:
                diff = len(servers) - count
                if diff < 0:
                    count = 0
                else:
                    count = diff

            if len(servers) > count:
                # We have more servers than we need, set state='absent'
                # and delete the extras, this should delete the oldest
                state = 'absent'
                kept = servers[:count]
                del servers[:count]
                instance_ids = []
                for server in servers:
                    instance_ids.append(server.id)
                delete(module, instance_ids=instance_ids, wait=wait,
                       wait_timeout=wait_timeout, kept=kept)
            elif len(servers) < count:
                # we have fewer servers than we need
                if auto_increment:
                    # auto incrementing server numbers
                    names = []
                    name_slice = count - len(servers)
                    numbers_to_use = available_numbers[:name_slice]
                    for number in numbers_to_use:
                        names.append(name % number)
                else:
                    # We are not auto incrementing server numbers,
                    # create a list of 'name' that matches how many we need
                    names = [name] * (count - len(servers))
            else:
                # we have the right number of servers, just return info
                # about all of the matched servers
                instances = []
                instance_ids = []
                for server in servers:
                    instances.append(rax_to_dict(server, 'server'))
                    instance_ids.append(server.id)
                module.exit_json(changed=False, action=None,
                                 instances=instances,
                                 success=[], error=[], timeout=[],
                                 instance_ids={'instances': instance_ids,
                                               'success': [], 'error': [],
                                               'timeout': []})
        else:  # not called with exact_count=True
            if group is not None:
                if auto_increment:
                    # we are auto incrementing server numbers, but not with
                    # exact_count
                    numbers = set()

                    # See if the name is a printf like string, if not append
                    # %d to the end
                    try:
                        name % 0
                    except TypeError as e:
                        if str(e).startswith('not all'):
                            name = '%s%%d' % name
                        else:
                            module.fail_json(msg='%s' % e)

                    # regex pattern to match printf formatting
                    pattern = re.sub(r'%\d*[sd]', r'(\d+)', name)
                    for server in cs.servers.list():
                        # Ignore DELETED servers
                        if server.status == 'DELETED':
                            continue
                        if server.metadata.get('group') == group:
                            servers.append(server)
                        match = re.search(pattern, server.name)
                        if match:
                            number = int(match.group(1))
                            numbers.add(number)

                    number_range = xrange(count_offset,
                                          count_offset + count + len(numbers))
                    available_numbers = list(set(number_range)
                                             .difference(numbers))
                    names = []
                    numbers_to_use = available_numbers[:count]
                    for number in numbers_to_use:
                        names.append(name % number)
                else:
                    # Not auto incrementing
                    names = [name] * count
            else:  # No group was specified, and not using exact_count
                # Perform more simplistic matching
                search_opts = {
                    'name': '^%s$' % name,
                    'flavor': flavor
                }
                servers = []
                for server in cs.servers.list(search_opts=search_opts):
                    # Ignore DELETED servers
                    if server.status == 'DELETED':
                        continue

                    if not rax_find_server_image(module, server, image,
                                                 boot_volume):
                        continue

                    # Ignore servers with non matching metadata
                    if server.metadata != meta:
                        continue
                    servers.append(server)

                if len(servers) >= count:
                    # We have more servers than were requested, don't do
                    # anything. Not running with exact_count=True, so we
                    # assume more is OK
                    instances = []
                    for server in servers:
                        instances.append(rax_to_dict(server, 'server'))

                    instance_ids = [i['id'] for i in instances]
                    module.exit_json(changed=False, action=None,
                                     instances=instances,
                                     success=[], error=[], timeout=[],
                                     instance_ids={'instances': instance_ids,
                                                   'success': [], 'error': [],
                                                   'timeout': []})

                # We need more servers to reach out target, create names for
                # them, we aren't performing auto_increment here
                names = [name] * (count - len(servers))

        block_device_mapping_v2 = []
        if boot_from_volume:
            mapping = {
                'boot_index': '0',
                'delete_on_termination': boot_volume_terminate,
                'destination_type': 'volume',
            }
            if image:
                mapping.update({
                    'uuid': image,
                    'source_type': 'image',
                    'volume_size': boot_volume_size,
                })
                # Image is consumed by the mapping; don't pass it twice
                image = None
            elif boot_volume:
                volume = rax_find_volume(module, pyrax, boot_volume)
                mapping.update({
                    'uuid': pyrax.utils.get_id(volume),
                    'source_type': 'volume',
                })
            block_device_mapping_v2.append(mapping)

        create(module, names=names, flavor=flavor, image=image,
               meta=meta, key_name=key_name, files=files, wait=wait,
               wait_timeout=wait_timeout, disk_config=disk_config,
               group=group, nics=nics,
               extra_create_args=extra_create_args, user_data=user_data,
               config_drive=config_drive, existing=servers,
               block_device_mapping_v2=block_device_mapping_v2)

    elif state == 'absent':
        # BUGFIX: see note above — the normalized list is never None, so the
        # match-by-name path must trigger on "no explicit IDs supplied".
        if not explicit_instance_ids:
            # We weren't given an explicit list of server IDs to delete
            # Let's match instead
            search_opts = {
                'name': '^%s$' % name,
                'flavor': flavor
            }
            for server in cs.servers.list(search_opts=search_opts):
                # Ignore DELETED servers
                if server.status == 'DELETED':
                    continue

                if not rax_find_server_image(module, server, image,
                                             boot_volume):
                    continue

                # Ignore servers with non matching metadata
                if meta != server.metadata:
                    continue

                servers.append(server)

            # Build a list of server IDs to delete
            instance_ids = []
            for server in servers:
                if len(instance_ids) < count:
                    instance_ids.append(server.id)
                else:
                    break

        if not instance_ids:
            # No server IDs were matched for deletion, or no IDs were
            # explicitly provided, just exit and don't do anything
            module.exit_json(changed=False, action=None, instances=[],
                             success=[], error=[], timeout=[],
                             instance_ids={'instances': [],
                                           'success': [], 'error': [],
                                           'timeout': []})

        delete(module, instance_ids=instance_ids, wait=wait,
               wait_timeout=wait_timeout)
def cloudservers(module, state=None, name=None, flavor=None, image=None,
                 meta=None, key_name=None, files=None, wait=True,
                 wait_timeout=300, disk_config=None, count=1, group=None,
                 instance_ids=None, exact_count=False, networks=None,
                 count_offset=0, auto_increment=False, extra_create_args=None,
                 user_data=None, config_drive=False, boot_from_volume=False,
                 boot_volume=None, boot_volume_size=None,
                 boot_volume_terminate=False):
    """Reconcile Rackspace cloud servers with the requested state.

    For ``state='present'`` servers are created up to *count* (or to an exact
    count within a metadata *group*, with optional printf-style name
    auto-increment); otherwise matched servers are deleted.  Always exits via
    the ``create``/``delete`` helpers or ``module.exit_json``/``fail_json``.
    """
    meta = {} if meta is None else meta
    files = {} if files is None else files
    # Remember whether IDs were explicitly given before normalizing; the
    # deletion logic below needs this distinction.
    explicit_instance_ids = instance_ids is not None
    instance_ids = [] if instance_ids is None else instance_ids
    networks = [] if networks is None else networks
    extra_create_args = {} if extra_create_args is None else extra_create_args

    cs = pyrax.cloudservers
    cnw = pyrax.cloud_networks
    if not cnw:
        module.fail_json(msg='Failed to instantiate client. This '
                             'typically indicates an invalid region or an '
                             'incorrectly capitalized region name.')

    # BUGFIX: after normalization instance_ids can never be None, so the
    # original 'instance_ids is None' test was dead and state=absent without
    # explicit IDs bypassed this validation.
    if state == 'present' or (state == 'absent' and not explicit_instance_ids):
        if not boot_from_volume and not boot_volume and not image:
            module.fail_json(msg='image is required for the "rax" module')

        for arg, value in dict(name=name, flavor=flavor).items():
            if not value:
                module.fail_json(msg='%s is required for the "rax" module' %
                                     arg)

        if boot_from_volume and not image and not boot_volume:
            module.fail_json(msg='image or boot_volume are required for the '
                                 '"rax" with boot_from_volume')

        if boot_from_volume and image and not boot_volume_size:
            module.fail_json(msg='boot_volume_size is required for the "rax" '
                                 'module with boot_from_volume and image')

        if boot_from_volume and image and boot_volume:
            image = None

    servers = []

    # Add the group meta key
    if group and 'group' not in meta:
        meta['group'] = group
    elif 'group' in meta and group is None:
        group = meta['group']

    # Normalize and ensure all metadata values are strings
    for k, v in meta.items():
        if isinstance(v, list):
            meta[k] = ','.join(['%s' % i for i in v])
        elif isinstance(v, dict):
            meta[k] = json.dumps(v)
        elif not isinstance(v, string_types):
            meta[k] = '%s' % v

    # When using state=absent with group, the absent block won't match the
    # names properly. Use the exact_count functionality to decrease the count
    # to the desired level
    was_absent = False
    if group is not None and state == 'absent':
        exact_count = True
        state = 'present'
        was_absent = True

    if image:
        image = rax_find_image(module, pyrax, image)

    nics = []
    if networks:
        for network in networks:
            nics.extend(rax_find_network(module, pyrax, network))

    # act on the state
    if state == 'present':
        # Idempotent ensurance of a specific count of servers
        if exact_count is not False:
            # See if we can find servers that match our options
            if group is None:
                module.fail_json(msg='"group" must be provided when using '
                                     '"exact_count"')

            if auto_increment:
                numbers = set()

                # See if the name is a printf like string, if not append
                # %d to the end
                try:
                    name % 0
                except TypeError as e:
                    # Portable across Python 2/3; e.message is py2-only
                    if str(e).startswith('not all'):
                        name = '%s%%d' % name
                    else:
                        module.fail_json(msg='%s' % e)

                # regex pattern to match printf formatting
                pattern = re.sub(r'%\d*[sd]', r'(\d+)', name)
                for server in cs.servers.list():
                    # Ignore DELETED servers
                    if server.status == 'DELETED':
                        continue
                    if server.metadata.get('group') == group:
                        servers.append(server)
                    match = re.search(pattern, server.name)
                    if match:
                        number = int(match.group(1))
                        numbers.add(number)

                number_range = xrange(count_offset, count_offset + count)
                available_numbers = list(set(number_range)
                                         .difference(numbers))
            else:  # Not auto incrementing
                for server in cs.servers.list():
                    # Ignore DELETED servers
                    if server.status == 'DELETED':
                        continue
                    if server.metadata.get('group') == group:
                        servers.append(server)
                # available_numbers not needed here, we inspect auto_increment
                # again later

            # If state was absent but the count was changed,
            # assume we only wanted to remove that number of instances
            if was_absent:
                diff = len(servers) - count
                if diff < 0:
                    count = 0
                else:
                    count = diff

            if len(servers) > count:
                # We have more servers than we need, set state='absent'
                # and delete the extras, this should delete the oldest
                state = 'absent'
                kept = servers[:count]
                del servers[:count]
                instance_ids = []
                for server in servers:
                    instance_ids.append(server.id)
                delete(module, instance_ids=instance_ids, wait=wait,
                       wait_timeout=wait_timeout, kept=kept)
            elif len(servers) < count:
                # we have fewer servers than we need
                if auto_increment:
                    # auto incrementing server numbers
                    names = []
                    name_slice = count - len(servers)
                    numbers_to_use = available_numbers[:name_slice]
                    for number in numbers_to_use:
                        names.append(name % number)
                else:
                    # We are not auto incrementing server numbers,
                    # create a list of 'name' that matches how many we need
                    names = [name] * (count - len(servers))
            else:
                # we have the right number of servers, just return info
                # about all of the matched servers
                instances = []
                instance_ids = []
                for server in servers:
                    instances.append(rax_to_dict(server, 'server'))
                    instance_ids.append(server.id)
                module.exit_json(changed=False, action=None,
                                 instances=instances,
                                 success=[], error=[], timeout=[],
                                 instance_ids={'instances': instance_ids,
                                               'success': [], 'error': [],
                                               'timeout': []})
        else:  # not called with exact_count=True
            if group is not None:
                if auto_increment:
                    # we are auto incrementing server numbers, but not with
                    # exact_count
                    numbers = set()

                    # See if the name is a printf like string, if not append
                    # %d to the end
                    try:
                        name % 0
                    except TypeError as e:
                        if str(e).startswith('not all'):
                            name = '%s%%d' % name
                        else:
                            module.fail_json(msg='%s' % e)

                    # regex pattern to match printf formatting
                    pattern = re.sub(r'%\d*[sd]', r'(\d+)', name)
                    for server in cs.servers.list():
                        # Ignore DELETED servers
                        if server.status == 'DELETED':
                            continue
                        if server.metadata.get('group') == group:
                            servers.append(server)
                        match = re.search(pattern, server.name)
                        if match:
                            number = int(match.group(1))
                            numbers.add(number)

                    number_range = xrange(count_offset,
                                          count_offset + count + len(numbers))
                    available_numbers = list(set(number_range)
                                             .difference(numbers))
                    names = []
                    numbers_to_use = available_numbers[:count]
                    for number in numbers_to_use:
                        names.append(name % number)
                else:
                    # Not auto incrementing
                    names = [name] * count
            else:  # No group was specified, and not using exact_count
                # Perform more simplistic matching
                search_opts = {
                    'name': '^%s$' % name,
                    'flavor': flavor
                }
                servers = []
                for server in cs.servers.list(search_opts=search_opts):
                    # Ignore DELETED servers
                    if server.status == 'DELETED':
                        continue

                    if not rax_find_server_image(module, server, image,
                                                 boot_volume):
                        continue

                    # Ignore servers with non matching metadata
                    if server.metadata != meta:
                        continue
                    servers.append(server)

                if len(servers) >= count:
                    # We have more servers than were requested, don't do
                    # anything. Not running with exact_count=True, so we
                    # assume more is OK
                    instances = []
                    for server in servers:
                        instances.append(rax_to_dict(server, 'server'))

                    instance_ids = [i['id'] for i in instances]
                    module.exit_json(changed=False, action=None,
                                     instances=instances,
                                     success=[], error=[], timeout=[],
                                     instance_ids={'instances': instance_ids,
                                                   'success': [], 'error': [],
                                                   'timeout': []})

                # We need more servers to reach out target, create names for
                # them, we aren't performing auto_increment here
                names = [name] * (count - len(servers))

        block_device_mapping_v2 = []
        if boot_from_volume:
            mapping = {
                'boot_index': '0',
                'delete_on_termination': boot_volume_terminate,
                'destination_type': 'volume',
            }
            if image:
                mapping.update({
                    'uuid': image,
                    'source_type': 'image',
                    'volume_size': boot_volume_size,
                })
                # The mapping now carries the image; clear it to avoid
                # passing it to create() as well
                image = None
            elif boot_volume:
                volume = rax_find_volume(module, pyrax, boot_volume)
                mapping.update({
                    'uuid': pyrax.utils.get_id(volume),
                    'source_type': 'volume',
                })
            block_device_mapping_v2.append(mapping)

        create(module, names=names, flavor=flavor, image=image,
               meta=meta, key_name=key_name, files=files, wait=wait,
               wait_timeout=wait_timeout, disk_config=disk_config,
               group=group, nics=nics,
               extra_create_args=extra_create_args, user_data=user_data,
               config_drive=config_drive, existing=servers,
               block_device_mapping_v2=block_device_mapping_v2)

    elif state == 'absent':
        # BUGFIX: trigger match-by-name when no explicit IDs were supplied
        # (the normalized list is never None).
        if not explicit_instance_ids:
            # We weren't given an explicit list of server IDs to delete
            # Let's match instead
            search_opts = {
                'name': '^%s$' % name,
                'flavor': flavor
            }
            for server in cs.servers.list(search_opts=search_opts):
                # Ignore DELETED servers
                if server.status == 'DELETED':
                    continue

                if not rax_find_server_image(module, server, image,
                                             boot_volume):
                    continue

                # Ignore servers with non matching metadata
                if meta != server.metadata:
                    continue

                servers.append(server)

            # Build a list of server IDs to delete
            instance_ids = []
            for server in servers:
                if len(instance_ids) < count:
                    instance_ids.append(server.id)
                else:
                    break

        if not instance_ids:
            # No server IDs were matched for deletion, or no IDs were
            # explicitly provided, just exit and don't do anything
            module.exit_json(changed=False, action=None, instances=[],
                             success=[], error=[], timeout=[],
                             instance_ids={'instances': [],
                                           'success': [], 'error': [],
                                           'timeout': []})

        delete(module, instance_ids=instance_ids, wait=wait,
               wait_timeout=wait_timeout)