def _setup_userdata(config, pool):
    """Compile the pool's userdata template with its macros expanded.

    Internal macros EC2SPOTMANAGER_POOLID and EC2SPOTMANAGER_CYCLETIME are
    injected on top of the configured macros before tag expansion.

    Raises:
        Exception: if UserData.handle_tags yields an empty/falsy result.
    """
    # Work on a copy so the config's macro dict is never mutated.
    macros = dict(config.userdata_macros)
    macros["EC2SPOTMANAGER_POOLID"] = str(pool.id)
    macros["EC2SPOTMANAGER_CYCLETIME"] = str(config.cycle_interval)

    compiled = UserData.handle_tags(config.userdata.decode('utf-8'), macros)
    if compiled:
        return compiled

    logger.error("[Pool %d] Failed to compile userdata.", pool.id)
    raise Exception("Configuration error: Failed to compile userdata")
def test_macro_list_docker():
    """Docker-style macro lists expand to the expected userdata."""
    rendered = UserData.handle_tags(userdata_test_macro_docker, test_macros)
    assert rendered == userdata_test_macro_docker_expected
def test_macro_list_export():
    """Export-style macro lists expand to the expected userdata."""
    rendered = UserData.handle_tags(userdata_test_macro_export, test_macros)
    assert rendered == userdata_test_macro_export_expected
def test_macro_defaults():
    """Macros with default values fall back correctly during expansion."""
    rendered = UserData.handle_tags(userdata_test_macros_with_defaults, test_macros)
    assert rendered == userdata_test_macros_with_defaults_expected
def test_macro_replacement():
    """Plain macro placeholders are replaced with their configured values."""
    rendered = UserData.handle_tags(userdata_test_macros, test_macros)
    assert rendered == userdata_test_macros_expected
def _start_pool_instances(pool, config, count=1):
    """ Start an instance with the given configuration """
    # NOTE: `count` arrives as a number of CORES and is converted to a number
    # of instances below before any provider call is made.
    from .models import Instance, PoolStatusEntry, POOL_STATUS_ENTRY_TYPE
    # Redis is used only as an image-ID cache (24h TTL, see below).
    cache = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB)
    try:
        # Figure out where to put our instances
        region, zone, instance_type, rejected_prices = _determine_best_location(config, count)

        # Existing "price-too-low" status entries for this pool; cleared once
        # a region is found again, created when none is affordable.
        priceLowEntries = PoolStatusEntry.objects.filter(pool=pool, type=POOL_STATUS_ENTRY_TYPE['price-too-low'])

        if not region:
            logger.warning("[Pool %d] No allowed region was cheap enough to spawn instances.", pool.id)

            if not priceLowEntries:
                # NOTE(review): message reads "regions was" — grammar mismatch
                # with the log line above; left as-is (runtime string).
                msg = "No allowed regions was cheap enough to spawn instances."
                # NOTE(review): this loop variable shadows the outer `zone`,
                # which is harmless only because we return right after.
                for zone in rejected_prices:
                    msg += "\n%s at %s" % (zone, rejected_prices[zone])
                _update_pool_status(pool, 'price-too-low', msg)
            return
        elif priceLowEntries:
            # A usable region exists again, so the stale status entries go away.
            priceLowEntries.delete()

        cloud_provider = CloudProvider.get_instance(PROVIDERS[0])  # TODO: support multiple providers
        image_name = cloud_provider.get_image_name(config)
        cores_per_instance = cloud_provider.get_cores_per_instance()

        # convert count from cores to instances
        #
        # if we have chosen the smallest possible instance that will put us over the requested core count,
        # we will only be spawning 1 instance
        #
        # otherwise there may be a remainder if this is not an even division. let that be handled in the next tick
        # so that the next smallest instance will be considered
        #
        # eg. need 12 cores, and allow instances sizes of 4 and 8 cores,
        #     8-core instance costs $0.24 ($0.03/core)
        #     4-core instance costs $0.16 ($0.04/core)
        #
        # -> we will only request 1x 8-core instance this time around, leaving the required count at 4
        # -> next time around, we will request 1x 4-core instance
        count = max(1, count // cores_per_instance[instance_type])

        # setup userdata
        userdata = config.userdata.decode('utf-8')

        # Copy the userdata_macros and populate with internal variables
        userdata_macros = dict(config.userdata_macros)
        userdata_macros["EC2SPOTMANAGER_POOLID"] = str(pool.id)
        userdata_macros["EC2SPOTMANAGER_CYCLETIME"] = str(config.cycle_interval)

        userdata = UserData.handle_tags(userdata, userdata_macros)
        if not userdata:
            # Unlike _setup_userdata (which raises), this path records a
            # "config-error" pool status and bails out quietly.
            logger.error("[Pool %d] Failed to compile userdata.", pool.id)
            _update_pool_status(pool, "config-error", "Configuration error: Failed to compile userdata")
            return

        # Image lookups are cached per provider/region/name for 24 hours.
        image_key = "%s:image:%s:%s" % (cloud_provider.get_name(), region, image_name)
        image = cache.get(image_key)

        if image is None:
            image = cloud_provider.get_image(region, config)
            cache.set(image_key, image, ex=24 * 3600)

        requested_instances = cloud_provider.start_instances(config, region, zone, userdata, image, instance_type, count)

        # Record each requested instance in the DB in "requested" state;
        # `size` stores the instance's core count, matching the pool's
        # core-based accounting.
        for requested_instance in requested_instances:
            instance = Instance()
            instance.instance_id = requested_instance
            instance.region = region
            instance.zone = zone
            instance.status_code = INSTANCE_STATE["requested"]
            instance.pool = pool
            instance.size = cores_per_instance[instance_type]
            instance.save()

    except CloudProviderError as err:
        # Provider-classified failures carry their own status type.
        _update_pool_status(pool, err.TYPE, err.message)
    except Exception as msg:
        # Anything else is surfaced on the pool as "unclassified".
        _update_pool_status(pool, 'unclassified', str(msg))
def main(cls, args, settings=None, userdata=None):  # pylint: disable=too-many-branches,too-many-statements
    """CLI entry point for the EC2/boto provider.

    Parses the image definitions file, injects the compiled userdata, then
    dispatches on the requested action flags (create/stop/terminate/status/
    run). Returns 0 on success, 1 on any error.
    """
    # Normalize pair-style CLI arguments into dicts.
    # NOTE(review): image_args falls back to {} while only/tags fall back to
    # '' — presumably both are accepted by convert_pair_to_dict; confirm.
    args.only = UserData.convert_pair_to_dict(args.only or '')
    args.tags = UserData.convert_pair_to_dict(args.tags or '')
    args.image_args = UserData.convert_str_to_int(UserData.convert_pair_to_dict(args.image_args or {}))
    logger.info('Using image definition "%s" from %s', Focus.info(args.image_name), Focus.info(args.images.name))
    try:
        images = json.loads(args.images.read())
    except ValueError as msg:
        logger.error('Unable to parse %s: %s', args.images.name, msg)
        return 1
    # Userdata is mandatory; without it there is nothing to launch with.
    if not userdata:
        return 1
    images[args.image_name]['user_data'] = userdata.encode('utf-8')
    if args.image_args:
        logger.info('Setting custom image parameters for upcoming instances: %r ', args.image_args)
        images[args.image_name].update(args.image_args)
    logger.info('Using Boto configuration profile "%s"', Focus.info(args.profile))
    if args.zone:
        # A requested zone applies to every image definition.
        for image_name in images:
            images[image_name]['placement'] = args.zone
    cluster = EC2Manager(images)
    try:
        cluster.connect(profile_name=args.profile, region=args.region)
    except EC2ManagerException as msg:
        logger.error(msg)
        return 1
    # Action flags below are not mutually exclusive; each runs if set and
    # any EC2 API failure aborts with exit code 1.
    if args.create_on_demand:
        try:
            cluster.create_on_demand(args.image_name, args.tags, args.root_device_type, args.ebs_size,
                                     args.ebs_volume_type, args.ebs_volume_delete_on_termination)
        except boto.exception.EC2ResponseError as msg:
            logger.error(msg)
            return 1
    if args.create_spot:
        try:
            cluster.create_spot(args.max_spot_price, args.image_name, args.tags, args.root_device_type,
                                args.ebs_size, args.ebs_volume_type, args.ebs_volume_delete_on_termination)
        except boto.exception.EC2ResponseError as msg:
            logger.error(msg)
            return 1
    if args.stop:
        try:
            # int(args.stop) — presumably a count of instances to stop; verify.
            cluster.stop(cluster.find(filters=args.only), int(args.stop))
        except boto.exception.EC2ResponseError as msg:
            logger.error(msg)
            return 1
    if args.terminate:
        try:
            cluster.terminate(cluster.find(filters=args.only), int(args.terminate))
        except boto.exception.EC2ResponseError as msg:
            logger.error(msg)
            return 1
    if args.status:
        try:
            for i in cluster.find(filters=args.only):
                logger.info('%s is %s at %s - tags: %s', i.id, i.state, i.ip_address, i.tags)
        except boto.exception.EC2ResponseError as msg:
            logger.error(msg)
            return 1
    if args.run:
        # SSH credentials come from the settings mapping; each missing piece
        # is a hard error.
        ssh = settings.get('SSH')
        if not ssh:
            logger.error('No SSH settings defined in %s', args.settings.name)
            return 1
        identity = ssh.get('identity')
        if not identity:
            logger.error('Key for SSH is not defined.')
            return 1
        identity = os.path.expanduser(identity)
        username = ssh.get('username')
        if not username:
            logger.error('User for SSH is not defined.')
            return 1
        logger.info('Bucketing available instances.')
        hosts = []
        try:
            for host in cluster.find(filters=args.only):
                hosts.append(host)
        except boto.exception.EC2ResponseError as msg:
            logger.error(msg)
            return 1
        # NOTE(review): only logs the count here — the actual remote command
        # execution is presumably elsewhere (or not visible in this chunk).
        logger.info('Executing remote commands on %d instances.', len(hosts))
    return 0
def main(cls, args, settings=None, userdata=None):  # pylint: disable=too-many-branches,too-many-statements
    """Main entry point of this module. """
    # CLI driver for the Packet provider: parse config, run the requested
    # list/create/operate actions, return 0 on success or 1 on any error.
    # Packet Configuration
    logger.info('Using Packet configuration from %s', Focus.info(args.conf.name))
    try:
        conf = json.loads(args.conf.read())
    except ValueError as msg:
        logger.error('Unable to parse %s: %s', args.conf.name, msg)
        return 1

    # Handle Tags
    if args.tags and (args.create_spot or args.create_demand):
        logger.info('Assigning the following tags to the instance: %r', args.tags)

    if args.only:
        try:
            args.only = UserData.parse_only_criterias(args.only)
        except ValueError:
            logger.error('-only requires format of: name=value')
            return 1
        # NOTE(review): uses the root `logging` module here while the rest of
        # the function uses `logger` — likely unintentional; left untouched.
        logging.info('Using filter %r to return only matching devices.', args.only)

    # Packet Manager
    try:
        cluster = PacketManager(conf)
    except PacketManagerException as msg:
        logger.error(msg)
        return 1

    # NOTE(review): assumes conf contains a 'projects' mapping; a missing key
    # would raise AttributeError on the chained .get — confirm upstream checks.
    project = cluster.conf.get('projects').get(args.project)

    # List Operations
    if args.list_projects:
        cluster.print_projects(cluster.list_projects())
    if args.list_plans:
        cluster.print_plans(cluster.list_plans())
    if args.list_spot_prices:
        cluster.print_spot_prices(cluster.list_spot_prices())
    if args.list_operating_systems:
        cluster.print_operating_systems(cluster.list_operating_systems())
    if args.list_facilities:
        cluster.print_facilities(cluster.list_facilities())
    if args.list_devices and args.project:
        cluster.print_devices(
            cluster.list_devices(project, conditions=args.only))

    if args.create_volume:
        # Expects exactly [plan, size, region, label].
        if len(args.create_volume) < 4:
            logger.error(
                'Not enough arguments for creating a volume storage.')
            return 1
        [plan, size, region, label] = args.create_volume
        try:
            cluster.create_volume(project, plan, size, region, label)
        except PacketManagerException as msg:
            logger.error(msg)
            return 1

    # Device Pre-Checks
    if (args.create_spot or args.create_demand) \
            and (args.region and args.plan):
        logging.info('Validating requested remote capacities ...')
        try:
            status = cluster.validate_capacity(
                [[args.region, args.plan, str(args.count)]])
            if status:
                logger.info('Requested capacities are available.')
        except PacketManagerException as msg:
            logger.error(msg)
            return 1

    # Device Creation
    if args.create_spot \
            and args.region \
            and args.plan \
            and args.max_spot_price \
            and args.os:
        try:
            devices = cluster.create_spot(
                project_id=project,
                facility=args.region,
                plan=args.plan,
                operating_system=args.os,
                spot_price_max=args.max_spot_price,
                tags=args.tags,
                userdata=userdata,
                count=args.count)
            cluster.print_devices(devices)
        except PacketManagerException as msg:
            logger.error(msg)
            return 1

    if args.create_demand \
            and args.region \
            and args.plan \
            and args.os:
        try:
            devices = cluster.create_demand(project_id=project,
                                            facility=args.region,
                                            plan=args.plan,
                                            tags=args.tags,
                                            operating_system=args.os,
                                            userdata=userdata,
                                            count=args.count)
            cluster.print_devices(devices)
        except PacketManagerException as msg:
            logger.error(msg)
            return 1

    # Device Operations
    if args.reboot:
        try:
            cluster.reboot(
                cluster.list_devices(project, conditions=args.only))
        except PacketManagerException as msg:
            logger.error(msg)
            return 1
    if args.stop:
        try:
            cluster.stop(
                cluster.list_devices(project, conditions=args.only))
        except PacketManagerException as msg:
            logger.error(msg)
            return 1
    if args.terminate:
        try:
            cluster.terminate(
                cluster.list_devices(project, conditions=args.only))
        except PacketManagerException as msg:
            logger.error(msg)
            return 1

    return 0
def test_macro_list_docker():
    """handle_tags renders the docker macro-list fixture as expected."""
    got = UserData.handle_tags(userdata_test_macro_docker, test_macros)
    assert got == userdata_test_macro_docker_expected
def test_macro_list_export():
    """handle_tags renders the export macro-list fixture as expected."""
    got = UserData.handle_tags(userdata_test_macro_export, test_macros)
    assert got == userdata_test_macro_export_expected
def test_macro_defaults():
    """handle_tags honours macro default values in the fixture."""
    got = UserData.handle_tags(userdata_test_macros_with_defaults, test_macros)
    assert got == userdata_test_macros_with_defaults_expected
def main(cls, args, settings=None, userdata=None):  # pylint: disable=too-many-branches,too-many-statements
    """Main entry point of this module. """
    # CLI driver for the Packet provider: parse config, run the requested
    # list/create/operate actions, return 0 on success or 1 on any error.
    # Packet Configuration
    logger.info('Using Packet configuration from %s', Focus.info(args.conf.name))
    try:
        conf = json.loads(args.conf.read())
    except ValueError as msg:
        logger.error('Unable to parse %s: %s', args.conf.name, msg)
        return 1

    # Handle Tags
    if args.tags and (args.create_spot or args.create_demand):
        logger.info('Assigning the following tags to the instance: %r', args.tags)

    if args.only:
        try:
            args.only = UserData.parse_only_criterias(args.only)
        except ValueError:
            logger.error('-only requires format of: name=value')
            return 1
        # NOTE(review): uses the root `logging` module while the rest uses
        # `logger` — likely unintentional; left untouched.
        logging.info('Using filter %r to return only matching devices.', args.only)

    # Packet Manager
    try:
        cluster = PacketManager(conf)
    except PacketManagerException as msg:
        logger.error(msg)
        return 1

    # NOTE(review): assumes conf has a 'projects' mapping; a missing key would
    # raise AttributeError on the chained .get — confirm upstream validation.
    project = cluster.conf.get('projects').get(args.project)

    # List Operations
    if args.list_projects:
        cluster.print_projects(cluster.list_projects())
    if args.list_plans:
        cluster.print_plans(cluster.list_plans())
    if args.list_spot_prices:
        cluster.print_spot_prices(cluster.list_spot_prices())
    if args.list_operating_systems:
        cluster.print_operating_systems(cluster.list_operating_systems())
    if args.list_facilities:
        cluster.print_facilities(cluster.list_facilities())
    if args.list_devices and args.project:
        cluster.print_devices(cluster.list_devices(project, conditions=args.only))

    if args.create_volume:
        # Expects exactly [plan, size, region, label].
        if len(args.create_volume) < 4:
            logger.error('Not enough arguments for creating a volume storage.')
            return 1
        [plan, size, region, label] = args.create_volume
        try:
            cluster.create_volume(project, plan, size, region, label)
        except PacketManagerException as msg:
            logger.error(msg)
            return 1

    # Device Pre-Checks
    if (args.create_spot or args.create_demand) \
            and (args.region and args.plan):
        logging.info('Validating requested remote capacities ...')
        try:
            status = cluster.validate_capacity([
                [args.region, args.plan, str(args.count)]
            ])
            if status:
                logger.info('Requested capacities are available.')
        except PacketManagerException as msg:
            logger.error(msg)
            return 1

    # Device Creation
    if args.create_spot \
            and args.region \
            and args.plan \
            and args.max_spot_price \
            and args.os:
        try:
            devices = cluster.create_spot(project_id=project,
                                          facility=args.region,
                                          plan=args.plan,
                                          operating_system=args.os,
                                          spot_price_max=args.max_spot_price,
                                          tags=args.tags,
                                          userdata=userdata,
                                          count=args.count)
            cluster.print_devices(devices)
        except PacketManagerException as msg:
            logger.error(msg)
            return 1

    if args.create_demand \
            and args.region \
            and args.plan \
            and args.os:
        try:
            devices = cluster.create_demand(project_id=project,
                                            facility=args.region,
                                            plan=args.plan,
                                            tags=args.tags,
                                            operating_system=args.os,
                                            userdata=userdata,
                                            count=args.count)
            cluster.print_devices(devices)
        except PacketManagerException as msg:
            logger.error(msg)
            return 1

    # Device Operations
    if args.reboot:
        try:
            cluster.reboot(cluster.list_devices(project, conditions=args.only))
        except PacketManagerException as msg:
            logger.error(msg)
            return 1
    if args.stop:
        try:
            cluster.stop(cluster.list_devices(project, conditions=args.only))
        except PacketManagerException as msg:
            logger.error(msg)
            return 1
    if args.terminate:
        try:
            cluster.terminate(cluster.list_devices(project, conditions=args.only))
        except PacketManagerException as msg:
            logger.error(msg)
            return 1

    return 0
def main(cls, args, settings=None, userdata=None):  # pylint: disable=too-many-branches,too-many-statements
    """CLI entry point for the EC2/boto provider.

    Parses the image definitions file, injects the compiled userdata, then
    dispatches on the requested action flags (create/stop/terminate/status/
    run). Returns 0 on success, 1 on any error.
    """
    # Normalize pair-style CLI arguments into dicts.
    # NOTE(review): image_args falls back to {} while only/tags fall back to
    # '' — presumably both are accepted by convert_pair_to_dict; confirm.
    args.only = UserData.convert_pair_to_dict(args.only or '')
    args.tags = UserData.convert_pair_to_dict(args.tags or '')
    args.image_args = UserData.convert_str_to_int(
        UserData.convert_pair_to_dict(args.image_args or {}))
    logger.info('Using image definition "%s" from %s', Focus.info(args.image_name),
                Focus.info(args.images.name))
    try:
        images = json.loads(args.images.read())
    except ValueError as msg:
        logger.error('Unable to parse %s: %s', args.images.name, msg)
        return 1
    # Userdata is mandatory; without it there is nothing to launch with.
    if not userdata:
        return 1
    images[args.image_name]['user_data'] = userdata.encode('utf-8')
    if args.image_args:
        logger.info(
            'Setting custom image parameters for upcoming instances: %r ',
            args.image_args)
        images[args.image_name].update(args.image_args)
    logger.info('Using Boto configuration profile "%s"', Focus.info(args.profile))
    if args.zone:
        # A requested zone applies to every image definition.
        for image_name in images:
            images[image_name]['placement'] = args.zone
    cluster = EC2Manager(images)
    try:
        cluster.connect(profile_name=args.profile, region=args.region)
    except EC2ManagerException as msg:
        logger.error(msg)
        return 1
    # Action flags below are not mutually exclusive; each runs if set and
    # any EC2 API failure aborts with exit code 1.
    if args.create_on_demand:
        try:
            cluster.create_on_demand(args.image_name, args.tags, args.root_device_type,
                                     args.ebs_size, args.ebs_volume_type,
                                     args.ebs_volume_delete_on_termination)
        except boto.exception.EC2ResponseError as msg:
            logger.error(msg)
            return 1
    if args.create_spot:
        try:
            cluster.create_spot(args.max_spot_price, args.image_name, args.tags,
                                args.root_device_type, args.ebs_size,
                                args.ebs_volume_type,
                                args.ebs_volume_delete_on_termination)
        except boto.exception.EC2ResponseError as msg:
            logger.error(msg)
            return 1
    if args.stop:
        try:
            # int(args.stop) — presumably a count of instances to stop; verify.
            cluster.stop(cluster.find(filters=args.only), int(args.stop))
        except boto.exception.EC2ResponseError as msg:
            logger.error(msg)
            return 1
    if args.terminate:
        try:
            cluster.terminate(cluster.find(filters=args.only), int(args.terminate))
        except boto.exception.EC2ResponseError as msg:
            logger.error(msg)
            return 1
    if args.status:
        try:
            for i in cluster.find(filters=args.only):
                logger.info('%s is %s at %s - tags: %s', i.id, i.state, i.ip_address, i.tags)
        except boto.exception.EC2ResponseError as msg:
            logger.error(msg)
            return 1
    if args.run:
        # SSH credentials come from the settings mapping; each missing piece
        # is a hard error.
        ssh = settings.get('SSH')
        if not ssh:
            logger.error('No SSH settings defined in %s', args.settings.name)
            return 1
        identity = ssh.get('identity')
        if not identity:
            logger.error('Key for SSH is not defined.')
            return 1
        identity = os.path.expanduser(identity)
        username = ssh.get('username')
        if not username:
            logger.error('User for SSH is not defined.')
            return 1
        logger.info('Bucketing available instances.')
        hosts = []
        try:
            for host in cluster.find(filters=args.only):
                hosts.append(host)
        except boto.exception.EC2ResponseError as msg:
            logger.error(msg)
            return 1
        # NOTE(review): only logs the count here — the actual remote command
        # execution is presumably elsewhere (or not visible in this chunk).
        logger.info('Executing remote commands on %d instances.', len(hosts))
    return 0