def increase_quota(cluster, role, cpu_str, ram_str, disk_str):
  """usage: increase_quota cluster role cpu ram[unit] disk[unit]

  Increases the amount of production quota allocated to a user.
  """
  # Parse the requested deltas up front so malformed arguments fail early.
  cpu_delta = float(cpu_str)
  ram_delta_mb = parse_data(ram_str).as_(Data.MB)
  disk_delta_mb = parse_data(disk_str).as_(Data.MB)

  admin_client = make_admin_client_with_options(cluster)
  current_quota = admin_client.get_quota(role).result.getQuotaResult.quota
  details = ResourceManager.resource_details_from_quota(current_quota)
  formatted = '\n\t'.join(
      '%s\t%s%s' % (d.resource_type.display_name, d.value, d.resource_type.display_unit)
      for d in details)
  log.info('Current quota for %s:\n\t%s' % (role, formatted))

  def bumped(resource_type, delta):
    # Current quantity plus the requested delta, coerced to the resource's value type.
    return resource_type.value_type(
        delta + ResourceManager.quantity_of(details, resource_type))

  new_cpu = bumped(ResourceType.CPUS, cpu_delta)
  new_ram = bumped(ResourceType.RAM_MB, ram_delta_mb)
  new_disk = bumped(ResourceType.DISK_MB, disk_delta_mb)
  log.info('Attempting to update quota for %s to\n\tCPU\t%s\n\tRAM\t%s MB\n\tDisk\t%s MB'
           % (role, new_cpu, new_ram, new_disk))
  check_and_log_response(admin_client.set_quota(role, new_cpu, new_ram, new_disk))
def increase_quota(cluster, role, cpu_str, ram_str, disk_str):
  """usage: increase_quota cluster role cpu ram[unit] disk[unit]

  Increases the amount of production quota allocated to a user.
  """
  # Requested additions, normalized to the scheduler's units (MB for data sizes).
  additional_cpu = float(cpu_str)
  additional_ram_mb = parse_data(ram_str).as_(Data.MB)
  additional_disk_mb = parse_data(disk_str).as_(Data.MB)
  client = make_admin_client_with_options(cluster)
  quota_resp = client.get_quota(role)
  existing = ResourceManager.resource_details_from_quota(
      quota_resp.result.getQuotaResult.quota)
  # Render one "<name>\t<value><unit>" row per resource for the log.
  rows = []
  for detail in existing:
    rows.append('%s\t%s%s' % (detail.resource_type.display_name,
                              detail.value,
                              detail.resource_type.display_unit))
  log.info('Current quota for %s:\n\t%s' % (role, '\n\t'.join(rows)))
  new_cpu = ResourceType.CPUS.value_type(
      additional_cpu + ResourceManager.quantity_of(existing, ResourceType.CPUS))
  new_ram = ResourceType.RAM_MB.value_type(
      additional_ram_mb + ResourceManager.quantity_of(existing, ResourceType.RAM_MB))
  new_disk = ResourceType.DISK_MB.value_type(
      additional_disk_mb + ResourceManager.quantity_of(existing, ResourceType.DISK_MB))
  log.info('Attempting to update quota for %s to\n\tCPU\t%s\n\tRAM\t%s MB\n\tDisk\t%s MB'
           % (role, new_cpu, new_ram, new_disk))
  check_and_log_response(client.set_quota(role, new_cpu, new_ram, new_disk))
def increase_quota(cluster, role, cpu_str, ram_str, disk_str):
  """usage: increase_quota cluster role cpu ram[unit] disk[unit]

  Increases the amount of production quota allocated to a user.
  """
  cpu_delta = float(cpu_str)
  ram_delta = parse_data(ram_str)
  disk_delta = parse_data(disk_str)
  client = make_admin_client(cluster)
  # Fetch the role's current quota so the deltas can be added to it.
  quota = client.get_quota(role).result.getQuotaResult.quota
  log.info('Current quota for %s:\n\tCPU\t%s\n\tRAM\t%s MB\n\tDisk\t%s MB'
           % (role, quota.numCpus, quota.ramMb, quota.diskMb))
  updated_cpu = float(cpu_delta + quota.numCpus)
  updated_ram = int((ram_delta + Amount(quota.ramMb, Data.MB)).as_(Data.MB))
  updated_disk = int((disk_delta + Amount(quota.diskMb, Data.MB)).as_(Data.MB))
  log.info('Attempting to update quota for %s to\n\tCPU\t%s\n\tRAM\t%s MB\n\tDisk\t%s MB'
           % (role, updated_cpu, updated_ram, updated_disk))
  check_and_log_response(client.set_quota(role, updated_cpu, updated_ram, updated_disk))
def increase_quota(cluster, role, cpu_str, ram_str, disk_str):
  """usage: increase_quota cluster role cpu ram[unit] disk[unit]

  Increases the amount of production quota allocated to a user.
  """
  cpu_delta = float(cpu_str)
  ram_delta = parse_data(ram_str)
  disk_delta = parse_data(disk_str)
  options = app.get_options()
  client = AuroraClientAPI(CLUSTERS[cluster], options.verbosity == "verbose")
  # Current allocation; the requested amounts are added on top of it.
  quota = client.get_quota(role).result.getQuotaResult.quota
  log.info(
      "Current quota for %s:\n\tCPU\t%s\n\tRAM\t%s MB\n\tDisk\t%s MB"
      % (role, quota.numCpus, quota.ramMb, quota.diskMb)
  )
  total_cpu = cpu_delta + quota.numCpus
  total_ram = ram_delta + Amount(quota.ramMb, Data.MB)
  total_disk = disk_delta + Amount(quota.diskMb, Data.MB)
  log.info(
      "Attempting to update quota for %s to\n\tCPU\t%s\n\tRAM\t%s MB\n\tDisk\t%s MB"
      % (role, total_cpu, total_ram.as_(Data.MB), total_disk.as_(Data.MB))
  )
  check_and_log_response(
      client.set_quota(role, total_cpu, total_ram.as_(Data.MB), total_disk.as_(Data.MB))
  )
def increase_quota(cluster, role, cpu_str, ram_str, disk_str):
  """usage: increase_quota cluster role cpu ram[unit] disk[unit]

  Increases the amount of production quota allocated to a user.
  """
  requested_cpu = float(cpu_str)
  requested_ram = parse_data(ram_str)
  requested_disk = parse_data(disk_str)
  options = app.get_options()
  api = AuroraClientAPI(CLUSTERS[cluster], options.verbosity == 'verbose')
  get_resp = api.get_quota(role)
  quota = get_resp.result.getQuotaResult.quota
  log.info('Current quota for %s:\n\tCPU\t%s\n\tRAM\t%s MB\n\tDisk\t%s MB'
           % (role, quota.numCpus, quota.ramMb, quota.diskMb))
  # New totals = current quota plus the requested additions.
  grown_cpu = requested_cpu + quota.numCpus
  grown_ram = requested_ram + Amount(quota.ramMb, Data.MB)
  grown_disk = requested_disk + Amount(quota.diskMb, Data.MB)
  log.info(
      'Attempting to update quota for %s to\n\tCPU\t%s\n\tRAM\t%s MB\n\tDisk\t%s MB'
      % (role, grown_cpu, grown_ram.as_(Data.MB), grown_disk.as_(Data.MB)))
  set_resp = api.set_quota(role, grown_cpu, grown_ram.as_(Data.MB), grown_disk.as_(Data.MB))
  check_and_log_response(set_resp)
def gc(args, options):
  """Garbage collect task(s) and task metadata.

  Usage: thermos gc [options] [task_id1 task_id2 ...]

  If tasks specified, restrict garbage collection to only those tasks,
  otherwise all tasks are considered. The optional constraints are still
  honored.
  """
  # args: task-id patterns from the command line (may be empty).
  # options: parsed CLI options; this function reads root, max_age, max_space,
  #   max_tasks, keep_data/keep_logs/keep_metadata, force and dryrun.
  print('Analyzing root at %s' % options.root)
  # Translate the optional limits into keyword options for the policy.
  gc_options = {}
  if options.max_age is not None:
    gc_options['max_age'] = parse_time(options.max_age)
  if options.max_space is not None:
    gc_options['max_space'] = parse_data(options.max_space)
  if options.max_tasks is not None:
    gc_options['max_tasks'] = int(options.max_tasks)
  # NOTE(review): only metadata/logs inclusion is forwarded here; data deletion
  # is decided per-task in the loop below via options.keep_data.
  gc_options.update(include_metadata=not options.keep_metadata,
                    include_logs=not options.keep_logs,
                    verbose=True,
                    logger=print)
  if args:
    # Explicit task ids: restrict collection to finished tasks matching them.
    gc_tasks = list(tasks_from_re(args, state='finished'))
  else:
    # No ids given: let the policy choose candidates across detected roots.
    print('No task ids specified, using default collector.')
    gc_tasks = [(task.checkpoint_root, task.task_id)
                for task in GarbageCollectionPolicy(get_path_detector(), **gc_options).run()]

  if not gc_tasks:
    print('No tasks to garbage collect. Exiting')
    return

  def maybe(function, *args):
    # Run the destructive action, or only describe it under --dryrun.
    if options.dryrun:
      print(' would run %s%r' % (function.__name__, args))
    else:
      function(*args)

  value = 'y'
  if not options.force:
    # Interactive confirmation; an empty answer defaults to 'N' (abort).
    value = raw_input("Continue [y/N]? ") or 'N'
  if value.lower() == 'y':
    print('Running gc...')
    for checkpoint_root, task_id in gc_tasks:
      # One collector per (root, task) pair; erasure honors the keep_* flags.
      tgc = TaskGarbageCollector(checkpoint_root, task_id)
      print(' Task %s ' % task_id, end='')
      print('data (%s) ' % ('keeping' if options.keep_data else 'deleting'), end='')
      print('logs (%s) ' % ('keeping' if options.keep_logs else 'deleting'), end='')
      print('metadata (%s) ' % ('keeping' if options.keep_metadata else 'deleting'))
      if not options.keep_data:
        maybe(tgc.erase_data)
      if not options.keep_logs:
        maybe(tgc.erase_logs)
      if not options.keep_metadata:
        maybe(tgc.erase_metadata)
    print('done.')
  else:
    print('Cancelling gc.')
def parse_size(size):
  """Return the resources specified in 'size' as a dictionary."""
  if not size:
    # No spec given: fall back to the configured task defaults.
    return dict(cpus=DEFAULT_TASK_CPUS, mem=DEFAULT_TASK_MEM, disk=DEFAULT_TASK_DISK)
  # TODO(jyx): Simplify this using T-shirt sizing
  # (https://github.com/twitter/mysos/issues/14).
  try:
    decoded = json.loads(size)
    return {
        'cpus': float(decoded['cpus']),
        'mem': parse_data(decoded['mem']),
        'disk': parse_data(decoded['disk']),
    }
  except (TypeError, KeyError, ValueError, InvalidData):
    raise ValueError("'size' should be a JSON dictionary with keys 'cpus', 'mem' and 'disk'")
def set_quota(cluster, role, cpu_str, ram, disk):
  """usage: set_quota cluster role cpu ram[MGT] disk[MGT]

  Alters the amount of production quota allocated to a user.
  """
  # First pass: parse the data sizes; die() reports unparseable units.
  try:
    ram_amount = parse_data(ram).as_(Data.MB)
    disk_amount = parse_data(disk).as_(Data.MB)
  except ValueError as e:
    die(str(e))
  # Second pass: coerce to the numeric types the scheduler API expects.
  try:
    cpu = float(cpu_str)
    ram_mb = int(ram_amount)
    disk_mb = int(disk_amount)
  except ValueError as e:
    die(str(e))
  check_and_log_response(make_admin_client(cluster).set_quota(role, cpu, ram_mb, disk_mb))
def parse_size(size):
  """Return the resources specified in 'size' as a dictionary."""
  if not size:
    resources = dict(cpus=DEFAULT_TASK_CPUS, mem=DEFAULT_TASK_MEM, disk=DEFAULT_TASK_DISK)
  else:
    # TODO(jyx): Simplify this using T-shirt sizing
    # (https://github.com/twitter/mysos/issues/14).
    try:
      decoded = json.loads(size)
      cpus = float(decoded['cpus'])
      mem = parse_data(decoded['mem'])
      disk = parse_data(decoded['disk'])
    except (TypeError, KeyError, ValueError, InvalidData):
      raise ValueError(
          "'size' should be a JSON dictionary with keys 'cpus', 'mem' and 'disk'"
      )
    resources = dict(cpus=cpus, mem=mem, disk=disk)
  return resources
def set_quota(cluster, role, cpu_str, ram, disk):
  """usage: set_quota cluster role cpu ram[MGT] disk[MGT]

  Alters the amount of production quota allocated to a user.
  """
  # Parse sizes first; die() is invoked for unparseable unit strings.
  try:
    ram_in_mb = parse_data(ram).as_(Data.MB)
    disk_in_mb = parse_data(disk).as_(Data.MB)
  except ValueError as e:
    die(str(e))
  # Then coerce everything to the numeric types the API takes.
  try:
    cpu = float(cpu_str)
    ram_mb = int(ram_in_mb)
    disk_mb = int(disk_in_mb)
  except ValueError as e:
    die(str(e))
  options = app.get_options()
  api = AuroraClientAPI(CLUSTERS[cluster], options.verbosity)
  check_and_log_response(api.set_quota(role, cpu, ram_mb, disk_mb))
def set_quota(cluster, role, cpu_str, ram, disk):
  """usage: set_quota cluster role cpu ram[MGT] disk[MGT]

  Alters the amount of production quota allocated to a user.
  """
  try:
    # Normalize both data sizes to megabytes.
    megs_ram = parse_data(ram).as_(Data.MB)
    megs_disk = parse_data(disk).as_(Data.MB)
  except ValueError as e:
    die(str(e))
  try:
    # Convert to the plain float/int values the thrift call expects.
    cpu = float(cpu_str)
    ram_mb = int(megs_ram)
    disk_mb = int(megs_disk)
  except ValueError as e:
    die(str(e))
  options = app.get_options()
  resp = AuroraClientAPI(CLUSTERS[cluster], options.verbosity).set_quota(
      role, cpu, ram_mb, disk_mb)
  check_and_log_response(resp)
def increase_quota(cluster, role, cpu_str, ram_str, disk_str):
  """usage: increase_quota cluster role cpu ram[unit] disk[unit]

  Increases the amount of production quota allocated to a user.
  """
  # Deltas requested on the command line.
  delta_cpu = float(cpu_str)
  delta_ram = parse_data(ram_str)
  delta_disk = parse_data(disk_str)
  admin = make_admin_client(cluster)
  quota = admin.get_quota(role).result.getQuotaResult.quota
  log.info('Current quota for %s:\n\tCPU\t%s\n\tRAM\t%s MB\n\tDisk\t%s MB'
           % (role, quota.numCpus, quota.ramMb, quota.diskMb))
  # Targets are current values plus deltas, in the units the API expects.
  target_cpu = float(delta_cpu + quota.numCpus)
  target_ram_mb = int((delta_ram + Amount(quota.ramMb, Data.MB)).as_(Data.MB))
  target_disk_mb = int((delta_disk + Amount(quota.diskMb, Data.MB)).as_(Data.MB))
  log.info('Attempting to update quota for %s to\n\tCPU\t%s\n\tRAM\t%s MB\n\tDisk\t%s MB'
           % (role, target_cpu, target_ram_mb, target_disk_mb))
  check_and_log_response(admin.set_quota(role, target_cpu, target_ram_mb, target_disk_mb))
def gc(args, options):
  """Garbage collect task(s) and task metadata.

  Usage: thermos gc [options] [task_id1 task_id2 ...]

  If tasks specified, restrict garbage collection to only those tasks,
  otherwise all tasks are considered. The optional constraints are still
  honored.

  Options:
    --max_age=AGE             Max age in quasi-human readable form, e.g.
                              --max_age=2d5h, format *d*h*m*s [default: skip]
    --max_tasks=NUM           Max number of tasks to keep [default: skip]
    --max_space=SPACE         Max space to allow for tasks [default: skip]
    --[keep/delete-]metadata  Garbage collect metadata [default: keep]
    --[keep/delete-]logs      Garbage collect logs [default: keep]
    --[keep/delete-]data      Garbage collect data [default: keep]
                              WARNING: Do NOT do this if your sandbox is $HOME.
    --force                   Perform garbage collection without confirmation
                              [default: false]
    --dryrun                  Don't actually run garbage collection
                              [default: false]
  """
  # args: task-id patterns from the command line (may be empty).
  # options: parsed CLI options; this function reads root, max_age, max_space,
  #   max_tasks, keep_data/keep_logs/keep_metadata, force and dryrun.
  print('Analyzing root at %s' % options.root)
  # Translate the optional limits into keyword options for the collector.
  gc_options = {}
  if options.max_age is not None:
    gc_options['max_age'] = parse_time(options.max_age)
  if options.max_space is not None:
    gc_options['max_space'] = parse_data(options.max_space)
  if options.max_tasks is not None:
    gc_options['max_tasks'] = int(options.max_tasks)
  gc_options.update(include_data=not options.keep_data,
                    include_metadata=not options.keep_metadata,
                    include_logs=not options.keep_logs,
                    verbose=True, logger=print)
  tgc = TaskGarbageCollector(root=options.root)

  if args:
    # Explicit task ids: restrict collection to finished tasks matching them.
    gc_tasks = tasks_from_re(args, options.root, state='finished')
  else:
    # No ids given: let the default collector choose candidates.
    print('No task ids specified, using default collector.')
    gc_tasks = [task.task_id for task in DefaultCollector(tgc, **gc_options).run()]

  if not gc_tasks:
    print('No tasks to garbage collect. Exiting')
    return

  def maybe(function, *args):
    # Run the destructive action, or only describe it under --dryrun.
    if options.dryrun:
      print(' would run %s%r' % (function.__name__, args))
    else:
      function(*args)

  value = 'y'
  if not options.force:
    # Interactive confirmation; an empty answer defaults to 'N' (abort).
    value = raw_input("Continue [y/N]? ") or 'N'
  if value.lower() == 'y':
    print('Running gc...')
    # NOTE(review): a second collector is built here even though one exists
    # above — appears redundant but is preserved as-is; confirm before removing.
    tgc = TaskGarbageCollector(root=options.root)
    for task in gc_tasks:
      print(' Task %s ' % task, end='')
      print('data (%s) ' % ('keeping' if options.keep_data else 'deleting'), end='')
      print('logs (%s) ' % ('keeping' if options.keep_logs else 'deleting'), end='')
      print('metadata (%s) ' % ('keeping' if options.keep_metadata else 'deleting'))
      if not options.keep_data:
        maybe(tgc.erase_data, task)
      if not options.keep_logs:
        maybe(tgc.erase_logs, task)
      if not options.keep_metadata:
        maybe(tgc.erase_metadata, task)
    print('done.')
  else:
    print('Cancelling gc.')