def validate_locale_path_args(self):
    # Validate locale dir path argument
    if self.args.locale_path and not os.path.exists(self.args.locale_path):
        self.root_logger.error(
            "The given locale directory path does not exist: %s" %
            self.args.locale_path)
        raise CommandError('Error exit')
    if self.args.locale_path and not os.path.isdir(self.args.locale_path):
        self.root_logger.error(
            "The given locale directory path is not a directory")
        raise CommandError('Error exit')
    # Validate the argument required when kind is 'django'; a trick is
    # needed for its POT file
    if self.args.kind == 'django':
        if not self.args.django_default_locale:
            self.root_logger.error(
                "For the 'django' kind you have to give a default locale "
                "directory name (relative to 'locale_path') with "
                "'--django_default_locale'")
            raise CommandError('Error exit')
        default_locale_path = os.path.join(self.args.locale_path,
                                           self.args.django_default_locale)
        if not os.path.exists(default_locale_path) or not os.path.isdir(
                default_locale_path):
            self.root_logger.error(
                "The default locale path does not exist or is not a "
                "directory: %s" % default_locale_path)
            raise CommandError('Error exit')
def find_elem_list(tag, name_list, _all=False):
    """Find a list of Elements with:

    :param tag: Element tag to find
    :param name_list: names of the Elements to find
    :param _all: (Default value = False)

    The form is: <{tag}><name>text</name></{tag}>
    """
    if name_list is None:
        name_list = []
    root = conf
    res_elem_list = []
    if _all and len(name_list) > 0:
        raise CommandError("Cannot have both '-all' and a list of names")
    if not _all and len(name_list) == 0:
        raise CommandError("Must have either '-all' or a list of names")
    for name in name_list:
        res = root.find('./{}[name="{}"]'.format(tag, name))
        if res is None:
            raise CommandError("{} '{}' not found".format(tag, name))
        res_elem_list.append(res)
    if _all:
        res_elem_list = root.findall(tag)
    return res_elem_list
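# Usage sketch for find_elem_list (hypothetical data; in the real module
# `conf` is the parsed qdeploy.conf ElementTree root). With configuration
# like <conf><vm><name>web1</name></vm><vm><name>db1</name></vm></conf>:
#
#     find_elem_list("vm", ["web1", "db1"])      # -> the two named <vm> nodes
#     find_elem_list("vm", [], _all=True)        # -> every <vm> element
#     find_elem_list("vm", ["web1"], _all=True)  # -> CommandError (exclusive)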
def validate_eggs_args(self):
    # Validate eggs path argument
    if self.args.eggs and not os.path.exists(self.args.eggs):
        self.root_logger.error("The given eggs path does not exist")
        raise CommandError('Error exit')
    if self.args.eggs and not os.path.isdir(self.args.eggs):
        self.root_logger.error("The given eggs path is not a directory")
        raise CommandError('Error exit')
def runserver(args):
    """
    Launch the project watcher to automatically re-build known elements on
    changes
    """
    root_logger = init_logging(args.loglevel.upper(),
                               printout=not args.silent,
                               logfile=args.logfile)

    # Only load optimus stuff after the settings module name has been
    # retrieved
    os.environ['OPTIMUS_SETTINGS_MODULE'] = args.settings
    from optimus.conf import settings
    from optimus.utils import display_settings

    display_settings(settings, ('DEBUG', 'PROJECT_DIR', 'PUBLISH_DIR',
                                'STATIC_DIR', 'STATIC_URL'))

    # Parse the given hostname
    address, port = ("127.0.0.1", "80")
    _splits = args.hostname.split(':')
    if len(_splits) > 2:
        raise CommandError("Error: Invalid hostname format, too many ':'")
    elif len(_splits) == 2:
        address, port = _splits
        if not port or not address:
            raise CommandError("Error: Invalid hostname format, address or "
                               "port is empty")
    else:
        port = _splits[0]

    try:
        int(port)
    except ValueError:
        raise CommandError("Error: Invalid port given: {0}".format(port))

    if not os.path.exists(settings.PUBLISH_DIR):
        raise CommandError("Error: Publish directory does not exist yet, "
                           "you should build it before")

    # Run the server with the publish directory served with tools.staticdir
    print("Running HTTP server on address {address} with port {port}".format(
        address=address, port=port))

    cherrypy.config.update({
        'server.socket_host': address,
        'server.socket_port': int(port),
        'engine.autoreload_on': False,
    })
    conf = {
        '/': {
            'tools.staticdir.index': 'index.html',
            'tools.staticdir.on': True,
            'tools.staticdir.dir': settings.PUBLISH_DIR,
        },
    }
    cherrypy.quickstart(None, '/', config=conf)
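# Accepted hostname forms, as parsed above (a sketch; note that a single
# segment is treated as a port, never as a bare address):
#
#     "8080"          -> serve on 127.0.0.1:8080
#     "0.0.0.0:8080"  -> serve on 0.0.0.0:8080
#     "a:b:c"         -> CommandError, too many ':'
#     "localhost"     -> CommandError, "localhost" is not a valid port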
def pull(args):
    """
    Get the PO tarball

    TODO: need to use the 'kind' argument to request the right tarball for
    django or optimus (changing the name of catalog files)
    """
    interface = CliInterfaceBase(args)
    interface.open_config()

    interface.validate_authentication_args()
    interface.validate_slug_args()
    interface.validate_locale_path_args()

    interface.connect()

    # Pull the tarball
    try:
        project_id, project_slug = interface.con.pull(args.project_slug,
                                                      args.locale_path,
                                                      args.kind)
    except ProjectDoesNotExistException as e:
        interface.root_logger.error(e)
        raise CommandError('Error exit')
    else:
        interface.args.project_id, interface.args.project_slug = \
            project_id, project_slug

    interface.save_config()
    interface.close()
def run_in_container(a_cmd, _interactive=False, _detached=False):
    """Execute a system command, possibly inside the docker container.

    :param a_cmd: command to execute, as a string or an argument list
    :param _interactive: (Default value = False)
    :param _detached: (Default value = False)
    """
    if is_running_in_docker():
        opts = "-ti" if _interactive else "-t"
        container_name = get_container_name()
        if not container_name:
            raise CommandError(
                "No docker container name defined in qdeploy.conf")
        container_exec = ["docker", "exec", opts, container_name]
        if isinstance(a_cmd, str):
            a_cmd = shlex.split(a_cmd)
        cmd_to_execute = container_exec + a_cmd
    else:
        cmd_to_execute = a_cmd
    res = cmd(cmd_to_execute, _log=logger, _detached=_detached)
    if _detached:
        res.wait()
    res.print_on_error()
    return res
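# Usage sketch (hypothetical commands): string commands are tokenized with
# shlex.split before being prefixed with `docker exec`; list commands are
# passed through as-is:
#
#     run_in_container("make test")
#     run_in_container(["bash"], _interactive=True)  # adds the -i flag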
def lcrs_subprocess(command_args=""):
    """
    Spawns a subprocess with the lcrs_embedded command, maintaining coverage
    metrics if coverage is installed.
    """
    try:
        import coverage  # @UnusedImport # noqa
        interpreter = "coverage run -p"
    except ImportError:
        interpreter = sys.executable
    p = subprocess.Popen(
        shlex.split(interpreter + " -m lcrs_embedded {}".format(command_args)),
        stderr=subprocess.PIPE,
    )
    # Give the new process enough time to start, coverage is also started.
    # TODO: how can we replace this with a better check? 0.8 is because of
    # Travis slowness.
    time.sleep(0.8)
    p.send_signal(signal.SIGINT)
    stdout, stderr = p.communicate()
    if p.returncode > 0:
        raise CommandError(
            ("Non-zero returncode. Returncode was {}.\n\nstdout:\n{}"
             "\n\nstderr:\n{}").format(
                p.returncode,
                stdout,
                stderr,
            ))
    return p
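# Usage sketch in a test (hypothetical flag): the helper boots the daemon,
# sends SIGINT after 0.8s, and raises CommandError on a non-zero exit, so a
# bare call doubles as an assertion that startup and shutdown are clean:
#
#     def test_clean_shutdown():
#         lcrs_subprocess("--verbose")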
def validate_env_args(self):
    # Validate the environment kind
    if self.args.env not in ENVIRONMENT_KIND_KEYS:
        self.root_logger.error(
            "Invalid environment given '%s'. Valid choices are: %s",
            self.args.env, ', '.join(ENVIRONMENT_KIND_KEYS))
        raise CommandError('Error exit')
def validate_website_args(self):
    # Validate the arguments required to register
    if not self.args.name or not self.args.url or not self.args.env:
        self.root_logger.error(
            "'name', 'url' and 'env' are required arguments to register "
            "the current environment")
        raise CommandError('Error exit')
def control(args):
    """Control the stack: update the AutoScaleGroup constraints."""
    config, settings, sinfo = initialize_from_cli(args)
    stack = find_one_stack(args.stack_name, summary=False)
    yield format_stack_summary(stack) + '\n'
    asg = find_one_resource(stack, RES_TYPE_ASG)
    yield format_autoscale(asg)
    if args.min or args.max or args.desired:
        try:
            asg.min_size = int(args.min)
            asg.max_size = int(args.max)
            asg.desired_capacity = int(args.desired)
        # int(None) raises TypeError when one of the options is omitted
        except (TypeError, ValueError) as error:
            raise CommandError("Invalid value for ASG: %s" % error)
        yield "Change to:"
        yield format_autoscale(asg)
        warn_for_live(sinfo)
        confirm_action(args, default=True)
        asg.update()
        yield "Updated"
def validate_authentication_args(self):
    # Validate the arguments required to connect
    if not self.args.user or not self.args.password or not self.args.host:
        self.root_logger.error(
            "'user', 'password' and 'hostname' are required arguments to "
            "connect to the service")
        raise CommandError('Error exit')
def register(args):
    """
    Register the current environment
    """
    interface = CliInterfaceBase(args)
    interface.open_config()

    interface.validate_authentication_args()
    interface.validate_website_args()
    interface.validate_eggs_args()
    interface.validate_env_args()
    interface.validate_url_args()

    interface.connect()

    # Register the meta data
    register_args = [
        interface.args.name, interface.args.url, interface.args.env
    ]
    if interface.args.server:
        register_args.append(interface.args.server)
    try:
        interface.args.website_id, interface.args.environment_id = \
            interface.con.register(*register_args)
    except WebsitePostException as e:
        interface.root_logger.error(e)
        raise CommandError('Error exit')

    # Update the server host for saving in the config file
    interface.args.server = interface.con.environment_server

    # Register the egg list
    if interface.args.eggs:
        try:
            interface.con.register_eggs(interface.args.eggs)
        except WebsitePostException as e:
            interface.root_logger.error(e)
            raise CommandError('Error exit')

    interface.save_config()
    interface.close()
def do_stop_docker():
    """Stop the docker container by calling the .qdeploy/stop_docker.sh
    script.
    """
    os.chdir(QDEPLOY_RESOURCES_DIR)
    container_name = get_container_name()
    if not container_name:
        raise CommandError("No docker container name defined in qdeploy.conf")
    res = cmd("./stop_docker.sh {container}", container=container_name,
              _log=logger)
    res.print_on_error()
    print(res.out)
def connect(self):
    """
    Connect to the PO Project API service
    """
    # Open the client
    self.con = GestusClient(self.args.host,
                            (self.args.user, self.args.password))
    # Connect to the service
    try:
        self.con.connect()
    except (HTTPError, ConnectionError, InvalidSchema) as e:
        self.root_logger.error("%s: %s", type(e).__name__, e)
        raise CommandError('Error exit')
def delete_build_configuration_raw(id=None, name=None):
    to_delete_id = common.set_id(pnc_api.build_configs, id, name)
    # Ensure that this build configuration is not a dependency of any other
    # build configuration.
    # list_build_configurations is an insufficient check because eventually
    # there will be too many entities to check them all.
    # A better REST method for dependency checking is needed.
    for config in list_build_configurations_raw(page_size=1000000000):
        # dependency_ids may be None; guard before iterating
        dep_ids = [str(val) for val in (config.dependency_ids or [])]
        if str(to_delete_id) in dep_ids:
            raise CommandError(
                "BuildConfiguration ID {} is a dependency of "
                "BuildConfiguration {}.".format(to_delete_id, config.name))
    response = utils.checked_api_call(pnc_api.build_configs,
                                      'delete_specific', id=to_delete_id)
    if response:
        return response.content
def check(path, pre_releases=False, legacy_versions=False, verbose=False,
          packages=[], skip_packages=[], line=None, version=False):
    setup_logging(verbose)

    if version:
        logger.info(__version__)
        return

    if line is None and path is None:
        raise CommandError('at least one of path or line is required')

    total_updates = 0
    session = PipSession()
    finder = PackageFinder(find_links=[], index_urls=[], session=session)

    for requirement in get_requirements(path, line, session, finder):
        if ((packages and requirement.name not in packages)
                or (skip_packages and requirement.name in skip_packages)
                or requirement.editable):
            continue
        if not is_version_locked(requirement):
            logger.warning('Version of %s not locked', requirement.name)
            continue
        updates = get_updates(requirement=requirement,
                              legacy_versions=legacy_versions,
                              pre_releases=pre_releases,
                              index_urls=finder.index_urls)
        if not updates:
            continue
        logger.info('Updates for %s found: %s', requirement.name,
                    [str(update) for update in sorted(updates)])
        total_updates += len(updates)

    logger.info('%d updates found', total_updates)
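# Usage sketch (hypothetical file; `line` is assumed to take a requirement
# string, mirroring the CLI option it backs):
#
#     check("requirements.txt")                         # report every update
#     check(None, line="Django==1.11")                  # check a single pin
#     check("requirements.txt", skip_packages=["pip"])  # ignore some packages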
def delete(args):
    """Delete a stack."""
    config, settings, sinfo = initialize_from_cli(args)
    stack = find_one_stack(args.stack_name)
    yield format_stack_summary(stack)
    warn_for_live(sinfo)
    confirm_action(args, default=True)
    try:
        res = boto.connect_cloudformation().delete_stack(stack.stack_name)
    except BotoServerError as error:
        if error.error_message:
            raise CommandError("BotoServerError: " + error.error_message)
        else:
            raise error
    print("Result %s" % res)
def metrics(args):
    """Control the metrics collection activation."""
    stack = find_one_stack(args.stack_name, summary=False)
    yield format_stack_summary(stack) + '\n'
    asg = find_one_resource(stack, RES_TYPE_ASG)
    yield format_autoscale(asg)
    if args.enable and args.disable:
        raise CommandError("Options --enable and --disable are not compatible")
    elif args.enable:
        asg.connection.enable_metrics_collection(asg.name,
                                                 granularity='1Minute')
        yield "Updated"
    elif args.disable:
        asg.connection.disable_metrics_collection(asg.name)
        yield "Updated"
    else:
        yield "Metrics collection:"
        for metric in asg.enabled_metrics:
            yield " %s" % metric
def create(args):
    """Create a stack."""
    config, settings, sinfo = initialize_from_cli(args)

    # Read the template
    template_path = os.path.join(
        config.get("cfn", "templatedir"),
        args.template if args.template else sinfo['template'])
    template = cfntemplate.CfnTemplate(template_path)

    parameters = cfntemplate.CfnParameters(template, sinfo)
    tags = {
        'Name': args.stack_name,
        'Application': sinfo['Application'],
        'Environment': sinfo['Environment'],
        'Type': sinfo['Type'],
    }
    print("\nStack name: {args.stack_name}\n"
          "\nTemplate: {template!r}\n"
          "\nTags: {tags!r}\n"
          "\nParameters:\n{parameters!r}\n".format(args=args,
                                                   template=template,
                                                   parameters=parameters,
                                                   tags=tags))
    confirm_action(args, default=True)
    try:
        stackid = boto.connect_cloudformation().create_stack(
            args.stack_name,
            template_body=template.body,
            parameters=parameters,
            tags=tags,
            capabilities=['CAPABILITY_IAM'])
        print("StackId %s" % stackid)
    except BotoServerError as error:
        if error.error_message:
            raise CommandError("BotoServerError: " + error.error_message)
        else:
            raise error
def do_start_docker():
    """Start the docker container by calling the .qdeploy/start_docker.sh
    script.
    """
    mounts = ""
    use_x11 = "false"
    root = conf
    container_name = get_container_name()
    if not container_name:
        raise CommandError("No docker container name defined in qdeploy.conf")
    for m in root.iterfind("docker/mount"):
        # A mount without a ':' separator is mounted at the same path
        mount = m.text if ":" in m.text else m.text + ":" + m.text
        mounts += " -v " + mount
    x11_node = root.find("docker/x11")
    if x11_node is not None:
        use_x11 = x11_node.text
    res = cmd(["./start_docker.sh", container_name, use_x11, mounts],
              _log=logger, _cwd=QDEPLOY_RESOURCES_DIR)
    res.exit_on_error()
def update(args):
    """Update a stack."""
    config, settings, sinfo = initialize_from_cli(args)

    # Read the template
    template = cfntemplate.CfnTemplate(
        os.path.join(
            config.get("cfn", "templatedir"),
            args.template if args.template else sinfo['template']
        )
    )
    parameters = cfntemplate.CfnParameters(template, sinfo)
    print("\nStack name: {args.stack_name}\n"
          "\nTemplate: {template!r}\n"
          "\nParameters:\n"
          "{parameters!r}\n".format(args=args,
                                    template=template,
                                    parameters=parameters))
    warn_for_live(sinfo)
    confirm_action(args, default=True)
    try:
        stackid = boto.connect_cloudformation().update_stack(
            args.stack_name,
            template_body=template.body,
            parameters=parameters,
            capabilities=['CAPABILITY_IAM'])
        print("StackId %s" % stackid)
    except BotoServerError as error:
        if error.error_message:
            raise CommandError("BotoServerError: " + error.error_message)
        else:
            raise error
def start(args):
    """Start the stack: set the AutoScale control to configured values."""
    config, settings, sinfo = initialize_from_cli(args)
    stack = find_one_stack(args.stack_name, summary=False)
    # Yield (rather than print) so output goes through the same channel as
    # the rest of this generator
    yield format_stack_summary(stack)
    asg = find_one_resource(stack, RES_TYPE_ASG)
    yield format_autoscale(asg)
    try:
        asg.min_size = int(sinfo['AutoScaleMinSize'])
        asg.max_size = int(sinfo['AutoScaleMaxSize'])
        asg.desired_capacity = int(sinfo['AutoScaleDesiredCapacity'])
    except ValueError as error:
        raise CommandError("Invalid values in stack definition: %s" % error)
    yield "Change to:"
    yield format_autoscale(asg)
    warn_for_live(sinfo)
    confirm_action(args, default=True)
    asg.update()
    yield "Updated"
def push(args):
    """
    Send the current local POT file
    """
    interface = CliInterfaceBase(args)
    interface.open_config()

    interface.validate_authentication_args()
    interface.validate_slug_args()
    interface.validate_locale_path_args()

    interface.connect()

    # Push the POT
    try:
        interface.con.push(args.project_slug, args.locale_path, args.kind,
                           args.django_default_locale)
    except (ProjectDoesNotExistException, PotDoesNotExistException) as e:
        interface.root_logger.error(e)
        raise CommandError('Error exit')

    interface.save_config()
    interface.close()
def cmd_stop_vm(vm_names, stop_all=False, shutdown=False, reboot=False,
                group=None):
    """Stop and undefine one or several VMs.

    By default the VM is destroyed.
    """
    assert_conf()
    if shutdown and reboot:
        raise CommandError("Cannot have both '--shutdown' and '--reboot'")
    if shutdown:
        stop_mode = StopMode.SHUTDOWN
    elif reboot:
        stop_mode = StopMode.REBOOT
    else:
        stop_mode = StopMode.DESTROY
    if group is not None:
        vm_names = get_vm_group(group)
    vm_list = find_elem_list("vm", vm_names, stop_all)
    for vm in vm_list:
        do_stop_vm(vm, stop_mode)
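# Usage sketch mirroring the flag handling above:
#
#     cmd_stop_vm(["web1"])                  # destroy (the default)
#     cmd_stop_vm(["web1"], shutdown=True)   # clean shutdown instead
#     cmd_stop_vm([], stop_all=True)         # every <vm> in the config
#     cmd_stop_vm([], group="front")         # names come from get_vm_group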
def warn_for_live(sinfo):
    if sinfo['live'] and sinfo['Environment'] == 'production':
        if not confirm("WARNING: Updating a live stack! Are you sure? "):
            raise CommandError("Aborted")
def runserver(args):
    """
    Launch the project watcher to automatically re-build known elements on
    changes
    """
    root_logger = init_logging(args.loglevel.upper(),
                               printout=not args.silent,
                               logfile=args.logfile)
    raise CommandError("Error: Unable to import CherryPy, you should "
                       "install it with 'pip install cherrypy'")
def migrate_cfg(args):
    """Migrate the stack: re-instantiate all instances."""
    config, settings, sinfo = initialize_from_cli(args)
    stack = find_one_stack(args.stack_name, summary=False)
    yield format_stack_summary(stack)
    asg = find_one_resource(stack, RES_TYPE_ASG)
    yield format_autoscale(asg)

    orig_min = asg.min_size
    orig_max = asg.max_size
    orig_desired = asg.desired_capacity
    orig_term_pol = asg.termination_policies
    mig_min = orig_desired * 2
    mig_max = orig_desired * 2
    mig_desired = orig_desired * 2
    mig_term_pol = [u'OldestLaunchConfiguration', u'OldestInstance']

    if orig_desired != len(asg.instances):
        raise CommandError("The ASG is not stable (desired != instances)")
    for instance in asg.instances:
        if instance.health_status != 'Healthy':
            raise CommandError("The ASG is not stable (instance not healthy)")

    warn_for_live(sinfo)
    confirm_action(args, default=True)

    yield "\n <> Setting termination policy to %s" % mig_term_pol
    asg.termination_policies = mig_term_pol
    asg.update()

    yield "\n <> Growing the desired capacity from %s to %s" % (
        orig_desired, mig_desired)
    asg.min_size = mig_min
    asg.max_size = mig_max
    asg.desired_capacity = mig_desired
    asg.update()

    yield "\n <> Waiting for instances to stabilize..."
    while True:
        sleep(30)
        asg = find_one_resource(stack, RES_TYPE_ASG)
        res_elb_id = find_one_resource(stack, RES_TYPE_ELB, only_id=True)
        elbinstances = boto.connect_elb().describe_instance_health(res_elb_id)
        if len(asg.instances) < mig_desired:
            yield " NOTYET: only %i instances created" % len(asg.instances)
            continue
        elif [i for i in asg.instances if i.health_status != 'Healthy']:
            yield " NOTYET: still unhealthy instances"
            continue
        elif len(elbinstances) < mig_desired:
            yield " NOTYET: only %i instances in ELB" % len(elbinstances)
            continue
        elif [i for i in elbinstances if i.state != 'InService']:
            yield " NOTYET: not all instances are ELB:InService"
            continue
        else:
            yield " OK: %s healthy instances in ELB" % len(asg.instances)
            break

    yield "\n <> Checking the new ASG state..."
    asg = find_one_resource(stack, RES_TYPE_ASG)
    yield format_autoscale(asg)
    yield format_autoscale_instances(stack)

    yield "\n <> Restoring previous ASG control:"
    asg.termination_policies = orig_term_pol
    asg.min_size = orig_min
    asg.max_size = orig_max
    asg.desired_capacity = orig_desired
    yield format_autoscale(asg)

    if confirm('Restore the ASG config?', default=True):
        try:
            asg.update()
        except BotoServerError as error:
            yield "\n <> Restoration failed!"
            yield error
        else:
            yield "\n <> ASG control restored."
    else:
        yield "WARNING: The ASG desired capacity was doubled!"
def confirm_action(arg, action="action", default=False):
    if hasattr(arg, 'force') and arg.force:
        return
    if not confirm('Confirm %s? ' % action, default=default):
        raise CommandError("Aborted")
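# Usage sketch: callers hand over their parsed CLI namespace, so a --force
# flag (when the command defines one) silently skips the prompt:
#
#     confirm_action(args, action="delete stack", default=True)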
def schedulevalidator(pm_id, db_url=None, timezone=None, site_url=None,
                      outputfile=None, strip_time=None,
                      working_calendar=None, verbosity=None):
    """
    Produces a report to verify PM Schedules in TRIRIGA.

    Example:
        axiom --db-url=tridata/tridata@localhost:1521:xe
              --site-url=http://localhost:9080 --timezone=US/Eastern
    """
    set_verbosity(verbosity)

    if site_url.endswith("/"):
        site_url = site_url[:-1]

    db = parse_db_url(db_url)
    logging.debug("Using database connection: " + str(db))
    connection = db.get_connection()

    if pm_id:
        # Get the selected PM Schedules
        pms_sql = SQL_GET_PMSCHEDS_FILTERED.format(
            ', '.join(["'{}'".format(i) for i in pm_id]))
        pms = execute(pms_sql, connection)
        if len(pm_id) != len(pms):
            logging.warning("Expected {} PMs, only got {}.".format(
                len(pm_id), len(pms)))
    else:
        # Get ALL PM Schedules
        pms = execute(SQL_GET_PMSCHEDS, connection)

    if len(pms) <= 0:
        raise CommandError("No PM Schedules found in {}".format(db))

    workbook = xlsxwriter.Workbook(outputfile)
    index_worksheet = workbook.add_worksheet(name="Index")
    xlFormats['bad'] = workbook.add_format(
        {'bg_color': '#FFC7CE', 'font_color': '#9C0006'})
    xlFormats['good'] = workbook.add_format(
        {'bg_color': '#C6EFCE', 'font_color': '#006100'})
    xlFormats['header'] = workbook.add_format(
        {'bold': True, 'text_wrap': True})
    xlFormats['rrule'] = workbook.add_format(
        {'text_wrap': True, 'valign': 'top'})

    # Validate each one
    for pm in tqdm(pms):
        try:
            group = (pm['TRITASKGROUPINGRULELI'] ==
                     'Create Task For Each Asset/Location')
            validate_pm(pm['TRIIDTX'], pm['TRINAMETX'], workbook, connection,
                        timezone, strip_time, working_calendar, group,
                        verbosity)
        except Exception:
            logging.exception("Unable to validate a PM. Ignore and continue.")

    if verbosity > 0:
        print()

    populate_index_worksheet(index_worksheet, pms, site_url)
    workbook.close()
def connect(args):
    """SSH to multiple EC2 instances by name, instance-id or private ip."""
    if args.completion_list:
        try:
            yield " ".join(read_completion_list())
        except IOError:
            pass
    elif args.completion_script:
        yield BASH_COMPLETION_INSTALL_SCRIPT
    elif args.list:
        instances = ec2.get_instances()
        names = sorted([ec2.get_name(i) for i in instances])
        yield '\n'.join(names)
    elif args.instance is None:
        raise CommandError("No instances specified.")
    else:
        if args.confirm and args.yes:
            raise CommandError("Options confirm and yes are not compatible")
        try:
            instances = ec2.get_instances()
            write_completion_list(instances)
            specifiers = args.instance.lower().strip().split(',')
            instances = ec2.filter_instances(specifiers, instances)
            if len(instances) == 0:
                raise CommandError("No instances found.")
        except KeyboardInterrupt:
            raise CommandError("Killed while accessing the AWS api.")
        if args.one:
            instances = instances[0:1]
        if len(instances) > 1 or args.confirm:
            args.verbose = True
        if len(instances) > 1 and not args.yes:
            args.confirm = True
        if args.verbose and args.command:
            yield '----- Command: %s' % ' '.join(args.command)
        if args.verbose:
            names = sorted([ec2.get_name(i) for i in instances])
            yield '----- Instances(%s): %s' % (len(names), ",".join(names))
        if args.confirm:
            if not argh.confirm('Connect to all instances (y) or just one (n)',
                                default=True):
                instances = [instances[0]]
        if len(instances) == 1:
            host = instances[0].public_dns_name
            try:
                # Replace the current process with ssh for a single target
                os.execvp('ssh', ['ec2ssh', host] + args.command)
            except OSError as error:
                raise Exception("Failed to call the ssh command: %s" % error)
        else:
            for instance in instances:
                if args.verbose:
                    yield "----- %s: %s %s" % (
                        instance.id,
                        instance.public_dns_name,
                        instance.private_ip_address,
                    )
                host = instance.public_dns_name
                subprocess.call(['ssh', host] + args.command)
            if args.verbose:
                yield '----- DONE'