def unload(args):
    """`conduct unload` command"""

    log = logging.getLogger(__name__)
    path = 'bundles/{}'.format(args.bundle)
    url = conduct_url.url(path, args)
    response = conduct_request.delete(args.dcos_mode, conductr_host(args), url,
                                      auth=args.conductr_auth, verify=args.server_verification_file,
                                      timeout=DEFAULT_HTTP_TIMEOUT)
    validation.raise_for_status_inc_3xx(response)
    if log.is_verbose_enabled():
        log.verbose(validation.pretty_json(response.text))
    log.info('Bundle unload request sent.')
    response_json = json.loads(response.text)
    if not args.no_wait:
        bundle_installation.wait_for_uninstallation(response_json['bundleId'], args)
    if not args.disable_instructions:
        log.info('Print ConductR info with: {} info{}'.format(args.command, args.cli_parameters))
    if not log.is_info_enabled() and log.is_quiet_enabled():
        log.quiet(response_json['bundleId'])
    return True

def stop(args):
    """`conduct stop` command"""

    log = logging.getLogger(__name__)
    path = 'bundles/{}?scale=0'.format(args.bundle)
    url = conduct_url.url(path, args)
    response = conduct_request.put(args.dcos_mode, conductr_host(args), url,
                                   auth=args.conductr_auth, verify=args.server_verification_file,
                                   timeout=DEFAULT_HTTP_TIMEOUT)
    validation.raise_for_status_inc_3xx(response)
    if log.is_verbose_enabled():
        log.verbose(validation.pretty_json(response.text))
    response_json = json.loads(response.text)
    bundle_id = response_json['bundleId'] if args.long_ids else bundle_utils.short_id(response_json['bundleId'])
    log.info('Bundle stop request sent.')
    if not args.no_wait:
        bundle_scale.wait_for_scale(response_json['bundleId'], 0, args)
    if not args.disable_instructions:
        log.info('Unload bundle with: {} unload{} {}'.format(args.command, args.cli_parameters, bundle_id))
        log.info('Print ConductR info with: {} info{}'.format(args.command, args.cli_parameters))
    return True

def events(args):
    """`conduct events` command"""

    log = logging.getLogger(__name__)
    request_url = conduct_url.url('bundles/{}/events?count={}'.format(quote_plus(args.bundle), args.lines), args)
    response = conduct_request.get(args.dcos_mode, conductr_host(args), request_url,
                                   auth=args.conductr_auth, verify=args.server_verification_file,
                                   timeout=DEFAULT_HTTP_TIMEOUT)
    validation.raise_for_status_inc_3xx(response)
    data = [
        {
            'time': validation.format_timestamp(event['timestamp'], args),
            'event': event['event'],
            'description': event['description']
        } for event in json.loads(response.text)
    ]
    data.insert(0, {'time': 'TIME', 'event': 'EVENT', 'description': 'DESC'})
    padding = 2
    column_widths = dict(screen_utils.calc_column_widths(data), **{'padding': ' ' * padding})
    for row in data:
        log.screen('''\
{time: <{time_width}}{padding}\
{event: <{event_width}}{padding}\
{description: <{description_width}}{padding}'''.format(**dict(row, **column_widths)).rstrip())
    return True

def run(args):
    """`conduct run` command"""

    log = logging.getLogger(__name__)
    if args.affinity is not None and args.api_version == '1':
        log.error('Affinity feature is only available for v1.1 onwards of ConductR')
        return
    elif args.affinity is not None:
        path = 'bundles/{}?scale={}&affinity={}'.format(args.bundle, args.scale, args.affinity)
    else:
        path = 'bundles/{}?scale={}'.format(args.bundle, args.scale)
    url = conduct_url.url(path, args)
    response = conduct_request.put(args.dcos_mode, conductr_host(args), url,
                                   auth=args.conductr_auth, verify=args.server_verification_file)
    validation.raise_for_status_inc_3xx(response)
    if log.is_verbose_enabled():
        log.verbose(validation.pretty_json(response.text))
    response_json = json.loads(response.text)
    bundle_id = response_json['bundleId'] if args.long_ids else bundle_utils.short_id(response_json['bundleId'])
    log.info('Bundle run request sent.')
    if not args.no_wait:
        bundle_scale.wait_for_scale(response_json['bundleId'], args.scale, args)
    if not args.disable_instructions:
        log.info('Stop bundle with: {} stop{} {}'.format(args.command, args.cli_parameters, bundle_id))
        log.info('Print ConductR info with: {} info{}'.format(args.command, args.cli_parameters))
    return True

def acls(args):
    """`conduct acls` command"""

    log = logging.getLogger(__name__)
    url = conduct_url.url('bundles', args)
    response = conduct_request.get(args.dcos_mode, conductr_host(args), url, timeout=DEFAULT_HTTP_TIMEOUT)
    validation.raise_for_status_inc_3xx(response)
    if log.is_verbose_enabled():
        log.verbose(validation.pretty_json(response.text))

    def get_system_version(bundle):
        if 'systemVersion' in bundle['attributes']:
            return bundle['attributes']['systemVersion']
        else:
            return bundle['attributes']['system'].split('-')[-1]

    def is_started(bundle_executions):
        for execution in bundle_executions:
            if execution['isStarted']:
                return 'Running'
        return 'Starting'

    all_acls = [
        {
            'acl': acl,
            'system': bundle['attributes']['system'],
            'system_version': get_system_version(bundle),
            'endpoint_name': endpoint_name,
            'bundle_id': bundle['bundleId'] if args.long_ids else bundle_utils.short_id(bundle['bundleId']),
            'bundle_name': bundle['attributes']['bundleName'],
            'status': is_started(bundle['bundleExecutions'])
        }
        for bundle in json.loads(response.text) if bundle['bundleExecutions']
        for endpoint_name, endpoint in bundle['bundleConfig']['endpoints'].items() if 'acls' in endpoint
        for acl in endpoint['acls']
    ]

    if args.protocol_family == 'http':
        http_acls = [acl for acl in all_acls if 'http' in acl['acl']]
        display_http_acls(log, http_acls)
    elif args.protocol_family == 'tcp':
        tcp_acls = [acl for acl in all_acls if 'tcp' in acl['acl']]
        display_tcp_acls(log, tcp_acls)

    return True

def get_deployment_state(deployment_id, args):
    deployment_state_url = conduct_url.url('deployments/{}'.format(deployment_id), args)
    response = conduct_request.get(args.dcos_mode, conduct_url.conductr_host(args), deployment_state_url,
                                   auth=args.conductr_auth, verify=args.server_verification_file)
    if response.status_code == 404:
        return None
    else:
        response.raise_for_status()
        deployment_state = json.loads(response.text)
        return deployment_state

def wait_for_condition(bundle_id, condition, condition_name, args):
    log = logging.getLogger(__name__)
    start_time = datetime.now()

    installed_bundles = count_installations(bundle_id, args)
    last_log_message = None
    if condition(installed_bundles):
        log.info('Bundle {} is {}'.format(bundle_id, condition_name))
        return
    else:
        sse_heartbeat_count_after_event = 0
        log.info('Bundle {} waiting to be {}'.format(bundle_id, condition_name))
        bundle_events_url = conduct_url.url('bundles/events', args)
        sse_events = sse_client.get_events(args.dcos_mode, conduct_url.conductr_host(args), bundle_events_url,
                                           auth=args.conductr_auth, verify=args.server_verification_file)
        for event in sse_events:
            sse_heartbeat_count_after_event += 1

            elapsed = (datetime.now() - start_time).total_seconds()
            if elapsed > args.wait_timeout:
                raise WaitTimeoutError('Bundle {} waiting to be {}'.format(bundle_id, condition_name))

            # Check for installed bundles every 3 heartbeats from the last received event.
            if event.event or (sse_heartbeat_count_after_event % 3 == 0):
                if event.event:
                    sse_heartbeat_count_after_event = 0

                installed_bundles = count_installations(bundle_id, args)
                if condition(installed_bundles):
                    # Reprint previous message with flush to go to next line
                    if last_log_message:
                        log.progress(last_log_message, flush=True)
                    log.info('Bundle {} {}'.format(bundle_id, condition_name))
                    return
                else:
                    if last_log_message:
                        last_log_message = '{}.'.format(last_log_message)
                    else:
                        last_log_message = 'Bundle {} still waiting to be {}'.format(bundle_id, condition_name)
                    log.progress(last_log_message, flush=False)

        raise WaitTimeoutError('Bundle {} waiting to be {}'.format(bundle_id, condition_name))

def wait_for_scale(bundle_id, expected_scale, args):
    log = logging.getLogger(__name__)
    start_time = datetime.now()

    bundle_scale = get_scale(bundle_id, args)
    if bundle_scale == expected_scale:
        log.info('Bundle {} expected scale {} is met'.format(bundle_id, expected_scale))
        return
    else:
        sse_heartbeat_count_after_event = 0
        log.info('Bundle {} waiting to reach expected scale {}'.format(bundle_id, expected_scale))
        bundle_events_url = conduct_url.url('bundles/events', args)
        sse_events = sse_client.get_events(args.dcos_mode, conduct_url.conductr_host(args), bundle_events_url,
                                           auth=args.conductr_auth, verify=args.server_verification_file)
        last_scale = -1
        last_log_message = None
        for event in sse_events:
            sse_heartbeat_count_after_event += 1

            elapsed = (datetime.now() - start_time).total_seconds()
            if elapsed > args.wait_timeout:
                raise WaitTimeoutError('Bundle {} waiting to reach expected scale {}'.format(bundle_id, expected_scale))

            # Check for bundle scale every 3 heartbeats from the last received event.
            if event.event or (sse_heartbeat_count_after_event % 3 == 0):
                if event.event:
                    sse_heartbeat_count_after_event = 0

                bundle_scale = get_scale(bundle_id, args)
                if bundle_scale == expected_scale:
                    # Reprint previous message with flush to go to next line
                    if last_log_message:
                        log.progress(last_log_message, flush=True)
                    log.info('Bundle {} expected scale {} is met'.format(bundle_id, expected_scale))
                    return
                else:
                    if bundle_scale > last_scale:
                        last_scale = bundle_scale
                        # Reprint previous message with flush to go to next line
                        if last_log_message:
                            log.progress(last_log_message, flush=True)
                        last_log_message = 'Bundle {} has scale {}, expected {}'.format(bundle_id, bundle_scale,
                                                                                        expected_scale)
                        log.progress(last_log_message, flush=False)
                    else:
                        last_log_message = '{}.'.format(last_log_message)
                        log.progress(last_log_message, flush=False)

        raise WaitTimeoutError('Bundle {} waiting to reach expected scale {}'.format(bundle_id, expected_scale))

def count_installations(bundle_id, args):
    bundles_url = conduct_url.url('bundles', args)
    response = conduct_request.get(args.dcos_mode, conduct_url.conductr_host(args), bundles_url,
                                   auth=args.conductr_auth, verify=args.server_verification_file)
    response.raise_for_status()
    bundles = json.loads(response.text)
    matching_bundles = [bundle for bundle in bundles if bundle['bundleId'] == bundle_id]
    if matching_bundles:
        matching_bundle = matching_bundles[0]
        if 'bundleInstallations' in matching_bundle:
            return len(matching_bundle['bundleInstallations'])
    return 0

def load_server_ssl_verification_file(args):
    # When running within DCOS CLI, DCOS CLI has its own setting related to server side SSL verification.
    if not args.dcos_mode:
        custom_settings = load_from_file(args)
        conductr_host = conduct_url.conductr_host(args)
        conductr_port = args.port
        auth_config = get_auth_config(custom_settings, conductr_host, conductr_port)
        verification_file = get_config_value(auth_config, 'server_ssl_verification_file')
        if verification_file:
            return verification_file
    return None

def info(args):
    """`conduct info` command"""

    log = logging.getLogger(__name__)
    url = conduct_url.url('bundles', args)
    response = conduct_request.get(args.dcos_mode, conductr_host(args), url,
                                   auth=args.conductr_auth, verify=args.server_verification_file,
                                   timeout=DEFAULT_HTTP_TIMEOUT)
    validation.raise_for_status_inc_3xx(response)
    if log.is_verbose_enabled():
        log.verbose(validation.pretty_json(response.text))
    data = [
        {
            'id': ('! ' if bundle.get('hasError', False) else '') +
                  (bundle['bundleId'] if args.long_ids else bundle_utils.short_id(bundle['bundleId'])),
            'name': bundle['attributes']['bundleName'],
            'replications': len(bundle['bundleInstallations']),
            'starting': sum([not execution['isStarted'] for execution in bundle['bundleExecutions']]),
            'executions': sum([execution['isStarted'] for execution in bundle['bundleExecutions']])
        } for bundle in json.loads(response.text)
    ]
    data.insert(0, {'id': 'ID', 'name': 'NAME', 'replications': '#REP', 'starting': '#STR', 'executions': '#RUN'})
    padding = 2
    column_widths = dict(screen_utils.calc_column_widths(data), **{'padding': ' ' * padding})
    has_error = False
    for row in data:
        has_error |= '!' in row['id']
        log.screen('''\
{id: <{id_width}}{padding}\
{name: <{name_width}}{padding}\
{replications: >{replications_width}}{padding}\
{starting: >{starting_width}}{padding}\
{executions: >{executions_width}}'''.format(**dict(row, **column_widths)).rstrip())
    if has_error:
        log.screen('There are errors: use `conduct events` or `conduct logs` for further information')
    return True

def get_deployment_events(deployment_id, args):
    deployment_state_url = conduct_url.url('deployments/{}'.format(deployment_id), args)
    response = conduct_request.get(args.dcos_mode, conduct_url.conductr_host(args), deployment_state_url,
                                   auth=args.conductr_auth, verify=args.server_verification_file)
    if response.status_code == 404:
        return None
    else:
        response.raise_for_status()
        deployment_state = json.loads(response.text)
        return deployment_state

def load_conductr_credentials(args):
    # When running within DCOS CLI, DCOS CLI will provide authentication functionality.
    if not args.dcos_mode:
        custom_settings = load_from_file(args)
        conductr_host = conduct_url.conductr_host(args)
        conductr_port = args.port
        auth_config = get_auth_config(custom_settings, conductr_host, conductr_port)
        if get_config_value(auth_config, 'enabled'):
            username = get_config_value(auth_config, 'username')
            password = get_config_value(auth_config, 'password')
            if username and password:
                return username, password
    return None

def run(args):
    """`conduct run` command"""

    log = logging.getLogger(__name__)
    if args.affinity is not None and args.api_version == '1':
        log.error('Affinity feature is only available for v1.1 onwards of ConductR')
        return
    elif args.affinity is not None:
        path = 'bundles/{}?scale={}&affinity={}'.format(args.bundle, args.scale, args.affinity)
    else:
        path = 'bundles/{}?scale={}'.format(args.bundle, args.scale)
    url = conduct_url.url(path, args)
    response = conduct_request.put(args.dcos_mode, conductr_host(args), url,
                                   auth=args.conductr_auth, verify=args.server_verification_file)
    validation.raise_for_status_inc_3xx(response)
    if log.is_verbose_enabled():
        log.verbose(validation.pretty_json(response.text))
    response_json = json.loads(response.text)
    bundle_id = response_json['bundleId'] if args.long_ids else bundle_utils.short_id(response_json['bundleId'])
    log.info('Bundle run request sent.')
    if not args.no_wait:
        bundle_scale.wait_for_scale(response_json['bundleId'], args.scale, wait_for_is_active=True, args=args)
    if not args.disable_instructions:
        log.info('Stop bundle with: {} stop{} {}'.format(args.command, args.cli_parameters, bundle_id))
        log.info('Print ConductR info with: {} info{}'.format(args.command, args.cli_parameters))
        log.info('Print bundle info with: {} info{} {}'.format(args.command, args.cli_parameters, bundle_id))
    return True

def get_scale(bundle_id, args):
    bundles_url = conduct_url.url('bundles', args)
    response = conduct_request.get(args.dcos_mode, conduct_url.conductr_host(args), bundles_url,
                                   auth=args.conductr_auth, verify=args.server_verification_file)
    response.raise_for_status()
    bundles = json.loads(response.text)
    matching_bundles = [bundle for bundle in bundles if bundle['bundleId'] == bundle_id]
    if matching_bundles:
        matching_bundle = matching_bundles[0]
        if 'bundleExecutions' in matching_bundle:
            started_executions = [bundle_execution
                                  for bundle_execution in matching_bundle['bundleExecutions']
                                  if bundle_execution['isStarted']]
            return len(started_executions)
    return 0

def post_license(args, license_file):
    """
    Post license file to ConductR.

    :param args: input args obtained from argparse
    :param license_file: the path to license file
    """
    url = conduct_url.url('license', args)
    response = conduct_request.post(args.dcos_mode, conductr_host(args), url,
                                    data=open(license_file, 'rb'),
                                    auth=args.conductr_auth,
                                    verify=args.server_verification_file)
    if response.status_code == 503:
        return False
    else:
        validation.raise_for_status_inc_3xx(response)
        return True

def service_names(args):
    """`conduct service-names` command"""

    log = logging.getLogger(__name__)
    url = conduct_url.url('bundles', args)
    response = conduct_request.get(args.dcos_mode, conductr_host(args), url, timeout=DEFAULT_HTTP_TIMEOUT)
    validation.raise_for_status_inc_3xx(response)
    if log.is_verbose_enabled():
        log.verbose(validation.pretty_json(response.text))
    bundles = json.loads(response.text)
    data, duplicate_endpoints = get_service_names_from_bundles(args, bundles)
    display_service_names(log, data, duplicate_endpoints)
    return True

def agents(args):
    """`conduct agents` command"""

    log = logging.getLogger(__name__)
    request_url = conduct_url.url('agents', args)
    response = conduct_request.get(args.dcos_mode, conductr_host(args), request_url,
                                   auth=args.conductr_auth, verify=args.server_verification_file,
                                   timeout=DEFAULT_HTTP_TIMEOUT)
    validation.raise_for_status_inc_3xx(response)
    if log.is_verbose_enabled():
        log.verbose(validation.pretty_json(response.text))
    raw_data = json.loads(response.text)
    data = [
        {
            'address': 'ADDRESS',
            'roles': 'ROLES',
            'observed': 'OBSERVED BY'
        }
    ]
    for entry in raw_data:
        if args.role is None or args.role in entry['roles']:
            data.append({
                'address': entry['address'],
                'roles': ','.join(entry['roles']),
                'observed': ','.join(map(lambda e: e['node']['address'], entry['observedBy']))
            })
    padding = 2
    column_widths = dict(screen_utils.calc_column_widths(data), **{'padding': ' ' * padding})
    for row in data:
        log.screen('''\
{address: <{address_width}}{padding}\
{roles: <{roles_width}}{padding}\
{observed: >{observed_width}}{padding}'''.format(**dict(row, **column_widths)).rstrip())
    return True

def info(args):
    """`conduct info` command"""

    log = logging.getLogger(__name__)
    url = conduct_url.url('bundles', args)
    response = conduct_request.get(args.dcos_mode, conductr_host(args), url,
                                   auth=args.conductr_auth, verify=args.server_verification_file,
                                   timeout=DEFAULT_HTTP_TIMEOUT)
    validation.raise_for_status_inc_3xx(response)
    if log.is_verbose_enabled():
        log.verbose(validation.pretty_json(response.text))
    bundles = json.loads(response.text)
    if args.bundle:
        return conduct_info_inspect.display_bundle(args, bundles, args.bundle)
    else:
        return conduct_info_list.display_bundles(args, bundles)

def get_license(args):
    """
    Get license from ConductR.

    Returns a tuple of Boolean, get_license_payload.
    The following return values are allowed:
    - False, None: License endpoint does not exist at the ConductR control protocol
    - True, None: No license has been uploaded to ConductR
    - True, license_data: Returns the current license from ConductR

    :param args: input args obtained from argparse
    """
    url = conduct_url.url('license', args)
    try:
        response = conduct_request.get(args.dcos_mode, conductr_host(args), url, auth=args.conductr_auth)
        if response.status_code == 404 or response.status_code == 503:
            return False, None
        else:
            validation.raise_for_status_inc_3xx(response)
            return True, json.loads(response.text)
    except DCOSHTTPException as e:
        if e.response.status_code == 404 or e.response.status_code == 503:
            return False, None
        else:
            raise e

def get_scale(bundle_id, wait_for_is_active, args):
    bundles_url = conduct_url.url('bundles', args)
    response = conduct_request.get(args.dcos_mode, conduct_url.conductr_host(args), bundles_url,
                                   auth=args.conductr_auth, verify=args.server_verification_file)
    response.raise_for_status()
    bundles = json.loads(response.text)
    matching_bundles = [bundle for bundle in bundles if bundle['bundleId'] == bundle_id]
    if matching_bundles:
        matching_bundle = matching_bundles[0]
        if 'bundleExecutions' in matching_bundle:
            started_executions = [bundle_execution
                                  for bundle_execution in matching_bundle['bundleExecutions']
                                  if not wait_for_is_active or bundle_execution['isStarted']]
            return len(started_executions)
    return 0

def acls(args):
    """`conduct acls` command"""

    log = logging.getLogger(__name__)
    url = conduct_url.url('bundles', args)
    response = conduct_request.get(args.dcos_mode, conductr_host(args), url, timeout=DEFAULT_HTTP_TIMEOUT)
    validation.raise_for_status_inc_3xx(response)
    if log.is_verbose_enabled():
        log.verbose(validation.pretty_json(response.text))
    bundles = json.loads(response.text)
    all_acls, http_acls, tcp_acls = get_acls_from_bundles(args, bundles)
    if args.protocol_family == 'http':
        display_http_acls(log, http_acls)
    elif args.protocol_family == 'tcp':
        display_tcp_acls(log, tcp_acls)
    return True

def load_v1(args):
    log = logging.getLogger(__name__)

    log.info('Retrieving bundle..')
    custom_settings = args.custom_settings
    resolve_cache_dir = args.resolve_cache_dir
    validate_cache_dir_permissions(resolve_cache_dir, log)
    bundle_file_name, bundle_file = resolver.resolve_bundle(custom_settings, resolve_cache_dir, args.bundle)

    configuration_file_name, configuration_file = (None, None)
    if args.configuration is not None:
        log.info('Retrieving configuration..')
        configuration_file_name, configuration_file = \
            resolver.resolve_bundle_configuration(custom_settings, resolve_cache_dir, args.configuration)

    bundle_conf = ConfigFactory.parse_string(bundle_utils.conf(bundle_file))
    overlay_bundle_conf = None if configuration_file is None else \
        ConfigFactory.parse_string(bundle_utils.conf(configuration_file))

    with_bundle_configurations = partial(apply_to_configurations, bundle_conf, overlay_bundle_conf)

    url = conduct_url.url('bundles', args)
    files = get_payload(bundle_file_name, bundle_file, with_bundle_configurations)
    if configuration_file is not None:
        files.append(('configuration', (configuration_file_name, open(configuration_file, 'rb'))))

    # TODO: Delete the bundle configuration file.
    # Currently, this results into a permission error on Windows.
    # Therefore, the deletion is disabled for now.
    # Issue: https://github.com/typesafehub/conductr-cli/issues/175
    # if configuration_file and os.path.exists(configuration_file):
    #     os.remove(configuration_file)

    log.info('Loading bundle to ConductR..')
    multipart = create_multipart(log, files)
    response = conduct_request.post(args.dcos_mode, conductr_host(args), url,
                                    data=multipart,
                                    auth=args.conductr_auth,
                                    verify=args.server_verification_file,
                                    headers={'Content-Type': multipart.content_type})
    validation.raise_for_status_inc_3xx(response)
    if log.is_verbose_enabled():
        log.verbose(validation.pretty_json(response.text))
    response_json = json.loads(response.text)
    bundle_id = response_json['bundleId'] if args.long_ids else bundle_utils.short_id(response_json['bundleId'])

    if not args.no_wait:
        bundle_installation.wait_for_installation(response_json['bundleId'], args)

    cleanup_old_bundles(resolve_cache_dir, bundle_file_name, excluded=bundle_file)

    log.info('Bundle loaded.')
    if not args.disable_instructions:
        log.info('Start bundle with: {} run{} {}'.format(args.command, args.cli_parameters, bundle_id))
        log.info('Unload bundle with: {} unload{} {}'.format(args.command, args.cli_parameters, bundle_id))
        log.info('Print ConductR info with: {} info{}'.format(args.command, args.cli_parameters))

    if not log.is_info_enabled() and log.is_quiet_enabled():
        log.quiet(response_json['bundleId'])

    return True

def deploy(args):
    """`conduct deploy` command"""

    log = logging.getLogger(__name__)

    bintray_webhook_secret = custom_settings.load_bintray_webhook_secret(args)
    if not bintray_webhook_secret:
        log.error('The deploy command requires bintray webhook secret to be configured')
        log.error('Add the following configuration to the '
                  'custom settings file {}'.format(vars(args).get('custom_settings_file')))
        log.error('  conductr.continuous-delivery.bintray-webhook-secret = "configured-continuous-delivery-secret"')
        return

    resolved_version = resolver.resolve_bundle_version(args.custom_settings, args.bundle)
    if not resolved_version:
        log.error('Unable to resolve bundle {}'.format(args.bundle))
        return

    # Build Continuous Delivery URL using our resolver mechanism
    deploy_uri = resolver.continuous_delivery_uri(args.custom_settings, resolved_version)
    if not deploy_uri:
        log.error('Unable to form Continuous Delivery uri for {}'.format(args.bundle))
        return

    # Confirm with the user unless auto deploy is enabled
    accepted = True if args.auto_deploy else request_deploy_confirmation(resolved_version, args)
    if not accepted:
        log.info('Abort')
        return

    url = conduct_url.url(deploy_uri, args)

    # JSON Payload for deployment request
    payload = {
        'package': resolved_version['package_name'],
        'version': '{}-{}'.format(resolved_version['compatibility_version'], resolved_version['digest'])
    }

    # HTTP headers required for deployment request
    hmac_digest = bundle_deploy.generate_hmac_signature(bintray_webhook_secret, resolved_version['package_name'])
    headers = {'X-Bintray-WebHook-Hmac': hmac_digest}

    response = conduct_request.post(args.dcos_mode, conductr_host(args), url,
                                    json=payload,
                                    headers=headers,
                                    auth=args.conductr_auth,
                                    verify=args.server_verification_file)
    validation.raise_for_status_inc_3xx(response)

    deployment_id = response.text
    log.info('Deployment request sent.')
    log.info('Deployment id {}'.format(deployment_id))

    if not args.no_wait:
        bundle_deploy.wait_for_deployment_complete(deployment_id, resolved_version, args)

    return True

def wait_for_deployment_complete(deployment_id, resolved_version, args):
    log = logging.getLogger(__name__)
    start_time = datetime.now()

    def display_bundle_id(bundle_id):
        return bundle_id if args.long_ids else bundle_utils.short_id(bundle_id)

    def is_completed_with_success(deployment_events):
        for event in deployment_events:
            if event['eventType'] == 'deploymentSuccess':
                return True
        return False

    def is_completed_with_failure(deployment_events):
        for event in deployment_events:
            if event['eventType'] == 'deploymentFailure':
                return True
        return False

    def display_deployment_event(deployment_event):
        event_type = deployment_event['eventType']
        if event_type == 'deploymentStarted':
            return 'Deployment started'
        elif event_type == 'bundleDownload':
            return 'Downloading bundle'
        elif event_type == 'configDownload':
            compatible_bundle_id = display_bundle_id(deployment_event['compatibleBundleId'])
            return 'Downloading config from bundle {}'.format(compatible_bundle_id)
        elif event_type == 'load':
            return 'Loading bundle with config' if 'configFileName' in deployment_event else 'Loading bundle'
        elif event_type == 'deploy':
            bundle_old = deployment_event['bundleOld']
            bundle_new = deployment_event['bundleNew']
            deploy_progress = 'Deploying - {} old instance vs {} new instance'.format(bundle_old['scale'],
                                                                                      bundle_new['scale'])
            return deploy_progress
        elif event_type == 'deploymentSuccess':
            return 'Success'
        elif event_type == 'deploymentFailure':
            return 'Failure: {}'.format(deployment_event['failure'])
        else:
            return 'Unknown deployment state {}'.format(deployment_event)

    def get_event_sequence(deployment_event):
        return deployment_event['deploymentSequence']

    def log_message(deployment_events):
        latest_event = sorted(deployment_events, key=get_event_sequence)[-1]
        return display_deployment_event(latest_event)

    package_name = resolved_version['package_name']
    tag = resolved_version['tag']
    bundle_id = display_bundle_id(resolved_version['digest'])
    bundle_shorthand = '{}:{}-{}'.format(package_name, tag, bundle_id)
    log.info('Deploying {}'.format(bundle_shorthand))

    deployment_events = get_deployment_events(deployment_id, args)
    if deployment_events and is_completed_with_success(deployment_events):
        log.info(log_message(deployment_events))
        return
    elif deployment_events and is_completed_with_failure(deployment_events):
        raise ContinuousDeliveryError('Unable to deploy {} - {}'.format(bundle_shorthand,
                                                                        log_message(deployment_events)))
    else:
        sse_heartbeat_count_after_event = 0
        deployment_events_url = conduct_url.url('deployments/events', args)
        sse_events = sse_client.get_events(args.dcos_mode, conduct_url.conductr_host(args), deployment_events_url,
                                           auth=args.conductr_auth, verify=args.server_verification_file)
        last_events = None
        last_log_message = None
        for event in sse_events:
            sse_heartbeat_count_after_event += 1

            elapsed = (datetime.now() - start_time).total_seconds()
            if elapsed > args.wait_timeout:
                raise WaitTimeoutError('Deployment is still waiting to be completed')

            # Check for deployment state every 3 heartbeats from the last received event.
            if event.event or (sse_heartbeat_count_after_event % 3 == 0):
                if event.event:
                    sse_heartbeat_count_after_event = 0

                deployment_events = get_deployment_events(deployment_id, args)
                if is_completed_with_success(deployment_events):
                    # Reprint previous message with flush to go to next line
                    if last_log_message:
                        log.progress(last_log_message, flush=True)
                    log.info(log_message(deployment_events))
                    return
                elif is_completed_with_failure(deployment_events):
                    # Reprint previous message with flush to go to next line
                    if last_log_message:
                        log.progress(last_log_message, flush=True)
                    raise ContinuousDeliveryError('Unable to deploy {} - {}'.format(bundle_shorthand,
                                                                                    log_message(deployment_events)))
                else:
                    if deployment_events != last_events:
                        last_events = deployment_events
                        # Reprint previous message with flush to go to next line
                        if last_log_message:
                            log.progress(last_log_message, flush=True)
                        last_log_message = log_message(deployment_events)
                        log.progress(last_log_message, flush=False)
                    else:
                        last_log_message = '{}.'.format(last_log_message)
                        log.progress(last_log_message, flush=False)

        raise WaitTimeoutError('Deployment for {} is still waiting to be completed'.format(bundle_shorthand))

def service_names(args):
    """`conduct service-names` command"""

    log = logging.getLogger(__name__)
    url = conduct_url.url('bundles', args)
    response = conduct_request.get(args.dcos_mode, conductr_host(args), url, timeout=DEFAULT_HTTP_TIMEOUT)
    validation.raise_for_status_inc_3xx(response)
    if log.is_verbose_enabled():
        log.verbose(validation.pretty_json(response.text))

    def execution_status(bundle_executions):
        for execution in bundle_executions:
            if execution['isStarted']:
                return 'Running'
        return 'Starting'

    def get_service_name_from_service_uri(service_uri):
        paths = urlparse(service_uri).path.split('/')
        if len(paths) > 1:
            return paths[1]
        else:
            return ''

    data_from_service_uri = [
        {
            'service_name': get_service_name_from_service_uri(service_uri),
            'service_uri': service_uri,
            'bundle_id': bundle['bundleId'] if args.long_ids else bundle_utils.short_id(bundle['bundleId']),
            'bundle_name': bundle['attributes']['bundleName'],
            'status': execution_status(bundle['bundleExecutions'])
        }
        for bundle in json.loads(response.text) if bundle['bundleExecutions']
        for endpoint_name, endpoint in bundle['bundleConfig']['endpoints'].items() if 'services' in endpoint
        for service_uri in endpoint['services']
    ]

    data_from_service_name = [
        {
            'service_name': endpoint['serviceName'],
            'service_uri': None,
            'bundle_id': bundle['bundleId'] if args.long_ids else bundle_utils.short_id(bundle['bundleId']),
            'bundle_name': bundle['attributes']['bundleName'],
            'status': execution_status(bundle['bundleExecutions'])
        }
        for bundle in json.loads(response.text) if bundle['bundleExecutions']
        for endpoint_name, endpoint in bundle['bundleConfig']['endpoints'].items() if 'serviceName' in endpoint
    ]

    data = data_from_service_uri + data_from_service_name
    data = sorted([entry for entry in data if entry['service_name']], key=lambda line: line['service_name'])

    service_endpoints = {}
    for service in data:
        url = urlparse(service['service_uri'])
        if not (url.path == '' or url.path == '/'):
            try:
                service_endpoints[url.path] |= {service['service_uri']}
            except KeyError:
                service_endpoints[url.path] = {service['service_uri']}
    duplicate_endpoints = [service for (service, endpoint) in service_endpoints.items() if len(endpoint) > 1] \
        if len(service_endpoints) > 0 else []

    data.insert(0, {'service_name': 'SERVICE NAME', 'bundle_id': 'BUNDLE ID',
                    'bundle_name': 'BUNDLE NAME', 'status': 'STATUS'})

    padding = 2
    column_widths = dict(screen_utils.calc_column_widths(data), **{'padding': ' ' * padding})
    for row in data:
        log.screen('{service_name: <{service_name_width}}{padding}'
                   '{bundle_id: <{bundle_id_width}}{padding}'
                   '{bundle_name: <{bundle_name_width}}{padding}'
                   '{status: <{status_width}}'.format(**dict(row, **column_widths)).rstrip())

    if len(duplicate_endpoints) > 0:
        log.screen('')
        log.warning('Multiple endpoints found for the following services: {}'.format(', '.join(duplicate_endpoints)))
        log.warning('Service resolution for these services is undefined.')

    return True

def run(_args=[], configure_logging=True):
    # If we're being invoked via DC/OS then route our http
    # calls via its extension to the requests library. In
    # addition remove the 'conduct-dcos' and 'conduct' arg so that the conduct
    # sub-commands are positioned correctly, along with their
    # arguments.
    if sys.argv and Path(sys.argv[0]).name == constants.DCOS_COMMAND_PREFIX + 'conduct':
        dcos_mode = True
        _args = sys.argv[2:]
    else:
        dcos_mode = False
        if not _args:
            # Remove the 'conduct' arg so that we start with the sub command directly
            _args = sys.argv[1:]

    # Parse arguments
    parser = build_parser(dcos_mode)
    argcomplete.autocomplete(parser)
    args = parser.parse_args(_args)
    args.dcos_mode = dcos_mode

    if not vars(args).get('func'):
        if vars(args).get('dcos_info'):
            print('Lightbend ConductR sub commands. Type \'dcos conduct\' to see more.')
            exit(0)
        else:
            parser.print_help()
    else:
        # Offline functions are the functions which do not require network to run, e.g. `conduct version` or
        # `conduct setup-dcos`.
        offline_functions = ['version', 'setup']
        # Only set up network related args (i.e. host, bundle resolvers, basic auth, etc) for functions which require
        # connectivity to ConductR.
        current_function = vars(args).get('func').__name__
        if current_function not in offline_functions:
            # Add custom plugin dir to import path
            custom_plugins_dir = vars(args).get('custom_plugins_dir')
            if custom_plugins_dir:
                sys.path.append(custom_plugins_dir)

            # DC/OS provides the location of ConductR...
            if dcos_mode:
                args.command = 'dcos conduct'
                dcos_url = urlparse(config.get_config_val('core.dcos_url'))
                args.scheme = dcos_url.scheme
                args.ip = dcos_url.hostname
                default_http_port = 80 if dcos_url.scheme == 'http' else 443
                args.port = dcos_url.port if dcos_url.port else default_http_port
                dcos_url_path = dcos_url.path if dcos_url.path else '/'
                args.base_path = dcos_url_path + 'service/{}/'.format(DEFAULT_DCOS_SERVICE)
            else:
                args.command = 'conduct'
                # Set the ConductR host if the --host or --ip argument is not set.
                # Also set the local_connection argument accordingly.
                host_from_args = conduct_url.conductr_host(args)
                if not host_from_args:
                    host_from_env = host.resolve_host_from_env()
                    if host_from_env:
                        args.host = host_from_env
                        args.local_connection = False
                    else:
                        args.host = host.resolve_default_host()
                else:
                    args.local_connection = False

            args.cli_parameters = get_cli_parameters(args)
            args.custom_settings = custom_settings.load_from_file(args)
            args.conductr_auth = custom_settings.load_conductr_credentials(args)
            # Ensure HTTPS is used if authentication is configured
            if args.conductr_auth and args.scheme != 'https':
                args.scheme = 'https'
            args.server_verification_file = custom_settings.load_server_ssl_verification_file(args)
            # Ensure verification file exists if specified
            if args.server_verification_file \
                    and not os.path.exists(args.server_verification_file):
                # Configure logging so error message can be logged properly before exiting with failure
                logging_setup.configure_logging(args)
                log = logging.getLogger(__name__)
                log.error('Ensure server SSL verification file exists: {}'.format(args.server_verification_file))
                exit(1)

            if not args.dcos_mode and args.scheme == 'https':
                disable_urllib3_warnings()

        if configure_logging:
            logging_setup.configure_logging(args)
        is_completed_without_error = args.func(args)
        if not is_completed_without_error:
            exit(1)

def members(args):
    """`conduct members` command"""

    log = logging.getLogger(__name__)
    request_url = conduct_url.url('members', args)
    response = conduct_request.get(args.dcos_mode, conductr_host(args), request_url,
                                   auth=args.conductr_auth, verify=args.server_verification_file,
                                   timeout=DEFAULT_HTTP_TIMEOUT)
    validation.raise_for_status_inc_3xx(response)
    if log.is_verbose_enabled():
        log.verbose(validation.pretty_json(response.text))
    raw_data = json.loads(response.text)
    data = [{
        'address': 'ADDRESS',
        'uid': 'UID',
        'roles': 'ROLES',
        'status': 'STATUS',
        'reachable': 'REACHABLE'
    }]
    unreachable_nodes = []
    for entry in raw_data['unreachable']:
        unreachable_nodes.append(entry['node'])
    for entry in raw_data['members']:
        if args.role is None or args.role in entry['roles']:
            data.append({
                'address': entry['node']['address'],
                'uid': entry['node']['uid'],
                'roles': ','.join(entry['roles']),
                'status': entry['status'],
                'reachable': 'Yes' if entry['node'] not in unreachable_nodes else 'No'
            })
    padding = 2
    column_widths = dict(screen_utils.calc_column_widths(data), **{'padding': ' ' * padding})
    for row in data:
        log.screen('''\
{uid: <{uid_width}}{padding}\
{address: <{address_width}}{padding}\
{roles: <{roles_width}}{padding}\
{status: <{status_width}}{padding}\
{reachable: >{reachable_width}}'''.format(**dict(row, **column_widths)).rstrip())
    return True

def wait_for_deployment_complete(deployment_id, resolved_version, args):
    log = logging.getLogger(__name__)
    start_time = datetime.now()

    def display_bundle_id(bundle_id):
        return bundle_id if args.long_ids else bundle_utils.short_id(bundle_id)

    def is_completed_with_success(deployment_state):
        return deployment_state['eventType'] == 'deploymentSuccess'

    def is_completed_with_failure(deployment_state):
        return deployment_state['eventType'] == 'deploymentFailure'

    def log_message(deployment_state):
        event_type = deployment_state['eventType']
        if event_type == 'deploymentStarted':
            return 'Deployment started'
        elif event_type == 'bundleDownload':
            return 'Downloading bundle'
        elif event_type == 'configDownload':
            compatible_bundle_id = display_bundle_id(deployment_state['compatibleBundleId'])
            return 'Downloading config from bundle {}'.format(compatible_bundle_id)
        elif event_type == 'load':
            return 'Loading bundle with config' if 'configFileName' in deployment_state else 'Loading bundle'
        elif event_type == 'deploy':
            bundle_old = deployment_state['bundleOld']
            bundle_new = deployment_state['bundleNew']
            deploy_progress = 'Deploying - {} old instance vs {} new instance'.format(bundle_old['scale'],
                                                                                      bundle_new['scale'])
            return deploy_progress
        elif event_type == 'deploymentSuccess':
            return 'Success'
        elif event_type == 'deploymentFailure':
            return 'Failure: {}'.format(deployment_state['failure'])
        else:
            return 'Unknown deployment state {}'.format(deployment_state)

    package_name = resolved_version['package_name']
    compatibility_version = resolved_version['compatibility_version']
    bundle_id = display_bundle_id(resolved_version['digest'])
    bundle_shorthand = '{}:{}-{}'.format(package_name, compatibility_version, bundle_id)
    log.info('Deploying {}'.format(bundle_shorthand))

    deployment_state = get_deployment_state(deployment_id, args)
    if deployment_state and is_completed_with_success(deployment_state):
        log.info(log_message(deployment_state))
        return
    elif deployment_state and is_completed_with_failure(deployment_state):
        raise ContinuousDeliveryError('Unable to deploy {} - {}'.format(bundle_shorthand,
                                                                        log_message(deployment_state)))
    else:
        sse_heartbeat_count_after_event = 0
        deployment_events_url = conduct_url.url('deployments/events', args)
        sse_events = sse_client.get_events(args.dcos_mode, conduct_url.conductr_host(args), deployment_events_url,
                                           auth=args.conductr_auth, verify=args.server_verification_file)
        last_deployment_state = None
        last_log_message = None
        for event in sse_events:
            sse_heartbeat_count_after_event += 1

            elapsed = (datetime.now() - start_time).total_seconds()
            if elapsed > args.wait_timeout:
                raise WaitTimeoutError('Deployment is still waiting to be completed')

            # Check for deployment state every 3 heartbeats from the last received event.
            if event.event or (sse_heartbeat_count_after_event % 3 == 0):
                if event.event:
                    sse_heartbeat_count_after_event = 0

                deployment_state = get_deployment_state(deployment_id, args)
                if is_completed_with_success(deployment_state):
                    # Reprint previous message with flush to go to next line
                    if last_log_message:
                        log.progress(last_log_message, flush=True)
                    log.info(log_message(deployment_state))
                    return
                elif is_completed_with_failure(deployment_state):
                    # Reprint previous message with flush to go to next line
                    if last_log_message:
                        log.progress(last_log_message, flush=True)
                    raise ContinuousDeliveryError('Unable to deploy {} - {}'.format(bundle_shorthand,
                                                                                    log_message(deployment_state)))
                else:
                    if deployment_state != last_deployment_state:
                        last_deployment_state = deployment_state
                        # Reprint previous message with flush to go to next line
                        if last_log_message:
                            log.progress(last_log_message, flush=True)
                        last_log_message = log_message(deployment_state)
                        log.progress(last_log_message, flush=False)
                    else:
                        last_log_message = '{}.'.format(last_log_message)
                        log.progress(last_log_message, flush=False)

        raise WaitTimeoutError('Deployment for {} is still waiting to be completed'.format(bundle_shorthand))

def wait_for_scale(bundle_id, expected_scale, wait_for_is_active, args):
    log = logging.getLogger(__name__)
    start_time = datetime.now()

    bundle_scale = get_scale(bundle_id, wait_for_is_active, args)
    if bundle_scale == expected_scale:
        log.info('Bundle {} expected scale {} is met'.format(bundle_id, expected_scale))
        return
    else:
        sse_heartbeat_count_after_event = 0
        log.info('Bundle {} waiting to reach expected scale {}'.format(bundle_id, expected_scale))
        bundle_events_url = conduct_url.url('bundles/events', args)
        sse_events = sse_client.get_events(args.dcos_mode, conduct_url.conductr_host(args), bundle_events_url,
                                           auth=args.conductr_auth, verify=args.server_verification_file)
        last_scale = -1
        last_log_message = None
        for event in sse_events:
            sse_heartbeat_count_after_event += 1

            elapsed = (datetime.now() - start_time).total_seconds()
            if elapsed > args.wait_timeout:
                raise WaitTimeoutError('Bundle {} waiting to reach expected scale {}'.format(bundle_id, expected_scale))

            # Check for bundle scale every 3 heartbeats from the last received event.
            if event.event or (sse_heartbeat_count_after_event % 3 == 0):
                if event.event:
                    sse_heartbeat_count_after_event = 0

                bundle_scale = get_scale(bundle_id, wait_for_is_active, args)
                if bundle_scale == expected_scale:
                    # Reprint previous message with flush to go to next line
                    if last_log_message:
                        log.progress(last_log_message, flush=True)
                    log.info('Bundle {} expected scale {} is met'.format(bundle_id, expected_scale))
                    return
                else:
                    if bundle_scale > last_scale:
                        last_scale = bundle_scale
                        # Reprint previous message with flush to go to next line
                        if last_log_message:
                            log.progress(last_log_message, flush=True)
                        last_log_message = 'Bundle {} has scale {}, expected {}'.format(bundle_id, bundle_scale,
                                                                                        expected_scale)
                        log.progress(last_log_message, flush=False)
                    else:
                        last_log_message = '{}.'.format(last_log_message)
                        log.progress(last_log_message, flush=False)

        raise WaitTimeoutError('Bundle {} waiting to reach expected scale {}'.format(bundle_id, expected_scale))

def run(_args=[], configure_logging=True):
    # If we're being invoked via DC/OS then route our http
    # calls via its extension to the requests library. In
    # addition remove the 'conduct-dcos' and 'conduct' arg so that the conduct
    # sub-commands are positioned correctly, along with their
    # arguments.
    if sys.argv and Path(sys.argv[0]).name == constants.DCOS_COMMAND_PREFIX + 'conduct':
        dcos_mode = True
        _args = sys.argv[2:]
    else:
        dcos_mode = False
        if not _args:
            # Remove the 'conduct' arg so that we start with the sub command directly
            _args = sys.argv[1:]

    # Parse arguments
    parser = build_parser(dcos_mode)
    argcomplete.autocomplete(parser)
    args = parser.parse_args(_args)
    args.dcos_mode = dcos_mode

    if not vars(args).get('func'):
        if vars(args).get('dcos_info'):
            print('Lightbend ConductR sub commands. Type \'dcos conduct\' to see more.')
            exit(0)
        else:
            parser.print_help()
    else:
        # Offline functions are the functions which do not require network to run, e.g. `conduct version` or
        # `conduct setup-dcos`.
        offline_functions = ['version', 'setup']
        # Only set up network related args (i.e. host, bundle resolvers, basic auth, etc) for functions which require
        # connectivity to ConductR.
        current_function = vars(args).get('func').__name__
        if current_function not in offline_functions:
            # Add custom plugin dir to import path
            custom_plugins_dir = vars(args).get('custom_plugins_dir')
            if custom_plugins_dir:
                sys.path.append(custom_plugins_dir)

            # DC/OS provides the location of ConductR...
            if dcos_mode:
                args.command = 'dcos conduct'
                dcos_url = urlparse(config.get_config_val('core.dcos_url'))
                args.scheme = dcos_url.scheme
                args.ip = dcos_url.hostname
                default_http_port = 80 if dcos_url.scheme == 'http' else 443
                args.port = dcos_url.port if dcos_url.port else default_http_port
                dcos_url_path = dcos_url.path if dcos_url.path else '/'
                args.base_path = dcos_url_path + 'service/{}/'.format(DEFAULT_DCOS_SERVICE)
            else:
                args.command = 'conduct'
                # Ensure ConductR host is not empty
                host_from_args = conduct_url.conductr_host(args)
                if not host_from_args:
                    host_from_env = host.resolve_default_host()
                    if host_from_env:
                        args.host = host_from_env
                    else:
                        # Configure logging so error message can be logged properly before exiting with failure
                        logging_setup.configure_logging(args)
                        log = logging.getLogger(__name__)
                        log.error('ConductR host address is not specified')
                        log.error('Please ensure either `{}` environment is specified,'
                                  ' or specify the ConductR host using `--host` argument'.format(CONDUCTR_HOST))
                        exit(1)
                else:
                    args.local_connection = False

            args.cli_parameters = get_cli_parameters(args)
            args.custom_settings = custom_settings.load_from_file(args)
            args.conductr_auth = custom_settings.load_conductr_credentials(args)
            # Ensure HTTPS is used if authentication is configured
            if args.conductr_auth and not args.scheme == 'https':
                # Configure logging so error message can be logged properly before exiting with failure
                logging_setup.configure_logging(args)
                log = logging.getLogger(__name__)
                log.error('Unable to use Basic Auth over {}'.format(args.scheme))
                log.error('Please ensure either `{}` environment is set to `https`,'
                          ' or specify https using `--scheme https` argument'.format(CONDUCTR_SCHEME))
                exit(1)
            args.server_verification_file = custom_settings.load_server_ssl_verification_file(args)
            # Ensure verification file exists if specified
            if args.server_verification_file \
                    and not os.path.exists(args.server_verification_file):
                # Configure logging so error message can be logged properly before exiting with failure
                logging_setup.configure_logging(args)
                log = logging.getLogger(__name__)
                log.error('Ensure server SSL verification file exists: {}'.format(args.server_verification_file))
                exit(1)

            if not args.dcos_mode and args.scheme == 'https':
                disable_urllib3_warnings()

        if configure_logging:
            logging_setup.configure_logging(args)
        is_completed_without_error = args.func(args)
        if not is_completed_without_error:
            exit(1)

def test_conductr_host(self):
    args = MagicMock()
    args.host = '127.0.0.1'
    result = conduct_url.conductr_host(args)
    self.assertEqual('127.0.0.1', result)

def load_v1(args):
    log = logging.getLogger(__name__)

    log.info('Retrieving bundle..')
    custom_settings = args.custom_settings
    bundle_resolve_cache_dir = args.bundle_resolve_cache_dir
    configuration_cache_dir = args.configuration_resolve_cache_dir

    validate_cache_dir_permissions(bundle_resolve_cache_dir, configuration_cache_dir, log)

    initial_bundle_file_name, bundle_file = resolver.resolve_bundle(custom_settings, bundle_resolve_cache_dir,
                                                                    args.bundle, args.offline_mode)

    configuration_file_name, configuration_file = (None, None)
    if args.configuration is not None:
        log.info('Retrieving configuration..')
        configuration_file_name, configuration_file = \
            resolver.resolve_bundle_configuration(custom_settings, configuration_cache_dir,
                                                  args.configuration, args.offline_mode)

    bundle_conf_text = bundle_utils.conf(bundle_file)
    bundle_conf = ConfigFactory.parse_string(bundle_conf_text)
    bundle_file_name, bundle_open_file = open_bundle(initial_bundle_file_name, bundle_file, bundle_conf_text)

    overlay_bundle_conf = None if configuration_file is None else \
        ConfigFactory.parse_string(bundle_utils.conf(configuration_file))

    with_bundle_configurations = partial(apply_to_configurations, bundle_conf, overlay_bundle_conf)

    url = conduct_url.url('bundles', args)
    files = get_payload(bundle_file_name, bundle_open_file, with_bundle_configurations)
    if configuration_file is not None:
        open_configuration_file, config_digest = bundle_utils.digest_extract_and_open(configuration_file)
        files.append(('configuration', (configuration_file_name, open_configuration_file)))

    # TODO: Delete the bundle configuration file.
    # Currently, this results into a permission error on Windows.
    # Therefore, the deletion is disabled for now.
    # Issue: https://github.com/typesafehub/conductr-cli/issues/175
    # if configuration_file and os.path.exists(configuration_file):
    #     os.remove(configuration_file)

    log.info('Loading bundle to ConductR..')
    multipart = create_multipart(log, files)
    response = conduct_request.post(args.dcos_mode, conductr_host(args), url,
                                    data=multipart,
                                    auth=args.conductr_auth,
                                    verify=args.server_verification_file,
                                    headers={'Content-Type': multipart.content_type})
    validation.raise_for_status_inc_3xx(response)
    if log.is_verbose_enabled():
        log.verbose(validation.pretty_json(response.text))
    response_json = json.loads(response.text)
    bundle_id = response_json['bundleId'] if args.long_ids else bundle_utils.short_id(response_json['bundleId'])

    if not args.no_wait:
        bundle_installation.wait_for_installation(response_json['bundleId'], args)

    cleanup_old_bundles(bundle_resolve_cache_dir, bundle_file_name, excluded=bundle_file)

    log.info('Bundle loaded.')
    if not args.disable_instructions:
        log.info('Start bundle with: {} run{} {}'.format(args.command, args.cli_parameters, bundle_id))
        log.info('Unload bundle with: {} unload{} {}'.format(args.command, args.cli_parameters, bundle_id))
        log.info('Print ConductR info with: {} info{}'.format(args.command, args.cli_parameters))
        log.info('Print bundle info with: {} info{} {}'.format(args.command, args.cli_parameters, bundle_id))

    if not log.is_info_enabled() and log.is_quiet_enabled():
        log.quiet(response_json['bundleId'])

    return True