def _delete_nodes_async(self, *args, cluster_name, cluster_vapp_href, node_names_list):
    """Async worker: delete the given nodes and track progress on the task.

    Updates the CSE task to RUNNING, delegates the actual removal to
    self._delete_nodes, and marks the task SUCCESS or ERROR. The sys admin
    client is always logged out, even on failure.

    :param str cluster_name: name of the cluster the nodes belong to.
    :param str cluster_vapp_href: href of the cluster's vApp.
    :param list node_names_list: names of the node VMs to delete.
    """
    try:
        self._update_task(
            TaskStatus.RUNNING,
            message=f"Deleting {len(node_names_list)} node(s) "
                    f"from cluster {cluster_name}")
        self._delete_nodes(cluster_name=cluster_name,
                           cluster_vapp_href=cluster_vapp_href,
                           node_names_list=node_names_list)
        # Fixed message: nodes are deleted *from* the cluster, not added
        # "to" it.
        self._update_task(
            TaskStatus.SUCCESS,
            message=f"Deleted {len(node_names_list)} node(s)"
                    f" from cluster {cluster_name}")
    except Exception as e:
        LOGGER.error(f"Unexpected error while deleting nodes "
                     f"{node_names_list}: {e}", exc_info=True)
        error_obj = error_to_json(e)
        stack_trace = ''.join(error_obj[ERROR_MESSAGE_KEY][ERROR_STACKTRACE_KEY])  # noqa: E501
        self._update_task(
            TaskStatus.ERROR,
            error_message=error_obj[ERROR_MESSAGE_KEY][ERROR_DESCRIPTION_KEY],  # noqa: E501
            stack_trace=stack_trace)
    finally:
        self.logout_sys_admin_client()
def get_response_fields(request_msg, fsencoding, is_mqtt):
    """Get the msg json and response fields request message.

    Parses an incoming MQTT or AMQP request, processes it, and converts any
    failure (parse or processing) into an error reply body.

    :param request_msg: raw incoming message; an object with a ``.payload``
        when is_mqtt is True, otherwise bytes to be decoded directly.
    :param str fsencoding: encoding used to decode the raw payload bytes.
    :param bool is_mqtt: selects the MQTT vs AMQP parsing path.

    :return: tuple (msg_json, reply_body, status_code, request_id); msg_json
        and request_id remain None if an exception was raised before they
        were extracted.
    """
    msg_json, request_id = None, None
    try:
        # Parse the message
        if is_mqtt:
            payload_json = json.loads(request_msg.payload.decode(fsencoding))
            # The original HTTP request is base64-encoded inside the
            # MQTT payload.
            http_req_json = json.loads(
                base64.b64decode(payload_json['httpRequest']))
            request_id = payload_json["headers"]["requestId"]
            msg_json = http_req_json['message']
            # Use api access token as authorization token -- this may involve
            # overwriting the current authorization token
            msg_json['headers']['Authorization'] = \
                'Bearer ' + http_req_json['securityContext']['apiAccessToken']
        else:
            # AMQP bodies arrive as a one-element JSON array.
            msg_json = json.loads(request_msg.decode(fsencoding))[0]
            request_id = msg_json['id']
        set_thread_request_id(request_id)
        result = request_processor.process_request(msg_json)
        status_code = result['status_code']
        reply_body = result['body']
    except Exception as e:
        # CseRequestError carries its own HTTP status; anything else is a 500.
        if isinstance(e, CseRequestError):
            status_code = e.status_code
        else:
            status_code = requests.codes.internal_server_error
        reply_body = {RESPONSE_MESSAGE_KEY: str(e)}
        tb = traceback.format_exc()
        LOGGER.error(tb)
    return msg_json, reply_body, status_code, request_id
def delete_nodes_thread(self):
    """Worker: delete the requested nodes from this broker's cluster.

    Drains/removes the nodes from the kubernetes cluster (best effort),
    undeploys each node VM (best effort), deletes the VMs from the vApp,
    and records progress/outcome on the broker task. The sys admin client
    is always disconnected.
    """
    LOGGER.debug(f"About to delete nodes from cluster with name: "
                 f"{self.cluster_name}")
    try:
        vapp = VApp(self.tenant_client, href=self.cluster['vapp_href'])
        template = self._get_template()
        self._update_task(
            TaskStatus.RUNNING,
            message=f"Deleting "
                    f"{len(self.req_spec.get(RequestKey.NODE_NAMES_LIST))}"
                    f" node(s) from "
                    f"{self.cluster_name}({self.cluster_id})")
        try:
            server_config = get_server_runtime_config()
            delete_nodes_from_cluster(
                server_config, vapp, template,
                self.req_spec.get(RequestKey.NODE_NAMES_LIST),
                self.req_spec.get(RequestKey.FORCE_DELETE))
        except Exception:
            # Best effort: even if the removal script fails, continue with
            # undeploying and deleting the VMs below.
            LOGGER.error(f"Couldn't delete node "
                         f"{self.req_spec.get(RequestKey.NODE_NAMES_LIST)}"
                         f" from cluster:{self.cluster_name}")
        self._update_task(
            TaskStatus.RUNNING,
            message=f"Undeploying "
                    f"{len(self.req_spec.get(RequestKey.NODE_NAMES_LIST))}"
                    f" node(s) for {self.cluster_name}({self.cluster_id})")
        for vm_name in self.req_spec.get(RequestKey.NODE_NAMES_LIST):
            vm = VM(self.tenant_client, resource=vapp.get_vm(vm_name))
            try:
                task = vm.undeploy()
                self.tenant_client.get_task_monitor().wait_for_status(task)
            except Exception:
                # Undeploy failure is non-fatal; VM is still deleted below.
                LOGGER.warning(f"Couldn't undeploy VM {vm_name}")
        self._update_task(
            TaskStatus.RUNNING,
            message=f"Deleting "
                    f"{len(self.req_spec.get(RequestKey.NODE_NAMES_LIST))}"
                    f" VM(s) for {self.cluster_name}({self.cluster_id})")
        task = vapp.delete_vms(self.req_spec.get(RequestKey.NODE_NAMES_LIST))  # noqa: E501
        self.tenant_client.get_task_monitor().wait_for_status(task)
        # Fixed message: nodes are deleted *from* the cluster, not added
        # "to" it.
        self._update_task(
            TaskStatus.SUCCESS,
            message=f"Deleted "
                    f"{len(self.req_spec.get(RequestKey.NODE_NAMES_LIST))}"
                    f" node(s) from cluster "
                    f"{self.cluster_name}({self.cluster_id})")
    except Exception as e:
        LOGGER.error(traceback.format_exc())
        error_obj = error_to_json(e)
        stack_trace = \
            ''.join(error_obj[ERROR_MESSAGE_KEY][ERROR_STACKTRACE_KEY])
        self._update_task(
            TaskStatus.ERROR,
            error_message=error_obj[ERROR_MESSAGE_KEY][ERROR_DESCRIPTION_KEY],  # noqa: E501
            stack_trace=stack_trace)
    finally:
        self._disconnect_sys_admin()
def consumer_thread(c):
    """Run consumer *c* on this thread; stop it if run() raises.

    :param c: consumer object exposing run() and stop().
    """
    try:
        LOGGER.info('about to start consumer_thread %s', c)
        c.run()
    except Exception:
        # run() exited abnormally: echo to console, log traceback, and
        # shut the consumer down cleanly.
        click.echo('about to stop consumer_thread')
        LOGGER.error(traceback.format_exc())
        c.stop()
def consumer_thread(c):
    """Run consumer *c* on this thread; stop it if run() raises.

    :param c: consumer object exposing run() and stop().
    """
    try:
        LOGGER.info(f"About to start consumer_thread {c}.")
        c.run()
    except Exception:
        # run() exited abnormally: echo to console, log traceback, and
        # shut the consumer down cleanly.
        click.echo("About to stop consumer_thread.")
        LOGGER.error(traceback.format_exc())
        c.stop()
def exception_handler_wrapper(*args, **kwargs):
    """Call the wrapped func, converting any exception into an error payload.

    On failure, logs the traceback and returns a dict with 'status_code'
    set to INTERNAL_SERVER_ERROR and 'body' holding the JSON-ified error.
    """
    try:
        return func(*args, **kwargs)
    except Exception as exc:
        LOGGER.error(traceback.format_exc())
        return {
            'status_code': INTERNAL_SERVER_ERROR,
            'body': error_to_json(exc),
        }
def create_nodes_thread(self):
    """Worker: create the requested node VM(s) and add them to the cluster.

    Creates req_spec['node_count'] VMs from the cluster template. NFS nodes
    are only created; regular (TYPE_NODE) nodes are additionally joined to
    the kubernetes cluster. Progress and outcome are recorded on the broker
    task; the sys admin client is always disconnected.

    :raises NodeCreationError: re-raised after recording the task error so
        callers (e.g. rollback decorators) can react; any other exception is
        recorded on the task but swallowed.
    """
    LOGGER.debug(f"About to add nodes to cluster with name: "
                 f"{self.cluster_name}")
    try:
        server_config = get_server_runtime_config()
        org_resource = self.tenant_client.get_org()
        org = Org(self.tenant_client, resource=org_resource)
        vdc = VDC(self.tenant_client, href=self.cluster['vdc_href'])
        vapp = VApp(self.tenant_client, href=self.cluster['vapp_href'])
        template = self.get_template()
        self.update_task(
            TaskStatus.RUNNING,
            message=f"Creating {self.req_spec['node_count']} node(s) for "
                    f"{self.cluster_name}({self.cluster_id})")
        new_nodes = add_nodes(self.req_spec['node_count'],
                              template,
                              self.req_spec['node_type'],
                              server_config,
                              self.tenant_client,
                              org,
                              vdc,
                              vapp,
                              self.req_spec)
        if self.req_spec['node_type'] == TYPE_NFS:
            # NFS nodes are not joined to the kubernetes cluster.
            self.update_task(
                TaskStatus.SUCCESS,
                message=f"Created {self.req_spec['node_count']} node(s) "
                        f"for {self.cluster_name}({self.cluster_id})")
        elif self.req_spec['node_type'] == TYPE_NODE:
            self.update_task(
                TaskStatus.RUNNING,
                message=f"Adding {self.req_spec['node_count']} node(s) to "
                        f"cluster {self.cluster_name}({self.cluster_id})")
            target_nodes = []
            for spec in new_nodes['specs']:
                target_nodes.append(spec['target_vm_name'])
            # Reload the vApp so the newly added VMs are visible before join.
            vapp.reload()
            join_cluster(server_config, vapp, template, target_nodes)
            self.update_task(
                TaskStatus.SUCCESS,
                message=f"Added {self.req_spec['node_count']} node(s) to "
                        f"cluster {self.cluster_name}({self.cluster_id})")
    except NodeCreationError as e:
        error_obj = error_to_json(e)
        LOGGER.error(traceback.format_exc())
        stack_trace = ''.join(error_obj[ERROR_MESSAGE][ERROR_STACKTRACE])
        self.update_task(
            TaskStatus.ERROR,
            error_message=error_obj[ERROR_MESSAGE][ERROR_DESCRIPTION],
            stack_trace=stack_trace)
        # Re-raise so node rollback logic (if any) can run.
        raise
    except Exception as e:
        error_obj = error_to_json(e)
        LOGGER.error(traceback.format_exc())
        stack_trace = ''.join(error_obj[ERROR_MESSAGE][ERROR_STACKTRACE])
        self.update_task(
            TaskStatus.ERROR,
            error_message=error_obj[ERROR_MESSAGE][ERROR_DESCRIPTION],
            stack_trace=stack_trace)
    finally:
        self._disconnect_sys_admin()
def exception_handler_wrapper(*args, **kwargs):
    """Call the wrapped func, mapping input errors to BadRequestError.

    KeyError/TypeError/ValueError from the wrapped function are logged and
    re-raised as cse_exception.BadRequestError with the original exception
    chained as the cause; any other exception is logged and re-raised
    unchanged.
    """
    try:
        result = func(*args, **kwargs)
    except (KeyError, TypeError, ValueError) as error:
        LOGGER.error(error)
        # Chain the original exception so the root cause stays visible.
        raise cse_exception.BadRequestError(error_message=str(error)) \
            from error
    except Exception as error:
        LOGGER.error(error)
        # Bare raise preserves the original traceback without adding a
        # re-raise frame (was: `raise error`).
        raise
    return result
def exception_handler_wrapper(*args, **kwargs):
    """Call the wrapped func, mapping HTTPError to DefEntityServiceError.

    The HTTP error response body is expected to be JSON with a 'message'
    field; that message and the HTTP status code are propagated via
    cse_exception.DefEntityServiceError (original exception chained as the
    cause). Any other exception is logged and re-raised unchanged.
    """
    try:
        result = func(*args, **kwargs)
    except HTTPError as error:
        response_dict = json.loads(error.response.text)
        error_message = response_dict.get('message')
        LOGGER.error(error_message)
        raise cse_exception.DefEntityServiceError(
            error_message=error_message,
            minor_error_code=error.response.status_code) from error
    except Exception as error:
        LOGGER.error(error)
        # Bare raise preserves the original traceback without adding a
        # re-raise frame (was: `raise error`).
        raise
    return result
def exception_handler_wrapper(*args, **kwargs):
    """Call the wrapped func, converting exceptions into an error payload.

    CseRequestError keeps its own status code and message; any other
    exception becomes a 500 with a JSON-ified error body. The traceback is
    logged in both cases.
    """
    try:
        return func(*args, **kwargs)
    except CseRequestError as cse_err:
        LOGGER.error(traceback.format_exc())
        return {
            'status_code': cse_err.status_code,
            'body': {'message': str(cse_err)},
        }
    except Exception as exc:
        LOGGER.error(traceback.format_exc())
        return {
            'status_code': requests.codes.internal_server_error,
            'body': error_to_json(exc),
        }
def delete_cluster_thread(self):
    """Worker: force-delete this broker's cluster vApp and record outcome.

    Marks the task SUCCESS when the vApp deletion completes; on any
    failure, logs the traceback and marks the task ERROR.
    """
    LOGGER.debug('about to delete cluster with name: %s', self.cluster_name)
    try:
        cluster_vdc = VDC(self.client_tenant, href=self.cluster['vdc_href'])
        delete_task = cluster_vdc.delete_vapp(self.cluster['name'],
                                              force=True)
        self.client_tenant.get_task_monitor().wait_for_status(delete_task)
        success_msg = 'Deleted cluster %s(%s)' % (self.cluster_name,
                                                  self.cluster_id)
        self.update_task(TaskStatus.SUCCESS, message=success_msg)
    except Exception as err:
        LOGGER.error(traceback.format_exc())
        self.update_task(TaskStatus.ERROR, error_message=str(err))
def exception_handler_wrapper(*args, **kwargs):
    """Call the wrapped func, mapping HTTPError to DefSchemaServiceError.

    The HTTP error response body is expected to be JSON with a 'message'
    field; that message is propagated via cse_exceptions.DefSchemaServiceError
    with the default minor error code (original exception chained as the
    cause). Any other exception is logged and re-raised unchanged.
    """
    try:
        result = func(*args, **kwargs)
    except HTTPError as error:
        response_dict = json.loads(error.response.text)
        error_message = response_dict.get('message')
        LOGGER.error(error_message)
        raise cse_exceptions.DefSchemaServiceError(
            error_message=error_message,
            minor_error_code=MinorErrorCode.DEFAULT_ERROR_CODE) from error
    except Exception as error:
        LOGGER.error(error)
        # Bare raise preserves the original traceback without adding a
        # re-raise frame (was: `raise error`).
        raise
    return result
def delete_cluster_thread(self):
    """Worker: force-delete this broker's cluster vApp and record outcome.

    Marks the task SUCCESS when the vApp deletion completes; on failure,
    logs the traceback and marks the task ERROR. The sys admin client is
    always disconnected.
    """
    LOGGER.debug(f"About to delete cluster with name: {self.cluster_name}")
    try:
        cluster_vdc = VDC(self.tenant_client, href=self.cluster['vdc_href'])
        delete_task = cluster_vdc.delete_vapp(self.cluster['name'],
                                              force=True)
        self.tenant_client.get_task_monitor().wait_for_status(delete_task)
        self.update_task(TaskStatus.SUCCESS,
                         message=f"Deleted cluster {self.cluster_name}"
                                 f"({self.cluster_id})")
    except Exception as err:
        LOGGER.error(traceback.format_exc())
        self.update_task(TaskStatus.ERROR, error_message=str(err))
    finally:
        self._disconnect_sys_admin()
def _delete_cluster_async(self, *args, cluster_name, cluster_vdc_href):
    """Async worker: delete a cluster and track progress on the task.

    Marks the task RUNNING before deletion and SUCCESS after; any failure
    is logged and recorded as a task ERROR. The sys admin client is logged
    out in every case.

    :param str cluster_name: name of the cluster to delete.
    :param str cluster_vdc_href: href of the VDC containing the cluster.
    """
    try:
        self._update_task(TaskStatus.RUNNING,
                          message=f"Deleting cluster {cluster_name}")
        self._delete_cluster(cluster_name=cluster_name,
                             cluster_vdc_href=cluster_vdc_href)
        self._update_task(TaskStatus.SUCCESS,
                          message=f"Deleted cluster {cluster_name}")
    except Exception as err:
        LOGGER.error(f"Unexpected error while deleting cluster: {err}",
                     exc_info=True)
        self._update_task(TaskStatus.ERROR, error_message=str(err))
    finally:
        self.logout_sys_admin_client()
def create_nodes_thread(self):
    """Worker: create the requested node VM(s) and add them to the cluster.

    Creates body['node_count'] VMs from the cluster template. NFS nodes are
    only created; regular (TYPE_NODE) nodes are additionally joined to the
    kubernetes cluster. Progress and outcome are recorded on the broker task.

    :raises NodeCreationError: re-raised after recording the task error so
        callers (e.g. rollback decorators) can react; any other exception is
        recorded on the task but swallowed.
    """
    LOGGER.debug('about to add nodes to cluster with name: %s',
                 self.cluster_name)
    try:
        org_resource = self.client_tenant.get_org()
        org = Org(self.client_tenant, resource=org_resource)
        vdc = VDC(self.client_tenant, href=self.cluster['vdc_href'])
        vapp = VApp(self.client_tenant, href=self.cluster['vapp_href'])
        template = self.get_template()
        self.update_task(
            TaskStatus.RUNNING,
            message='Creating %s node(s) for %s(%s)' %
                    (self.body['node_count'], self.cluster_name,
                     self.cluster_id))
        new_nodes = add_nodes(self.body['node_count'],
                              template,
                              self.body['node_type'],
                              self.config,
                              self.client_tenant,
                              org,
                              vdc,
                              vapp,
                              self.body)
        if self.body['node_type'] == TYPE_NFS:
            # NFS nodes are not joined to the kubernetes cluster.
            self.update_task(
                TaskStatus.SUCCESS,
                message='Created %s node(s) for %s(%s)' %
                        (self.body['node_count'], self.cluster_name,
                         self.cluster_id))
        elif self.body['node_type'] == TYPE_NODE:
            self.update_task(
                TaskStatus.RUNNING,
                message='Adding %s node(s) to %s(%s)' %
                        (self.body['node_count'], self.cluster_name,
                         self.cluster_id))
            target_nodes = []
            for spec in new_nodes['specs']:
                target_nodes.append(spec['target_vm_name'])
            # Reload the vApp so the newly added VMs are visible before join.
            vapp.reload()
            join_cluster(self.config, vapp, template, target_nodes)
            self.update_task(
                TaskStatus.SUCCESS,
                message='Added %s node(s) to cluster %s(%s)' %
                        (self.body['node_count'], self.cluster_name,
                         self.cluster_id))
    except NodeCreationError as e:
        error_obj = error_to_json(e)
        LOGGER.error(traceback.format_exc())
        self.update_task(
            TaskStatus.ERROR,
            error_message=error_obj[ERROR_MESSAGE][ERROR_DESCRIPTION])
        # Re-raise so node rollback logic (if any) can run.
        raise
    except Exception as e:
        error_obj = error_to_json(e)
        LOGGER.error(traceback.format_exc())
        self.update_task(
            TaskStatus.ERROR,
            error_message=error_obj[ERROR_MESSAGE][ERROR_DESCRIPTION])
def delete_nodes_thread(self):
    """Worker: delete the requested nodes from this broker's cluster.

    Drains/removes req_spec['nodes'] from the kubernetes cluster (best
    effort), undeploys each node VM (best effort), deletes the VMs from
    the vApp, and records progress/outcome on the broker task. The sys
    admin client is always disconnected.
    """
    LOGGER.debug(f"About to delete nodes from cluster with name: "
                 f"{self.cluster_name}")
    try:
        vapp = VApp(self.tenant_client, href=self.cluster['vapp_href'])
        template = self.get_template()
        self.update_task(
            TaskStatus.RUNNING,
            message=f"Deleting {len(self.req_spec['nodes'])} node(s) from "
                    f"{self.cluster_name}({self.cluster_id})")
        try:
            server_config = get_server_runtime_config()
            delete_nodes_from_cluster(server_config,
                                      vapp,
                                      template,
                                      self.req_spec['nodes'],
                                      self.req_spec['force'])
        except Exception:
            # Best effort: even if the removal script fails, continue with
            # undeploying and deleting the VMs below.
            LOGGER.error(f"Couldn't delete node {self.req_spec['nodes']} "
                         f"from cluster:{self.cluster_name}")
        self.update_task(
            TaskStatus.RUNNING,
            message=f"Undeploying {len(self.req_spec['nodes'])} node(s) "
                    f"for {self.cluster_name}({self.cluster_id})")
        for vm_name in self.req_spec['nodes']:
            vm = VM(self.tenant_client, resource=vapp.get_vm(vm_name))
            try:
                task = vm.undeploy()
                self.tenant_client.get_task_monitor().wait_for_status(task)
            except Exception:
                # Undeploy failure is non-fatal; VM is still deleted below.
                LOGGER.warning(f"Couldn't undeploy VM {vm_name}")
        self.update_task(
            TaskStatus.RUNNING,
            message=f"Deleting {len(self.req_spec['nodes'])} VM(s) for "
                    f"{self.cluster_name}({self.cluster_id})")
        task = vapp.delete_vms(self.req_spec['nodes'])
        self.tenant_client.get_task_monitor().wait_for_status(task)
        # Fixed message: nodes are deleted *from* the cluster, not added
        # "to" it.
        self.update_task(
            TaskStatus.SUCCESS,
            message=f"Deleted {len(self.req_spec['nodes'])} node(s) from "
                    f"cluster {self.cluster_name}({self.cluster_id})")
    except Exception as e:
        LOGGER.error(traceback.format_exc())
        error_obj = error_to_json(e)
        stack_trace = ''.join(error_obj[ERROR_MESSAGE][ERROR_STACKTRACE])
        self.update_task(
            TaskStatus.ERROR,
            error_message=error_obj[ERROR_MESSAGE][ERROR_DESCRIPTION],
            stack_trace=stack_trace)
    finally:
        self._disconnect_sys_admin()
def get_spec(self, format):
    """Return the bundled swagger spec, either as raw YAML or as JSON.

    :param str format: 'swagger.yaml' returns the raw file bytes; any
        other value returns the spec converted to a JSON-compatible dict.

    :return: dict with 'body' and 'status_code'; on failure 'body' is an
        empty list, status is INTERNAL_SERVER_ERROR and a 'message' key
        explains the problem.
    """
    result = {}
    try:
        # `resource_string` returns the packaged file's contents (bytes),
        # not a path.
        spec_bytes = resource_string('container_service_extension',
                                     'swagger/swagger.yaml')
        if format == 'swagger.yaml':
            result['body'] = spec_bytes
        else:
            spec = yaml.safe_load(spec_bytes)
            # Round-trip through json to normalize to plain JSON types.
            result['body'] = json.loads(json.dumps(spec))
        result['status_code'] = OK
    except Exception:
        LOGGER.error(traceback.format_exc())
        result['body'] = []
        result['status_code'] = INTERNAL_SERVER_ERROR
        result['message'] = 'spec file not found: check installation.'
    return result
def on_message(self, unused_channel, basic_deliver, properties, body):
    """AMQP consumer callback: process one request and publish the reply.

    The message is acknowledged immediately, before processing, so a crash
    during processing will NOT requeue it. The reply (if the sender set
    reply_to) is published with the result body base64-encoded.

    NOTE(review): if json.loads/decode of `body` raises, `body_json` is
    never bound, and the reply block below would raise NameError — confirm
    whether malformed bodies can reach this handler.
    """
    self.acknowledge_message(basic_deliver.delivery_tag)
    try:
        # AMQP bodies arrive as a one-element JSON array.
        body_json = json.loads(body.decode(self.fsencoding))[0]
        LOGGER.debug('Received message # %s from %s (%s): %s, props: %s',
                     basic_deliver.delivery_tag, properties.app_id,
                     threading.currentThread().ident, json.dumps(body_json),
                     properties)
        result = self.service_processor.process_request(body_json)
        status_code = result['status_code']
        reply_body = json.dumps(result['body'])
        # A 500 with empty body but a 'message' field gets that message
        # surfaced to the caller instead of '[]'.
        if status_code == 500 and \
                reply_body == '[]' and \
                'message' in result:
            reply_body = '{"message": "%s"}' % result['message']
    except Exception as e:
        # Any processing failure becomes a generic 500 reply.
        reply_body = '{"message": "%s"}' % str(e)
        status_code = 500
        tb = traceback.format_exc()
        LOGGER.error(tb)
    if properties.reply_to is not None:
        reply_msg = {
            'id': body_json['id'],
            'headers': {
                'Content-Type': body_json['headers']['Accept'],
                'Content-Length': len(reply_body)
            },
            'statusCode': status_code,
            'body': base64.b64encode(reply_body.encode()).decode(self.fsencoding),
            'request': False
        }
        LOGGER.debug('reply: %s', json.dumps(reply_body))
        reply_properties = pika.BasicProperties(
            correlation_id=properties.correlation_id)
        result = self._channel.basic_publish(
            exchange=properties.headers['replyToExchange'],
            routing_key=properties.reply_to,
            body=json.dumps(reply_msg),
            properties=reply_properties)
def __init__(self, client):
    """Initialize ComputePolicyManager Object.

    :param pyvcloud.vcd.client client:

    :raises: OperationNotSupportedException: If cloudapi endpoint is not
        found in session.
    :raises: ValueError: If non sys admin client is passed during
        initialization.
    """
    if not client.is_sysadmin():
        raise ValueError("Only Sys admin clients should be used to "
                         "initialize ComputePolicyManager.")
    self._vcd_client = client
    # Prefer the JWT access token; fall back to the legacy
    # x-vcloud-authorization token if no access token is available.
    token = self._vcd_client.get_access_token()
    is_jwt_token = True
    if not token:
        token = self._vcd_client.get_xvcloud_authorization_token()
        is_jwt_token = False
    self._session = self._vcd_client.get_vcloud_session()
    try:
        self._cloudapi_client = CloudApiClient(
            base_url=self._vcd_client.get_cloudapi_uri(),
            token=token,
            is_jwt_token=is_jwt_token,
            api_version=self._vcd_client.get_api_version(),
            verify_ssl=self._vcd_client._verify_ssl_certs)
        # Since the /cloudapi endpoint was added before the compute policy
        # endpoint. Mere presence of the /cloudapi uri is not enough, we
        # need to make sure that this cloud api client will be of actual
        # use to us.
        self._cloudapi_client.do_request(
            method=RequestMethod.GET,
            cloudapi_version=CLOUDAPI_VERSION_1_0_0,
            resource_url_relative_path=f"{CloudApiResource.VDC_COMPUTE_POLICIES}")  # noqa: E501
    except requests.exceptions.HTTPError as err:
        # Probe request failed: compute policy endpoint isn't available
        # at this API version.
        LOGGER.error(err)
        raise OperationNotSupportedException(
            "Cloudapi endpoint unavailable at current api version.")
def _delete_nodes(self, *args, cluster_name, cluster_vapp_href, node_names_list):
    """Remove the named node VMs from the cluster's vApp.

    The removal from the kubernetes cluster and the per-VM undeploy are
    best-effort (failures are logged and skipped); the VMs are always
    deleted from the vApp at the end.

    :param str cluster_name: name of the cluster (used for logging only).
    :param str cluster_vapp_href: href of the cluster's vApp.
    :param list node_names_list: names of the node VMs to delete.
    """
    LOGGER.debug(f"About to delete nodes {node_names_list} "
                 f"from cluster {cluster_name}")
    cluster_vapp = VApp(self.tenant_client, href=cluster_vapp_href)
    try:
        delete_nodes_from_cluster(cluster_vapp, node_names_list)
    except Exception:
        LOGGER.error(f"Couldn't delete node {node_names_list} "
                     f"from cluster:{cluster_name}")
    for vm_name in node_names_list:
        node_vm = VM(self.tenant_client,
                     resource=cluster_vapp.get_vm(vm_name))
        try:
            undeploy_task = node_vm.undeploy()
            self.tenant_client.get_task_monitor().wait_for_status(
                undeploy_task)
        except Exception:
            LOGGER.warning(f"Couldn't undeploy VM {vm_name}")
    delete_task = cluster_vapp.delete_vms(node_names_list)
    self.tenant_client.get_task_monitor().wait_for_status(delete_task)
def connect(self):
    """Create the MQTT client, wire up callbacks, and start its loop.

    Connects over websockets with TLS, subscribes to self.listen_topic on
    connect, and dispatches each incoming message to the thread pool
    (rejecting with a 'too many requests' response when the pool is
    saturated). This call blocks in loop_forever().

    :raises Exception: re-raised if the initial broker connection fails.
    """
    def on_connect(mqtt_client, userdata, flags, rc):
        # Subscribe (or re-subscribe after reconnect) once connected.
        LOGGER.info(f'MQTT client connected with result code {rc} and '
                    f'flags {flags}')
        mqtt_client.subscribe(self.listen_topic, qos=constants.QOS_LEVEL)

    def on_message(mqtt_client, userdata, msg):
        # No longer processing messages if server is closing
        if self._is_closing:
            return

        if self._ctpe.max_threads_busy():
            self.send_too_many_requests_response(msg)
        else:
            self._ctpe.submit(lambda: self.process_mqtt_message(msg))

    def on_subscribe(mqtt_client, userdata, msg_id, given_qos):
        LOGGER.info(f'MQTT client subscribed with given_qos: {given_qos}')

    def on_disconnect(mqtt_client, userdata, rc):
        LOGGER.info(f'MQTT disconnect with reason: {rc}')

    self._mqtt_client = mqtt.Client(client_id=constants.MQTT_CLIENT_ID,
                                    transport=constants.TRANSPORT_WSS)
    self._mqtt_client.username_pw_set(username=self.client_username,
                                      password=self.token)
    # Only verify the broker certificate when verify_ssl is configured.
    cert_req = ssl.CERT_REQUIRED if self.verify_ssl else ssl.CERT_NONE
    self._mqtt_client.tls_set(cert_reqs=cert_req)
    self._mqtt_client.ws_set_options(path=constants.MQTT_BROKER_PATH)

    # Setup callbacks
    self._mqtt_client.on_connect = on_connect
    self._mqtt_client.on_message = on_message
    self._mqtt_client.on_disconnect = on_disconnect
    self._mqtt_client.on_subscribe = on_subscribe

    try:
        self._mqtt_client.connect(self.url,
                                  port=constants.MQTT_CONNECT_PORT)
    except Exception as e:
        LOGGER.error(f'MQTT client connection error: {e}')
        raise
    # Blocks for the lifetime of the client, handling reconnects.
    self._mqtt_client.loop_forever()
def delete_nodes_thread(self):
    """Worker: delete the requested nodes from this broker's cluster.

    Drains/removes body['nodes'] from the kubernetes cluster (best effort),
    undeploys each node VM (best effort), deletes the VMs from the vApp,
    and records progress/outcome on the broker task.
    """
    LOGGER.debug('about to delete nodes from cluster with name: %s',
                 self.cluster_name)
    try:
        vapp = VApp(self.client_tenant, href=self.cluster['vapp_href'])
        template = self.get_template()
        self.update_task(
            TaskStatus.RUNNING,
            message='Deleting %s node(s) from %s(%s)' %
                    (len(self.body['nodes']), self.cluster_name,
                     self.cluster_id))
        try:
            delete_nodes_from_cluster(self.config,
                                      vapp,
                                      template,
                                      self.body['nodes'],
                                      self.body['force'])
        except Exception:
            # Best effort: even if the removal script fails, continue with
            # undeploying and deleting the VMs below.
            LOGGER.error("Couldn't delete node %s from cluster:%s" %
                         (self.body['nodes'], self.cluster_name))
        self.update_task(
            TaskStatus.RUNNING,
            message='Undeploying %s node(s) for %s(%s)' %
                    (len(self.body['nodes']), self.cluster_name,
                     self.cluster_id))
        for vm_name in self.body['nodes']:
            vm = VM(self.client_tenant, resource=vapp.get_vm(vm_name))
            try:
                task = vm.undeploy()
                self.client_tenant.get_task_monitor().wait_for_status(task)
            except Exception:
                # Undeploy failure is non-fatal; VM is still deleted below.
                # (Removed unused `as e` binding.)
                LOGGER.warning('couldn\'t undeploy VM %s' % vm_name)
        self.update_task(
            TaskStatus.RUNNING,
            message='Deleting %s VM(s) for %s(%s)' %
                    (len(self.body['nodes']), self.cluster_name,
                     self.cluster_id))
        task = vapp.delete_vms(self.body['nodes'])
        self.client_tenant.get_task_monitor().wait_for_status(task)
        # Fixed message: nodes are deleted *from* the cluster, not added
        # "to" it.
        self.update_task(
            TaskStatus.SUCCESS,
            message='Deleted %s node(s) from cluster %s(%s)' %
                    (len(self.body['nodes']), self.cluster_name,
                     self.cluster_id))
    except Exception as e:
        LOGGER.error(traceback.format_exc())
        self.update_task(TaskStatus.ERROR, error_message=str(e))
def exception_handler_wrapper(*args, **kwargs):
    """Call the wrapped func, converting exceptions into an error payload.

    CseRequestError contributes its own status and minor error code; any
    other exception becomes a 500 with the default minor error code. The
    body carries the minor code and the error description; the traceback
    is logged.
    """
    try:
        return func(*args, **kwargs)
    except Exception as err:
        if isinstance(err, CseRequestError):
            status_code = err.status_code
            minor_error_code = err.minor_error_code
        else:
            status_code = requests.codes.internal_server_error
            minor_error_code = MinorErrorCode.DEFAULT_ERROR_CODE
        error_string = str(err if err else '')
        result = {
            'status_code': status_code,
            'body': {
                RESPONSE_MESSAGE_KEY: {
                    ERROR_MINOR_CODE_KEY: int(minor_error_code),
                    ERROR_DESCRIPTION_KEY: error_string
                }
            }
        }
        LOGGER.error(traceback.format_exc())
        return result
def wrapper(*args, **kwargs):
    """Decorator body: run the broker method, rolling back on creation errors.

    NOTE: the caught creation errors are NOT re-raised — after attempting
    rollback the wrapper returns None, so callers see a silent failure.
    Rollback itself is best-effort; its own failures are only logged.
    """
    try:
        return func(*args, **kwargs)
    except (MasterNodeCreationError, WorkerNodeCreationError,
            NFSNodeCreationError, ClusterJoiningError,
            ClusterInitializationError):
        # Cluster-level creation failure: roll the whole cluster back if
        # the request asked for rollback.
        try:
            # arg[0] refers to the current instance of the broker thread
            broker_instance = args[0]  # param self
            if broker_instance.req_spec.get(RequestKey.ROLLBACK):
                broker_instance.cluster_rollback()
        except Exception as err:
            LOGGER.error(f"Failed to rollback cluster creation:{str(err)}")
    except NodeCreationError as e:
        # Node-level failure: roll back only the nodes named in the error.
        try:
            broker_instance = args[0]
            node_list = e.node_names
            if broker_instance.req_spec.get(RequestKey.ROLLBACK):
                broker_instance.node_rollback(node_list)
        except Exception as err:
            LOGGER.error(f"Failed to rollback node creation:{str(err)}")
def _delete_nodes_async(self, *args, cluster_name, cluster_vapp_href, node_names_list):
    """Async worker: delete the given nodes and track progress on the task.

    Updates the CSE task to RUNNING, delegates the actual removal to
    self._delete_nodes, and marks the task SUCCESS or ERROR. The sys admin
    client is always logged out, even on failure.

    :param str cluster_name: name of the cluster the nodes belong to.
    :param str cluster_vapp_href: href of the cluster's vApp.
    :param list node_names_list: names of the node VMs to delete.
    """
    try:
        self._update_task(
            TaskStatus.RUNNING,
            message=f"Deleting {len(node_names_list)} node(s) "
                    f"from cluster {cluster_name}")
        self._delete_nodes(cluster_name=cluster_name,
                           cluster_vapp_href=cluster_vapp_href,
                           node_names_list=node_names_list)
        # Fixed message: nodes are deleted *from* the cluster, not added
        # "to" it.
        self._update_task(TaskStatus.SUCCESS,
                          message=f"Deleted {len(node_names_list)} node(s)"
                                  f" from cluster {cluster_name}")
    except Exception as e:
        LOGGER.error(
            f"Unexpected error while deleting nodes "
            f"{node_names_list}: {e}", exc_info=True)
        self._update_task(TaskStatus.ERROR, error_message=str(e))
    finally:
        self.logout_sys_admin_client()
def on_message(self, unused_channel, basic_deliver, properties, body):
    """AMQP consumer callback: process one request and publish the reply.

    The message is acknowledged immediately, before processing, so a crash
    during processing will NOT requeue it. The reply (if the sender set
    reply_to) is published with the result body base64-encoded.

    NOTE(review): if json.loads/decode of `body` raises, `body_json` is
    never bound, and the reply block below would raise NameError — confirm
    whether malformed bodies can reach this handler.
    """
    self.acknowledge_message(basic_deliver.delivery_tag)
    try:
        # AMQP bodies arrive as a one-element JSON array.
        body_json = json.loads(body.decode(self.fsencoding))[0]
        LOGGER.debug(f"Received message # {basic_deliver.delivery_tag} "
                     f"from {properties.app_id} "
                     f"({threading.currentThread().ident}): "
                     f"{json.dumps(body_json)}, props: {properties}")
        result = request_processor.process_request(body_json)
        status_code = result['status_code']
        reply_body = json.dumps(result['body'])
    except Exception as e:
        # Any processing failure becomes a generic 500 reply.
        reply_body = '{"message": "%s"}' % str(e)
        status_code = requests.codes.internal_server_error
        tb = traceback.format_exc()
        LOGGER.error(tb)
    if properties.reply_to is not None:
        reply_msg = {
            'id': body_json['id'],
            'headers': {
                'Content-Type': body_json['headers']['Accept'],
                'Content-Length': len(reply_body)
            },
            'statusCode': status_code,
            'body': base64.b64encode(reply_body.encode()).decode(self.fsencoding),
            'request': False
        }
        LOGGER.debug(f"reply: {json.dumps(reply_body)}")
        reply_properties = pika.BasicProperties(
            correlation_id=properties.correlation_id)
        self._channel.basic_publish(
            exchange=properties.headers['replyToExchange'],
            routing_key=properties.reply_to,
            body=json.dumps(reply_msg),
            properties=reply_properties)
def wrapper(*args, **kwargs):
    """Decorator body: run the broker method, rolling back on creation errors.

    NOTE: the caught creation errors are NOT re-raised — after attempting
    rollback the wrapper returns None, so callers see a silent failure.
    Rollback itself is best-effort; its own failures are only logged.
    The `as e` binding in the first handler is unused.
    """
    try:
        func(*args, **kwargs)
    except (MasterNodeCreationError, WorkerNodeCreationError,
            NFSNodeCreationError, ClusterJoiningError,
            ClusterInitializationError) as e:
        # Cluster-level creation failure: roll the whole cluster back if
        # the request asked for rollback.
        try:
            '''arg[0] refers to the current instance of the broker thread'''
            broker_instance = args[0]
            if broker_instance.body[ROLLBACK_FLAG]:
                broker_instance.cluster_rollback()
        except Exception as err:
            LOGGER.error('Failed to rollback cluster creation:%s', str(err))
    except NodeCreationError as e:
        # Node-level failure: roll back only the nodes named in the error.
        try:
            broker_instance = args[0]
            node_list = e.node_names
            if broker_instance.body[ROLLBACK_FLAG]:
                broker_instance.node_rollback(node_list)
        except Exception as err:
            LOGGER.error('Failed to rollback node creation:%s', str(err))
def get_cluster_and_broker(request_data, tenant_auth_token, is_jwt_token):
    """Locate the named cluster, trying vCD first and then every PKS account.

    :param dict request_data: request payload containing RequestKey.CLUSTER_NAME.
    :param tenant_auth_token: tenant auth token used to build the brokers.
    :param bool is_jwt_token: whether the token is a JWT.

    :return: tuple (cluster info, broker that found it).

    :raises ClusterNotFoundError: if no broker knows the cluster.
    :raises CseDuplicateClusterError: if multiple same-name clusters exist
        in vCD (e.g. across orgs when acting as sys admin).
    :raises PksDuplicateClusterError: if multiple same-name clusters exist
        in PKS.
    """
    cluster_name = request_data[RequestKey.CLUSTER_NAME]
    vcd_broker = VcdBroker(tenant_auth_token, is_jwt_token)
    try:
        return vcd_broker.get_cluster_info(request_data), vcd_broker
    except ClusterNotFoundError as err:
        # continue searching using PksBrokers
        LOGGER.debug(f"{err}")
    except CseDuplicateClusterError as err:
        # fail because multiple clusters with same name exist
        # only case is when multiple same-name clusters exist across orgs
        # and sys admin tries to do a cluster operation
        LOGGER.debug(f"{err}")
        raise
    except Exception as err:
        LOGGER.error(f"Unknown error: {err}", exc_info=True)
        raise

    pks_ctx_list = create_pks_context_for_all_accounts_in_org(
        tenant_auth_token, is_jwt_token)
    for pks_ctx in pks_ctx_list:
        debug_msg = f"Get cluster info for cluster '{cluster_name}' " \
                    f"failed on host '{pks_ctx['host']}' with error: "
        pks_broker = PksBroker(pks_ctx, tenant_auth_token, is_jwt_token)
        try:
            return pks_broker.get_cluster_info(request_data), pks_broker
        except (PksClusterNotFoundError, PksServerError) as err:
            # continue searching using other PksBrokers
            LOGGER.debug(f"{debug_msg}{err}")
        except PksDuplicateClusterError as err:
            # fail because multiple clusters with same name exist
            LOGGER.debug(f"{debug_msg}{err}")
            raise
        except Exception as err:
            LOGGER.error(f"Unknown error: {err}", exc_info=True)
            raise

    # only raised if cluster was not found in VcdBroker or PksBrokers
    raise ClusterNotFoundError(f"Cluster '{cluster_name}' not found.")
def resolve_entity(self, entity_id: str) -> DefEntity:
    """Resolve the entity.

    Validates the entity against the schema. Based on the result, entity
    state will be either changed to "RESOLVED" (or) "RESOLUTION ERROR".

    :param str entity_id: Id of the entity
    :return: Defined entity with its state updated.
    :rtype: DefEntity
    """
    resp = self._cloudapi_client.do_request(
        method=RequestMethod.POST,
        cloudapi_version=CLOUDAPI_VERSION_1_0_0,
        resource_url_relative_path=f"{CloudApiResource.ENTITIES}/"
                                   f"{entity_id}/{CloudApiResource.ENTITY_RESOLVE}")  # noqa: E501
    # Strip the error message out of the payload before building the entity.
    error_msg = resp.pop(def_utils.DEF_ERROR_MESSAGE_KEY)
    resolved_entity = DefEntity(**resp)
    # TODO: Just record the error message; revisit after HTTP response code
    # is good enough to decide if exception should be thrown or not
    if resolved_entity.state != def_utils.DEF_RESOLVED_STATE:
        LOGGER.error(error_msg)
    return resolved_entity
def init_cluster(vapp, template_name, template_revision):
    """Run the master init script on the cluster's master node(s).

    Loads the MASTER script for the given template/revision and executes it
    on the master node(s) of the vApp.

    :param vapp: cluster vApp whose master node(s) will be initialized.
    :param str template_name: name of the cluster template.
    :param template_revision: revision of the cluster template.

    :raises ClusterInitializationError: on any failure, including script
        execution errors and a nonzero script exit status (the original
        exception's message is embedded in the new one).
    """
    try:
        script_filepath = get_local_script_filepath(template_name,
                                                    template_revision,
                                                    ScriptFile.MASTER)
        script = utils.read_data_file(script_filepath, logger=LOGGER)
        node_names = get_node_names(vapp, NodeType.MASTER)
        result = execute_script_in_nodes(vapp=vapp,
                                         node_names=node_names,
                                         script=script)
        errors = _get_script_execution_errors(result)
        if errors:
            raise ScriptExecutionError(
                f"Initialize cluster script execution failed on node "
                f"{node_names}:{errors}")
        # result[0][0] is the exit status of the script on the first node;
        # nonzero means initialization failed.
        if result[0][0] != 0:
            raise ClusterInitializationError(
                f"Couldn't initialize cluster:\n{result[0][2].content.decode()}"
            )  # noqa: E501
    except Exception as e:
        LOGGER.error(e, exc_info=True)
        raise ClusterInitializationError(
            f"Couldn't initialize cluster: {str(e)}")