def flush(self, m_ids=None):
    """Remove the given machines from the master's maintenance schedule.

    :param m_ids: machine ids to flush; falls back to ``self.machine_ids``
        when empty or None
    :type m_ids: list | None
    :returns: 0 when there is no schedule to modify, otherwise None
    :rtype: int | None
    """
    # `m_ids=None` avoids the shared-mutable-default pitfall of `m_ids=[]`.
    if not m_ids:
        m_ids = self.machine_ids
    if not self.scheduled:
        emitter.publish("No maintenance schedule found on mesos master")
        return 0
    # Drop conflicting machine entries from every window, then drop
    # windows that became empty. (The original index bookkeeping used
    # undefined loop indices `i`/`j` and raised NameError.)
    remaining_windows = []
    for window in self.scheduled['windows']:
        window['machine_ids'] = [
            entry for entry in window['machine_ids']
            if not any(compare_machine_ids(entry, host) for host in m_ids)]
        if window['machine_ids']:
            remaining_windows.append(window)
    self.scheduled['windows'] = remaining_windows
    emitter.publish("Flushing specified host(s)")
    try:
        url = self.dcos_client.master_url('maintenance/schedule')
        http.post(url, data=None, json=self.scheduled)
        emitter.publish("Schedules updated")
    except DCOSException as e:
        logger.exception(e)
    except Exception:
        raise DCOSException("Can't complete operation on mesos master")
def shutdown_framework(self, framework_id):
    """Shut down a running Mesos framework.

    :param framework_id: ID of the framework to shut down
    :type framework_id: str
    :returns: None
    """
    logger.info('Shutting down framework {}'.format(framework_id))
    payload = 'frameworkId={}'.format(framework_id)
    http.post(self._create_url('master/shutdown'), data=payload)
def shutdown_framework(self, framework_id):
    """Ask the Mesos master to shut down a framework.

    :param framework_id: ID of the framework to shutdown
    :type framework_id: str
    :returns: None
    """
    logger.info('Shutting down framework {}'.format(framework_id))
    endpoint = self.master_url('master/shutdown')
    body = 'frameworkId={}'.format(framework_id)
    http.post(endpoint, data=body, timeout=self._timeout)
def cosmos_post(self, request, params):
    """Send a POST request to the cosmos server.

    :param request: type of request
    :type request: str
    :param params: body of request
    :type params: dict
    :returns: Response
    :rtype: Response
    """
    url = urllib.parse.urljoin(self.cosmos_url,
                               'package/{}'.format(request))
    try:
        response = http.post(url,
                             json=params,
                             headers=_get_cosmos_header(request))
        if not _check_cosmos_header(request, response):
            raise DCOSException(
                "Server returned incorrect response type: {}".format(
                    response.headers))
    except (DCOSAuthenticationException, DCOSAuthorizationException):
        # Auth failures always propagate.
        raise
    except DCOSHTTPException as e:
        # let non authentication responses be handled by `cosmos_error` so
        # we can expose errors reported by cosmos
        response = e.response
    return response
def up(self, m_ids=None):
    """Bring the given machines back UP out of maintenance.

    :param m_ids: machine ids; falls back to ``self.machine_ids``
    :type m_ids: list | None
    :returns: process status
    :rtype: int
    """
    if not m_ids:
        m_ids = self.machine_ids
    not_scheduled, down, draining = filter_agents(
        m_ids, self.full_maintenance_status)
    if draining:
        # flush() takes `m_ids`; the original `machine_ids=` keyword
        # raised TypeError.
        self.flush(m_ids=draining)
    try:
        url = self.dcos_client.master_url('machine/up')
        http.post(url, data=None, json=down)
        emitter.publish("submitted hosts are now UP")
    except DCOSException as e:
        logger.exception(e)
    except Exception:
        raise DCOSException("Can't complete operation on mesos master")
    return 0
def unreserve_resource(agent, role):
    """Unreserve all the resources for the role on the agent.

    :param agent: agent state entry (must contain 'id')
    :type agent: dict
    :param role: reservation role to clear
    :type role: str
    :returns: True on success or when nothing had to be done
    :rtype: bool
    """
    agent_id = agent['id']
    reserved_resources_full = agent.get('reserved_resources_full', None)
    if not reserved_resources_full:
        # Agent has no reservations at all; nothing to do.
        return True
    reserved_resources = reserved_resources_full.get(role, None)
    if not reserved_resources:
        # Nothing reserved for this role.
        return True
    # Copy instead of the element-by-element append loop.
    resources = list(reserved_resources)
    req_url = urljoin(master_url(), 'unreserve')
    data = {
        'slaveId': agent_id,
        'resources': json.dumps(resources)
    }
    success = False
    try:
        response = http.post(req_url, data=data)
        success = 200 <= response.status_code < 300
    except DCOSHTTPException as e:
        # Fixed message typo: "Unabled" -> "Unable".
        print("HTTP {}: Unable to unreserve resources based on: {}".format(
            e.response.status_code, e.response.text))
    return success
def restart_app(self, app_id, force=None):
    """Perform a rolling restart of all of the application's tasks.

    :param app_id: the id of the application to restart
    :type app_id: str
    :param force: whether to override running deployments
    :type force: bool
    :returns: the deployment id and version
    :rtype: dict
    """
    app_id = self.normalize_app_id(app_id)
    params = {'force': 'true'} if force else None
    url = self._create_url('v2/apps{}/restart'.format(app_id))
    return http.post(url, params=params, to_exception=_to_exception).json()
def up(self, m_ids=None):
    """Bring the given machines back UP out of maintenance.

    :param m_ids: machine ids; falls back to ``self.machine_ids``
    :type m_ids: list | None
    :returns: process status
    :rtype: int
    """
    if not m_ids:
        m_ids = self.machine_ids
    not_scheduled, down, draining = filter_agents(
        m_ids, self.full_maintenance_status)
    if draining:
        # flush() takes `m_ids`; the original `machine_ids=` keyword
        # raised TypeError.
        self.flush(m_ids=draining)
    try:
        url = self.dcos_client.master_url('machine/up')
        http.post(url, data=None, json=down)
        emitter.publish("submitted hosts are now UP")
    except DCOSException as e:
        logger.exception(e)
    except Exception:
        raise DCOSException("Can't complete operation on mesos master")
    return 0
def _cosmos_request(self, url, http_request_type, headers_preference,
                    data=None, json=None, **kwargs):
    """ Gets a Response object obtained by calling cosmos at the url 'url'.
    Will attempt each of the headers in headers_preference in order until
    success.

    :param url: the url of a cosmos endpoint
    :type url: str
    :param http_request_type: 'post' for a POST request, anything else GETs
    :type http_request_type: str
    :param headers_preference: a list of request headers in order of
        preference. Each header will be attempted until they all fail or the
        request succeeds.
    :type headers_preference: list[dict[str, str]]
    :param data: the request's body
    :type data: dict | bytes | file-like object
    :param json: JSON request body
    :type json: dict
    :param kwargs: Additional arguments to requests.request
        (see py:func:`request`)
    :type kwargs: dict
    :return: response returned by calling cosmos at url
    :rtype: requests.Response
    """
    try:
        headers = headers_preference[0]
        # Use `==`, not `is`: identity comparison on strings only works
        # by CPython interning accident.
        if http_request_type == 'post':
            response = http.post(url, data=data, json=json,
                                 headers=headers, **kwargs)
        else:
            response = http.get(url, data=data, json=json,
                                headers=headers, **kwargs)
        if not _matches_expected_response_header(headers, response.headers):
            raise DCOSException('Server returned incorrect response type, '
                                'expected {} but got {}'.format(
                                    headers.get('Accept'),
                                    response.headers.get('Content-Type')))
        return response
    except DCOSBadRequest as e:
        if len(headers_preference) > 1:
            # reattempt with one less item in headers_preference
            return self._cosmos_request(url, http_request_type,
                                        headers_preference[1:],
                                        data, json, **kwargs)
        else:
            raise e
def down(self, m_ids=None):
    """Take the given machines DOWN for maintenance.

    :param m_ids: machine ids; falls back to ``self.machine_ids``
    :type m_ids: list | None
    :returns: process status
    :rtype: int
    """
    if not m_ids:
        m_ids = self.machine_ids
    not_scheduled, down, draining = filter_agents(
        m_ids, self.full_maintenance_status)
    # Machines must be scheduled before being taken down. Guard against
    # an empty list: schedule_maintenance([]) falls back to *all*
    # machines and would schedule hosts the caller never asked about.
    if not_scheduled:
        self.schedule_maintenance(None, None, not_scheduled)
    to_down = not_scheduled + draining
    try:
        url = self.dcos_client.master_url('machine/down')
        http.post(url, data=None, json=to_down)
        emitter.publish("submitted hosts are now DOWN")
    except DCOSException as e:
        logger.exception(e)
    except Exception:
        raise DCOSException("Can't complete operation on mesos master")
    return 0
def schedule_maintenance(self, start, duration, m_ids=None):
    """Add a maintenance window for the given machines.

    :param start: window start in seconds since the epoch; defaults to now
    :type start: int | str | None
    :param duration: window length in seconds; defaults to DEFAULT_DURATION
    :type duration: int | str | None
    :param m_ids: machine ids; falls back to ``self.machine_ids``
    :type m_ids: list | None
    """
    if not m_ids:
        m_ids = self.machine_ids
    up, down, draining = filter_agents(m_ids, self.full_maintenance_status)
    # Only machines that are UP or DRAINING can be (re)scheduled.
    machine_ids = up + draining
    if not machine_ids:
        emitter.publish("Agents are already DOWN")
    # `int` replaces the Python-2-only `long`; values are nanoseconds.
    if not duration:
        duration = int(DEFAULT_DURATION * stons)
    else:
        duration = int(int(duration) * stons)
    if not start:
        start = int(time.time() * stons)
    else:
        start = int(int(start) * stons)
    if not self.scheduled:
        self.scheduled = {'windows': []}
    window = {
        # Schedule the filtered machines, not every known machine
        # (the original used self.machine_ids here by mistake).
        'machine_ids': machine_ids,
        'unavailability': {
            "start": {"nanoseconds": start},
            "duration": {"nanoseconds": duration}
        }
    }
    self.scheduled['windows'].append(window)
    try:
        url = self.dcos_client.master_url('maintenance/schedule')
        http.post(url, data=None, json=self.scheduled)
        emitter.publish("Schedules updated")
    except DCOSException as e:
        logger.exception(e)
    except Exception:
        raise DCOSException("Can't complete operation on mesos master")
def down(self, m_ids=None):
    """Take the given machines DOWN for maintenance.

    :param m_ids: machine ids; falls back to ``self.machine_ids``
    :type m_ids: list | None
    :returns: process status
    :rtype: int
    """
    if not m_ids:
        m_ids = self.machine_ids
    not_scheduled, down, draining = filter_agents(
        m_ids, self.full_maintenance_status)
    # Machines must be scheduled before being taken down. Guard against
    # an empty list: schedule_maintenance([]) falls back to *all*
    # machines and would schedule hosts the caller never asked about.
    if not_scheduled:
        self.schedule_maintenance(None, None, not_scheduled)
    to_down = not_scheduled + draining
    try:
        url = self.dcos_client.master_url('machine/down')
        http.post(url, data=None, json=to_down)
        emitter.publish("submitted hosts are now DOWN")
    except DCOSException as e:
        logger.exception(e)
    except Exception:
        raise DCOSException("Can't complete operation on mesos master")
    return 0
def _cosmos_request(self, url, http_request_type, headers_preference,
                    data=None, json=None, **kwargs):
    """ Gets a Response object obtained by calling cosmos at the url 'url'.
    Will attempt each of the headers in headers_preference in order until
    success.

    :param url: the url of a cosmos endpoint
    :type url: str
    :param http_request_type: 'post' for a POST request, anything else GETs
    :type http_request_type: str
    :param headers_preference: a list of request headers in order of
        preference. Each header will be attempted until they all fail or the
        request succeeds.
    :type headers_preference: list[dict[str, str]]
    :param data: the request's body
    :type data: dict | bytes | file-like object
    :param json: JSON request body
    :type json: dict
    :param kwargs: Additional arguments to requests.request
        (see py:func:`request`)
    :type kwargs: dict
    :return: response returned by calling cosmos at url
    :rtype: requests.Response
    """
    try:
        headers = headers_preference[0]
        # Use `==`, not `is`: identity comparison on strings only works
        # by CPython interning accident.
        if http_request_type == 'post':
            response = http.post(
                url, data=data, json=json, headers=headers, **kwargs)
        else:
            response = http.get(
                url, data=data, json=json, headers=headers, **kwargs)
        if not _matches_expected_response_header(headers, response.headers):
            raise DCOSException(
                'Server returned incorrect response type, '
                'expected {} but got {}'.format(
                    headers.get('Accept'),
                    response.headers.get('Content-Type')))
        return response
    except DCOSBadRequest as e:
        if len(headers_preference) > 1:
            # reattempt with one less item in headers_preference
            return self._cosmos_request(url, http_request_type,
                                        headers_preference[1:],
                                        data, json, **kwargs)
        else:
            raise e
def _run(job_id):
    """Trigger a run of the given job.

    :param job_id: Id of the job
    :type job_id: str
    :returns: process return code
    :rtype: int
    """
    url = "{}/{}/runs".format(_get_api_url('v1/jobs'), job_id)
    try:
        http.post(url, timeout=_get_timeout())
    except DCOSHTTPException as e:
        if e.response.status_code == 404:
            emitter.publish("Job ID: '{}' does not exist.".format(job_id))
        else:
            emitter.publish("Error running job: '{}'".format(job_id))
    return 0
def _segment_request(path, data):
    """Send a segment.io HTTP request.

    :param path: URL path
    :type path: str
    :param data: json POST data
    :type data: dict
    :rtype: None
    """
    write_key = SEGMENT_IO_WRITE_KEY_PROD
    target = '{}/{}'.format(SEGMENT_URL, path)
    try:
        # 1s connect and read timeouts keep analytics reporting from
        # hanging the CLI commands.
        http.post(target,
                  json=data,
                  auth=HTTPBasicAuth(write_key, ''),
                  timeout=(1, 1))
    except Exception as e:
        # Analytics is best-effort: log and never break the command.
        logger.exception(e)
def schedule_maintenance(self, start, duration, m_ids=None):
    """Add a maintenance window for the given machines.

    :param start: window start in seconds since the epoch; defaults to now
    :type start: int | str | None
    :param duration: window length in seconds; defaults to DEFAULT_DURATION
    :type duration: int | str | None
    :param m_ids: machine ids; falls back to ``self.machine_ids``
    :type m_ids: list | None
    """
    if not m_ids:
        m_ids = self.machine_ids
    up, down, draining = filter_agents(m_ids, self.full_maintenance_status)
    # Only machines that are UP or DRAINING can be (re)scheduled.
    machine_ids = up + draining
    if not machine_ids:
        emitter.publish("Agents are already DOWN")
    # `int` replaces the Python-2-only `long`; values are nanoseconds.
    if not duration:
        duration = int(DEFAULT_DURATION * stons)
    else:
        duration = int(int(duration) * stons)
    if not start:
        start = int(time.time() * stons)
    else:
        start = int(int(start) * stons)
    if not self.scheduled:
        self.scheduled = {'windows': []}
    window = {
        # Schedule the filtered machines, not every known machine
        # (the original used self.machine_ids here by mistake).
        'machine_ids': machine_ids,
        'unavailability': {
            "start": {"nanoseconds": start},
            "duration": {"nanoseconds": duration}
        }
    }
    self.scheduled['windows'].append(window)
    try:
        url = self.dcos_client.master_url('maintenance/schedule')
        http.post(url, data=None, json=self.scheduled)
        emitter.publish("Schedules updated")
    except DCOSException as e:
        logger.exception(e)
    except Exception:
        raise DCOSException("Can't complete operation on mesos master")
def test_job():
    """Install chronos, add a job, and verify its log is retrievable."""
    shakedown.install_package_and_wait('chronos')

    # Freshly installed service: no completed tasks yet.
    tasks = shakedown.get_service('chronos')['completed_tasks']
    assert len(tasks) == 0

    # The scheduler API moved under /v1 in chronos 3.0.
    if is_before_version("3.0"):
        jobs_url = shakedown.dcos_service_url('chronos/scheduler/jobs')
    else:
        jobs_url = shakedown.dcos_service_url('chronos/v1/scheduler/jobs')
    assert len(http.get(jobs_url).json()) == 0

    # add a job
    if is_before_version("3.0"):
        add_url = shakedown.dcos_service_url('chronos/scheduler/iso8601')
    else:
        add_url = shakedown.dcos_service_url('chronos/v1/scheduler/iso8601')
    http.post(add_url,
              data=default_job(),
              headers={'Content-Type': 'application/json'})

    # give it a couple of seconds
    time.sleep(5)

    tasks = shakedown.get_service('chronos')['completed_tasks']
    assert len(tasks) > 0

    task_id = tasks[0]['id']
    status, out = shakedown.run_command_on_master('date')
    sdate = out[:10]
    stdout, stderr, return_code = shakedown.run_dcos_command(
        'task log --completed {}'.format(task_id))
    assert sdate in stdout
def _post_job(job_json):
    """Create a job from its JSON representation.

    :param job_json: json object representing a job
    :type job_json: json
    :returns: response json
    :rtype: json
    """
    response = http.post(_get_api_url('v1/jobs'),
                         json=job_json,
                         timeout=_get_timeout())
    return response.json()
def mark_agent_gone(self, agent_id):
    """Mark an agent as gone.

    :param agent_id: agent ID
    :type agent_id: str
    """
    body = json.dumps({
        'type': "MARK_AGENT_GONE",
        'mark_agent_gone': {
            'agent_id': {'value': agent_id}
        }
    })
    http.post(self.master_url('api/v1'),
              data=body,
              headers={'Content-Type': 'application/json',
                       'Accept': 'application/json'})
def shutdown_framework(self, framework_id):
    """Shut down a Mesos framework.

    :param framework_id: ID of the framework to shutdown
    :type framework_id: str
    :returns: None
    """
    logger.info('Shutting down framework {}'.format(framework_id))
    payload = 'frameworkId={}'.format(framework_id)
    try:
        # Mesos 0.24 renamed /shutdown to /teardown; try the new
        # endpoint first.
        http.post(self.master_url('master/teardown'),
                  data=payload, timeout=self._timeout)
    except DCOSHTTPException as e:
        if e.response.status_code != 404:
            raise
        # Older masters only expose /shutdown.
        http.post(self.master_url('master/shutdown'),
                  data=payload, timeout=self._timeout)
def _do_request(url, method, timeout=None, stream=False, **kwargs):
    """Make an HTTP request against the configured DC/OS cluster.

    :param url: url
    :type url: string
    :param method: HTTP method, GET or POST
    :type method: string
    :param timeout: HTTP request timeout; falls back to `core.timeout`,
        then 180 seconds
    :type timeout: integer
    :param stream: stream parameter for requests lib
    :type stream: bool
    :return: http response
    :rtype: requests.Response
    """
    def _is_success(status_code):
        # 400 and 503 responses still carry an API error payload, so
        # treat them as successful at the HTTP layer.
        return status_code in (200, 400, 503)

    if timeout is None:
        # `or 180` also covers an unset/empty `core.timeout`.
        timeout = config.get_config_val('core.timeout') or 180

    base_url = config.get_config_val("core.dcos_url")
    if not base_url:
        raise config.missing_config_exception(['core.dcos_url'])
    full_url = urllib.parse.urljoin(base_url, url)

    verb = method.lower()
    if verb == 'get':
        return http.get(full_url, is_success=_is_success,
                        timeout=timeout, **kwargs)
    if verb == 'post':
        return http.post(full_url, is_success=_is_success,
                         timeout=timeout, stream=stream, **kwargs)
    raise DCOSException('Unsupported HTTP method: ' + method)
def destroy_cluster(name, internal):
    """Ask the arangodb web UI to destroy the named cluster; exits the
    process on connection failure or a non-2xx response.
    """
    url = get_arangodb_webui(name, internal) + "/v1/destroy.json"
    try:
        response = http.post(url, timeout=60)
    except DCOSException as e:
        print("cannot connect to '" + url + "'", e)
        sys.exit(1)
    if 200 <= response.status_code < 300:
        # Renamed local from `json` to avoid shadowing the json module.
        payload = response.json()
        return payload
    print("Bad response getting mode. Status code: " +
          str(response.status_code))
    sys.exit(1)
def _post_schedule(job_id, schedule_json):
    """Attach a schedule to an existing job.

    :param job_id: id of the job
    :type job_id: str
    :param schedule_json: json object representing a schedule
    :type schedule_json: json
    :returns: response json
    :rtype: json
    """
    url = "{}/{}/schedules".format(_get_api_url('v1/jobs'), job_id)
    return http.post(url,
                     json=schedule_json,
                     timeout=_get_timeout()).json()
def _do_request(url, method, timeout=None, stream=False, **kwargs):
    """Make an HTTP request against the metronome API.

    :param url: url
    :type url: string
    :param method: HTTP method, GET, POST or DELETE
    :type method: string
    :param timeout: HTTP request timeout; falls back to _get_timeout()
    :type timeout: integer
    :param stream: stream parameter for requests lib
    :type stream: bool
    :return: http response
    :rtype: requests.Response
    """
    def _is_success(status_code):
        # 400 and 503 responses still carry an API error payload, so
        # treat them as successful at the HTTP layer.
        return status_code in (200, 400, 503)

    if timeout is None:
        timeout = _get_timeout()

    full_url = urllib.parse.urljoin(_get_metronome_url(), url)
    verb = method.lower()
    if verb == 'get':
        return http.get(full_url, is_success=_is_success,
                        timeout=timeout, **kwargs)
    if verb == 'post':
        return http.post(full_url, is_success=_is_success,
                         timeout=timeout, stream=stream, **kwargs)
    if verb == 'delete':
        return http.delete(full_url, is_success=_is_success,
                           timeout=timeout, stream=stream, **kwargs)
    raise DCOSException('Unsupported HTTP method: ' + method)
def _do_request(url, method, timeout=None, stream=False, **kwargs):
    """POST/GET against the snapshot API on the configured DC/OS cluster.

    :param url: url
    :type url: string
    :param method: HTTP method, GET or POST
    :type method: string
    :param timeout: HTTP request timeout; falls back to `core.timeout`,
        then 180 seconds
    :type timeout: integer
    :param stream: stream parameter for requests lib
    :type stream: bool
    :return: http response
    :rtype: requests.Response
    """
    def _is_success(status_code):
        # The API reports errors in the body of 400/503 responses, so
        # those count as success at this layer.
        return status_code in (200, 400, 503)

    if timeout is None:
        # Prefer `core.timeout`; default to 3 minutes when unset.
        timeout = config.get_config_val('core.timeout') or 180

    base_url = config.get_config_val("core.dcos_url")
    if not base_url:
        raise config.missing_config_exception(['core.dcos_url'])
    target = urllib.parse.urljoin(base_url, url)

    method = method.lower()
    if method == 'get':
        return http.get(target, is_success=_is_success,
                        timeout=timeout, **kwargs)
    elif method == 'post':
        return http.post(target, is_success=_is_success,
                         timeout=timeout, stream=stream, **kwargs)
    else:
        raise DCOSException('Unsupported HTTP method: ' + method)
def destroy_volume(agent, role):
    """Delete the persistent volumes reserved for the role on the agent.

    :param agent: agent state entry (must contain 'id')
    :type agent: dict
    :param role: reservation role whose volumes are destroyed
    :type role: str
    :returns: True on success or when nothing had to be done
    :rtype: bool
    """
    volumes = []
    agent_id = agent['id']
    reserved_resources_full = agent.get('reserved_resources_full', None)
    if not reserved_resources_full:
        # Agent has no reservations at all; nothing to do.
        return True
    reserved_resources = reserved_resources_full.get(role, None)
    if not reserved_resources:
        # Nothing reserved for this role.
        return True
    for reserved_resource in reserved_resources:
        name = reserved_resource.get('name', None)
        disk = reserved_resource.get('disk', None)
        # Only disk resources carrying persistence info are volumes.
        if name == 'disk' and disk is not None and 'persistence' in disk:
            volumes.append(reserved_resource)
    req_url = urljoin(master_url(), 'destroy-volumes')
    data = {
        'slaveId': agent_id,
        'volumes': json.dumps(volumes)
    }
    success = False
    try:
        response = http.post(req_url, data=data)
        success = 200 <= response.status_code < 300
        if response.status_code == 409:
            # 409 means a framework still holds the resources.
            print('''###\nIs a framework using these resources still installed?\n###''')
    except DCOSHTTPException as e:
        # Fixed message typo: "Unabled" -> "Unable".
        print("HTTP {}: Unable to delete volume based on: {}".format(
            e.response.status_code, e.response.text))
    return success
def create_group(self, group_resource):
    """Add a new group.

    :param group_resource: group resource
    :type group_resource: dict, bytes or file
    :returns: the group description
    :rtype: dict
    """
    # File-like resources are parsed as JSON; anything else passes
    # through (the `file` type only exists in Python 2, hence the
    # duck-typed `read` check).
    if hasattr(group_resource, 'read'):
        payload = json.load(group_resource)
    else:
        payload = group_resource
    url = self._create_url('v2/groups')
    return http.post(url, json=payload, to_exception=_to_exception).json()
def _launch_nested_container_session(self):
    """Ask the Mesos agent to launch a new nested container and attach
    to its output stream; the stream comes back in the response.
    """
    session = {
        'container_id': {
            'parent': {'value': self.parent_id},
            'value': self.container_id},
        'command': {
            'value': self.cmd,
            'arguments': [self.cmd] + self.args,
            'shell': False}}
    if self.tty:
        session['container'] = {'type': 'MESOS', 'tty_info': {}}

    message = {
        'type': "LAUNCH_NESTED_CONTAINER_SESSION",
        'launch_nested_container_session': session}

    response = http.post(
        self.agent_url,
        data=json.dumps(message),
        stream=True,
        headers={
            'Content-Type': 'application/json',
            'Accept': 'application/json+recordio',
            'connection': 'keep-alive'})
    self._process_output_stream(response)
def add_app(self, app_resource):
    """Add a new application.

    :param app_resource: application resource
    :type app_resource: dict, bytes or file
    :returns: the application description
    :rtype: dict
    """
    # File-like resources are parsed as JSON; anything else passes
    # through (the `file` type only exists in Python 2, hence the
    # duck-typed `read` check).
    if hasattr(app_resource, "read"):
        payload = json.load(app_resource)
    else:
        payload = app_resource
    target = self._create_url("v2/apps")
    return http.post(target, json=payload, to_exception=_to_exception).json()
def _post(self, request, params, headers=None):
    """Request to cosmos server

    :param request: type of request
    :type request: str
    :param params: body of request
    :type params: dict
    :param headers: list of headers for request in order of preference
    :type headers: [str]
    :returns: Response
    :rtype: Response
    """
    url = urllib.parse.urljoin(self.cosmos_url,
                               'package/{}'.format(request))
    if headers is None:
        headers = self._request_preferences().get(request)
    try:
        # NOTE: pop(0) mutates `headers`, so the recursive retry in the
        # DCOSBadRequest handler below sees one fewer preference per
        # attempt.
        header_preference = headers.pop(0)
        # assumes the Accept header always contains "version=" — an
        # IndexError here would escape uncaught; TODO confirm
        version = header_preference.get("Accept").split("version=")[1]
        response = http.post(url, json=params, headers=header_preference)
        if not _check_cosmos_header(request, response, version):
            raise DCOSException(
                "Server returned incorrect response type: {}".format(
                    response.headers))
    except DCOSAuthenticationException:
        raise
    except DCOSAuthorizationException:
        raise
    except DCOSBadRequest as e:
        if len(headers) > 0:
            # Retry with the remaining (already shortened) header list.
            response = self._post(request, params, headers)
        else:
            response = e.response
    except DCOSHTTPException as e:
        # let non authentication responses be handled by `cosmos_error` so
        # we can expose errors reported by cosmos
        response = e.response
    return response
def _launch_nested_container_session(self):
    """Launch a new nested container on the Mesos agent and attach to
    its output stream, which is returned in the response.
    """
    launch = {
        'container_id': {
            'parent': {'value': self.parent_id},
            'value': self.container_id},
        'command': {
            'value': self.cmd,
            'arguments': [self.cmd] + self.args,
            'shell': False}}
    if self.tty:
        launch['container'] = {'type': 'MESOS', 'tty_info': {}}

    headers = {
        'Content-Type': 'application/json',
        'Accept': 'application/json+recordio',
        'connection': 'keep-alive'}
    response = http.post(
        self.agent_url,
        data=json.dumps({
            'type': "LAUNCH_NESTED_CONTAINER_SESSION",
            'launch_nested_container_session': launch}),
        stream=True,
        headers=headers)
    self._process_output_stream(response)
def _attach_container_input(self):
    """Streams all input data (e.g. STDIN) from the client to the agent
    """
    def _initial_input_streamer():
        """Generator function yielding the initial ATTACH_CONTAINER_INPUT
        message for streaming. We have a separate generator for this so
        that we can attempt the connection once before committing to a
        persistent connection where we stream the rest of the input.

        :returns: A RecordIO encoded message
        """
        message = {
            'type': 'ATTACH_CONTAINER_INPUT',
            'attach_container_input': {
                'type': 'CONTAINER_ID',
                'container_id': {
                    'parent': {
                        'value': self.parent_id
                    },
                    'value': self.container_id}}}
        yield self.encoder.encode(message)

    def _input_streamer():
        """Generator function yielding ATTACH_CONTAINER_INPUT messages for
        streaming. It yields the _initial_input_streamer() message,
        followed by messages from the input_queue on each subsequent call.

        :returns: A RecordIO encoded message
        """
        yield next(_initial_input_streamer())
        while True:
            record = self.input_queue.get()
            if not record:
                # A falsy record is the sentinel that input has ended.
                break
            yield record

    req_extra_args = {
        'headers': {
            'Content-Type': 'application/json+recordio',
            'Accept': 'application/json',
            'Connection': 'close',
            'Transfer-Encoding': 'chunked'
        }
    }

    # Ensure we don't try to attach our input to a container that isn't
    # fully up and running by waiting until the
    # `_process_output_stream` function signals us that it's ready.
    self.attach_input_event.wait()

    # Send an initial "Test" message to ensure that we can establish a
    # connection with the agent at all. If we can't we will throw an
    # exception and break out of this thread.
    http.post(
        self.agent_url,
        data=_initial_input_streamer(),
        **req_extra_args)

    # If we succeeded with that connection, unblock process_output_stream()
    # from sending output data to the output thread.
    self.print_output_event.set()

    # Begin streaming the input.
    http.post(
        self.agent_url,
        data=_input_streamer(),
        **req_extra_args)
def _attach_container_input(self):
    """Streams all input data (e.g. STDIN) from the client to the agent
    """
    def _initial_input_streamer():
        """Generator function yielding the initial ATTACH_CONTAINER_INPUT
        message for streaming. We have a separate generator for this so
        that we can attempt the connection once before committing to a
        persistent connection where we stream the rest of the input.

        :returns: A RecordIO encoded message
        """
        message = {
            'type': 'ATTACH_CONTAINER_INPUT',
            'attach_container_input': {
                'type': 'CONTAINER_ID',
                'container_id': {
                    'parent': self.parent_id,
                    'value': self.container_id}}}
        yield self.encoder.encode(message)

    def _input_streamer():
        """Generator function yielding ATTACH_CONTAINER_INPUT messages for
        streaming. It yields the _initial_input_streamer() message,
        followed by messages from the input_queue on each subsequent call.

        :returns: A RecordIO encoded message
        """
        yield next(_initial_input_streamer())
        while True:
            record = self.input_queue.get()
            if not record:
                # A falsy record is the sentinel that input has ended.
                break
            yield record

    req_extra_args = {
        'headers': {
            'Content-Type': 'application/recordio',
            'Message-Content-Type': 'application/json',
            'Accept': 'application/json',
            'Connection': 'close',
            'Transfer-Encoding': 'chunked'
        }
    }

    # Ensure we don't try to attach our input to a container that isn't
    # fully up and running by waiting until the
    # `_process_output_stream` function signals us that it's ready.
    self.attach_input_event.wait()

    # Send an initial "Test" message to ensure that we are able to
    # establish a connection with the agent. If we aren't we will throw
    # an exception and break out of this thread. However, in cases where
    # we receive a 500 response from the agent, we actually want to
    # continue without throwing an exception. A 500 error indicates that
    # we can't connect to the container because it has already finished
    # running. In that case we continue running to allow the output queue
    # to be flushed.
    try:
        http.post(
            self.agent_url,
            data=_initial_input_streamer(),
            **req_extra_args)
    except DCOSHTTPException as e:
        if not e.response.status_code == 500:
            raise e

    # If we succeeded with that connection, unblock process_output_stream()
    # from sending output data to the output thread.
    self.print_output_event.set()

    # Begin streaming the input.
    http.post(
        self.agent_url,
        data=_input_streamer(),
        **req_extra_args)
def post(dcos_mode, host, url, **kwargs):
    """POST helper: routes through dcos.http when in DC/OS mode,
    otherwise uses plain requests.
    """
    kwargs = enrich_args(host, **kwargs)
    poster = http.post if dcos_mode else requests.post
    return poster(url, **kwargs)
def _attach_container_input(self):
    """Streams all input data (e.g. STDIN) from the client to the agent
    """
    def _initial_input_streamer():
        """Generator function yielding the initial ATTACH_CONTAINER_INPUT
        message for streaming. We have a separate generator for this so
        that we can attempt the connection once before committing to a
        persistent connection where we stream the rest of the input.

        :returns: A RecordIO encoded message
        """
        message = {
            'type': 'ATTACH_CONTAINER_INPUT',
            'attach_container_input': {
                'type': 'CONTAINER_ID',
                'container_id': {
                    'parent': {
                        'value': self.parent_id
                    },
                    'value': self.container_id
                }
            }
        }
        yield self.encoder.encode(message)

    def _input_streamer():
        """Generator function yielding ATTACH_CONTAINER_INPUT messages for
        streaming. It yields the _initial_input_streamer() message,
        followed by messages from the input_queue on each subsequent call.

        :returns: A RecordIO encoded message
        """
        yield next(_initial_input_streamer())
        while True:
            record = self.input_queue.get()
            if not record:
                # A falsy record is the sentinel that input has ended.
                break
            yield record

    req_extra_args = {
        'headers': {
            'Content-Type': 'application/json+recordio',
            'Accept': 'application/json',
            'Connection': 'close',
            'Transfer-Encoding': 'chunked'
        }
    }

    # Ensure we don't try to attach our input to a container that isn't
    # fully up and running by waiting until the
    # `_process_output_stream` function signals us that it's ready.
    self.attach_input_event.wait()

    # Send an initial "Test" message to ensure that we can establish a
    # connection with the agent at all. If we can't we will throw an
    # exception and break out of this thread.
    http.post(self.agent_url,
              data=_initial_input_streamer(),
              **req_extra_args)

    # If we succeeded with that connection, unblock process_output_stream()
    # from sending output data to the output thread.
    self.print_output_event.set()

    # Begin streaming the input.
    http.post(self.agent_url,
              data=_input_streamer(),
              **req_extra_args)
def _link(dcos_url, provider_id):
    """ Link a DC/OS cluster to the current one.

    :param dcos_url: master ip of the cluster to link to
    :type dcos_url: str
    :param provider_id: login provider ID for the linked cluster
    :type provider_id: str
    :returns: process status
    :rtype: int
    """
    current_cluster = cluster.get_attached_cluster()
    if not current_cluster:
        raise DCOSException('No cluster is attached, cannot link.')
    current_cluster_url = current_cluster.get_url()

    # Accept the same formats as the `setup` command
    # eg. "my-cluster.example.com" -> "https://my-cluster.example.com"
    dcos_url = jsonitem._parse_url(dcos_url)

    try:
        linked_cluster_ip = socket.gethostbyname(urlparse(dcos_url).netloc)
    except OSError as error:
        raise DCOSException("Unable to retrieve IP for '{dcos_url}': {error}"
                            .format(dcos_url=dcos_url, error=error))

    # Check if the linked cluster is already configured (based on its IP)
    for configured_cluster in cluster.get_clusters():
        configured_cluster_host = \
            urlparse(configured_cluster.get_url()).netloc
        configured_cluster_ip = socket.gethostbyname(configured_cluster_host)
        if linked_cluster_ip == configured_cluster_ip:
            linked_cluster_id = configured_cluster.get_cluster_id()
            linked_cluster_name = configured_cluster.get_name()
            break
    else:
        # for/else: runs only when no configured cluster matched above.
        msg = ("The cluster you are linking to must be set up locally before\n"
               "running the `cluster link` command. To set it up now, run:\n"
               " $ dcos cluster setup {}".format(dcos_url))
        raise DCOSException(msg)

    providers = auth.get_providers(dcos_url)
    if provider_id:
        if provider_id not in providers:
            raise DCOSException(
                "Incorrect provider ID '{}'.".format(provider_id))
        provider_type = providers[provider_id]['authentication-type']
    else:
        # No provider given: ask the user interactively.
        (provider_id, provider_type) = _prompt_for_login_provider(providers)

    message = {
        'id': linked_cluster_id,
        'name': linked_cluster_name,
        'url': dcos_url,
        'login_provider': {
            'id': provider_id,
            'type': provider_type}}
    headers = {
        'Content-Type': 'application/json',
        'Accept': 'application/json'}
    http.post(
        urllib.parse.urljoin(current_cluster_url, '/cluster/v1/links'),
        data=json.dumps(message),
        headers=headers)

    return 0
def _attach_container_input(self):
    """Streams all input data (e.g. STDIN) from the client to the agent
    """
    def _initial_input_streamer():
        """Generator function yielding the initial ATTACH_CONTAINER_INPUT
        message for streaming. We have a separate generator for this so
        that we can attempt the connection once before committing to a
        persistent connection where we stream the rest of the input.

        :returns: A RecordIO encoded message
        """
        message = {
            'type': 'ATTACH_CONTAINER_INPUT',
            'attach_container_input': {
                'type': 'CONTAINER_ID',
                'container_id': {
                    'parent': self.parent_id,
                    'value': self.container_id
                }
            }
        }
        yield self.encoder.encode(message)

    def _input_streamer():
        """Generator function yielding ATTACH_CONTAINER_INPUT messages for
        streaming. It yields the _initial_input_streamer() message,
        followed by messages from the input_queue on each subsequent call.

        :returns: A RecordIO encoded message
        """
        yield next(_initial_input_streamer())
        while True:
            record = self.input_queue.get()
            if not record:
                # A falsy record is the sentinel that input has ended.
                break
            yield record

    req_extra_args = {
        'headers': {
            'Content-Type': 'application/recordio',
            'Message-Content-Type': 'application/json',
            'Accept': 'application/json',
            'Connection': 'close',
            'Transfer-Encoding': 'chunked'
        }
    }

    # Ensure we don't try to attach our input to a container that isn't
    # fully up and running by waiting until the
    # `_process_output_stream` function signals us that it's ready.
    self.attach_input_event.wait()

    # Send an initial "Test" message to ensure that we are able to
    # establish a connection with the agent. If we aren't we will throw
    # an exception and break out of this thread. However, in cases where
    # we receive a 500 response from the agent, we actually want to
    # continue without throwing an exception. A 500 error indicates that
    # we can't connect to the container because it has already finished
    # running. In that case we continue running to allow the output queue
    # to be flushed.
    try:
        http.post(self.agent_url,
                  data=_initial_input_streamer(),
                  **req_extra_args)
    except DCOSHTTPException as e:
        if not e.response.status_code == 500:
            raise e

    # If we succeeded with that connection, unblock process_output_stream()
    # from sending output data to the output thread.
    self.print_output_event.set()

    # Begin streaming the input.
    http.post(self.agent_url,
              data=_input_streamer(),
              **req_extra_args)