def start(self, **kwargs):
    """starts a server

    Launches all managed servers, then — depending on the options —
    attaches interactively, waits for them to exit, or detaches.

    :param kwargs: optional flags:
        * parallel: launch every server before waiting on any of them
        * daemon (default True): when False, attach interactively to
          each server; Ctrl-C stops the whole group
        * wait (default True): when True, wait for servers to exit
    :returns: accumulated exit status of the servers (0 == success)
    """
    setup_env()
    status = 0
    if kwargs.get('parallel', False):
        # Launch everything up front so the servers come up concurrently.
        for server in self.servers:
            server.launch(**kwargs)
        if not kwargs.get('daemon', True):
            # Foreground mode: interact with each server in turn; a
            # KeyboardInterrupt tears down the whole group.
            for server in self.servers:
                try:
                    status += server.interact(**kwargs)
                except KeyboardInterrupt:
                    print _('\nuser quit')
                    script_logger.info("User Quit")
                    self.stop(**kwargs)
                    break
        elif kwargs.get('wait', True):
            for server in self.servers:
                status += server.wait(**kwargs)
        return status
    else:
        # Sequential mode: launch each server and wait for it before
        # moving on to the next one.
        for server in self.servers:
            server.launch(**kwargs)
            status += server.wait(**kwargs)
        return status
def get_socket(conf, default_port=61005):
    """Bind socket to bind ip:port in conf

    :param conf: Configuration dict to read settings from
    :param default_port: port to use if not specified in conf
    :returns : a socket object as returned from socket.listen or
               ssl.wrap_socket if conf specifies cert_file
    :raises Exception: if the address could not be bound within
                       ``bind_timeout`` seconds
    """
    bind_addr = (conf.get('bind_ip', '0.0.0.0'),
                 int(conf.get('bind_port', default_port)))
    # Pick the first IPv4/IPv6 family that resolves for the bind address;
    # getaddrinfo result tuples carry the family in slot 0.
    address_family = [addr[0] for addr in
                      socket.getaddrinfo(bind_addr[0], bind_addr[1],
                                         socket.AF_UNSPEC,
                                         socket.SOCK_STREAM)
                      if addr[0] in (socket.AF_INET, socket.AF_INET6)][0]
    sock = None
    bind_timeout = int(conf.get('bind_timeout', 30))
    so_rcvbuf = conf.get('so_rcvbuf', None)
    retry_until = time.time() + bind_timeout
    warn_ssl = False
    # Retry while the address is still in use, e.g. a previous process
    # has not yet released the port.
    while not sock and time.time() < retry_until:
        try:
            sock = listen(bind_addr,
                          backlog=int(conf.get('backlog', 4096)),
                          family=address_family)
            if 'cert_file' in conf:
                warn_ssl = True
                sock = ssl.wrap_socket(sock, certfile=conf['cert_file'],
                                       keyfile=conf['key_file'])
        except socket.error as err:
            # Only EADDRINUSE is retryable; anything else is fatal.
            if err.args[0] != errno.EADDRINUSE:
                raise
            sleep(0.1)
    if not sock:
        raise Exception(_('Could not bind to %s:%s '
                          'after trying for %s seconds') %
                        (bind_addr[0], bind_addr[1], bind_timeout))
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # in my experience, sockets can hang around forever without keepalive
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
    if so_rcvbuf:
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, int(so_rcvbuf))
    sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    # TCP_KEEPIDLE is platform-specific (Linux); guard for portability.
    if hasattr(socket, 'TCP_KEEPIDLE'):
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 600)
    if warn_ssl:
        ssl_warning_message = _('WARNING: SSL should only be enabled for '
                                'testing purposes. Use external SSL '
                                'termination for a production deployment.')
        get_logger(conf).warning(ssl_warning_message)
        print(ssl_warning_message)
    return sock
def handle_ratelimit(self, req, account_name, container_name, obj_name):
    '''
    Performs rate limiting and account white/black listing.  Sleeps
    if necessary.  If self.memcache_client is not set, immediately
    returns None.

    :param req: incoming request (used to build error responses)
    :param account_name: account name from path
    :param container_name: container name from path
    :param obj_name: object name from path
    :returns: a 497/498 Response when the request is rejected,
              otherwise None (request may proceed)
    '''
    # Rate limiting state lives in memcache; without it we cannot limit.
    if not self.memcache_client:
        return None
    if account_name in self.ratelimit_blacklist:
        self.logger.error(_('Returning 497 because of blacklisting: %s'),
                          account_name)
        # Penalty sleep before rejecting blacklisted accounts.
        eventlet.sleep(self.BLACK_LIST_SLEEP)
        return Response(status='497 Blacklisted',
                        body='Your account has been blacklisted',
                        request=req)
    if account_name in self.ratelimit_whitelist:
        return None
    for key, max_rate in self.get_ratelimitable_key_tuples(
            req, account_name, container_name=container_name,
            obj_name=obj_name):
        try:
            need_to_sleep = self._get_sleep_time(key, max_rate)
            if self.log_sleep_time_seconds and \
                    need_to_sleep > self.log_sleep_time_seconds:
                # Only log sleeps longer than the configured threshold.
                self.logger.warning(
                    _("Ratelimit sleep log: %(sleep)s for "
                      "%(account)s/%(container)s/%(object)s"),
                    {'sleep': need_to_sleep, 'account': account_name,
                     'container': container_name, 'object': obj_name})
            if need_to_sleep > 0:
                # Throttle by delaying the request instead of rejecting.
                eventlet.sleep(need_to_sleep)
        except MaxSleepTimeHitError as e:
            # Required sleep exceeds the allowed maximum: reject with 498.
            self.logger.error(
                _('Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s '
                  '. Ratelimit (Max Sleep) %(e)s'),
                {'meth': req.method, 'acc': account_name,
                 'cont': container_name, 'obj': obj_name, 'e': str(e)})
            error_resp = Response(status='498 Rate Limited',
                                  body='Slow down', request=req)
            return error_resp
    return None
def GETorHEAD(self, req):
    """Handler for HTTP GET/HEAD requests.

    Validates the account name and query parameters, resolves the
    account node from the ring, forwards the request to the backend
    and post-processes the response (autocreate listing, owner-only
    headers).
    """
    if len(self.account_name) > MAX_ACCOUNT_NAME_LENGTH:
        resp = HTTPBadRequest(request=req)
        resp.body = 'Account name length of %d longer than %d' % \
            (len(self.account_name), MAX_ACCOUNT_NAME_LENGTH)
        return resp
    path = get_param(req, 'path')
    #delimiter = get_param(req, 'delimiter')
    #prefix = get_param(req, 'prefix')
    limit = get_param(req, 'limit')
    if limit and not limit.isdigit():
        return HTTPPreconditionFailed(request=req,
                                      body='Value of limit must '
                                      'be a positive integer')
    if path:
        return HTTPBadRequest(request=req, body='Unsupported query '
                              'parameter path')
    # partition, nodes = self.app.account_ring.get_nodes(self.account_name)
    node, filesystem, directory, global_map_version, component_number = \
        self.app.account_ring.get_node(self.account_name)
    # An empty node list means the ring file is unusable.
    if not len(node):
        self.app.logger.error(_('%(msg)s %(method)s %(path)s'),
                              {'msg': _('ERROR Wrong ring file content'),
                               'method': 'GET/HEAD', 'path': req.path})
        return HTTPInternalServerError(request=req)
    #TODO Currently same component request is blocked until previous same
    #component request's map version is not updated, need to check other
    #provision
    if not self.is_req_blocked(_('Account'), component_number):
        #return HTTPTemporaryRedirect(request=req, body = 'Component'
        #                             'movement is in progress')
        # resp = self.GETorHEAD_base(
        #     req, _('Account'), self.app.account_ring, partition,
        #     req.swift_entity_path.rstrip('/'))
        resp = self.GETorHEAD_base(
            req, _('Account'), self.app.account_ring, node, filesystem,
            directory, req.swift_entity_path.rstrip('/'),
            global_map_version, component_number)
        resp = self.account_req_for_comp_distribution(
            req, component_number, global_map_version,
            self.account_name, resp)
        # Redirects from the backend indicate an inconsistent state;
        # surface them as a server error instead of forwarding them.
        if int(resp.status.split()[0]) in (301, 307):
            resp = HTTPInternalServerError(request=req)
        if resp.status_int == HTTP_NOT_FOUND:
            if resp.headers.get('X-Account-Status', '').lower() == 'deleted':
                resp.status = HTTP_GONE
            if self.app.account_autocreate:
                # Autocreate mode: synthesize an empty listing instead
                # of returning 404.
                resp = account_listing_response(self.account_name, req,
                                                get_listing_content_type(req))
        if req.environ.get('swift_owner'):
            self.add_acls_from_sys_metadata(resp)
        else:
            # Strip owner-only headers for non-owner requests.
            for header in self.app.swift_owner_headers:
                resp.headers.pop(header, None)
        return resp
    # NOTE(review): when is_req_blocked() is true the handler falls
    # through and implicitly returns None (the redirect above is
    # commented out) -- looks unfinished; confirm intended behavior.
def _make_request_for_bulk_delete(self, nodes, filesystem, directory,
                                  method, path, headers, query, body_data,
                                  logger_thread_locals):
    """Send a bulk-delete request (with body) to the given nodes.

    Tries each node in turn; the first non-informational,
    non-server-error response is returned.  The request body is
    streamed in REQ_BODY_CHUNK_SIZE pieces.

    :param nodes: candidate backend nodes (dicts with 'ip'/'port')
    :param filesystem: backend filesystem identifier
    :param directory: backend directory identifier
    :param method: HTTP method to use
    :param path: request path
    :param headers: request headers (Content-Length is set here)
    :param query: query string for the backend request
    :param body_data: full request body as a string
    :param logger_thread_locals: logger context from the calling thread
    :returns: (status, reason, headers, body) tuple on success,
              otherwise None if every node failed
    """
    self.app.logger.thread_locals = logger_thread_locals
    self.app.logger.debug("in _make_request_for_bulk_delete___")
    for node in nodes:
        try:
            start_node_timing = time.time()
            headers['Content-Length'] = len(body_data)
            with ConnectionTimeout(self.app.conn_timeout):
                conn = http_connect(node['ip'], node['port'], filesystem,
                                    directory, method, path,
                                    headers=headers, query_string=query)
            if conn:
                try:
                    # Stream the body chunk by chunk; body_data is
                    # consumed from the front as chunks are sent.
                    bytes_transferred = 0
                    data_size = len(body_data)
                    while bytes_transferred < data_size:
                        chunk = body_data[:REQ_BODY_CHUNK_SIZE]
                        self.app.logger.debug("Sending chunk: %s" \
                            % chunk)
                        conn.send(chunk)
                        bytes_transferred += len(chunk)
                        body_data = body_data[REQ_BODY_CHUNK_SIZE:]
                    self.app.logger.debug("Total sent bytes: %s" \
                        % bytes_transferred)
                except Exception, ex:
                    # Best-effort: a send failure is logged and we still
                    # try to read a response below.
                    self.app.logger.error(\
                        "Error while sending body: %s" % ex)
                conn.node = node
                self.app.set_node_timing(node,
                                         time.time() - start_node_timing)
                with Timeout(self.app.node_timeout):
                    resp = conn.getresponse()
                    if not is_informational(resp.status) and \
                            not is_server_error(resp.status):
                        return resp.status, resp.reason, \
                            resp.getheaders(), resp.read()
                    elif resp.status == HTTP_INSUFFICIENT_STORAGE:
                        self.app.logger.error(
                            _('%(msg)s %(ip)s:%(port)s'),
                            {'msg': _('ERROR Insufficient Storage'), \
                             'ip': node['ip'], 'port': node['port']})
        except (Exception, Timeout):
            # Any failure on this node: record it and fall through to
            # the next node in the list.
            self.app.exception_occurred(
                node, self.server_type,
                _('Trying to %(method)s %(path)s') %
                {'method': method, 'path': path})
def get_conn_response(conn):
    # Closure over ``self`` from the enclosing method: fetch the final
    # response for ``conn`` within the configured node timeout.  If a
    # response was already buffered on the connection (conn.resp), it is
    # returned instead of calling getresponse() again.
    # Returns (response, conn), or None if the wait failed/timed out.
    node_timeout = int(self.conf.get('NODE_TIMEOUT', 600))
    try:
        with Timeout(node_timeout):
            if conn.resp:
                self.logger.info("conn.resp returned")
                return (conn.resp, conn)
            else:
                self.logger.info("conn.getresponse()")
                return (conn.getresponse(), conn)
    except (Exception, Timeout):
        # Swallow the failure; the caller treats a missing return value
        # as "no final status from this connection".
        self.logger.exception(_("connection: %(conn)s \
                get_put_response: %(status)s" ), \
            {'conn': conn.node, \
             'status': _('Trying to get final status ')})
def BULK_DELETE(self, req):
    """HTTP BULK_DELETE request handler.

    Resolves the account, then the container node from the container
    ring, builds backend headers and forwards the bulk-delete to the
    container server.
    """
    self.app.logger.debug("In BULK_DELETE____")
    account_node, account_filesystem, account_directory, container_count, \
        account_component_number, head_status = self.account_info(\
            self.account_name, req)
    # No node and no HEAD status at all: account lookup itself broke.
    if not account_node and not head_status:
        return HTTPInternalServerError(request=req)
    if head_status and int(str(head_status).split()[0]) == 503:
        #TODO need to check why head_status is int or sometimes str
        self.app.logger.info("account HEAD returning 503 service " \
            "unavailable error due to which this request got failed")
        return HTTPServiceUnavailable(request=req)
    if not account_node:
        return HTTPNotFound(request=req)
    container_node, container_filesystem, container_directory, \
        global_map_version, component_number = \
        self.app.container_ring.get_node( \
            self.account_name, self.container_name)
    # Empty node list means the ring file is unusable.
    if not len(container_node):
        self.app.logger.error(
            _('%(msg)s %(method)s %(path)s'),
            {'msg': _('ERROR Wrong ring file content'),
             'method': 'BULK_DELETE', 'path': req.path})
        return HTTPInternalServerError(request=req)
    self.app.logger.debug("Going for backend call______")
    try:
        headers = self._backend_requests(
            req, len(container_node), account_node, account_filesystem,
            account_directory, global_map_version, component_number,
            account_component_number)
    except ZeroDivisionError:
        # _backend_requests divides by the node count; zero nodes here
        # again points at broken ring content.
        self.app.logger.error(
            _('%(msg)s %(method)s %(path)s'),
            {'msg': _('ERROR Wrong ring file content'),
             'method': 'BULK_DELETE', 'path': req.path})
        return HTTPInternalServerError(request=req)
    # Invalidate cached account/container info before mutating.
    clear_info_cache(self.app, req.environ, self.account_name,
                     self.container_name)
    self.app.logger.debug("Now going to make request_____")
    resp = self.make_request_for_bulk_delete(
        req, self.app.container_ring, container_node,
        container_filesystem, container_directory, 'BULK_DELETE',
        req.swift_entity_path, headers)
    return resp
def _send_file(self, conn, path):
    """Stream the buffered body in ``conn.reader`` to the container
    server in 64 KiB chunks.

    Bug fix: the original looped ``while conn.reader`` but only
    consumed ``conn.reader`` when ``conn.failed`` was false and the
    send succeeded — so once the connection was marked failed (on
    entry, or after a failed send) the loop spun forever on the same
    data.  Streaming now stops as soon as the connection is failed,
    and a chunk is only counted/consumed after a successful send.

    :param conn: connection object carrying ``reader`` (remaining
                 body bytes), a ``failed`` flag and ``node`` info
    :param path: request path (kept for interface compatibility)
    """
    self.logger.info("send_data started")
    node_timeout = int(self.conf.get('NODE_TIMEOUT', 600))
    bytes_transferred = 0
    while conn.reader and not conn.failed:
        chunk = conn.reader[:65536]
        try:
            with ChunkWriteTimeout(node_timeout):
                self.logger.debug('Sending chunk%s' % chunk)
                conn.send(chunk)
            # Consume and count the chunk only after a successful send,
            # so bytes_transferred reflects what actually went out.
            conn.reader = conn.reader[65536:]
            bytes_transferred += len(chunk)
        except (Exception, ChunkWriteTimeout):
            # Mark the connection dead; the loop condition ends the
            # transfer instead of retrying the same chunk forever.
            conn.failed = True
            self.logger.exception(
                _('ERROR with %(type)s server %(ip)s:%(port)s/ re: '
                  '%(info)s'),
                {'type': "Container", 'ip': conn.node['ip'],
                 'port': conn.node['port'], 'info': "send file failed "})
    self.logger.info("Bytes transferred: %s" % bytes_transferred)
    self.logger.info("Sent File completed")
def write(self, chunk):
    """
    Write a chunk of data to disk.  All invocations of this method must
    come before invoking the :func:

    For this implementation, the data is written into a temporary file.

    :param chunk: the chunk of data to write as a string object
    :returns: the total number of bytes written to an object
    :raises IOError: if the native write reports failure (-1)
    """
    try:
        # The native object library writes the chunk and reports the
        # number of bytes written back through the read pipe as a
        # 4-byte little-int; partial writes are retried with the
        # remaining bytes.
        while chunk:
            self._object_lib.write_chunk(self._fd_data, chunk,
                                         len(chunk), self._wpipe)
            ret = self._rpipe.read(4)
            written, = struct.unpack("i", ret)
            if (written == -1):
                raise IOError("write data file error")
            self._upload_size += written
            chunk = chunk[written:]
    except Exception as e:
        self._logger.error(
            _('ERROR DiskFile %(data_file)s'
              ' write failure: %(exc)s : %(stack)s'),
            {'exc': e, 'stack': ''.join(traceback.format_stack()),
             'data_file': self._name})
        # Remove both temp files so no partial object is left behind,
        # then re-raise for the caller.
        remove_file(self._tmppath_data)
        remove_file(self._tmppath_meta)
        raise
    return self._upload_size
def http_connect(ipaddr, port, device, partition, method, path,
                 headers=None, query_string=None, ssl=False):
    """Open a connection to a backend service node.

    The device and partition are folded into the request path, which is
    percent-quoted before being handed to :func:`http_connect_raw`.
    With ``ssl=True`` an HTTPSConnection is used; otherwise the
    buffered backend connection class.

    :param ipaddr: IPv4 address to connect to
    :param port: port to connect to
    :param device: device of the node to query
    :param partition: partition on the device
    :param method: HTTP method to request ('GET', 'PUT', 'POST', etc.)
    :param path: request path
    :param headers: dictionary of headers
    :param query_string: request query string
    :param ssl: set True if SSL should be used (default: False)
    :returns: HTTPConnection object
    """
    if isinstance(path, unicode):
        # Backend paths must be UTF-8 byte strings before quoting.
        try:
            path = path.encode("utf-8")
        except UnicodeError as e:
            logging.exception(_('Error encoding to UTF-8: %s'), str(e))
    device_path = '/' + device + '/' + str(partition) + path
    return http_connect_raw(ipaddr, port, method, quote(device_path),
                            headers, query_string, ssl)
def read_transaction_journal(self, component_map):
    '''
    Read the transaction journal entries for the local components plus
    any components handed over in ``component_map``.

    :param component_map: dict mapping some key to lists of component
                          names whose journal entries must also be read
    :returns: transaction entries (dictionary: {'comp': [data]})
    '''
    try:
        self.logger.info("reading transaction_journal")
        compo_entries = \
            libContainerLib.list_less__component_names__greater_()
        # Merge components received from other nodes into the list.
        for i in component_map.values():
            for comp in i:
                compo_entries.append(comp)
        node_hash = md5(self.id + "_" + self.local_leader_id)
        node_id = int(node_hash.hexdigest()[:8], 16) % 256
        #Performance
        # The hash-derived node id is deliberately overridden with -1
        # (performance tweak per the original comment above).
        node_id = -1
        self.logger.info("Node ID for this set of services is %r" % \
            node_id)
        transObj = \
            libTransactionLib.TransactionLibrary(self.__trans_path,
                                                 node_id,
                                                 self.__trans_port)
        #read data from journal
        self.logger.info("component : %s" % compo_entries)
        transaction_entries = transObj.extract_trans_journal_data(
            compo_entries)
        self.logger.info(_("Transaction Entries: %(transaction_entries)s"),
                         {'transaction_entries': transaction_entries})
        return transaction_entries
    except Exception as err:
        # Journal corruption is unrecoverable here: log and exit with a
        # distinctive status code so the supervisor can react.
        self.logger.error("Reading transaction Journal Failed: %s" % err)
        sys.exit(130)
def POST(self, req):
    """HTTP POST request handler.

    Validates the account name and metadata, resolves the account node
    from the ring and forwards the POST; on 404 with autocreate
    enabled the account is created and the POST retried once.
    """
    if len(self.account_name) > MAX_ACCOUNT_NAME_LENGTH:
        resp = HTTPBadRequest(request=req)
        resp.body = 'Account name length of %d longer than %d' % \
            (len(self.account_name), MAX_ACCOUNT_NAME_LENGTH)
        return resp
    error_response = check_metadata(req, 'account')
    if error_response:
        return error_response
    # account_partition, accounts = \
    #     self.app.account_ring.get_nodes(self.account_name)
    node, filesystem, directory, global_map_version, component_number = \
        self.app.account_ring.get_node(self.account_name)
    # Empty node list means the ring file is unusable.
    if not len(node):
        self.app.logger.error(_('%(msg)s %(method)s %(path)s'),
                              {'msg': _('ERROR Wrong ring file content'),
                               'method': 'POST', 'path': req.path})
        return HTTPInternalServerError(request=req)
    headers = self.generate_request_headers(global_map_version,
                                            component_number, req,
                                            transfer=True)
    # Invalidate cached account info before mutating metadata.
    clear_info_cache(self.app, req.environ, self.account_name)
    if not self.is_req_blocked(_('Account'), component_number):
        #return HTTPTemporaryRedirect(request=req, body = 'Component'
        #                             'movement is in progress')
        # resp = self.make_requests(
        #     req, self.app.account_ring, account_partition, 'POST',
        #     req.swift_entity_path, [headers] * len(accounts))
        resp = self.make_requests(
            req, self.app.account_ring, node, filesystem, directory,
            'POST', req.swift_entity_path, [headers] * len(node))
        resp = self.account_req_for_comp_distribution(
            req, component_number, global_map_version,
            self.account_name, resp)
        if resp.status_int == HTTP_NOT_FOUND and \
                self.app.account_autocreate:
            # Autocreate the account, then retry the POST once.
            self.autocreate_account(req.environ, self.account_name)
            # resp = self.make_requests(
            #     req, self.app.account_ring, account_partition, 'POST',
            #     req.swift_entity_path, [headers] * len(accounts))
            resp = self.make_requests(
                req, self.app.account_ring, node, filesystem, directory,
                'POST', req.swift_entity_path, [headers] * len(node))
            resp = self.account_req_for_comp_distribution(
                req, component_number, global_map_version,
                self.account_name, resp)
        # Redirects from the backend indicate an inconsistent state;
        # surface them as a server error instead of forwarding them.
        if int(resp.status.split()[0]) in (301, 307):
            resp = HTTPInternalServerError(request=req)
        self.add_acls_from_sys_metadata(resp)
        return resp
    # NOTE(review): when is_req_blocked() is true the handler falls
    # through and implicitly returns None (the redirect above is
    # commented out) -- looks unfinished; confirm intended behavior.
def _exception_occurred(self, server, e, action='talking', sock=None,
                        fp=None, got_connection=True):
    """Record a memcached failure: log it, tear down the connection and
    apply error limiting to the server.

    :param server: memcached server the error relates to
    :param e: the exception that occurred
    :param action: short description of what we were doing ('talking',
                   'connecting', ...) for the log line
    :param sock: socket to close, if any
    :param fp: file object to close, if any
    :param got_connection: when True, a placeholder is returned to the
                           connection pool so its size stays balanced
    """
    # Timeouts are expected operational noise (error level); anything
    # else gets a full traceback.
    if isinstance(e, Timeout):
        logging.error(_("Timeout %(action)s to memcached: %(server)s"),
                      {'action': action, 'server': server})
    else:
        logging.exception(_("Error %(action)s to memcached: %(server)s"),
                          {'action': action, 'server': server})
    # Best-effort cleanup of the broken file object and socket.
    try:
        if fp:
            fp.close()
            del fp
    except Exception:
        pass
    try:
        if sock:
            sock.close()
            del sock
    except Exception:
        pass
    if got_connection:
        # We need to return something to the pool
        # A new connection will be created the next time it is retrieved
        self._return_conn(server, None, None)
    now = time.time()
    self._errors[server].append(time.time())
    if len(self._errors[server]) > ERROR_LIMIT_COUNT:
        # Drop errors older than the window; if too many remain, the
        # server is error-limited (skipped) for ERROR_LIMIT_DURATION.
        self._errors[server] = [
            err for err in self._errors[server]
            if err > now - ERROR_LIMIT_TIME]
        if len(self._errors[server]) > ERROR_LIMIT_COUNT:
            self._error_limited[server] = now + ERROR_LIMIT_DURATION
            logging.error(_('Error limiting server %s'), server)
def getresponse(self):
    """Fetch the HTTP response and emit a perf log line.

    Delegates to ``HTTPConnection.getresponse`` and logs, at debug
    level, how long the connection has been open together with the
    method, host, port and path of the request.

    :returns: the HTTPResponse from the underlying connection
    """
    resp = HTTPConnection.getresponse(self)
    elapsed = time.time() - self._connected_time
    logging.debug(
        _("HTTP PERF: %(time).5f seconds to %(method)s "
          "%(host)s:%(port)s %(path)s)"),
        {'time': elapsed,
         'method': self._method,
         'host': self.host,
         'port': self.port,
         'path': self._path})
    return resp
def signal_pids(self, sig, **kwargs):
    """Send a signal to pids for this server

    Stale pid files (process gone) are removed along the way; pids we
    lack permission to signal are reported but not returned.

    :param sig: signal to send
    :returns: a dict mapping pids (ints) to pid_files (paths) that were
              successfully signaled
    """
    pids = {}
    for pid_file, pid in self.iter_pid_files(**kwargs):
        try:
            # SIG_DFL is used as a "probe only" sentinel: don't announce
            # it, just check the process with os.kill below.
            if sig != signal.SIG_DFL:
                print _('Signal %s pid: %s signal: %s') % (self.server,
                                                           pid, sig)
                script_logger.info("Signal %s pid: %s signal: %s"
                                   %(self.server, pid, sig))
            os.kill(pid, sig)
        except OSError as e:
            if e.errno == errno.ESRCH:
                # pid does not exist
                if kwargs.get('verbose'):
                    print _("Removing stale pid file %s") % pid_file
                    script_logger.info("Removing stale pid file %s"
                                       %pid_file)
                remove_file(pid_file)
            elif e.errno == errno.EPERM:
                print _("No permission to signal PID %d") % pid
                script_logger.error("No permission to signal PID %d" %pid)
        else:
            # process exists
            pids[pid] = pid_file
    return pids
def setup_env():
    """Try to increase resource limits of the OS. Move PYTHON_EGG_CACHE
    to /tmp

    Raising RLIMIT_NOFILE/RLIMIT_DATA/RLIMIT_NPROC normally requires
    root; each failure is reported as a warning and ignored so the
    process can still run with the default limits.
    """
    try:
        resource.setrlimit(resource.RLIMIT_NOFILE,
                           (MAX_DESCRIPTORS, MAX_DESCRIPTORS))
    except ValueError:
        print _("WARNING: Unable to modify file descriptor limit. "
                "Running as non-root?")
        script_logger.error("WARNING: Unable to modify file descriptor "
                            "limit. Running as non-root?")
    try:
        resource.setrlimit(resource.RLIMIT_DATA,
                           (MAX_MEMORY, MAX_MEMORY))
    except ValueError:
        print _("WARNING: Unable to modify memory limit. "
                "Running as non-root?")
        script_logger.error("WARNING: Unable to modify memory limit. "
                            "Running as non-root?")
    try:
        resource.setrlimit(resource.RLIMIT_NPROC,
                           (MAX_PROCS, MAX_PROCS))
    except ValueError:
        print _("WARNING: Unable to modify max process limit. "
                "Running as non-root?")
        script_logger.error("WARNING: Unable to modify max process limit. "
                            "Running as non-root?")
    # Set PYTHON_EGG_CACHE if it isn't already set
    os.environ.setdefault('PYTHON_EGG_CACHE', '/tmp')
def status(self, pids=None, **kwargs):
    """Display status of server

    :param: pids, if not supplied pids will be populated automatically
    :param: number, if supplied will only lookup the nth server

    :returns: 1 if server is not running, 0 otherwise
    """
    if pids is None:
        pids = self.get_running_pids(**kwargs)
    if not pids:
        number = kwargs.get('number', 0)
        if number:
            # A specific instance was requested: report which conf file
            # it would have used; suppress conf-lookup chatter.
            kwargs['quiet'] = True
            conf_files = self.conf_files(**kwargs)
            if conf_files:
                print _("%s #%d not running (%s)") % (self.server, number,
                                                      conf_files[0])
                script_logger.info("%s #%d not running (%s)"
                                   %(self.server, number, conf_files[0]))
        else:
            print _("No %s running") % self.server
            script_logger.error("No %s running" %self.server)
        return 1
    for pid, pid_file in pids.items():
        conf_file = self.get_conf_file_name(pid_file)
        print _("%s running (%s - %s)") % (self.server, pid, conf_file)
        script_logger.info("%s running (%s - %s)"
                           %(self.server, pid, conf_file))
    return 0
def make_request(self, method, path, headers, acceptable_statuses,
                 body_file=None):
    """
    Makes a request to Swift with retries.

    Retries up to ``self.request_tries`` times with exponential backoff
    (2, 4, 8... seconds) between attempts.

    :param method: HTTP method of request.
    :param path: Path of request.
    :param headers: Headers to be sent with request.
    :param acceptable_statuses: List of acceptable statuses for request.
    :param body_file: Body file to be passed along with request,
                      defaults to None.

    :returns : Response object on success.

    :raises UnexpectedResponse: Exception raised when make_request() fails
                                to get a response with an acceptable status
    :raises Exception: Exception is raised when code fails in an
                       unexpected way.
    """
    headers = dict(headers)
    headers['user-agent'] = self.user_agent
    resp = exc_type = exc_value = exc_traceback = None
    for attempt in xrange(self.request_tries):
        req = Request.blank(path, environ={'REQUEST_METHOD': method},
                            headers=headers)
        if body_file is not None:
            # Rewind the body so every retry sends it from the start.
            if hasattr(body_file, 'seek'):
                body_file.seek(0)
            req.body_file = body_file
        try:
            resp = req.get_response(self.app)
            # Accept either exact statuses or whole classes (e.g. 2 for
            # any 2xx).
            if resp.status_int in acceptable_statuses or \
                    resp.status_int // 100 in acceptable_statuses:
                return resp
        except (Exception, Timeout):
            # Remember the last failure so it can be re-raised after the
            # final attempt.
            exc_type, exc_value, exc_traceback = exc_info()
        # sleep only between tries, not after each one
        if attempt < self.request_tries - 1:
            sleep(2 ** (attempt + 1))
    if resp:
        raise UnexpectedResponse(
            _('Unexpected response: %s') % resp.status, resp)
    if exc_type:
        # To make pep8 tool happy, in place of raise t, v, tb:
        raise exc_type(*exc_value.args), None, exc_traceback
def conf_files(self, **kwargs):
    """Get conf files for this server

    Standalone servers look for ``<server>*.conf``; others look for
    ``<type>-server*.conf`` (proxy configs live under OSD_DIR).

    :param: number, if supplied will only lookup the nth server

    :returns: list of conf files
    """
    if self.server in STANDALONE_SERVERS:
        found_conf_files = search_tree(SWIFT_DIR, self.server + '*',
                                       '.conf', dir_ext='.conf.d')
    else:
        found_conf_files = search_tree(SWIFT_DIR,
                                       '%s-server*' % self.type,
                                       '.conf', dir_ext='.conf.d')
        # Proxy server configs are kept under OSD_DIR instead.
        if self.type == 'proxy':
            found_conf_files = search_tree(OSD_DIR,
                                           '%s-server*' % self.type,
                                           '.conf', dir_ext='.conf.d')
    number = kwargs.get('number')
    if number:
        # Select only the nth conf file (1-based).
        try:
            conf_files = [found_conf_files[number - 1]]
        except IndexError:
            conf_files = []
    else:
        conf_files = found_conf_files
    if not conf_files:
        # maybe there's a config file(s) out there, but I couldn't find it!
        if not kwargs.get('quiet'):
            print _('Unable to locate config %sfor %s') % (
                ('number %s ' % number if number else ''), self.server)
            script_logger.error("Unable to locate config for %s"
                                %self.server)
        if kwargs.get('verbose') and not kwargs.get('quiet'):
            if found_conf_files:
                print _('Found configs:')
            for i, conf_file in enumerate(found_conf_files):
                script_logger.info("Found configs: %d) %s"
                                   %(i + 1, conf_file))
                print ' %d) %s' % (i + 1, conf_file)
    return conf_files
def __get_node_ip(self, hostname):
    """
    Get internal node ip on which service is running

    Greps /etc/hosts for the hostname (word match) and returns the
    first column (the IP) of the matching line(s).

    :param hostname: host name to look up
    :returns: IP address string, or "" on error
    """
    try:
        # NOTE(review): hostname is interpolated into a shell pipeline
        # (shell=True); safe only while callers pass trusted,
        # internally-generated host names -- confirm before exposing
        # this to external input.
        command = "grep -w " + hostname + " /etc/hosts | awk {'print $1'}"
        child = subprocess.Popen(command, stdout = subprocess.PIPE, \
            stderr = subprocess.PIPE, shell = True)
        std_out, std_err = child.communicate()
        return std_out.strip()
    except Exception as err:
        self.logger.exception(_('Error while getting ip of node:%s' % err))
        return ""
def execute(self, command):
    """Run *command* through the shell and capture its output.

    :param command: shell command string to execute
    :returns: ``(stdout, stderr, returncode)`` tuple; ``('', '', '')``
              when the command could not be run at all
    """
    try:
        proc = subprocess.Popen(command,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                shell=True)
        out, err = proc.communicate()
        return out, err, proc.returncode
    except Exception as exc:
        self.logger.exception(_('While executing command:%s, error:%s' \
            %(command, exc)))
        return '', '', ''
def DELETE(self, req): """HTTP DELETE request handler.""" # Extra safety in case someone typos a query string for an # account-level DELETE request that was really meant to be caught by # some middleware. if req.query_string: return HTTPBadRequest(request=req) if not self.app.allow_account_management: return HTTPMethodNotAllowed( request=req, headers={'Allow': ', '.join(self.allowed_methods)}) # account_partition, accounts = \ # self.app.account_ring.get_nodes(self.account_name) node, filesystem, directory, global_map_version, component_number = \ self.app.account_ring.get_node(self.account_name) if not len(node): self.app.logger.error(_('%(msg)s %(method)s %(path)s'), {'msg': _('ERROR Wrong ring file content'), 'method': 'DELETE', 'path': req.path}) return HTTPInternalServerError(request=req) headers = self.generate_request_headers(global_map_version, component_number, req) clear_info_cache(self.app, req.environ, self.account_name) if not self.is_req_blocked(_('Account'), component_number): #return HTTPTemporaryRedirect(request=req, body = 'Component' # 'movement is in progress') # resp = self.make_requests( # req, self.app.account_ring, account_partition, 'DELETE', # req.swift_entity_path, [headers] * len(accounts)) resp = self.make_requests( req, self.app.account_ring, node, filesystem, directory, 'DELETE', req.swift_entity_path, [headers] * len(node)) resp = self.account_req_for_comp_distribution(req, component_number,\ global_map_version, self.account_name, resp) if int(resp.status.split()[0]) in (301, 307): resp = HTTPInternalServerError(request=req) return resp
def exception_occurred(self, node, typ, additional_info):
    """
    Log a generic exception against a storage node.

    :param node: dictionary of node to log the error for
    :param typ: server type
    :param additional_info: additional information to log
    """
    log_args = {
        'type': typ,
        'ip': node['ip'],
        'port': node['port'],
        'info': additional_info,
    }
    self.logger.exception(
        _('ERROR with %(type)s server %(ip)s:%(port)s/ re: '
          '%(info)s'), log_args)
def __connect_target_node(self, target_service, comp_list, \
        logger_thread_locals, method):
    """Open an Expect: 100-continue connection to a target service for
    sending the pickled component list.

    :param target_service: object exposing get_ip()/get_port()
    :param comp_list: component list; its pickled size becomes
                      Content-Length
    :param logger_thread_locals: logger context from the calling thread
    :param method: HTTP method for the backend request
    :returns: the connection (with ``resp``/``target_service``
              attributes set) when the server answered 100-continue,
              otherwise None
    """
    self.logger.thread_locals = logger_thread_locals
    #client_timeout = int(self.conf.get('CLIENT_TIMEOUT', 500))
    conn_timeout = float(self.conf.get('CONNECTION_TIMEOUT', 30))
    node_timeout = int(self.conf.get('NODE_TIMEOUT', 600))
    #Some values are needed for connections.
    filesystem = 'export'
    directory = 'OSP_01'
    path = '/recovery_process/'
    try:
        # create headers
        headers = dict()
        headers['Expect'] = '100-continue'
        headers['X-Timestamp'] = time.time()
        headers['Content-Type'] = 'text/plain'
        headers['X-GLOBAL-MAP-VERSION'] = self._latest_gl_version
        # Content-Length must match the pickled payload the caller will
        # stream after the 100-continue handshake.
        headers['Content-Length'] = len(pickle.dumps(comp_list))
        with ConnectionTimeout(conn_timeout):
            conn = http_connect(target_service.get_ip(),
                                target_service.get_port(), filesystem,
                                directory, method, path, headers)
        with Timeout(node_timeout):
            resp = conn.getexpect()
        if resp.status == HTTP_CONTINUE:
            # Server is ready for the body; hand the connection back.
            conn.resp = None
            conn.target_service = target_service
            self.logger.info("HTTP continue %s" % resp.status)
            return conn
        else:
            self.logger.error("Http connection status: %s" % resp.status)
            return None
    except (Exception, Timeout) as err:
        self.logger.exception(
            _('ERROR with %(type)s server %(ip)s:%(port)s/ re: '
              '%(info)s'),
            {'type': "Account", 'ip': target_service.get_ip(),
             'port': target_service.get_port(),
             #check
             'info': "Expect: 100-continue on "})
def handle_request(self, env, start_response):
    """WSGI pipeline guard: tag the transaction and turn any unhandled
    downstream exception into a 500 response.

    A transaction id is generated, stored in the environment and on the
    logger, and guaranteed to appear in the response headers on both
    the success and the error path.

    :param env: WSGI environment dictionary
    :param start_response: WSGI start_response callable
    :returns: response iterable
    """
    txn = generate_trans_id(self.trans_id_suffix)
    env['swift.trans_id'] = txn
    self.logger.txn_id = txn
    try:
        # catch any errors in the pipeline
        app_iter = self._app_call(env)
    except:  # noqa
        self.logger.exception(_('Error: An error occurred'))
        error_resp = HTTPServerError(request=Request(env),
                                     body='An error occurred',
                                     content_type='text/plain')
        error_resp.headers['X-Trans-Id'] = txn
        return error_resp(env, start_response)
    # make sure the response has the trans_id
    headers = self._response_headers
    if headers is None:
        headers = self._response_headers = []
    headers.append(('X-Trans-Id', txn))
    start_response(self._response_status, headers,
                   self._response_exc_info)
    return app_iter
def read_container_journal(self, component_map):
    '''
    Read container journal entries for the local components plus any
    components handed over in ``component_map``.

    :param component_map: dict mapping some key to lists of component
                          names whose journal entries must also be read
    :returns: container_entries
        (dictionary=[{component_no:[list_of_records],...}])
        >>>{'x1':[1,2,3],'x2':[4,5,6]}
    '''
    try:
        self.logger.info("reading_container journal %s %s"% \
            (component_map, self.__cont_journal_path))
        node_hash = md5(self.id + "_" + self.local_leader_id)
        node_id = int(node_hash.hexdigest()[:8], 16) % 256
        #Performance
        # The hash-derived node id is deliberately overridden with -1
        # (performance tweak per the original comment above).
        node_id = -1
        self.logger.info("Node ID for this set of services is %r %r"% \
            (node_id, self.__cont_journal_path))
        lib_impl = LibraryImpl(self.__cont_path, \
            node_id)
        comp_entries = libContainerLib.list_less__component_names__greater_(
        )
        # Merge components received from other nodes into the list.
        for i in component_map.values():
            for comp in i:
                comp_entries.append(comp)
        self.logger.info("component : %s" % comp_entries)
        container_entries = lib_impl.extract_cont_journal_data(
            comp_entries)
        self.logger.info(_("Container Entries: %(cont_entries)s"), \
            {'cont_entries': container_entries})
        return container_entries
    except Exception as err:
        # Journal corruption is unrecoverable here: log and exit with a
        # distinctive status code so the supervisor can react.
        self.logger.error("Reading container Journal Failed: %s" % err)
        sys.exit(130)
def __send_data(self, conn, service_obj):
    """Stream the buffered data in ``conn.reader`` to the target
    service in 64 KiB chunks.

    Bug fix: in the original, ``conn.reader`` was only advanced after a
    successful ``conn.send``, while the exception handler merely logged
    and continued — so a persistently failing connection retried the
    same chunk forever (and kept inflating the byte counter, which was
    incremented before the send).  Streaming now stops after the first
    failed chunk, and a chunk is only counted after a successful send.

    :param conn: connection object with ``reader`` holding the
                 remaining payload
    :param service_obj: target service (get_ip()/get_port()) used for
                        error-log context
    """
    self.logger.info("send_data started")
    node_timeout = int(self.conf.get('NODE_TIMEOUT', 600))
    bytes_transferred = 0
    while conn.reader:
        chunk = conn.reader[:65536]
        try:
            with ChunkWriteTimeout(node_timeout):
                self.logger.debug('Sending chunk%s' % chunk)
                conn.send(chunk)
            # Consume and count the chunk only after a successful send.
            conn.reader = conn.reader[65536:]
            bytes_transferred += len(chunk)
        except (Exception, ChunkWriteTimeout):
            self.logger.exception(
                _('ERROR with %(type)s server %(ip)s:%(port)s/ re: '
                  '%(info)s'),
                {'type': "Account", 'ip': service_obj.get_ip(),
                 'port': service_obj.get_port(),
                 'info': "send data failed "})
            # Abort instead of retrying the same chunk indefinitely.
            break
    self.logger.info("Bytes transferred: %s" % bytes_transferred)
def __call__(self, env, start_response):
    """
    WSGI entry point.
    Wraps env in swob.Request object and passes it down.

    :param env: WSGI environment dictionary
    :param start_response: WSGI callable
    """
    req = Request(env)
    # Lazily resolve the memcache client from the environment on the
    # first request; without one, rate limiting is disabled entirely.
    if self.memcache_client is None:
        self.memcache_client = cache_from_env(env)
    if not self.memcache_client:
        self.logger.warning(
            _('Warning: Cannot ratelimit without a memcached client'))
        return self.app(env, start_response)
    try:
        version, account, container, obj = req.split_path(1, 4, True)
    except ValueError:
        # Unparseable path: not something we can rate limit; pass through.
        return self.app(env, start_response)
    ratelimit_resp = self.handle_ratelimit(req, account, container, obj)
    if ratelimit_resp is None:
        # Request is allowed through.
        return self.app(env, start_response)
    else:
        # Request was throttled/blacklisted: return the error response.
        return ratelimit_resp(env, start_response)
def close(self):
    """
    Close the open file handle if present.

    For this specific implementation, this method will handle
    quarantining the file if necessary.

    The file handle is always released (see ``finally``), even when the
    quarantine check fails.
    """
    if self._fp:
        try:
            # Only a full read from the start can validate the file;
            # partial reads can't prove anything about integrity.
            if self._started_at_0 and self._read_to_eof:
                self._handle_close_quarantine()
        except DiskFileQuarantined:
            raise
        except (Exception, Timeout) as e:
            self._logger.error(
                _('ERROR DiskFile %(data_file)s'
                  ' close failure: %(exc)s : %(stack)s'),
                {'exc': e, 'stack': ''.join(traceback.format_stack()),
                 'data_file': self._data_file})
        finally:
            # Swap out self._fp before closing so the object is marked
            # closed even if fp.close() raises.
            fp, self._fp = self._fp, None
            fp.close()
def stop(self, **kwargs):
    """stops a server

    Signals every managed server to stop, then watches the signaled
    pids for up to ``kill_wait`` seconds.

    :returns: 0 when all signaled processes died (or none were
              running), 1 when some survived the wait
    """
    server_pids = {}
    for server in self.servers:
        signaled_pids = server.stop(**kwargs)
        if not signaled_pids:
            print _('No %s running') % server
            script_logger.info("No %s running" %server)
        else:
            server_pids[server] = signaled_pids
    if not server_pids:
        return 0
    # all signaled_pids, i.e. list(itertools.chain(*server_pids.values()))
    signaled_pids = [p for server, pids in server_pids.items()
                     for p in pids]
    # keep track of the pids yielded back as killed for all servers
    killed_pids = set()
    kill_wait = kwargs.get('kill_wait', KILL_WAIT)
    for server, killed_pid in watch_server_pids(server_pids,
                                                interval=kill_wait,
                                                **kwargs):
        print _("%s (%s) appears to have stopped") % (server, killed_pid)
        script_logger.info("%s [%s] appears to have stopped "
                           %(server, killed_pid))
        killed_pids.add(killed_pid)
        if not killed_pids.symmetric_difference(signaled_pids):
            # all processes have been stopped
            return 0
    # reached interval n watch_pids w/o killing all servers
    for server, pids in server_pids.items():
        if not killed_pids.issuperset(pids):
            # some pids of this server were not killed
            print _('Waited %s seconds for %s to die; giving up') % (
                kill_wait, server)
            script_logger.info("Waited for %s seconds for %s to die; "
                               "giving up" %(kill_wait, server))
    return 1