def __init__(self, app, conf):
    self.app = app
    self.logger = get_logger(conf)
    self.connection = conf.get('connection')
    maxsize = config_auto_int_value(conf.get('cache_size'), 1000)
    maxtime = config_auto_int_value(conf.get('cache_ttl'), 30)
    self._load_rules_matcher = LRUCache(maxsize=maxsize, maxtime=maxtime)(
        self._build_rules_matcher)
def __init__(self, app, conf):
    self.app = app
    self.logger = get_logger(conf)
    self.connection = conf.get('connection')
    maxsize = config_auto_int_value(conf.get('cache_size'), 1000)
    maxtime = config_auto_int_value(conf.get('cache_ttl'), 30)
    if maxsize > 0:
        if maxtime <= 0:
            maxtime = float('inf')
        self._load_rules_matcher = tlru_cache(
            maxsize=maxsize, maxtime=maxtime)(self._build_rules_matcher)
    else:
        self._load_rules_matcher = self._build_rules_matcher
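# Both cache-sizing snippets above lean on config_auto_int_value() to accept
# either an integer or 'auto' from the config. A minimal sketch of that
# helper, assuming Swift's documented behavior (None or 'auto' falls back to
# the default, anything else must parse as an int); the sketch name is mine:
def _config_auto_int_value_sketch(value, default):
    # None or 'auto' (any case) means "use the default"
    if value is None or (isinstance(value, str) and value.lower() == 'auto'):
        return default
    try:
        return int(value)
    except (TypeError, ValueError):
        raise ValueError(
            'Config option must be an integer or the string "auto", '
            'not %r' % value)

# e.g. cache_size unset -> 1000; cache_size = auto -> 1000; cache_size = 250
assert _config_auto_int_value_sketch(None, 1000) == 1000
assert _config_auto_int_value_sketch('auto', 1000) == 1000
assert _config_auto_int_value_sketch('250', 1000) == 250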
def __init__(self, conf): """ :param conf: configuration object obtained from ConfigParser :param logger: logging object """ self.conf = conf self.logger = get_logger(conf, log_route="object-replicator") self.devices_dir = conf.get("devices", "/srv/node") self.mount_check = config_true_value(conf.get("mount_check", "true")) self.vm_test_mode = config_true_value(conf.get("vm_test_mode", "no")) self.swift_dir = conf.get("swift_dir", "/etc/swift") self.port = int(conf.get("bind_port", 6000)) self.concurrency = int(conf.get("concurrency", 1)) self.stats_interval = int(conf.get("stats_interval", "300")) self.ring_check_interval = int(conf.get("ring_check_interval", 15)) self.next_check = time.time() + self.ring_check_interval self.reclaim_age = int(conf.get("reclaim_age", 86400 * 7)) self.partition_times = [] self.run_pause = int(conf.get("run_pause", 30)) self.rsync_timeout = int(conf.get("rsync_timeout", 900)) self.rsync_io_timeout = conf.get("rsync_io_timeout", "30") self.rsync_bwlimit = conf.get("rsync_bwlimit", "0") self.http_timeout = int(conf.get("http_timeout", 60)) self.lockup_timeout = int(conf.get("lockup_timeout", 1800)) self.recon_cache_path = conf.get("recon_cache_path", "/var/cache/swift") self.rcache = os.path.join(self.recon_cache_path, "object.recon") self.conn_timeout = float(conf.get("conn_timeout", 0.5)) self.node_timeout = float(conf.get("node_timeout", 10)) self.sync_method = getattr(self, conf.get("sync_method") or "rsync") self.network_chunk_size = int(conf.get("network_chunk_size", 65536)) self.headers = {"Content-Length": "0", "user-agent": "object-replicator %s" % os.getpid()} self.rsync_error_log_line_length = int(conf.get("rsync_error_log_line_length", 0)) self.handoffs_first = config_true_value(conf.get("handoffs_first", False)) self.handoff_delete = config_auto_int_value(conf.get("handoff_delete", "auto"), 0) self._diskfile_mgr = DiskFileManager(conf, self.logger)
def __init__(self, conf, logger=None):
    """
    :param conf: configuration object obtained from ConfigParser
    :param logger: logging object
    """
    self.conf = conf
    self.logger = PrefixLoggerAdapter(
        logger or get_logger(conf, log_route='object-replicator'), {})
    self.devices_dir = conf.get('devices', '/srv/node')
    self.mount_check = config_true_value(conf.get('mount_check', 'true'))
    self.swift_dir = conf.get('swift_dir', '/etc/swift')
    self.bind_ip = conf.get('bind_ip', '0.0.0.0')
    self.servers_per_port = int(conf.get('servers_per_port', '0') or 0)
    self.port = None if self.servers_per_port else \
        int(conf.get('bind_port', 6200))
    self.concurrency = int(conf.get('concurrency', 1))
    self.replicator_workers = int(conf.get('replicator_workers', 0))
    self.stats_interval = int(conf.get('stats_interval', '300'))
    self.ring_check_interval = int(conf.get('ring_check_interval', 15))
    self.next_check = time.time() + self.ring_check_interval
    self.replication_cycle = random.randint(0, 9)
    self.partition_times = []
    self.interval = int(
        conf.get('interval') or conf.get('run_pause') or 30)
    self.rsync_timeout = int(
        conf.get('rsync_timeout', DEFAULT_RSYNC_TIMEOUT))
    self.rsync_io_timeout = conf.get('rsync_io_timeout', '30')
    self.rsync_bwlimit = conf.get('rsync_bwlimit', '0')
    self.rsync_compress = config_true_value(
        conf.get('rsync_compress', 'no'))
    self.rsync_module = conf.get('rsync_module', '').rstrip('/')
    if not self.rsync_module:
        self.rsync_module = '{replication_ip}::object'
    self.http_timeout = int(conf.get('http_timeout', 60))
    self.recon_cache_path = conf.get('recon_cache_path',
                                     '/var/cache/swift')
    self.rcache = os.path.join(self.recon_cache_path, "object.recon")
    self._next_rcache_update = time.time() + self.stats_interval
    self.conn_timeout = float(conf.get('conn_timeout', 0.5))
    self.node_timeout = float(conf.get('node_timeout', 10))
    self.sync_method = getattr(self, conf.get('sync_method') or 'rsync')
    self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
    self.default_headers = {
        'Content-Length': '0',
        'user-agent': 'object-replicator %s' % os.getpid()
    }
    self.rsync_error_log_line_length = \
        int(conf.get('rsync_error_log_line_length', 0))
    self.handoffs_first = config_true_value(
        conf.get('handoffs_first', False))
    self.handoff_delete = config_auto_int_value(
        conf.get('handoff_delete', 'auto'), 0)
    if any((self.handoff_delete, self.handoffs_first)):
        self.logger.warning('Handoff only mode is not intended for normal '
                            'operation, please disable handoffs_first and '
                            'handoff_delete before the next '
                            'normal rebalance')
    self.is_multiprocess_worker = None
    self._df_router = DiskFileRouter(conf, self.logger)
    self._child_process_reaper_queue = queue.LightQueue()
def __init__(self, app, conf):
    super(OioMemcacheMiddleware, self).__init__(app, conf)
    self.memcache_dict = None
    if config_true_value(conf.get('oio_cache', 'true')):
        oio_cache_ttl = config_auto_int_value(
            conf.pop('oio_cache_ttl', None), 24 * 3600)
        self.memcache_dict = MemcacheDict(self.memcache, ttl=oio_cache_ttl)
def __init__(self, conf, logger):
    self.conf = conf
    self.logger = logger
    self.sock = None
    self.children = []
    self.worker_count = config_auto_int_value(conf.get('workers'),
                                              CPU_COUNT)
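# In the WSGI server snippets, 'workers' resolves through the same helper
# with the machine's CPU count as the default; 0 means "run in-process, no
# forks" (see the run_wsgi functions below). CPU_COUNT here is an assumption
# modeled on how such a constant is typically defined, not code from the
# snippets themselves:
import multiprocessing

CPU_COUNT = multiprocessing.cpu_count() or 1

assert _config_auto_int_value_sketch(None, CPU_COUNT) == CPU_COUNT
assert _config_auto_int_value_sketch('auto', CPU_COUNT) == CPU_COUNT
assert _config_auto_int_value_sketch('0', CPU_COUNT) == 0  # profiling mode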
def __init__(self, conf, logger=None):
    """
    :param conf: configuration object obtained from ConfigParser
    :param logger: logging object
    """
    self.conf = conf
    self.logger = logger or get_logger(conf, log_route='object-replicator')
    self.devices_dir = conf.get('devices', '/srv/node')
    self.mount_check = config_true_value(conf.get('mount_check', 'true'))
    self.swift_dir = conf.get('swift_dir', '/etc/swift')
    self.bind_ip = conf.get('bind_ip', '0.0.0.0')
    self.servers_per_port = int(conf.get('servers_per_port', '0') or 0)
    self.port = None if self.servers_per_port else \
        int(conf.get('bind_port', 6000))
    self.concurrency = int(conf.get('concurrency', 1))
    self.stats_interval = int(conf.get('stats_interval', '300'))
    self.ring_check_interval = int(conf.get('ring_check_interval', 15))
    self.next_check = time.time() + self.ring_check_interval
    self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7))
    self.partition_times = []
    self.interval = int(conf.get('interval') or
                        conf.get('run_pause') or 30)
    self.rsync_timeout = int(conf.get('rsync_timeout', 900))
    self.rsync_io_timeout = conf.get('rsync_io_timeout', '30')
    self.rsync_bwlimit = conf.get('rsync_bwlimit', '0')
    self.rsync_compress = config_true_value(
        conf.get('rsync_compress', 'no'))
    self.rsync_module = conf.get('rsync_module', '').rstrip('/')
    if not self.rsync_module:
        self.rsync_module = '{replication_ip}::object'
        if config_true_value(conf.get('vm_test_mode', 'no')):
            self.logger.warn('Option object-replicator/vm_test_mode is '
                             'deprecated and will be removed in a future '
                             'version. Update your configuration to use '
                             'option object-replicator/rsync_module.')
            self.rsync_module += '{replication_port}'
    self.http_timeout = int(conf.get('http_timeout', 60))
    self.lockup_timeout = int(conf.get('lockup_timeout', 1800))
    self.recon_cache_path = conf.get('recon_cache_path',
                                     '/var/cache/swift')
    self.rcache = os.path.join(self.recon_cache_path, "object.recon")
    self.conn_timeout = float(conf.get('conn_timeout', 0.5))
    self.node_timeout = float(conf.get('node_timeout', 10))
    self.sync_method = getattr(self, conf.get('sync_method') or 'rsync')
    self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
    self.default_headers = {
        'Content-Length': '0',
        'user-agent': 'object-replicator %s' % os.getpid()}
    self.rsync_error_log_line_length = \
        int(conf.get('rsync_error_log_line_length', 0))
    self.handoffs_first = config_true_value(conf.get('handoffs_first',
                                                     False))
    self.handoff_delete = config_auto_int_value(
        conf.get('handoff_delete', 'auto'), 0)
    if any((self.handoff_delete, self.handoffs_first)):
        self.logger.warn('Handoff only mode is not intended for normal '
                         'operation, please disable handoffs_first and '
                         'handoff_delete before the next '
                         'normal rebalance')
    self._diskfile_mgr = DiskFileManager(conf, self.logger)
def __init__(self, conf, logger, rcache, devices, zero_byte_only_at_fps=0):
    self.conf = conf
    self.logger = logger
    self.devices = devices
    self.max_files_per_second = float(conf.get('files_per_second', 20))
    self.max_bytes_per_second = float(
        conf.get('bytes_per_second', 10000000))
    try:
        # ideally unless ops overrides the rsync_tempfile_timeout in the
        # auditor section we can base our behavior on whatever they
        # configure for their replicator
        replicator_config = readconf(self.conf['__file__'],
                                     'object-replicator')
    except (KeyError, ValueError, IOError):
        # if we can't parse the real config (generally a KeyError on
        # __file__, or ValueError on no object-replicator section, or
        # IOError if reading the file failed) we use
        # a very conservative default for rsync_timeout
        default_rsync_timeout = 86400
    else:
        replicator_rsync_timeout = int(replicator_config.get(
            'rsync_timeout', replicator.DEFAULT_RSYNC_TIMEOUT))
        # Here we can do some light math for ops and use the
        # *replicator's* rsync_timeout (plus 15 mins to avoid deleting
        # local tempfiles before the remote replicator kills its rsync)
        default_rsync_timeout = replicator_rsync_timeout + 900
        # there's not really a good reason to assume the replicator
        # section's reclaim_age is more appropriate than the
        # reconstructor reclaim_age - but we're already parsing the
        # config so we can set the default value in our config if it's
        # not already set
        if 'reclaim_age' in replicator_config:
            conf.setdefault('reclaim_age',
                            replicator_config['reclaim_age'])
    self.rsync_tempfile_timeout = config_auto_int_value(
        self.conf.get('rsync_tempfile_timeout'), default_rsync_timeout)
    self.diskfile_router = diskfile.DiskFileRouter(conf, self.logger)
    self.auditor_type = 'ALL'
    self.zero_byte_only_at_fps = zero_byte_only_at_fps
    if self.zero_byte_only_at_fps:
        self.max_files_per_second = float(self.zero_byte_only_at_fps)
        self.auditor_type = 'ZBF'
    self.log_time = int(conf.get('log_time', 3600))
    self.last_logged = 0
    self.files_running_time = 0
    self.bytes_running_time = 0
    self.bytes_processed = 0
    self.total_bytes_processed = 0
    self.total_files_processed = 0
    self.passes = 0
    self.quarantines = 0
    self.errors = 0
    self.rcache = rcache
    self.stats_sizes = sorted(
        [int(s) for s in list_from_csv(conf.get('object_size_stats'))])
    self.stats_buckets = dict(
        [(s, 0) for s in self.stats_sizes + ['OVER']])
def __init__(self, conf, logger=None):
    """
    :param conf: configuration object obtained from ConfigParser
    :param logger: logging object
    """
    self.conf = conf
    self.logger = logger or get_logger(conf, log_route="object-replicator")
    self.devices_dir = conf.get("devices", "/srv/node")
    self.mount_check = config_true_value(conf.get("mount_check", "true"))
    self.swift_dir = conf.get("swift_dir", "/etc/swift")
    self.bind_ip = conf.get("bind_ip", "0.0.0.0")
    self.servers_per_port = int(conf.get("servers_per_port", "0") or 0)
    self.port = None if self.servers_per_port else int(conf.get("bind_port", 6000))
    self.concurrency = int(conf.get("concurrency", 1))
    self.stats_interval = int(conf.get("stats_interval", "300"))
    self.ring_check_interval = int(conf.get("ring_check_interval", 15))
    self.next_check = time.time() + self.ring_check_interval
    self.reclaim_age = int(conf.get("reclaim_age", 86400 * 7))
    self.partition_times = []
    self.interval = int(conf.get("interval") or conf.get("run_pause") or 30)
    self.rsync_timeout = int(conf.get("rsync_timeout", 900))
    self.rsync_io_timeout = conf.get("rsync_io_timeout", "30")
    self.rsync_bwlimit = conf.get("rsync_bwlimit", "0")
    self.rsync_compress = config_true_value(conf.get("rsync_compress", "no"))
    self.rsync_module = conf.get("rsync_module", "").rstrip("/")
    if not self.rsync_module:
        self.rsync_module = "{replication_ip}::object"
        if config_true_value(conf.get("vm_test_mode", "no")):
            self.logger.warning(
                "Option object-replicator/vm_test_mode "
                "is deprecated and will be removed in a "
                "future version. Update your "
                "configuration to use option "
                "object-replicator/rsync_module."
            )
            self.rsync_module += "{replication_port}"
    self.http_timeout = int(conf.get("http_timeout", 60))
    self.lockup_timeout = int(conf.get("lockup_timeout", 1800))
    self.recon_cache_path = conf.get("recon_cache_path", "/var/cache/swift")
    self.rcache = os.path.join(self.recon_cache_path, "object.recon")
    self.conn_timeout = float(conf.get("conn_timeout", 0.5))
    self.node_timeout = float(conf.get("node_timeout", 10))
    self.sync_method = getattr(self, conf.get("sync_method") or "rsync")
    self.network_chunk_size = int(conf.get("network_chunk_size", 65536))
    self.default_headers = {
        "Content-Length": "0",
        "user-agent": "object-replicator %s" % os.getpid(),
    }
    self.rsync_error_log_line_length = int(conf.get("rsync_error_log_line_length", 0))
    self.handoffs_first = config_true_value(conf.get("handoffs_first", False))
    self.handoff_delete = config_auto_int_value(conf.get("handoff_delete", "auto"), 0)
    if any((self.handoff_delete, self.handoffs_first)):
        self.logger.warning(
            "Handoff only mode is not intended for normal "
            "operation, please disable handoffs_first and "
            "handoff_delete before the next "
            "normal rebalance"
        )
    self._diskfile_mgr = DiskFileManager(conf, self.logger)
def __init__(self, base_conf, override_conf, app):
    def get(key, default):
        return override_conf.get(key, base_conf.get(key, default))

    self.sorting_method = get('sorting_method', 'shuffle').lower()
    if self.sorting_method not in VALID_SORTING_METHODS:
        raise ValueError(
            'Invalid sorting_method value; must be one of %s, not %r' % (
                ', '.join(VALID_SORTING_METHODS), self.sorting_method))

    self.read_affinity = get('read_affinity', '')
    try:
        self.read_affinity_sort_key = affinity_key_function(
            self.read_affinity)
    except ValueError as err:
        # make the message a little more useful
        raise ValueError("Invalid read_affinity value: %r (%s)" %
                         (self.read_affinity, err.args[0]))

    self.write_affinity = get('write_affinity', '')
    try:
        self.write_affinity_is_local_fn \
            = affinity_locality_predicate(self.write_affinity)
    except ValueError as err:
        # make the message a little more useful
        raise ValueError("Invalid write_affinity value: %r (%s)" %
                         (self.write_affinity, err.args[0]))

    self.write_affinity_node_count = get(
        'write_affinity_node_count', '2 * replicas').lower()
    value = self.write_affinity_node_count.split()
    if len(value) == 1:
        wanc_value = int(value[0])
        self.write_affinity_node_count_fn = lambda replicas: wanc_value
    elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas':
        wanc_value = int(value[0])
        self.write_affinity_node_count_fn = \
            lambda replicas: wanc_value * replicas
    else:
        raise ValueError(
            'Invalid write_affinity_node_count value: %r' %
            (' '.join(value)))

    self.write_affinity_handoff_delete_count = config_auto_int_value(
        get('write_affinity_handoff_delete_count', 'auto'), None)
    self.rebalance_missing_suppression_count = int(get(
        'rebalance_missing_suppression_count', 1))
    self.concurrent_gets = config_true_value(get('concurrent_gets', False))
    self.concurrency_timeout = float(get(
        'concurrency_timeout', app.conn_timeout))
    self.concurrent_ec_extra_requests = int(get(
        'concurrent_ec_extra_requests', 0))
def __init__(self, conf, logger=None):
    """
    :param conf: configuration object obtained from ConfigParser
    :param logger: logging object
    """
    self.conf = conf
    self.logger = logger or get_logger(conf, log_route='object-replicator')
    self.devices_dir = conf.get('devices', '/srv/node')
    self.mount_check = config_true_value(conf.get('mount_check', 'true'))
    self.vm_test_mode = config_true_value(conf.get('vm_test_mode', 'no'))
    self.swift_dir = conf.get('swift_dir', '/etc/swift')
    self.bind_ip = conf.get('bind_ip', '0.0.0.0')
    self.servers_per_port = int(conf.get('servers_per_port', '0') or 0)
    self.port = None if self.servers_per_port else \
        int(conf.get('bind_port', 6000))
    self.concurrency = int(conf.get('concurrency', 1))
    self.stats_interval = int(conf.get('stats_interval', '300'))
    self.ring_check_interval = int(conf.get('ring_check_interval', 15))
    self.next_check = time.time() + self.ring_check_interval
    self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7))
    self.partition_times = []
    self.interval = int(
        conf.get('interval') or conf.get('run_pause') or 30)
    self.rsync_timeout = int(conf.get('rsync_timeout', 900))
    self.rsync_io_timeout = conf.get('rsync_io_timeout', '30')
    self.rsync_bwlimit = conf.get('rsync_bwlimit', '0')
    self.rsync_compress = config_true_value(
        conf.get('rsync_compress', 'no'))
    self.http_timeout = int(conf.get('http_timeout', 60))
    self.lockup_timeout = int(conf.get('lockup_timeout', 1800))
    self.recon_cache_path = conf.get('recon_cache_path',
                                     '/var/cache/swift')
    self.rcache = os.path.join(self.recon_cache_path, "object.recon")
    self.conn_timeout = float(conf.get('conn_timeout', 0.5))
    self.node_timeout = float(conf.get('node_timeout', 10))
    self.sync_method = getattr(self, conf.get('sync_method') or 'rsync')
    self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
    self.default_headers = {
        'Content-Length': '0',
        'user-agent': 'object-replicator %s' % os.getpid()
    }
    self.rsync_error_log_line_length = \
        int(conf.get('rsync_error_log_line_length', 0))
    self.handoffs_first = config_true_value(
        conf.get('handoffs_first', False))
    self.handoff_delete = config_auto_int_value(
        conf.get('handoff_delete', 'auto'), 0)
    if any((self.handoff_delete, self.handoffs_first)):
        self.logger.warn('Handoff only mode is not intended for normal '
                         'operation, please disable handoffs_first and '
                         'handoff_delete before the next '
                         'normal rebalance')
    self._diskfile_mgr = DiskFileManager(conf, self.logger)
def __init__(self, conf): """ :param conf: configuration object obtained from ConfigParser :param logger: logging object """ self.conf = conf self.logger = get_logger(conf, log_route='object-replicator') self.devices_dir = conf.get('devices', '/srv/node') self.mount_check = config_true_value(conf.get('mount_check', 'true')) self.vm_test_mode = config_true_value(conf.get('vm_test_mode', 'no')) self.swift_dir = conf.get('swift_dir', '/etc/swift') self.bind_ip = conf.get('bind_ip', '0.0.0.0') self.servers_per_port = int(conf.get('servers_per_port', '0') or 0) self.port = None if self.servers_per_port else \ int(conf.get('bind_port', 6000)) self.concurrency = int(conf.get('concurrency', 1)) self.stats_interval = int(conf.get('stats_interval', '300')) self.ring_check_interval = int(conf.get('ring_check_interval', 15)) self.next_check = time.time() + self.ring_check_interval self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7)) self.partition_times = [] self.interval = int(conf.get('interval') or conf.get('run_pause') or 30) self.rsync_timeout = int(conf.get('rsync_timeout', 900)) self.rsync_io_timeout = conf.get('rsync_io_timeout', '30') self.rsync_bwlimit = conf.get('rsync_bwlimit', '0') self.rsync_compress = config_true_value( conf.get('rsync_compress', 'no')) self.http_timeout = int(conf.get('http_timeout', 60)) self.lockup_timeout = int(conf.get('lockup_timeout', 1800)) self.recon_cache_path = conf.get('recon_cache_path', '/var/cache/swift') self.rcache = os.path.join(self.recon_cache_path, "object.recon") self.conn_timeout = float(conf.get('conn_timeout', 0.5)) self.node_timeout = float(conf.get('node_timeout', 10)) self.sync_method = getattr(self, conf.get('sync_method') or 'rsync') self.network_chunk_size = int(conf.get('network_chunk_size', 65536)) self.headers = { 'Content-Length': '0', 'user-agent': 'object-replicator %s' % os.getpid()} self.rsync_error_log_line_length = \ int(conf.get('rsync_error_log_line_length', 0)) self.handoffs_first = config_true_value(conf.get('handoffs_first', False)) self.handoff_delete = config_auto_int_value( conf.get('handoff_delete', 'auto'), 0) self._diskfile_mgr = DiskFileManager(conf, self.logger)
def __init__(self, base_conf, override_conf):
    def get(key, default):
        return override_conf.get(key, base_conf.get(key, default))

    self.sorting_method = get('sorting_method', 'shuffle').lower()
    if self.sorting_method not in VALID_SORTING_METHODS:
        raise ValueError(
            'Invalid sorting_method value; must be one of %s, not %r' % (
                ', '.join(VALID_SORTING_METHODS), self.sorting_method))

    self.read_affinity = get('read_affinity', '')
    try:
        self.read_affinity_sort_key = affinity_key_function(
            self.read_affinity)
    except ValueError as err:
        # make the message a little more useful
        raise ValueError("Invalid read_affinity value: %r (%s)" %
                         (self.read_affinity, err.args[0]))

    self.write_affinity = get('write_affinity', '')
    try:
        self.write_affinity_is_local_fn \
            = affinity_locality_predicate(self.write_affinity)
    except ValueError as err:
        # make the message a little more useful
        raise ValueError("Invalid write_affinity value: %r (%s)" %
                         (self.write_affinity, err.args[0]))

    self.write_affinity_node_count = get(
        'write_affinity_node_count', '2 * replicas').lower()
    value = self.write_affinity_node_count.split()
    if len(value) == 1:
        wanc_value = int(value[0])
        self.write_affinity_node_count_fn = lambda replicas: wanc_value
    elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas':
        wanc_value = int(value[0])
        self.write_affinity_node_count_fn = \
            lambda replicas: wanc_value * replicas
    else:
        raise ValueError(
            'Invalid write_affinity_node_count value: %r' %
            (' '.join(value)))

    self.write_affinity_handoff_delete_count = config_auto_int_value(
        get('write_affinity_handoff_delete_count', 'auto'), None)
def __init__(self, conf): """ :param conf: configuration object obtained from ConfigParser :param logger: logging object """ self.conf = conf self.logger = get_logger(conf, log_route='object-replicator') self.devices_dir = conf.get('devices', '/srv/node') self.mount_check = config_true_value(conf.get('mount_check', 'true')) self.vm_test_mode = config_true_value(conf.get('vm_test_mode', 'no')) self.swift_dir = conf.get('swift_dir', '/etc/swift') self.port = int(conf.get('bind_port', 6000)) self.concurrency = int(conf.get('concurrency', 1)) self.stats_interval = int(conf.get('stats_interval', '300')) self.object_ring = Ring(self.swift_dir, ring_name='object') self.ring_check_interval = int(conf.get('ring_check_interval', 15)) self.next_check = time.time() + self.ring_check_interval self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7)) self.partition_times = [] self.run_pause = int(conf.get('run_pause', 30)) self.rsync_timeout = int(conf.get('rsync_timeout', 900)) self.rsync_io_timeout = conf.get('rsync_io_timeout', '30') self.rsync_bwlimit = conf.get('rsync_bwlimit', '0') self.http_timeout = int(conf.get('http_timeout', 60)) self.lockup_timeout = int(conf.get('lockup_timeout', 1800)) self.recon_cache_path = conf.get('recon_cache_path', '/var/cache/swift') self.rcache = os.path.join(self.recon_cache_path, "object.recon") self.conn_timeout = float(conf.get('conn_timeout', 0.5)) self.node_timeout = float(conf.get('node_timeout', 10)) self.sync_method = getattr(self, conf.get('sync_method') or 'rsync') self.network_chunk_size = int(conf.get('network_chunk_size', 65536)) self.disk_chunk_size = int(conf.get('disk_chunk_size', 65536)) self.headers = { 'Content-Length': '0', 'user-agent': 'obj-replicator %s' % os.getpid() } self.rsync_error_log_line_length = \ int(conf.get('rsync_error_log_line_length', 0)) self.handoffs_first = config_true_value( conf.get('handoffs_first', False)) self.handoff_delete = config_auto_int_value( conf.get('handoff_delete', 'auto'), 0) self._diskfile_mgr = DiskFileManager(conf, self.logger)
def __init__(self, base_conf, override_conf):
    def get(key, default):
        return override_conf.get(key, base_conf.get(key, default))

    self.sorting_method = get('sorting_method', 'shuffle').lower()

    self.read_affinity = get('read_affinity', '')
    try:
        self.read_affinity_sort_key = affinity_key_function(
            self.read_affinity)
    except ValueError as err:
        # make the message a little more useful
        raise ValueError("Invalid read_affinity value: %r (%s)" %
                         (self.read_affinity, err.args[0]))

    self.write_affinity = get('write_affinity', '')
    try:
        self.write_affinity_is_local_fn \
            = affinity_locality_predicate(self.write_affinity)
    except ValueError as err:
        # make the message a little more useful
        raise ValueError("Invalid write_affinity value: %r (%s)" %
                         (self.write_affinity, err.args[0]))

    self.write_affinity_node_count = get(
        'write_affinity_node_count', '2 * replicas').lower()
    value = self.write_affinity_node_count.split()
    if len(value) == 1:
        wanc_value = int(value[0])
        self.write_affinity_node_count_fn = lambda replicas: wanc_value
    elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas':
        wanc_value = int(value[0])
        self.write_affinity_node_count_fn = \
            lambda replicas: wanc_value * replicas
    else:
        raise ValueError(
            'Invalid write_affinity_node_count value: %r' %
            (' '.join(value)))

    self.write_affinity_handoff_delete_count = config_auto_int_value(
        get('write_affinity_handoff_delete_count', 'auto'), None)
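# The affinity snippets above all parse write_affinity_node_count the same
# way. A self-contained sketch of just that parsing, for illustration (the
# function name and asserts are mine, not Swift's):
def _parse_write_affinity_node_count(value):
    tokens = value.lower().split()
    if len(tokens) == 1:
        # a bare integer: a fixed node count regardless of replica count
        n = int(tokens[0])
        return lambda replicas: n
    if len(tokens) == 3 and tokens[1] == '*' and tokens[2] == 'replicas':
        # '<N> * replicas': scale the node count with the ring's replicas
        n = int(tokens[0])
        return lambda replicas: n * replicas
    raise ValueError('Invalid write_affinity_node_count value: %r' % value)

# '2 * replicas' against a 3-replica ring yields 6 candidate nodes
assert _parse_write_affinity_node_count('2 * replicas')(3) == 6
assert _parse_write_affinity_node_count('7')(3) == 7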
def __init__(self, conf): """ :param conf: configuration object obtained from ConfigParser :param logger: logging object """ self.conf = conf self.logger = get_logger(conf, log_route='object-replicator') self.devices_dir = conf.get('devices', '/srv/node') self.mount_check = config_true_value(conf.get('mount_check', 'true')) self.vm_test_mode = config_true_value(conf.get('vm_test_mode', 'no')) self.swift_dir = conf.get('swift_dir', '/etc/swift') self.port = int(conf.get('bind_port', 6000)) self.concurrency = int(conf.get('concurrency', 1)) self.stats_interval = int(conf.get('stats_interval', '300')) self.object_ring = Ring(self.swift_dir, ring_name='object') self.ring_check_interval = int(conf.get('ring_check_interval', 15)) self.next_check = time.time() + self.ring_check_interval self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7)) self.partition_times = [] self.run_pause = int(conf.get('run_pause', 30)) self.rsync_timeout = int(conf.get('rsync_timeout', 900)) self.rsync_io_timeout = conf.get('rsync_io_timeout', '30') self.rsync_bwlimit = conf.get('rsync_bwlimit', '0') self.http_timeout = int(conf.get('http_timeout', 60)) self.lockup_timeout = int(conf.get('lockup_timeout', 1800)) self.recon_cache_path = conf.get('recon_cache_path', '/var/cache/swift') self.rcache = os.path.join(self.recon_cache_path, "object.recon") self.headers = { 'Content-Length': '0', 'user-agent': 'obj-replicator %s' % os.getpid()} self.rsync_error_log_line_length = \ int(conf.get('rsync_error_log_line_length', 0)) self.handoffs_first = config_true_value(conf.get('handoffs_first', False)) self.handoff_delete = config_auto_int_value( conf.get('handoff_delete', 'auto'), 0)
def run_wsgi(conf_path, app_section, *args, **kwargs):
    """
    Runs the server using the specified number of workers.

    :param conf_path: Path to paste.deploy style configuration file/directory
    :param app_section: App name from conf file to load config from
    :returns: 0 if successful, nonzero otherwise
    """
    # Load configuration, Set logger and Load request processor
    try:
        (conf, logger, log_name) = \
            _initrp(conf_path, app_section, *args, **kwargs)
    except ConfigFileError as e:
        print(e)
        return 1
    print('conf,logger,log_name', conf, logger, log_name)

    # bind to address and port
    try:
        sock = get_socket(conf)
    except ConfigFilePortError:
        msg = 'bind_port wasn\'t properly set in the config file. ' \
              'It must be explicitly set to a valid port number.'
        logger.error(msg)
        print(msg)
        return 1
    print('sock', sock)

    # remaining tasks should not require elevated privileges
    drop_privileges(conf.get('user', 'swift'))

    # Ensure the configuration and application can be loaded before
    # proceeding.
    global_conf = {'log_name': log_name}
    if 'global_conf_callback' in kwargs:
        kwargs['global_conf_callback'](conf, global_conf)
    print('**** calling loadapp function start *****')
    # loadapp(conf_path, global_conf=global_conf)
    print('**** calling loadapp function end *****')

    # set utils.FALLOCATE_RESERVE if desired
    reserve = int(conf.get('fallocate_reserve', 0))
    print('reserve', reserve)
    if reserve > 0:
        utils.FALLOCATE_RESERVE = reserve
    # redirect errors to logger and close stdio
    capture_stdio(logger)
    print('capture_stdio done')

    worker_count = config_auto_int_value(conf.get('workers'), CPU_COUNT)
    print('worker_count', worker_count)

    # Useful for profiling [no forks].
    if worker_count == 0:
        run_server(conf, logger, sock, global_conf=global_conf)
        print('run_server when worker_count is 0')
        return 0

    def kill_children(*args):
        """Kills the entire process group."""
        logger.error('SIGTERM received')
        signal.signal(signal.SIGTERM, signal.SIG_IGN)
        running[0] = False
        os.killpg(0, signal.SIGTERM)

    def hup(*args):
        """Shuts down the server, but allows running requests to complete"""
        logger.error('SIGHUP received')
        signal.signal(signal.SIGHUP, signal.SIG_IGN)
        running[0] = False

    running = [True]
    signal.signal(signal.SIGTERM, kill_children)
    signal.signal(signal.SIGHUP, hup)
    children = []
    while running[0]:
        while len(children) < worker_count:
            pid = os.fork()
            print('pid', pid)
            if pid == 0:
                signal.signal(signal.SIGHUP, signal.SIG_DFL)
                signal.signal(signal.SIGTERM, signal.SIG_DFL)
                print('')
                print('in run_wsgi now calling run_server,conf,logger,sock',
                      conf, logger, sock)
                print('')
                run_server(conf, logger, sock)
                logger.notice('Child %d exiting normally' % os.getpid())
                return 0
            else:
                logger.notice('Started child %s' % pid)
                children.append(pid)
        try:
            pid, status = os.wait()
            if os.WIFEXITED(status) or os.WIFSIGNALED(status):
                logger.error('Removing dead child %s' % pid)
                children.remove(pid)
        except OSError as err:
            if err.errno not in (errno.EINTR, errno.ECHILD):
                raise
        except KeyboardInterrupt:
            logger.notice('User quit')
            break
    greenio.shutdown_safe(sock)
    sock.close()
    logger.notice('Exited')
    return 0
def __init__(self, conf, logger=None):
    """
    :param conf: configuration object obtained from ConfigParser
    :param logger: logging object
    """
    self.conf = conf
    self.logger = PrefixLoggerAdapter(
        logger or get_logger(conf, log_route='object-replicator'), {})
    self.devices_dir = conf.get('devices', '/srv/node')
    self.mount_check = config_true_value(conf.get('mount_check', 'true'))
    self.swift_dir = conf.get('swift_dir', '/etc/swift')
    self.bind_ip = conf.get('bind_ip', '0.0.0.0')
    self.servers_per_port = int(conf.get('servers_per_port', '0') or 0)
    self.port = None if self.servers_per_port else \
        int(conf.get('bind_port', 6200))
    self.concurrency = int(conf.get('concurrency', 1))
    self.replicator_workers = int(conf.get('replicator_workers', 0))
    self.stats_interval = int(conf.get('stats_interval', '300'))
    self.ring_check_interval = int(conf.get('ring_check_interval', 15))
    self.next_check = time.time() + self.ring_check_interval
    self.replication_cycle = random.randint(0, 9)
    self.partition_times = []
    self.interval = int(conf.get('interval') or
                        conf.get('run_pause') or 30)
    if 'run_pause' in conf and 'interval' not in conf:
        self.logger.warning('Option object-replicator/run_pause '
                            'is deprecated and will be removed in a '
                            'future version. Update your configuration'
                            ' to use option object-replicator/'
                            'interval.')
    self.rsync_timeout = int(conf.get('rsync_timeout',
                                      DEFAULT_RSYNC_TIMEOUT))
    self.rsync_io_timeout = conf.get('rsync_io_timeout', '30')
    self.rsync_bwlimit = conf.get('rsync_bwlimit', '0')
    self.rsync_compress = config_true_value(
        conf.get('rsync_compress', 'no'))
    self.rsync_module = conf.get('rsync_module', '').rstrip('/')
    if not self.rsync_module:
        self.rsync_module = '{replication_ip}::object'
    self.http_timeout = int(conf.get('http_timeout', 60))
    self.recon_cache_path = conf.get('recon_cache_path',
                                     '/var/cache/swift')
    self.rcache = os.path.join(self.recon_cache_path, "object.recon")
    self._next_rcache_update = time.time() + self.stats_interval
    self.conn_timeout = float(conf.get('conn_timeout', 0.5))
    self.node_timeout = float(conf.get('node_timeout', 10))
    self.sync_method = getattr(self, conf.get('sync_method') or 'rsync')
    self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
    self.default_headers = {
        'Content-Length': '0',
        'user-agent': 'object-replicator %s' % os.getpid()}
    self.rsync_error_log_line_length = \
        int(conf.get('rsync_error_log_line_length', 0))
    self.handoffs_first = config_true_value(conf.get('handoffs_first',
                                                     False))
    self.handoff_delete = config_auto_int_value(
        conf.get('handoff_delete', 'auto'), 0)
    if any((self.handoff_delete, self.handoffs_first)):
        self.logger.warning('Handoff only mode is not intended for normal '
                            'operation, please disable handoffs_first and '
                            'handoff_delete before the next '
                            'normal rebalance')
    self.is_multiprocess_worker = None
    self._df_router = DiskFileRouter(conf, self.logger)
    self._child_process_reaper_queue = queue.LightQueue()
def main(args):
    parser = argparse.ArgumentParser(
        description='Relink and cleanup objects to increase partition power')
    parser.add_argument('action', choices=['relink', 'cleanup'])
    parser.add_argument(
        'conf_file', nargs='?',
        help=('Path to config file with [object-relinker] section'))
    parser.add_argument('--swift-dir', default=None,
                        dest='swift_dir', help='Path to swift directory')
    parser.add_argument(
        '--policy', default=[], dest='policies',
        action='append', type=policy,
        help='Policy to relink; may specify multiple (default: all)')
    parser.add_argument('--devices', default=None,
                        dest='devices', help='Path to swift device directory')
    parser.add_argument('--user', default=None, dest='user',
                        help='Drop privileges to this user before relinking')
    parser.add_argument('--device', default=[], dest='device_list',
                        action='append',
                        help='Device name to relink (default: all)')
    parser.add_argument('--partition', '-p', default=[], dest='partitions',
                        type=non_negative_int, action='append',
                        help='Partition to relink (default: all)')
    parser.add_argument('--skip-mount-check', default=False,
                        help='Don\'t test if disk is mounted',
                        action="store_true", dest='skip_mount_check')
    parser.add_argument('--files-per-second', default=None,
                        type=non_negative_float, dest='files_per_second',
                        help='Used to limit I/O. Zero implies no limit '
                             '(default: no limit).')
    parser.add_argument('--stats-interval', default=None,
                        type=non_negative_float, dest='stats_interval',
                        help='Emit stats to recon roughly every N seconds. '
                             '(default: %d).' % DEFAULT_STATS_INTERVAL)
    parser.add_argument(
        '--workers', default=None, type=auto_or_int,
        help=('Process devices across N workers '
              '(default: one worker per device)'))
    parser.add_argument('--logfile', default=None, dest='logfile',
                        help='Set log file name. Ignored if using conf_file.')
    parser.add_argument('--debug', default=False, action='store_true',
                        help='Enable debug mode')

    args = parser.parse_args(args)
    hubs.use_hub(get_hub())

    if args.conf_file:
        conf = readconf(args.conf_file, 'object-relinker')
        if args.debug:
            conf['log_level'] = 'DEBUG'
        user = args.user or conf.get('user')
        if user:
            drop_privileges(user)
        logger = get_logger(conf)
    else:
        conf = {'log_level': 'DEBUG' if args.debug else 'INFO'}
        if args.user:
            # Drop privs before creating log file
            drop_privileges(args.user)
            conf['user'] = args.user
        logging.basicConfig(
            format='%(message)s',
            level=logging.DEBUG if args.debug else logging.INFO,
            filename=args.logfile)
        logger = logging.getLogger()

    conf.update({
        'swift_dir': args.swift_dir or conf.get('swift_dir', '/etc/swift'),
        'devices': args.devices or conf.get('devices', '/srv/node'),
        'mount_check': (config_true_value(conf.get('mount_check', 'true'))
                        and not args.skip_mount_check),
        'files_per_second': (
            args.files_per_second if args.files_per_second is not None
            else non_negative_float(conf.get('files_per_second', '0'))),
        'policies': set(args.policies) or POLICIES,
        'partitions': set(args.partitions),
        'workers': config_auto_int_value(
            conf.get('workers') if args.workers is None else args.workers,
            'auto'),
        'recon_cache_path': conf.get('recon_cache_path',
                                     DEFAULT_RECON_CACHE_PATH),
        'stats_interval': non_negative_float(
            args.stats_interval or conf.get('stats_interval',
                                            DEFAULT_STATS_INTERVAL)),
    })
    return parallel_process(
        args.action == 'cleanup', conf, logger, args.device_list)
def auto_or_int(value):
    return config_auto_int_value(value, default='auto')
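# auto_or_int() lets the relinker accept --workers auto or --workers N on the
# command line; main() above then funnels the CLI value and the config value
# through config_auto_int_value with 'auto' as the default, so an explicit
# CLI flag wins. A hedged demo of that precedence (resolve_workers is mine):
def resolve_workers(cli_value, conf_value):
    return _config_auto_int_value_sketch(
        conf_value if cli_value is None else cli_value, 'auto')

assert resolve_workers(None, None) == 'auto'   # neither set: 'auto', i.e.
#   one worker per device downstream
assert resolve_workers(None, '4') == 4         # config only
assert resolve_workers('auto', '4') == 'auto'  # explicit CLI override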
def run_wsgi(conf_path, app_section, *args, **kwargs):
    """
    Runs the server using the specified number of workers.

    :param conf_path: Path to paste.deploy style configuration file/directory
    :param app_section: App name from conf file to load config from
    :returns: 0 if successful, nonzero otherwise
    """
    # Load configuration, Set logger and Load request processor
    try:
        (conf, logger, log_name) = \
            _initrp(conf_path, app_section, *args, **kwargs)
    except ConfigFileError as e:
        print(e)
        return 1
    # bind to address and port
    sock = get_socket(conf, default_port=kwargs.get('default_port', 8080))
    # remaining tasks should not require elevated privileges
    drop_privileges(conf.get('user', 'swift'))

    # Ensure the configuration and application can be loaded before
    # proceeding.
    global_conf = {'log_name': log_name}
    if 'global_conf_callback' in kwargs:
        kwargs['global_conf_callback'](conf, global_conf)
    loadapp(conf_path, global_conf=global_conf)

    # set utils.FALLOCATE_RESERVE if desired
    reserve = int(conf.get('fallocate_reserve', 0))
    if reserve > 0:
        utils.FALLOCATE_RESERVE = reserve
    # redirect errors to logger and close stdio
    capture_stdio(logger)

    worker_count = config_auto_int_value(conf.get('workers'), CPU_COUNT)

    # Useful for profiling [no forks].
    if worker_count == 0:
        run_server(conf, logger, sock, global_conf=global_conf)
        return 0

    def kill_children(*args):
        """Kills the entire process group."""
        logger.error('SIGTERM received')
        signal.signal(signal.SIGTERM, signal.SIG_IGN)
        running[0] = False
        os.killpg(0, signal.SIGTERM)

    def hup(*args):
        """Shuts down the server, but allows running requests to complete"""
        logger.error('SIGHUP received')
        signal.signal(signal.SIGHUP, signal.SIG_IGN)
        running[0] = False

    running = [True]
    signal.signal(signal.SIGTERM, kill_children)
    signal.signal(signal.SIGHUP, hup)
    children = []
    while running[0]:
        while len(children) < worker_count:
            pid = os.fork()
            if pid == 0:
                signal.signal(signal.SIGHUP, signal.SIG_DFL)
                signal.signal(signal.SIGTERM, signal.SIG_DFL)
                run_server(conf, logger, sock)
                logger.notice('Child %d exiting normally' % os.getpid())
                return 0
            else:
                logger.notice('Started child %s' % pid)
                children.append(pid)
        try:
            pid, status = os.wait()
            if os.WIFEXITED(status) or os.WIFSIGNALED(status):
                logger.error('Removing dead child %s' % pid)
                children.remove(pid)
        except OSError as err:
            if err.errno not in (errno.EINTR, errno.ECHILD):
                raise
        except KeyboardInterrupt:
            logger.notice('User quit')
            break
    greenio.shutdown_safe(sock)
    sock.close()
    logger.notice('Exited')
    return 0
def run_wsgi(conf_path, app_section, *args, **kwargs):
    """
    Runs the server using the specified number of workers.

    :param conf_path: Path to paste.deploy style configuration file/directory
    :param app_section: App name from conf file to load config from
    :returns: 0 if successful, nonzero otherwise
    """
    # Load configuration, Set logger and Load request processor
    try:
        (conf, logger, log_name) = \
            _initrp(conf_path, app_section, *args, **kwargs)
    except ConfigFileError as e:
        print(e)
        return 1
    # bind to address and port
    sock = get_socket(conf, default_port=kwargs.get('default_port', 8080))
    # remaining tasks should not require elevated privileges
    drop_privileges(conf.get('user', 'ubuntu'))

    # Ensure the configuration and application can be loaded before
    # proceeding.
    global_conf = {'log_name': log_name}
    if 'global_conf_callback' in kwargs:
        kwargs['global_conf_callback'](conf, global_conf)
    loadapp(conf_path, global_conf=global_conf)

    # set utils.FALLOCATE_RESERVE if desired
    reserve = int(conf.get('fallocate_reserve', 0))
    if reserve > 0:
        utils.FALLOCATE_RESERVE = reserve
    # redirect errors to logger and close stdio
    capture_stdio(logger)

    worker_count = config_auto_int_value(conf.get('workers'), CPU_COUNT)

    # Useful for profiling [no forks].
    if worker_count == 0:
        run_server(conf, logger, sock, global_conf=global_conf)
        return 0

    def kill_children(*args):
        """Kills the entire process group."""
        logger.error('SIGTERM received')
        signal.signal(signal.SIGTERM, signal.SIG_IGN)
        running[0] = False
        os.killpg(0, signal.SIGTERM)

    def hup(*args):
        """Shuts down the server, but allows running requests to complete"""
        logger.error('SIGHUP received')
        signal.signal(signal.SIGHUP, signal.SIG_IGN)
        running[0] = False

    running = [True]
    signal.signal(signal.SIGTERM, kill_children)
    signal.signal(signal.SIGHUP, hup)
    children = []
    while running[0]:
        while len(children) < worker_count:
            pid = os.fork()
            if pid == 0:
                signal.signal(signal.SIGHUP, signal.SIG_DFL)
                signal.signal(signal.SIGTERM, signal.SIG_DFL)
                # with open("/home/ubuntu/transition_statistics.txt",
                #           "a") as tran_file:
                #     tran_file.write("I AM IN THE FORK!!!\n")
                oracle_plus.get_oracle_plus().set_attributes(len(children))
                oracle.get_oracle().set_attributes(
                    int(conf.get('number_of_replicas', 3)),
                    float(conf.get('average_window_size', 10)),
                    config_true_value(conf.get('use_adaptation', True)),
                    int(conf.get('initial_write_quorum_size', 2)),
                    int(conf.get('initial_read_quorum_size', 2)),
                    conf.get('oracle_log_file', '/home/ubuntu/oracle.txt'),
                    conf.get('oracle_lib_path',
                             '/home/ubuntu/oracle_files/oracle_twitter'),
                    conf.get('oracle_model_path',
                             '/home/ubuntu/oracle_files/oracle_twitter'),
                    conf.get('ip', ''), int(conf.get('port', '')),
                    conf.get('master_ip', ''), conf.get('slave_ips', ''),
                    int(conf.get('replica_reconciliation_timeout', 2)),
                    len(children), worker_count)
                run_server(conf, logger, sock)
                logger.notice('Child %d exiting normally' % os.getpid())
                return 0
            else:
                logger.notice('Started child %s' % pid)
                children.append(pid)
        try:
            pid, status = os.wait()
            if os.WIFEXITED(status) or os.WIFSIGNALED(status):
                logger.error('Removing dead child %s' % pid)
                children.remove(pid)
        except OSError as err:
            if err.errno not in (errno.EINTR, errno.ECHILD):
                raise
        except KeyboardInterrupt:
            logger.notice('User quit')
            break
    greenio.shutdown_safe(sock)
    sock.close()
    logger.notice('Exited')
    return 0
def __init__(self, app, conf):
    super(HealthCheckMiddleware, self).__init__(app, conf)
    self.status_path = conf.get('status_path', STATUS_PATH)
    counters = conf.get('oioswift_counters', {})
    self.cur_reqs = counters.get('current_requests')
    self.workers = config_auto_int_value(conf.get('workers'), 1)
# bind to address and port
sock = get_socket(conf, default_port=kwargs.get('default_port', 8080))
# remaining tasks should not require elevated privileges
drop_privileges(conf.get('user', 'swift'))

# Ensure the application can be loaded before proceeding.
loadapp(conf_path, global_conf={'log_name': log_name})

# set utils.FALLOCATE_RESERVE if desired
reserve = int(conf.get('fallocate_reserve', 0))
if reserve > 0:
    utils.FALLOCATE_RESERVE = reserve
# redirect errors to logger and close stdio
capture_stdio(logger)

worker_count = config_auto_int_value(conf.get('workers'), CPU_COUNT)

# Useful for profiling [no forks].
if worker_count == 0:
    run_server(conf, logger, sock)
    return

def kill_children(*args):
    """Kills the entire process group."""
    logger.error('SIGTERM received')
    signal.signal(signal.SIGTERM, signal.SIG_IGN)
    running[0] = False
    os.killpg(0, signal.SIGTERM)

def hup(*args):
    """Shuts down the server, but allows running requests to complete"""