Code example #1
File: wsgi.py Project: confi-surya/pythonicPracto
def get_socket(conf, default_port=61005):
    """Bind socket to bind ip:port in conf

    :param conf: Configuration dict to read settings from
    :param default_port: port to use if not specified in conf

    :returns: a socket object as returned from socket.listen or
              ssl.wrap_socket if conf specifies cert_file
    """
    bind_addr = (conf.get('bind_ip', '0.0.0.0'),
                 int(conf.get('bind_port', default_port)))
    address_family = [
        addr[0] for addr in socket.getaddrinfo(
            bind_addr[0], bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
        if addr[0] in (socket.AF_INET, socket.AF_INET6)
    ][0]
    sock = None
    bind_timeout = int(conf.get('bind_timeout', 30))
    so_rcvbuf = conf.get('so_rcvbuf', None)
    retry_until = time.time() + bind_timeout
    warn_ssl = False
    while not sock and time.time() < retry_until:
        try:
            sock = listen(bind_addr,
                          backlog=int(conf.get('backlog', 4096)),
                          family=address_family)
            if 'cert_file' in conf:
                warn_ssl = True
                sock = ssl.wrap_socket(sock,
                                       certfile=conf['cert_file'],
                                       keyfile=conf['key_file'])
        except socket.error as err:
            if err.args[0] != errno.EADDRINUSE:
                raise
            sleep(0.1)
    if not sock:
        raise Exception(
            _('Could not bind to %s:%s '
              'after trying for %s seconds') %
            (bind_addr[0], bind_addr[1], bind_timeout))
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # in my experience, sockets can hang around forever without keepalive
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
    if so_rcvbuf:
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, int(so_rcvbuf))
    sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    if hasattr(socket, 'TCP_KEEPIDLE'):
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 600)
    if warn_ssl:
        ssl_warning_message = _('WARNING: SSL should only be enabled for '
                                'testing purposes. Use external SSL '
                                'termination for a production deployment.')
        get_logger(conf).warning(ssl_warning_message)
        print(ssl_warning_message)
    return sock
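
A minimal usage sketch for get_socket(); the conf values here are illustrative
assumptions, and listen is assumed to be eventlet's listen() (as the
backlog/family keywords suggest):

conf = {'bind_ip': '127.0.0.1', 'bind_port': '61005', 'bind_timeout': '5'}
sock = get_socket(conf)      # retries on EADDRINUSE for up to bind_timeout
print(sock.getsockname())    # e.g. ('127.0.0.1', 61005)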
Code example #2
    def __init__(self, app, conf, logger=None):
        self.app = app
        self.log_hdrs = config_true_value(
            conf.get('access_log_headers', conf.get('log_headers', 'no')))
        log_hdrs_only = list_from_csv(conf.get('access_log_headers_only', ''))
        self.log_hdrs_only = [x.title() for x in log_hdrs_only]

        # The leading access_* check is in case someone assumes that
        # log_statsd_valid_http_methods behaves like the other log_statsd_*
        # settings.
        self.valid_methods = conf.get(
            'access_log_statsd_valid_http_methods',
            conf.get('log_statsd_valid_http_methods',
                     'GET,HEAD,POST,PUT,DELETE,COPY,OPTIONS'))
        self.valid_methods = [
            m.strip().upper() for m in self.valid_methods.split(',')
            if m.strip()
        ]
        access_log_conf = {}
        for key in ('log_facility', 'log_name', 'log_level', 'log_udp_host',
                    'log_udp_port', 'log_statsd_host', 'log_statsd_port',
                    'log_statsd_default_sample_rate',
                    'log_statsd_sample_rate_factor',
                    'log_statsd_metric_prefix'):
            value = conf.get('access_' + key, conf.get(key, None))
            if value:
                access_log_conf[key] = value
        self.access_logger = logger or get_logger(
            access_log_conf,
            log_route='proxy-access',
            section='proxy-access',
            fmt="%(asctime)s %(message)s")
        self.access_logger.set_statsd_prefix('proxy-server')
        self.reveal_sensitive_prefix = int(
            conf.get('reveal_sensitive_prefix', MAX_HEADER_SIZE))
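
The access_-prefixed lookup above lets per-middleware settings override the
shared logging options; a small sketch of that precedence with illustrative
keys:

conf = {'log_facility': 'LOG_LOCAL0', 'access_log_facility': 'LOG_LOCAL1'}
key = 'log_facility'
# The access_-prefixed value wins whenever both are present.
print(conf.get('access_' + key, conf.get(key, None)))  # LOG_LOCAL1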
Code example #3
File: wsgi.py Project: confi-surya/pythonicPracto
def _initrp(conf_path, app_section, *args, **kwargs):
    try:
        conf = appconfig(conf_path, name=app_section)
    except Exception as e:
        raise ConfigFileError("Error trying to load config from %s: %s" %
                              (conf_path, e))

    validate_configuration()

    # pre-configure logger
    log_name = conf.get('log_name', app_section)
    if 'logger' in kwargs:
        logger = kwargs.pop('logger')
    else:
        logger = get_logger(conf,
                            log_name,
                            log_to_console=kwargs.pop('verbose', False),
                            log_route='wsgi')

    # disable fallocate if desired
    if config_true_value(conf.get('disable_fallocate', 'no')):
        disable_fallocate()
    conf.update({'llport': kwargs.pop('llport', 61014)})
    monkey_patch_mimetools()
    return (conf, logger, log_name)
Code example #4
 def __init__(self, app, conf):
     self.app = app
     self.filters = [
         IPfilter(conf),
         RequestFilter(conf),
         BucketFilter(conf)
     ]
     self.logger = utils.get_logger(conf, log_route='ceilometer')
Code example #5
File: daemon.py Project: confi-surya/pythonicPracto
def run_daemon(klass, conf_file, section_name='', once=False, **kwargs):
    """
    Loads settings from conf, then instantiates daemon "klass" and runs the
    daemon with the specified once kwarg.  The section_name will be derived
    from the daemon "klass" if not provided (e.g. ObjectReplicator =>
    object-replicator).

    :param klass: Class to instantiate, subclass of common.daemon.Daemon
    :param conf_file: Path to configuration file
    :param section_name: Section name from conf file to load config from
    :param once: Passed to daemon run method
    """
    # very often the config section_name is based on the class name
    # the None singleton will be passed through to readconf as is
    if section_name == '':
        section_name = sub(r'([a-z])([A-Z])', r'\1-\2', klass.__name__).lower()
    conf = utils.readconf(conf_file,
                          section_name,
                          log_name=kwargs.get('log_name'))

    # once on command line (i.e. daemonize=false) will override config
    once = once or not utils.config_true_value(conf.get('daemonize', 'true'))

    # pre-configure logger
    if 'logger' in kwargs:
        logger = kwargs.pop('logger')
    else:
        logger = utils.get_logger(conf,
                                  conf.get('log_name', section_name),
                                  log_to_console=kwargs.pop('verbose', False),
                                  log_route=section_name)

    # disable fallocate if desired
    if utils.config_true_value(conf.get('disable_fallocate', 'no')):
        utils.disable_fallocate()
    # set utils.FALLOCATE_RESERVE if desired
    reserve = int(conf.get('fallocate_reserve', 0))
    if reserve > 0:
        utils.FALLOCATE_RESERVE = reserve

    # By default, disable eventlet printing stacktraces
    eventlet_debug = utils.config_true_value(conf.get('eventlet_debug', 'no'))
    eventlet.debug.hub_exceptions(eventlet_debug)

    # Ensure TZ environment variable exists to avoid stat('/etc/localtime') on
    # some platforms. This locks in reported times to the timezone in which
    # the server first starts running in locations that periodically change
    # timezones.
    os.environ['TZ'] = time.strftime("%z", time.gmtime())

    try:
        klass(conf).run(once=once, **kwargs)
    except KeyboardInterrupt:
        logger.info('User quit')
    logger.info('Exited')
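
The section-name derivation described in the docstring can be exercised on
its own; a self-contained sketch with an illustrative class name:

from re import sub

class ObjectReplicator(object):
    pass

# CamelCase class name -> hyphenated config section name.
print(sub(r'([a-z])([A-Z])', r'\1-\2', ObjectReplicator.__name__).lower())
# object-replicator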
Code example #6
 def setUp(self):
     self.gl_map_object = GLmap()
     self.conf = {'CLIENT_TIMEOUT': 10}
     self.communicator = "None_for_now"
     self.service_id = "HN0101_61014_container-server"
     self.logger = get_logger({}, log_route='recovery')
     self.__trans_path = "/export/HN0101_61014_transaction_journal"
     self.__cont_path = "/export/HN0101_61014_container_journal"
     osd.containerService.container_recovery.final_recovery_status_list = list()
     osd.containerService.container_recovery.list_of_tuple = queue(maxsize=0)
     osd.containerService.container_recovery.Thread = Monitor()
Code example #7
 def __init__(self, app, conf):
     self.app = app
     self.conf = conf
     self.forbidden_chars = self.conf.get('forbidden_chars',
                                          FORBIDDEN_CHARS)
     self.maximum_length = int(self.conf.get('maximum_length', MAX_LENGTH))
     self.forbidden_regexp = self.conf.get('forbidden_regexp',
                                           FORBIDDEN_REGEXP)
     if self.forbidden_regexp:
         self.forbidden_regexp_compiled = re.compile(self.forbidden_regexp)
     else:
         self.forbidden_regexp_compiled = None
     self.logger = get_logger(self.conf, log_route='name_check')
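
A sketch of how the compiled pattern above might reject an object name. The
FORBIDDEN_REGEXP value here is an assumption (Swift's documented default for
this middleware), not taken from this snippet:

import re

FORBIDDEN_REGEXP = r"/\./|/\.\./|/\.$|/\.\.$"  # assumed default
pattern = re.compile(FORBIDDEN_REGEXP)
# Paths containing '.' or '..' segments match and would be refused.
print(bool(pattern.search('/v1/AUTH_test/cont/obj/../secret')))  # True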
Code example #8
    def __init__(self,
                 conf,
                 global_map_object,
                 service_id,
                 communicator_object,
                 Request_handler,
                 logger=None,
                 source_service_obj=None):

        self.logger = logger or get_logger(conf,
                                           log_route='container-recovery')
        libraryUtils.OSDLoggerImpl("container-library").initialize_logger()
        self.logger.info("running RecoveryMgr ")
        self.communicator_object = communicator_object
        self.Request_handler = Request_handler
        self.conf = conf
        self.__trans_port = int(conf['child_port'])
        self.global_map_object = global_map_object
        self.source_service_obj = source_service_obj
        self.service_id = service_id
        self.id = self.service_id.split('_')[0]
        self.local_leader_id = self.service_id.split('_')[1]

        self.__cont_journal_path = ('.' + self.id + "_" + self.local_leader_id
                                    + "_container_journal")
        self.__trans_journal_path = ('.' + self.id + "_" + self.local_leader_id
                                     + "_transaction_journal")
        self.__cont_path = EXPORT + self.__cont_journal_path
        self.__trans_path = EXPORT + self.__trans_journal_path
        self.logger.debug("Initialization entries: %s %s %s %s %s %s " %
                          (self.global_map_object, self.service_id,
                           self.__cont_journal_path, self.__trans_journal_path,
                           self.__cont_path, self.__trans_path))

        #get object gl version from global leader
        self._object_gl_version = self.__get_object_gl_version()
        self.logger.debug("Object version received: %s" %
                          self._object_gl_version)

        #Map from gl
        self.map_gl = self.global_map_object.dest_comp_map()
        self.logger.debug("Map from Gl Received %s" % self.map_gl)

        #start heart beat
        self.logger.debug("Made heart beat thread")
        #self.heart_beat_thread.spawn(self.start_heart_beat, self.Request_handler)
        self.heart_beat_gthread = eventlet.greenthread.spawn(
            self.start_heart_beat, self.Request_handler)
        self.logger.debug("Started heart beat thread func")
Code example #9
 def __init__(self, app, conf):
     self.app = app
     self.conf = conf
     self.logger = get_logger(conf, log_route='tempauth')
     self.log_headers = config_true_value(conf.get('log_headers', 'f'))
     self.reseller_prefix = conf.get('reseller_prefix', 'AUTH').strip()
     if self.reseller_prefix and self.reseller_prefix[-1] != '_':
         self.reseller_prefix += '_'
     self.logger.set_statsd_prefix(
         'tempauth.%s' %
         (self.reseller_prefix if self.reseller_prefix else 'NONE', ))
     self.auth_prefix = conf.get('auth_prefix', '/auth/')
     if not self.auth_prefix or not self.auth_prefix.strip('/'):
         self.logger.warning('Rewriting invalid auth prefix "%s" to '
                             '"/auth/" (Non-empty auth prefix path '
                             'is required)' % self.auth_prefix)
         self.auth_prefix = '/auth/'
     if self.auth_prefix[0] != '/':
         self.auth_prefix = '/' + self.auth_prefix
     if self.auth_prefix[-1] != '/':
         self.auth_prefix += '/'
     self.token_life = int(conf.get('token_life', 86400))
     self.allow_overrides = config_true_value(
         conf.get('allow_overrides', 't'))
     self.storage_url_scheme = conf.get('storage_url_scheme', 'default')
     self.users = {}
     for conf_key in conf:
         if conf_key.startswith('user_') or conf_key.startswith('user64_'):
             account, username = conf_key.split('_', 1)[1].split('_')
             if conf_key.startswith('user64_'):
                 # Because trailing equal signs would screw up config file
                 # parsing, we auto-pad with '=' chars.
                  account += '=' * (-len(account) % 4)
                  account = base64.b64decode(account)
                  username += '=' * (-len(username) % 4)
                  username = base64.b64decode(username)
             values = conf[conf_key].split()
             if not values:
                 raise ValueError('%s has no key set' % conf_key)
             key = values.pop(0)
             if values and ('://' in values[-1] or '$HOST' in values[-1]):
                 url = values.pop()
             else:
                 url = '$HOST/v1/%s%s' % (self.reseller_prefix, account)
             self.users[account + ':' + username] = {
                 'key': key,
                 'url': url,
                 'groups': values
             }
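
A hypothetical user_ entry and the mapping the loop above builds from it,
reproduced in isolation (account, username, and key are illustrative;
reseller_prefix is assumed to be 'AUTH_', and the url-override branch from
the full code is omitted):

conf = {'user_admin_admin': 'admin .admin .reseller_admin'}
reseller_prefix = 'AUTH_'
users = {}
for conf_key in conf:
    if conf_key.startswith('user_'):
        account, username = conf_key.split('_', 1)[1].split('_')
        values = conf[conf_key].split()
        key = values.pop(0)
        url = '$HOST/v1/%s%s' % (reseller_prefix, account)
        users[account + ':' + username] = {
            'key': key, 'url': url, 'groups': values}
print(users['admin:admin'])
# roughly: {'key': 'admin', 'url': '$HOST/v1/AUTH_admin',
#           'groups': ['.admin', '.reseller_admin']}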
Code example #10
 def __init__(self, app, conf):
     self.app = app
     self.conf = conf
     self.logger = osd_utils.get_logger(conf, log_route='keystoneauth')
     self.reseller_prefix = conf.get('reseller_prefix', 'AUTH_').strip()
     if self.reseller_prefix and self.reseller_prefix[-1] != '_':
         self.reseller_prefix += '_'
     self.operator_roles = conf.get('operator_roles',
                                    'admin, swiftoperator').lower()
     self.reseller_admin_role = conf.get('reseller_admin_role',
                                         'ResellerAdmin').lower()
     config_is_admin = conf.get('is_admin', "false").lower()
     self.is_admin = osd_utils.config_true_value(config_is_admin)
     config_overrides = conf.get('allow_overrides', 't').lower()
     self.allow_overrides = osd_utils.config_true_value(config_overrides)
Code example #11
 def __init__(self, app, conf, logger=None):
     self.app = app
     self.logger = logger or get_logger(conf)
     self.unit_time_period = int(
         conf.get('bandwidth_controlled_unit_time_period', 60))
     self.bw_control = str(conf.get('bandwidth_control', 'Disable'))
     self.next_unit_time = time.time() + self.unit_time_period
     # 0 or unset falls back to the 30000 default
     self.account_limit = \
         int(conf.get('bandwidth_controlled_account_limit', 0)) or 30000
     self.def_max_bandwidth_read_limit = int(
         conf.get('default_max_bandwidth_read_limit', 0))
     self.def_max_bandwidth_write_limit = int(
         conf.get('default_max_bandwidth_write_limit', 0))
     # create the cache with at most self.account_limit + 1024 account entries
     self.cache = BWInfoCache(self.account_limit + 1024, self.next_unit_time,
                              self.unit_time_period, self.logger)
Code example #12
 def __init__(self, conf):
     self.logger = utils.get_logger(conf, log_route='ceilometer')
     parser = ConfigParser()
     parser.read(
         conf.get(
             'ip_filter_config_path',
             '/opt/HYDRAstor/objectStorage/configFiles/osd_ip_filter.conf'))
     self.ips_list = [
         ip.strip()
         for ip in parser.get('ip_filter', 'internal_ip_list').split('\n')
     ]
     self.subnet_list = [
         subnet.strip() for subnet in parser.get(
             'ip_filter', 'internal_skip_list').split('\n')
     ]
     self.ip_headers = [
         header.strip()
         for header in parser.get('ip_filter', 'ip_headers').split('\n')
     ]
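
A hypothetical osd_ip_filter.conf matching the three options parsed above
(section and option names from the snippet; the values are assumptions),
read back with the same newline-splitting logic. Python 2 imports, matching
the snippet:

from ConfigParser import ConfigParser
from StringIO import StringIO

SAMPLE = """\
[ip_filter]
internal_ip_list = 10.0.0.1
    10.0.0.2
internal_skip_list = 10.1.0.0/16
ip_headers = x-forwarded-for
    x-cluster-client-ip
"""
parser = ConfigParser()
parser.readfp(StringIO(SAMPLE))
print([ip.strip() for ip in
       parser.get('ip_filter', 'internal_ip_list').split('\n')])
# ['10.0.0.1', '10.0.0.2']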
Code example #13
File: slo.py Project: confi-surya/pythonicPracto
 def __init__(self,
              app,
              conf,
              min_segment_size=DEFAULT_MIN_SEGMENT_SIZE,
              max_manifest_segments=DEFAULT_MAX_MANIFEST_SEGMENTS,
              max_manifest_size=DEFAULT_MAX_MANIFEST_SIZE):
     self.conf = conf
     self.app = app
     #        self.logger = get_logger(conf, log_route='slo')
     self.max_manifest_segments = max_manifest_segments
     self.max_manifest_size = max_manifest_size
     self.min_segment_size = min_segment_size
     self.max_get_time = int(self.conf.get('max_get_time', 86400))
     self.rate_limit_after_segment = int(
         self.conf.get('rate_limit_after_segment', '10'))
     self.rate_limit_segments_per_sec = int(
         self.conf.get('rate_limit_segments_per_sec', '0'))
     #        self.bulk_deleter = Bulk(app, {}, logger=self.logger)
     self.objs_to_delete = []
     self.logger = get_logger(self.conf, log_route='slo')
Code example #14
    def __init__(self, app, conf):
        self.app = app
        self.publish_incoming_bytes = conf.get("publish_incoming_bytes", True)
        self.publish_outgoing_bytes = conf.get("publish_outgoing_bytes", True)
        self.publish_on_error = conf.get("publish_on_error", False)
        self.enable_filters = conf.get("enable_filters", True)
        self.error_on_status = [
            status.strip()
            for status in conf.get("error_on_status", '').split('\n')
        ]
        self.logger = utils.get_logger(conf, log_route='ceilometer')

        self.metadata_headers = [
            h.strip().replace('-', '_').lower()
            for h in conf.get("metadata_headers", "").split(",") if h.strip()
        ]

        service.prepare_service([])

        self.pipeline_manager = pipeline.setup_pipeline()
        self.reseller_prefix = conf.get('reseller_prefix', 'AUTH_')
        if self.reseller_prefix and self.reseller_prefix[-1] != '_':
            self.reseller_prefix += '_'
Code example #15
    def __init__(self, conf, dictionary_new, communicator_obj,
                 service_component_map, service_id, gl_communication_obj=None,
                 logger=None, object_gl_version=None, gl_map=None):
        self.logger = logger or get_logger({}, log_route='recovery-container')
        self.pile_size = len(dictionary_new.keys())
        self.logger.info("Started Recovery after restructuring...")
        self.dictionary_new = dictionary_new

        #destination map of gl
        self.Map_from_gl = gl_map
        self.logger.info("map from gl in recovery %s" % self.Map_from_gl)
        self.conf = conf
        self.container_node_list = [
            {'ip': key.split(':')[0], 'port': key.split(':')[1]}
            for key in dictionary_new.keys()
        ]
        self.logger.debug("Container node list: %s, Dictionary passed: %s" %
                          (self.container_node_list, self.dictionary_new))

        self.service_component_map = service_component_map
        self.communicator_obj = communicator_obj
        self.service_id = service_id
        self.gl_communication_obj = gl_communication_obj
        self.__object_gl_version = object_gl_version
Code example #16
 def __init__(self, app, conf, logger=None):
     self.app = app
     if logger:
         self.logger = logger
     else:
         self.logger = get_logger(conf, log_route='ratelimit')
     self.account_ratelimit = float(conf.get('account_ratelimit', 0))
     self.max_sleep_time_seconds = \
         float(conf.get('max_sleep_time_seconds', 60))
     self.log_sleep_time_seconds = \
         float(conf.get('log_sleep_time_seconds', 0))
     self.clock_accuracy = int(conf.get('clock_accuracy', 1000))
     self.rate_buffer_seconds = int(conf.get('rate_buffer_seconds', 5))
     self.ratelimit_whitelist = \
         [acc.strip() for acc in
             conf.get('account_whitelist', '').split(',') if acc.strip()]
     self.ratelimit_blacklist = \
         [acc.strip() for acc in
             conf.get('account_blacklist', '').split(',') if acc.strip()]
     self.memcache_client = None
     self.container_ratelimits = interpret_conf_limits(
         conf, 'container_ratelimit_')
     self.container_listing_ratelimits = interpret_conf_limits(
         conf, 'container_listing_ratelimit_')
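
A hypothetical settings dict for the middleware above; interpret_conf_limits
is assumed (as in Swift's ratelimit middleware) to collect
container_ratelimit_<size> = <rate> pairs keyed by container size:

conf = {
    'account_ratelimit': '100',        # max requests/sec per account
    'max_sleep_time_seconds': '60',
    'account_whitelist': 'staging_acct',
    'container_ratelimit_100': '50',   # containers with >= 100 objects
    'container_ratelimit_1000': '10',
}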
Code example #17
 def __init__(self, app, conf):
     self.app = app
     self.logger = get_logger(conf, log_route='gatekeeper')
     self.inbound_condition = make_exclusion_test(inbound_exclusions)
     self.outbound_condition = make_exclusion_test(outbound_exclusions)
Code example #18
File: daemon.py Project: confi-surya/pythonicPracto
 def __init__(self, conf):
     self.conf = conf
     self.logger = utils.get_logger(conf, log_route='daemon')
Code example #19
    def __init__(self,
                 conf,
                 memcache=None,
                 logger=None,
                 account_ring=None,
                 container_ring=None,
                 object_ring=None):
        if conf is None:
            conf = {}
        if logger is None:
            self.logger = get_logger(conf, log_route='proxy-server')
        else:
            self.logger = logger
        libraryUtils.OSDLoggerImpl("proxy-monitoring").initialize_logger()
        create_recovery_file('proxy-server')
        self.ongoing_operation_list = []
        self.stop_service_flag = False
        osd_dir = conf.get('osd_dir', '/export/.osd_meta_config')
        static_config_file = conf.get(
            'static_config_file',
            '/opt/HYDRAstor/objectStorage/configFiles/static_proxy-server.conf')
        #swift_dir = conf.get('swift_dir', '/etc/swift')
        static_conf = self.readconf(static_config_file)
        self.logger.debug("Static config parameters:%s" % (static_conf))
        self.node_timeout = int(conf.get('node_timeout', 10))
        self.recoverable_node_timeout = int(
            conf.get('recoverable_node_timeout', self.node_timeout))
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        self.client_timeout = int(conf.get('client_timeout', 60))
        self.put_queue_depth = int(static_conf.get('put_queue_depth', 5))
        self.retry_count = int(static_conf.get('retry_count', 3))
        self.request_retry_time_service_unavailable = int(
            static_conf.get('request_retry_time_service_unavailable', 100))
        self.request_retry_time_component_transfer = int(
            static_conf.get('request_retry_time_component_transfer', 50))
        self.object_chunk_size = int(conf.get('object_chunk_size', 65536))
        self.client_chunk_size = int(conf.get('client_chunk_size', 65536))
        self.trans_id_suffix = conf.get('trans_id_suffix', '')
        self.post_quorum_timeout = float(conf.get('post_quorum_timeout', 0.5))
        #self.error_suppression_interval = \
        #    int(conf.get('error_suppression_interval', 60))
        #self.error_suppression_limit = \
        #    int(conf.get('error_suppression_limit', 10))
        self.recheck_container_existence = \
            int(conf.get('recheck_container_existence', 60))
        self.recheck_account_existence = \
            int(conf.get('recheck_account_existence', 60))
        self.allow_account_management = \
            config_true_value(static_conf.get('allow_account_management', 'no'))
        self.object_post_as_copy = \
            config_true_value(conf.get('object_post_as_copy', 'false'))
        self.object_ring = object_ring or ObjectRing(osd_dir, self.logger,
                                                     self.node_timeout)
        self.container_ring = container_ring or ContainerRing(
            osd_dir, self.logger, self.node_timeout)
        self.account_ring = account_ring or AccountRing(osd_dir, self.logger,
                                                        self.node_timeout)
        self.memcache = memcache
        mimetypes.init(mimetypes.knownfiles +
                       [os.path.join(osd_dir, 'mime.types')])
        self.account_autocreate = \
            config_true_value(static_conf.get('account_autocreate', 'yes'))
        #self.expiring_objects_account = \
        #    (conf.get('auto_create_account_prefix') or '.') + \
        #    (conf.get('expiring_objects_account_name') or 'expiring_objects')
        #self.expiring_objects_container_divisor = \
        #    int(conf.get('expiring_objects_container_divisor') or 86400)
        self.max_containers_per_account = \
            int(conf.get('max_containers_per_account') or 10000000)
        self.max_containers_whitelist = [
            a.strip()
            for a in conf.get('max_containers_whitelist', '').split(',')
            if a.strip()
        ]
        self.deny_host_headers = [
            host.strip()
            for host in conf.get('deny_host_headers', '').split(',')
            if host.strip()
        ]
        #self.rate_limit_after_segment = \
        #    int(conf.get('rate_limit_after_segment', 10))
        #self.rate_limit_segments_per_sec = \
        #    int(conf.get('rate_limit_segments_per_sec', 1))
        #self.log_handoffs = config_true_value(conf.get('log_handoffs', 'true'))
        #self.cors_allow_origin = [
        #    a.strip()
        #    for a in conf.get('cors_allow_origin', '').split(',')
        #    if a.strip()]
        #self.strict_cors_mode = config_true_value(
        #    conf.get('strict_cors_mode', 't'))
        self.node_timings = {}
        self.timing_expiry = int(conf.get('timing_expiry', 300))
        #self.sorting_method = conf.get('sorting_method', 'shuffle').lower()
        #self.max_large_object_get_time = float(
        #    conf.get('max_large_object_get_time', '86400'))
        #value = conf.get('request_node_count', '2 * replicas').lower().split()
        #if len(value) == 1:
        #    value = int(value[0])
        #    self.request_node_count = lambda replicas: value
        #elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas':
        #    value = int(value[0])
        #    self.request_node_count = lambda replicas: value * replicas
        #else:
        #    raise ValueError(
        #        'Invalid request_node_count value: %r' % ''.join(value))
        #try:
        #    self._read_affinity = read_affinity = conf.get('read_affinity', '')
        #    self.read_affinity_sort_key = affinity_key_function(read_affinity)
        #except ValueError as err:
        #    # make the message a little more useful
        #    raise ValueError("Invalid read_affinity value: %r (%s)" %
        #                     (read_affinity, err.message))
        #try:
        #    write_affinity = conf.get('write_affinity', '')
        #    self.write_affinity_is_local_fn \
        #        = affinity_locality_predicate(write_affinity)
        #except ValueError as err:
        #    # make the message a little more useful
        #    raise ValueError("Invalid write_affinity value: %r (%s)" %
        #                     (write_affinity, err.message))
        #value = conf.get('write_affinity_node_count',
        #                 '2 * replicas').lower().split()
        #if len(value) == 1:
        #    value = int(value[0])
        #    self.write_affinity_node_count = lambda replicas: value
        #elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas':
        #    value = int(value[0])
        #    self.write_affinity_node_count = lambda replicas: value * replicas
        #else:
        #    raise ValueError(
        #        'Invalid write_affinity_node_count value: %r' % ''.join(value))
        # swift_owner_headers are stripped by the account and container
        # controllers; we should extend header stripping to object controller
        # when a privileged object header is implemented.
        swift_owner_headers = conf.get(
            'swift_owner_headers', 'x-container-read, x-container-write, '
            'x-container-sync-key, x-container-sync-to, '
            'x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, '
            'x-account-access-control')
        self.swift_owner_headers = [
            name.strip().title() for name in swift_owner_headers.split(',')
            if name.strip()
        ]
        # Initialization was successful, so now apply the client chunk size
        # parameter as the default read / write buffer size for the network
        # sockets.
        #
        # NOTE WELL: This is a class setting, so until we get set this on a
        # per-connection basis, this affects reading and writing on ALL
        # sockets, those between the proxy servers and external clients, and
        # those between the proxy servers and the other internal servers.
        #
        # ** Because it affects the client as well, currently, we use the
        # client chunk size as the governor and not the object chunk size.
        socket._fileobject.default_bufsize = self.client_chunk_size
        self.expose_info = config_true_value(conf.get('expose_info', 'yes'))
        self.disallowed_sections = list_from_csv(
            conf.get('disallowed_sections'))
        self.admin_key = conf.get('admin_key', None)
        register_osd_info(
            max_file_size=constraints.MAX_FILE_SIZE,
            max_meta_name_length=constraints.MAX_META_NAME_LENGTH,
            max_meta_value_length=constraints.MAX_META_VALUE_LENGTH,
            max_meta_count=constraints.MAX_META_COUNT,
            account_listing_limit=constraints.ACCOUNT_LISTING_LIMIT,
            container_listing_limit=constraints.CONTAINER_LISTING_LIMIT,
            max_account_name_length=constraints.MAX_ACCOUNT_NAME_LENGTH,
            max_container_name_length=constraints.MAX_CONTAINER_NAME_LENGTH,
            max_object_name_length=constraints.MAX_OBJECT_NAME_LENGTH,
            non_allowed_headers=constraints.NON_ALLOWED_HEADERS)

        self.proxy_port = int(static_conf.get('bind_port', 61005))
        self.__ll_port = int(conf.get('llport', 61014))

        self.max_bulk_delete_entries = int(
            conf.get('max_bulk_delete_entries', 1000))

        # unblock new requests that were blocked while the proxy service
        # was stopped
        self.__request_unblock()

        hostname = socket.gethostname()
        self.__server_id = hostname + "_" + str(
            self.__ll_port) + "_proxy-server"

        # Start sending health to local leader
        self.logger.info("Loading health monitoring library")
        self.health_instance = healthMonitoring(
            self.__get_node_ip(hostname), self.proxy_port, self.__ll_port,
            self.__server_id, True)
        self.logger.info("Loaded health monitoring library")
        remove_recovery_file('proxy-server')
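
A small sketch of the server-id construction near the end of the snippet;
the hostname and port are illustrative (compare the
"HN0101_61014_container-server" id used in code example #6):

hostname = 'HN0101'  # socket.gethostname() in the real code
ll_port = 61014
print(hostname + "_" + str(ll_port) + "_proxy-server")
# HN0101_61014_proxy-server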
Code example #20
    def __init__(self, config, logger, communicator, recovery_stat,
                 service_id):
        self.__recovery_status = recovery_stat
        self.__serv_id = service_id
        self.__mount_check = True
        self.__root = "/export"
        self.logger = logger or get_logger(config, log_route='object-recovery')
        self.__recovery_handler_obj = \
            RecoveryHandler(self.__serv_id, self.logger)
        self._communicator_obj = communicator
        gl_info_obj = communicator.get_gl_info()
        retry_count = 0
        while retry_count < 3:
            retry_count += 1
            self._con_obj = communicator.connector(IoType.EVENTIO, gl_info_obj)
            if self._con_obj:
                self.logger.debug("CONN object success")
                break
        self.logger.debug("conn obj:%s, retry_count:%s " %
                          (self._con_obj, retry_count))

        self._network_fail_status_list = [
            Resp.BROKEN_PIPE, Resp.CONNECTION_RESET_BY_PEER,
            Resp.INVALID_SOCKET
        ]
        self._success_status_list = [Resp.SUCCESS]

        #flags
        self._final_recovery_status_list = []
        self._strm_ack_flag = False
        self._final_status = True
        self._finish_recovery_flag = False

        # return variables code
        ret = self._communicator_obj.recv_pro_start_monitoring(
            self.__serv_id, self._con_obj)

        if ret.status.get_status_code() in self._success_status_list:
            self.logger.info("Strm status successfull")
        else:
            gl_info_obj = self._communicator_obj.get_gl_info()
            retry_count = 0
            while retry_count < 3:
                retry_count += 1
                self._con_obj = self._communicator_obj.connector(
                    IoType.EVENTIO, gl_info_obj)
                if self._con_obj:
                    self.logger.debug("CONN object success")
                    ret = self._communicator_obj.recv_pro_start_monitoring(
                        self.__serv_id, self._con_obj)
                    if (ret.status.get_status_code()
                            in self._success_status_list):
                        break
                    else:
                        self.logger.error("Start Monitoring Failure")
                        sys.exit(130)

            self.logger.debug("conn obj:%s, retry_count:%s " %
                              (self._con_obj, retry_count))
            # set flag, main thread will read this flag

        #TODO Start sending health to local leader
        # return variables code

        self.logger.debug("Initialized Object Recovery")
Code example #21
        description="Recovery process option parser")
    parser.add_argument("server_id", help="Service id from Global leader.")
    args = parser.parse_args()
    __service_id = args.server_id

    #Change Process name of Script.
    script_prefix = "RECOVERY_"
    proc_name = script_prefix + __service_id
    procname.setprocname(proc_name)

    recovery_status = RecoveryStatus()
    conf = {
        'log_requests': 'false',
        'log_failure': 'true',
        'log_level': 'debug',
        'log_facility': 'LOG_LOCAL6',
        'log_name': 'object_recovery'
    }
    #create logger obj
    logger_obj = get_logger(conf, log_route='object-recovery')
    logger_obj.info("Service received is : %s of type %s " % \
                    (__service_id, type(__service_id)))

    #create communication object
    communicator_obj = Req(logger_obj)

    recover = RecoveryObject(conf, logger_obj, communicator_obj,
                             recovery_status, __service_id)
    recover.recover_data()
    logger_obj.info("Object Recovery finished")
Code example #22
 def __init__(self, conf):
     self.logger = utils.get_logger(conf, log_route='ceilometer')
Code example #23
File: manager.py Project: confi-surya/pythonicPracto
REST_SERVERS = [s for s in ALL_SERVERS if s not in MAIN_SERVERS]
GRACEFUL_SHUTDOWN_SERVERS = MAIN_SERVERS + ['auth-server']
START_ONCE_SERVERS = REST_SERVERS
# These are servers that match a type (account-*, container-*, object-*) but
# don't use that type-server.conf file and instead use their own.
STANDALONE_SERVERS = REST_SERVERS

KILL_WAIT = 60  # seconds to wait for servers to die (by default)
WARNING_WAIT = 3  # seconds to wait after message that may just be a warning

MAX_DESCRIPTORS = 32768
MAX_MEMORY = (1024 * 1024 * 1024) * 2  # 2 GB
MAX_PROCS = 8192  # workers * disks * threads_per_disk, can get high

script_logger = get_logger({}, section='script')


def setup_env():
    """Try to increase resource limits of the OS. Move PYTHON_EGG_CACHE to /tmp
    """
    try:
        resource.setrlimit(resource.RLIMIT_NOFILE,
                           (MAX_DESCRIPTORS, MAX_DESCRIPTORS))
    except ValueError:
        print _("WARNING: Unable to modify file descriptor limit.  "
                "Running as non-root?")
        script_logger.error("WARNING: Unable to modify file descriptor limit. Running as non-root?")

    try:
        resource.setrlimit(resource.RLIMIT_DATA,
Code example #24
 def __init__(self, app, conf):
     self.app = app
     self.logger = get_logger(conf, log_route='catch-errors')
     self.trans_id_suffix = conf.get('trans_id_suffix', '')
Code example #25
    args = parser.parse_args()
    acc_service_id = args.server_id

    #Change Process name of Script.
    script_prefix = "RECOVERY_"
    proc_name = script_prefix + acc_service_id
    procname.setprocname(proc_name)

    #create config object
    CONFIG_PATH = "/opt/HYDRAstor/objectStorage/configFiles/account-server.conf"
    config = ConfigParser.ConfigParser()
    config.read(CONFIG_PATH)
    conf_obj = dict(config.items('DEFAULT'))

    #create logger obj
    logger_obj = get_logger(conf_obj, log_route='recovery')

    logger_obj.info("Parsed aguments:%s, configfile: %s, proc %s" % \
                    (acc_service_id, conf_obj, proc_name))

    #create communication object
    communicator_obj = Req(logger_obj)
    #create recovery obj and start recovery
    rec_data_obj = RecoveryData()
    retry = 0
    rec_obj = Recovery(conf_obj, logger_obj, acc_service_id,
                       communicator_obj, rec_data_obj)
    while retry < MAX_RETRY_COUNT:
        if rec_obj.connect_gl():
            logger_obj.info("Starting Recovery Process...")
            # create green thread for recovery instance
Code example #26
    CONFIG_PATH = \
        "/opt/HYDRAstor/objectStorage/configFiles/container-recovery.conf"
    config = ConfigParser.ConfigParser()
    config.read(CONFIG_PATH)
    config_dict = dict(config.items('DEFAULT'))

    # Global variables for intermediate response handling
    list_of_tuple = queue(maxsize=1000)
    finish_recovery_flag = False
    final_status = True
    final_recovery_status_list = list()
    list_to_gl = []
    clean_journal_flag = True

    # initialize logger
    logger_obj = get_logger(config_dict, log_route='recovery')
    logger_obj.info("Parsed arguments: %s, configfile: %s, proc %s" %
                    (service_id_, config_dict, proc_name))

    #Get Global map and retry 2 times.
    logger_obj.info("Start global map request")
    Request_handler_ = Req(logger_obj, timeout=0)
    gl_info_obj = Request_handler_.get_gl_info()

    retry_count = 0
    while retry_count < 3:
        retry_count += 1
        communicator_object_ = Request_handler_.connector(
            IoType.EVENTIO, gl_info_obj)
        if communicator_object_:
            logger_obj.debug("CONN object success")