Example #1
def filter_factory(global_conf, **local_conf):
    """Returns a WSGI filter app for use with paste.deploy."""
    conf = global_conf.copy()
    conf.update(local_conf)
    register_osd_info('keystoneauth')

    def auth_filter(app):
        return KeystoneAuth(app, conf)

    return auth_filter
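
For context, paste.deploy calls a factory like this with the [DEFAULT] section of the config file as global_conf and the filter section's options as local_conf, then uses the returned callable to wrap the next app in the pipeline. A minimal sketch of that wiring, invoked directly for illustration (fake_app and the option names are assumptions, not from the source):

def fake_app(environ, start_response):
    """A stand-in for the downstream WSGI app in the pipeline."""
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'ok']

make_filter = filter_factory({'log_level': 'info'}, reseller_prefix='AUTH_')
app = make_filter(fake_app)  # app is now KeystoneAuth(fake_app, conf)
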
Example #2
def filter_factory(global_conf, **local_conf):
    """Returns a WSGI filter app for use with paste.deploy."""
    conf = global_conf.copy()
    conf.update(local_conf)
    register_osd_info('tempauth', account_acls=True)

    def auth_filter(app):
        return TempAuth(app, conf)

    return auth_filter
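
register_osd_info itself is not shown in these examples. Judging by its call sites (a capability name plus keyword options such as account_acls=True, and a bare keyword-only call in Example #5), it likely mirrors OpenStack Swift's register_swift_info, which records middleware capabilities for an /info-style discovery endpoint. A plausible minimal sketch under that assumption:

_osd_info = {}  # hypothetical module-level registry of exposed capabilities

def register_osd_info(name='osd', **capabilities):
    # Record a middleware's capabilities so a discovery endpoint
    # (e.g. GET /info) can report them to clients.
    _osd_info.setdefault(name, {}).update(capabilities)
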
Example #3
def filter_factory(global_conf, **local_conf):
    """
    paste.deploy app factory for creating WSGI proxy apps.
    """
    conf = global_conf.copy()
    conf.update(local_conf)
    register_osd_info('ratelimit')

    def limit_filter(app):
        return RateLimitMiddleware(app, conf)

    return limit_filter
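
The conf = global_conf.copy(); conf.update(local_conf) idiom repeated in every factory above makes filter-section options override [DEFAULT] globals of the same name (option names here are illustrative):

global_conf = {'log_level': 'info', 'ratelimit': '10'}  # [DEFAULT] section
local_conf = {'ratelimit': '100'}                       # [filter:ratelimit] section
conf = global_conf.copy()
conf.update(local_conf)
assert conf == {'log_level': 'info', 'ratelimit': '100'}  # local wins
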
Example #4
def filter_factory(global_conf, **local_conf):
    conf = global_conf.copy()
    conf.update(local_conf)

    max_manifest_segments = int(
        conf.get('max_manifest_segments', DEFAULT_MAX_MANIFEST_SEGMENTS))
    max_manifest_size = int(
        conf.get('max_manifest_size', DEFAULT_MAX_MANIFEST_SIZE))
    min_segment_size = int(
        conf.get('min_segment_size', DEFAULT_MIN_SEGMENT_SIZE))

    register_osd_info('slo',
                      max_manifest_segments=max_manifest_segments,
                      max_manifest_size=max_manifest_size,
                      min_segment_size=min_segment_size)

    def slo_filter(app):
        return StaticLargeObject(app,
                                 conf,
                                 max_manifest_segments=max_manifest_segments,
                                 max_manifest_size=max_manifest_size,
                                 min_segment_size=min_segment_size)

    return slo_filter
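
Note the int() casts: values read from a paste.deploy .ini file always arrive as strings, so numeric options must be coerced before use, while the DEFAULT_* fallbacks (defined elsewhere in the module) are already integers. For example (the default value below is hypothetical, for illustration only):

DEFAULT_MAX_MANIFEST_SEGMENTS = 1000  # assumed value, not from the source

conf = {'max_manifest_segments': '500'}  # as parsed from an .ini file
limit = int(conf.get('max_manifest_segments', DEFAULT_MAX_MANIFEST_SEGMENTS))
assert limit == 500
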
Example #5
    def __init__(self,
                 conf,
                 memcache=None,
                 logger=None,
                 account_ring=None,
                 container_ring=None,
                 object_ring=None):
        if conf is None:
            conf = {}
        if logger is None:
            self.logger = get_logger(conf, log_route='proxy-server')
        else:
            self.logger = logger
        libraryUtils.OSDLoggerImpl("proxy-monitoring").initialize_logger()
        create_recovery_file('proxy-server')
        self.ongoing_operation_list = []
        self.stop_service_flag = False
        osd_dir = conf.get('osd_dir', '/export/.osd_meta_config')
        static_config_file = conf.get(
            'static_config_file',
            '/opt/HYDRAstor/objectStorage/configFiles/static_proxy-server.conf')
        #swift_dir = conf.get('swift_dir', '/etc/swift')
        static_conf = self.readconf(static_config_file)
        self.logger.debug("Static config parameters:%s" % (static_conf))
        self.node_timeout = int(conf.get('node_timeout', 10))
        self.recoverable_node_timeout = int(
            conf.get('recoverable_node_timeout', self.node_timeout))
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        self.client_timeout = int(conf.get('client_timeout', 60))
        self.put_queue_depth = int(static_conf.get('put_queue_depth', 5))
        self.retry_count = int(static_conf.get('retry_count', 3))
        self.request_retry_time_service_unavailable = int(
            static_conf.get('request_retry_time_service_unavailable', 100))
        self.request_retry_time_component_transfer = int(
            static_conf.get('request_retry_time_component_transfer', 50))
        self.object_chunk_size = int(conf.get('object_chunk_size', 65536))
        self.client_chunk_size = int(conf.get('client_chunk_size', 65536))
        self.trans_id_suffix = conf.get('trans_id_suffix', '')
        self.post_quorum_timeout = float(conf.get('post_quorum_timeout', 0.5))
        #self.error_suppression_interval = \
        #    int(conf.get('error_suppression_interval', 60))
        #self.error_suppression_limit = \
        #    int(conf.get('error_suppression_limit', 10))
        self.recheck_container_existence = \
            int(conf.get('recheck_container_existence', 60))
        self.recheck_account_existence = \
            int(conf.get('recheck_account_existence', 60))
        self.allow_account_management = \
            config_true_value(static_conf.get('allow_account_management', 'no'))
        self.object_post_as_copy = \
            config_true_value(conf.get('object_post_as_copy', 'false'))
        self.object_ring = object_ring or ObjectRing(
            osd_dir, self.logger, self.node_timeout)
        self.container_ring = container_ring or ContainerRing(
            osd_dir, self.logger, self.node_timeout)
        self.account_ring = account_ring or AccountRing(
            osd_dir, self.logger, self.node_timeout)
        self.memcache = memcache
        mimetypes.init(mimetypes.knownfiles +
                       [os.path.join(osd_dir, 'mime.types')])
        self.account_autocreate = \
            config_true_value(static_conf.get('account_autocreate', 'yes'))
        #self.expiring_objects_account = \
        #    (conf.get('auto_create_account_prefix') or '.') + \
        #    (conf.get('expiring_objects_account_name') or 'expiring_objects')
        #self.expiring_objects_container_divisor = \
        #    int(conf.get('expiring_objects_container_divisor') or 86400)
        self.max_containers_per_account = \
            int(conf.get('max_containers_per_account') or 10000000)
        self.max_containers_whitelist = [
            a.strip()
            for a in conf.get('max_containers_whitelist', '').split(',')
            if a.strip()
        ]
        self.deny_host_headers = [
            host.strip()
            for host in conf.get('deny_host_headers', '').split(',')
            if host.strip()
        ]
        #self.rate_limit_after_segment = \
        #    int(conf.get('rate_limit_after_segment', 10))
        #self.rate_limit_segments_per_sec = \
        #    int(conf.get('rate_limit_segments_per_sec', 1))
        #self.log_handoffs = config_true_value(conf.get('log_handoffs', 'true'))
        #self.cors_allow_origin = [
        #    a.strip()
        #    for a in conf.get('cors_allow_origin', '').split(',')
        #    if a.strip()]
        #self.strict_cors_mode = config_true_value(
        #    conf.get('strict_cors_mode', 't'))
        self.node_timings = {}
        self.timing_expiry = int(conf.get('timing_expiry', 300))
        #self.sorting_method = conf.get('sorting_method', 'shuffle').lower()
        #self.max_large_object_get_time = float(
        #    conf.get('max_large_object_get_time', '86400'))
        #value = conf.get('request_node_count', '2 * replicas').lower().split()
        #if len(value) == 1:
        #    value = int(value[0])
        #    self.request_node_count = lambda replicas: value
        #elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas':
        #    value = int(value[0])
        #    self.request_node_count = lambda replicas: value * replicas
        #else:
        #    raise ValueError(
        #        'Invalid request_node_count value: %r' % ''.join(value))
        #try:
        #    self._read_affinity = read_affinity = conf.get('read_affinity', '')
        #    self.read_affinity_sort_key = affinity_key_function(read_affinity)
        #except ValueError as err:
        #    # make the message a little more useful
        #    raise ValueError("Invalid read_affinity value: %r (%s)" %
        #                     (read_affinity, err.message))
        #try:
        #    write_affinity = conf.get('write_affinity', '')
        #    self.write_affinity_is_local_fn \
        #        = affinity_locality_predicate(write_affinity)
        #except ValueError as err:
        #    # make the message a little more useful
        #    raise ValueError("Invalid write_affinity value: %r (%s)" %
        #                     (write_affinity, err.message))
        #value = conf.get('write_affinity_node_count',
        #                 '2 * replicas').lower().split()
        #if len(value) == 1:
        #    value = int(value[0])
        #    self.write_affinity_node_count = lambda replicas: value
        #elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas':
        #    value = int(value[0])
        #    self.write_affinity_node_count = lambda replicas: value * replicas
        #else:
        #    raise ValueError(
        #        'Invalid write_affinity_node_count value: %r' % ''.join(value))
        # swift_owner_headers are stripped by the account and container
        # controllers; we should extend header stripping to object controller
        # when a privileged object header is implemented.
        swift_owner_headers = conf.get(
            'swift_owner_headers', 'x-container-read, x-container-write, '
            'x-container-sync-key, x-container-sync-to, '
            'x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, '
            'x-account-access-control')
        self.swift_owner_headers = [
            name.strip().title() for name in swift_owner_headers.split(',')
            if name.strip()
        ]
        # Initialization was successful, so now apply the client chunk size
        # parameter as the default read / write buffer size for the network
        # sockets.
        #
        # NOTE WELL: This is a class setting, so until we can set this on a
        # per-connection basis, this affects reading and writing on ALL
        # sockets, those between the proxy servers and external clients, and
        # those between the proxy servers and the other internal servers.
        #
        # ** Because it affects the client as well, currently, we use the
        # client chunk size as the governor and not the object chunk size.
        socket._fileobject.default_bufsize = self.client_chunk_size
        self.expose_info = config_true_value(conf.get('expose_info', 'yes'))
        self.disallowed_sections = list_from_csv(
            conf.get('disallowed_sections'))
        self.admin_key = conf.get('admin_key', None)
        register_osd_info(
            max_file_size=constraints.MAX_FILE_SIZE,
            max_meta_name_length=constraints.MAX_META_NAME_LENGTH,
            max_meta_value_length=constraints.MAX_META_VALUE_LENGTH,
            max_meta_count=constraints.MAX_META_COUNT,
            account_listing_limit=constraints.ACCOUNT_LISTING_LIMIT,
            container_listing_limit=constraints.CONTAINER_LISTING_LIMIT,
            max_account_name_length=constraints.MAX_ACCOUNT_NAME_LENGTH,
            max_container_name_length=constraints.MAX_CONTAINER_NAME_LENGTH,
            max_object_name_length=constraints.MAX_OBJECT_NAME_LENGTH,
            non_allowed_headers=constraints.NON_ALLOWED_HEADERS)

        self.proxy_port = int(static_conf.get('bind_port', 61005))
        self.__ll_port = int(conf.get('llport', 61014))

        self.max_bulk_delete_entries = int(
            conf.get('max_bulk_delete_entries', 1000))

        # Unblock new requests that were blocked while the proxy service was stopped.
        self.__request_unblock()

        hostname = socket.gethostname()
        self.__server_id = hostname + "_" + str(
            self.__ll_port) + "_proxy-server"

        # Start sending health to local leader
        self.logger.info("Loading health monitoring library")
        self.health_instance = healthMonitoring(
            self.__get_node_ip(hostname), self.proxy_port, self.__ll_port,
            self.__server_id, True)
        self.logger.info("Loaded health monitoring library")
        remove_recovery_file('proxy-server')
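
This constructor leans on a few small helpers (config_true_value, list_from_csv) that are not shown here. Modeled on OpenStack Swift's swift.common.utils equivalents, they behave roughly like the sketch below; the real implementations in this codebase may differ:

TRUE_VALUES = {'true', '1', 'yes', 'on', 't', 'y'}

def config_true_value(value):
    # Treat True itself and common truthy strings (case-insensitively) as true.
    return value is True or (
        isinstance(value, str) and value.lower() in TRUE_VALUES)

def list_from_csv(comma_separated_str):
    # Turn 'a, b, c' into ['a', 'b', 'c']; None or '' yields [].
    if comma_separated_str:
        return [v.strip() for v in comma_separated_str.split(',') if v.strip()]
    return []
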
Example #6
def filter_factory(global_conf, **local_conf):
    """Returns the WSGI filter for use with paste.deploy."""
    conf = global_conf.copy()
    conf.update(local_conf)
    register_osd_info('formpost')
    return lambda app: FormPost(app, conf)
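
The lambda here is a compact spelling of the named auth_filter/slo_filter closures in the earlier examples; either form works. Taken together, a pipeline such as "tempauth slo formpost proxy-server" is assembled right to left, with each factory's returned callable wrapping the app built so far. Direct calls shown for illustration (paste.deploy normally drives this from the .ini file; proxy_app and the *_factory names are hypothetical aliases for the filter_factory functions above):

app = proxy_app  # stand-in for the rightmost (non-filter) app in the pipeline
for make_filter in (formpost_factory, slo_factory, tempauth_factory):
    app = make_filter({})(app)  # the last factory applied ends up outermost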