Example #1
    def __init__(self, osd_dir, logger):
        """
        Constructor for UpdaterRing.
        """
        self.container_ring = ContainerRing(osd_dir, logger)
        self.account_ring = AccountRing(osd_dir, logger)
        self.logger = logger
        self.msg = GlobalVariables(self.logger)
        self.shift_param = 512
Example #2
def get_container_details(logger):
    """
    Function to get ip and port of container service
    Returns: ip, port
    """
    ip, port = '', ''
    try:
        cont_service_id = get_container_id()
        cont_ring = ContainerRing(RING_DIR)
        data = cont_ring.get_service_details(cont_service_id)[0]
        ip = data['ip']
        port = data['port']
    except Exception as err:
        logger.exception(err)
    return ip, port
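
A minimal usage sketch for the helper above (an assumption, not part of the source): it relies on get_container_id, RING_DIR and ContainerRing being importable from the surrounding module, and on any standard logging.Logger.

import logging

# Hypothetical caller; on failure the helper logs the exception and returns
# empty strings, so the result has to be checked before use.
logger = logging.getLogger('osd.container')
ip, port = get_container_details(logger)
if not ip or not port:
    logger.warning('container service location unavailable')
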
Example #3
class ServiceLocator:
    """
    Gives node information.
    """
    def __init__(self, osd_dir, logger):
        """
        Constructor for ServiceLocator.
        """
        self.container_ring = ContainerRing(osd_dir, logger)
        self.account_ring = AccountRing(osd_dir, logger)
        self.logger = logger
        self.msg = GlobalVariables(self.logger)
        self.shift_param = 512

    def get_service_details(self, service_obj):
        node = {}
        node['ip'] = service_obj.get_ip()
        node['port'] = service_obj.get_port()
        return node

    def get_container_from_ring(self, account_hash, container_hash):
        """get container node info from ring"""
        # TODO: Needs to be modified to get complete node info (i.e. fs, dir)
        comp_no = Calculation.evaluate(container_hash, self.shift_param) - 1
        node = self.get_service_details(\
            self.msg.get_container_map()[comp_no])
        node['fs'] = self.container_ring.get_filesystem()
        node['dir'] = self.get_cont_dir_by_hash(\
            account_hash, container_hash)
        return node

    def get_account_from_ring(self, account_hash):
        """get account node info from ring"""
        # TODO: Needs to be modified to get complete node info (i.e. fs)
        comp_no = Calculation.evaluate(account_hash, self.shift_param) - 1
        node = self.get_service_details(\
            self.msg.get_account_map()[comp_no])
        node['fs'] = self.account_ring.get_filesystem()
        node['account'] = account_hash
        node['dir'] = self.get_acc_dir_by_hash(account_hash)
        return node

    def get_acc_dir_by_hash(self, key):
        return self.account_ring.get_directory_by_hash(key)

    def get_cont_dir_by_hash(self, acc_hash, cont_hash):
        return self.container_ring.get_directory_by_hash(acc_hash, cont_hash)
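
A hypothetical usage sketch for ServiceLocator; the osd_dir path and hash values below are placeholders, and the ring classes, GlobalVariables and Calculation are assumed to come from the surrounding project rather than from a documented API.

import logging

# Hypothetical wiring; account_hash and container_hash would normally be the
# precomputed hashes the rest of the system passes around.
logger = logging.getLogger('osd.locator')
locator = ServiceLocator('/export/.osd_meta_config', logger)
acc_node = locator.get_account_from_ring(account_hash)
cont_node = locator.get_container_from_ring(account_hash, container_hash)
# Both results are dicts carrying 'ip', 'port', 'fs' and 'dir' entries
# ('account' as well for the account lookup) describing the target service.
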
Example #4
    def __init__(self, account_name, conf, logger):
        """
        Constructor for Account

        :param account_name: account name
        """
        self.account_name = account_name
        self.account_map = {}
        self.acc_update_failed = False
        self.__container_list = []
        self.container_ring = ContainerRing(conf['osd_dir'], logger)
        self.stat_info = {}
        self.conf = conf
        self.logger = logger
        self.record_instance = []
        #self.logger.debug("AccountInfo constructed")
        self.__stat_reader_max_count = int(conf['reader_max_count'])
        self.connection_creator = ConnectionCreator(self.conf, self.logger)
Example #5
class AccountInfo:
    """
    Store the account information
    """
    def __init__(self, account_name, conf, logger):
        """
        Constructor for Account

        :param account_name: account name
        """
        self.account_name = account_name
        self.account_map = {}
        self.acc_update_failed = False
        self.__container_list = []
        self.container_ring = ContainerRing(conf['osd_dir'], logger)
        self.stat_info = {}
        self.conf = conf
        self.logger = logger
        self.record_instance = []
        #self.logger.debug("AccountInfo constructed")
        self.__stat_reader_max_count = int(conf['reader_max_count'])
        self.connection_creator = ConnectionCreator(self.conf, self.logger)

    def getAccountName(self):
        """
        Return account name
        """
        return self.account_name

    def is_container(self, container):
        """
        Checking if container exists in container_list

        :param container : container name
        """
        return container in self.__container_list

    def add_container(self, container):
        """
        Adding container in container_list

        :param container: container name
        """
        self.__container_list.append(container)

    def remove_container(self):
        """
        Removing all containers from container_list
        """
        self.__container_list = []

    def get_container_path(self):
        """
        Getting container path for HEAD request.
        """
        for container_name in self.__container_list:
            #node, fs, dir, gl_version, comp_no  = \
            #    self.container_ring.get_node(self.account_name, container_name)
            if self.account_map.get((self.account_name, container_name)) == "success":
                continue
            fs = self.container_ring.get_filesystem()
            self.__container_path = os.path.join('/', fs, \
                self.container_ring.get_directory_by_hash(\
                self.account_name, container_name), \
                self.account_name, container_name)
            self.logger.debug('Path for account: %s and container: %s is %s' \
                %(self.account_name, container_name, self.__container_path))
            container_instance = ContainerStatReader(
                                                container_name,
                                                self.__container_path,
                                                self.account_name,
                                                self.conf,
                                                self.logger
                                                )
            self.record_instance.append(container_instance)
            self.account_map[self.account_name, container_name] = "start"
        self.logger.debug('Exit from get_container_path')

    def execute(self):
        """
        Read the container stat info.
        """
        self.logger.debug('Enter in execute')
        thread_id = []
        thread_count = 0
        for container_record in self.record_instance:
            if container_record.is_completed():
                self.logger.debug("Already performed HEAD on:%s" %(repr(container_record)))
                continue
            if thread_count <= self.__stat_reader_max_count:
                reader_thread = Thread(target=container_record.\
                    read_container_stat_info, args=(self.account_map, ))
                reader_thread.start()
                thread_id.append(reader_thread)
                thread_count = thread_count +1
                if thread_count >= self.__stat_reader_max_count:
                    for thread in thread_id:
                        if thread.is_alive():
                            thread.join()
                    thread_count = 0
        for thread in thread_id:
            if thread.is_alive():
                thread.join()
        self.logger.info('Number of container stat reads: %s' \
            % len(self.record_instance))
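
A hedged end-to-end sketch of the flow the class implies: register containers, build the stat-reader records, then run the threaded HEAD pass. The conf keys are the two the constructor actually reads above; the account and container names are placeholders.

import logging

# Hypothetical driver; ContainerRing, ConnectionCreator and
# ContainerStatReader still have to come from the surrounding project.
conf = {'osd_dir': '/export/.osd_meta_config', 'reader_max_count': 4}
account = AccountInfo('AUTH_test', conf, logging.getLogger('account-updater'))
for name in ('cont1', 'cont2'):
    if not account.is_container(name):
        account.add_container(name)
account.get_container_path()   # builds one ContainerStatReader per container
account.execute()              # runs the bounded pool of HEAD reader threads
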
Example #6
    def __init__(self,
                 conf,
                 memcache=None,
                 logger=None,
                 account_ring=None,
                 container_ring=None,
                 object_ring=None):
        if conf is None:
            conf = {}
        if logger is None:
            self.logger = get_logger(conf, log_route='proxy-server')
        else:
            self.logger = logger
        libraryUtils.OSDLoggerImpl("proxy-monitoring").initialize_logger()
        create_recovery_file('proxy-server')
        self.ongoing_operation_list = []
        self.stop_service_flag = False
        osd_dir = conf.get('osd_dir', '/export/.osd_meta_config')
        static_config_file = conf.get('static_config_file', \
            '/opt/HYDRAstor/objectStorage/configFiles/static_proxy-server.conf')
        #swift_dir = conf.get('swift_dir', '/etc/swift')
        static_conf = self.readconf(static_config_file)
        self.logger.debug("Static config parameters:%s" % (static_conf))
        self.node_timeout = int(conf.get('node_timeout', 10))
        self.recoverable_node_timeout = int(
            conf.get('recoverable_node_timeout', self.node_timeout))
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        self.client_timeout = int(conf.get('client_timeout', 60))
        self.put_queue_depth = int(static_conf.get('put_queue_depth', 5))
        self.retry_count = int(static_conf.get('retry_count', 3))
        self.request_retry_time_service_unavailable = int(\
            static_conf.get('request_retry_time_service_unavailable', 100))
        self.request_retry_time_component_transfer = int(\
            static_conf.get('request_retry_time_component_transfer', 50))
        self.object_chunk_size = int(conf.get('object_chunk_size', 65536))
        self.client_chunk_size = int(conf.get('client_chunk_size', 65536))
        self.trans_id_suffix = conf.get('trans_id_suffix', '')
        self.post_quorum_timeout = float(conf.get('post_quorum_timeout', 0.5))
        #self.error_suppression_interval = \
        #    int(conf.get('error_suppression_interval', 60))
        #self.error_suppression_limit = \
        #    int(conf.get('error_suppression_limit', 10))
        self.recheck_container_existence = \
            int(conf.get('recheck_container_existence', 60))
        self.recheck_account_existence = \
            int(conf.get('recheck_account_existence', 60))
        self.allow_account_management = \
            config_true_value(static_conf.get('allow_account_management', 'no'))
        self.object_post_as_copy = \
            config_true_value(conf.get('object_post_as_copy', 'false'))
        self.object_ring = object_ring or ObjectRing(osd_dir, self.logger, \
                                          self.node_timeout)
        self.container_ring = container_ring or \
            ContainerRing(osd_dir, self.logger, self.node_timeout)
        self.account_ring = account_ring or AccountRing(osd_dir, self.logger, \
                                          self.node_timeout)
        self.memcache = memcache
        mimetypes.init(mimetypes.knownfiles +
                       [os.path.join(osd_dir, 'mime.types')])
        self.account_autocreate = \
            config_true_value(static_conf.get('account_autocreate', 'yes'))
        #self.expiring_objects_account = \
        #    (conf.get('auto_create_account_prefix') or '.') + \
        #    (conf.get('expiring_objects_account_name') or 'expiring_objects')
        #self.expiring_objects_container_divisor = \
        #    int(conf.get('expiring_objects_container_divisor') or 86400)
        self.max_containers_per_account = \
            int(conf.get('max_containers_per_account') or 10000000)
        self.max_containers_whitelist = [
            a.strip()
            for a in conf.get('max_containers_whitelist', '').split(',')
            if a.strip()
        ]
        self.deny_host_headers = [
            host.strip()
            for host in conf.get('deny_host_headers', '').split(',')
            if host.strip()
        ]
        #self.rate_limit_after_segment = \
        #    int(conf.get('rate_limit_after_segment', 10))
        #self.rate_limit_segments_per_sec = \
        #    int(conf.get('rate_limit_segments_per_sec', 1))
        #self.log_handoffs = config_true_value(conf.get('log_handoffs', 'true'))
        #self.cors_allow_origin = [
        #    a.strip()
        #    for a in conf.get('cors_allow_origin', '').split(',')
        #    if a.strip()]
        #self.strict_cors_mode = config_true_value(
        #    conf.get('strict_cors_mode', 't'))
        self.node_timings = {}
        self.timing_expiry = int(conf.get('timing_expiry', 300))
        #self.sorting_method = conf.get('sorting_method', 'shuffle').lower()
        #self.max_large_object_get_time = float(
        #    conf.get('max_large_object_get_time', '86400'))
        #value = conf.get('request_node_count', '2 * replicas').lower().split()
        #if len(value) == 1:
        #    value = int(value[0])
        #    self.request_node_count = lambda replicas: value
        #elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas':
        #    value = int(value[0])
        #    self.request_node_count = lambda replicas: value * replicas
        #else:
        #    raise ValueError(
        #        'Invalid request_node_count value: %r' % ''.join(value))
        #try:
        #    self._read_affinity = read_affinity = conf.get('read_affinity', '')
        #    self.read_affinity_sort_key = affinity_key_function(read_affinity)
        #except ValueError as err:
        #    # make the message a little more useful
        #    raise ValueError("Invalid read_affinity value: %r (%s)" %
        #                     (read_affinity, err.message))
        #try:
        #    write_affinity = conf.get('write_affinity', '')
        #    self.write_affinity_is_local_fn \
        #        = affinity_locality_predicate(write_affinity)
        #except ValueError as err:
        #    # make the message a little more useful
        #    raise ValueError("Invalid write_affinity value: %r (%s)" %
        #                     (write_affinity, err.message))
        #value = conf.get('write_affinity_node_count',
        #                 '2 * replicas').lower().split()
        #if len(value) == 1:
        #    value = int(value[0])
        #    self.write_affinity_node_count = lambda replicas: value
        #elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas':
        #    value = int(value[0])
        #    self.write_affinity_node_count = lambda replicas: value * replicas
        #else:
        #    raise ValueError(
        #        'Invalid write_affinity_node_count value: %r' % ''.join(value))
        # swift_owner_headers are stripped by the account and container
        # controllers; we should extend header stripping to object controller
        # when a privileged object header is implemented.
        swift_owner_headers = conf.get(
            'swift_owner_headers', 'x-container-read, x-container-write, '
            'x-container-sync-key, x-container-sync-to, '
            'x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, '
            'x-account-access-control')
        self.swift_owner_headers = [
            name.strip().title() for name in swift_owner_headers.split(',')
            if name.strip()
        ]
        # Initialization was successful, so now apply the client chunk size
        # parameter as the default read / write buffer size for the network
        # sockets.
        #
        # NOTE WELL: This is a class setting, so until we get this set on a
        # per-connection basis, this affects reading and writing on ALL
        # sockets, those between the proxy servers and external clients, and
        # those between the proxy servers and the other internal servers.
        #
        # ** Because it affects the client as well, currently, we use the
        # client chunk size as the governor and not the object chunk size.
        socket._fileobject.default_bufsize = self.client_chunk_size
        self.expose_info = config_true_value(conf.get('expose_info', 'yes'))
        self.disallowed_sections = list_from_csv(
            conf.get('disallowed_sections'))
        self.admin_key = conf.get('admin_key', None)
        register_osd_info(
            max_file_size=constraints.MAX_FILE_SIZE,
            max_meta_name_length=constraints.MAX_META_NAME_LENGTH,
            max_meta_value_length=constraints.MAX_META_VALUE_LENGTH,
            max_meta_count=constraints.MAX_META_COUNT,
            account_listing_limit=constraints.ACCOUNT_LISTING_LIMIT,
            container_listing_limit=constraints.CONTAINER_LISTING_LIMIT,
            max_account_name_length=constraints.MAX_ACCOUNT_NAME_LENGTH,
            max_container_name_length=constraints.MAX_CONTAINER_NAME_LENGTH,
            max_object_name_length=constraints.MAX_OBJECT_NAME_LENGTH,
            non_allowed_headers=constraints.NON_ALLOWED_HEADERS)

        self.proxy_port = int(static_conf.get('bind_port', 61005))
        self.__ll_port = int(conf.get('llport', 61014))

        self.max_bulk_delete_entries = int(conf.get(\
            'max_bulk_delete_entries', 1000))

        # unblock new requests which were blocked due to proxy service stop
        self.__request_unblock()

        hostname = socket.gethostname()
        self.__server_id = hostname + "_" + str(
            self.__ll_port) + "_proxy-server"

        # Start sending health to local leader
        self.logger.info("Loading health monitoring library")
        self.health_instance = healthMonitoring(self.__get_node_ip(hostname), \
            self.proxy_port, self.__ll_port, self.__server_id, True)
        self.logger.info("Loaded health monitoring library")
        remove_recovery_file('proxy-server')
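
The constructor above repeatedly turns comma-separated config strings into cleaned lists (max_containers_whitelist, deny_host_headers, swift_owner_headers). A small self-contained sketch of that pattern; the helper name is an illustration only, not part of the source.

# Standalone illustration of the list-from-CSV pattern used in the constructor.
def csv_to_list(value):
    """Split a comma-separated config value into stripped, non-empty items."""
    return [item.strip() for item in value.split(',') if item.strip()]

assert csv_to_list('a, b , ,c') == ['a', 'b', 'c']
assert csv_to_list('') == []
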
Example #7
    def __init__(self, logger=None, container_ring=None):
        self._logger = logger
        self.__container_ring = container_ring or \
            ContainerRing(OSD_DIR, self._logger)
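
The container_ring or ContainerRing(OSD_DIR, self._logger) idiom used here (and in Example #6) is a default-with-injection pattern: callers, typically tests, can pass a pre-built ring and skip touching the real ring files. A minimal self-contained sketch of the same pattern; every name in it is hypothetical.

import logging

class _RealRing(object):
    """Stands in for ContainerRing; building the real one reads ring files."""

class _FakeRing(object):
    """Lightweight stand-in a test can inject instead."""

class Updater(object):
    def __init__(self, logger=None, container_ring=None):
        self._logger = logger or logging.getLogger('updater')
        # Fall back to the expensive default only when nothing was injected.
        self._container_ring = container_ring or _RealRing()

Updater()                               # builds the default ring
Updater(container_ring=_FakeRing())     # a test injects a lightweight fake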