Example #1
    def __init__(self, conf, logger=None):
        """
        Constructor for the account updater.
        :param conf: account-updater configuration
        """
        self.logger = logger or SimpleLogger(conf).get_logger_object()
        Daemon.__init__(self, conf, self.logger)
        libraryUtils.OSDLoggerImpl(
            "account-updater-monitoring").initialize_logger()
        create_recovery_file('account-updater-server')
        self.conf = conf
        self.__interval = int(conf.get('interval', 1800))
        self.__ll_port = int(conf.get('llport', 61014))
        self.__account_updater_port = int(
            conf.get('account_updater_port', 61009))
        self.__service_id = gethostname() + "_" + str(self.__ll_port) + \
            "_account-updater-server"
        self.__param = self.__get_param(conf)
        self.msg = GlobalVariables(self.logger)
        self.msg.set_service_id(self.__service_id)
        self.walker_map = WalkerMap()
        self.reader_map = ReaderMap()

        # Start sending health to local leader
        self.logger.info("Loading health monitoring library")
        self.health_instance = healthMonitoring(
            self.__get_node_ip(gethostname()), self.__account_updater_port,
            self.__ll_port, self.__service_id)
        self.logger.info("Loaded health monitoring library")
        remove_recovery_file('account-updater-server')

        # load global map
        if not self.msg.load_gl_map():
            sys.exit(130)
        self.logger.info("Account updater started")
Example #2
 def test_ll_not_available(self):
     # No local leader listens on ll_port 123; the fixed sleep gives the
     # background heartbeat time to run and retry without raising.
     ip = "127.0.0.1"
     port = 61007
     ll_port = 123
     service_id = "HN0101_61011_container-service"
     print "Calling health monitoring"
     health = healthMonitoring(ip, port, ll_port, service_id)
     time.sleep(120)
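
The test above waits a fixed 120 seconds for heartbeats to flow. Since healthMonitoring exposes no completion signal in these examples, a polling helper with a timeout is a common alternative to the unconditional sleep; this is a sketch, not part of the original suite:

import time

def wait_for(predicate, timeout=120, interval=2):
    # Poll `predicate` until it returns truthy or `timeout` seconds pass.
    # Returns True on success, False on timeout.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    return False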
Example #3
 def test_send_strm_hrbt_proxy(self):
     # Same flow as Example #2, with the trailing True mirroring the
     # proxy-server streaming-heartbeat call in Example #6.
     ip = "127.0.0.1"
     port = 61007
     ll_port = 11
     service_id = "HN0101_61011_container-service"
     print "Calling health monitoring"
     health = healthMonitoring(ip, port, ll_port, service_id, True)
     time.sleep(120)
Example #4
 def test_send_strm_hrbt(self):
     # Exercise the streaming heartbeat against the fixture's stub local
     # leader, then shut the stub down.
     ip = "127.0.0.1"
     port = 61007
     ll_port = 11
     service_id = "HN0101_61011_container-service"
     print "Calling health monitoring"
     health = healthMonitoring(ip, port, ll_port, service_id)
     time.sleep(120)
     # NOTE: healthMonitoring() returns an instance here, so asserting
     # equality with False looks suspect as written.
     self.assertEqual(health, False)
     self.__ll_instance.stop()
     self.__ll_instance.join()
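
Example #4 stops and joins self.__ll_instance, which implies the fixture runs a stub local leader on ll_port in a background thread. The heartbeat protocol is not shown in these examples, so the sketch below assumes a plain TCP listener that merely accepts connections:

import socket
import threading

class StubLocalLeader(threading.Thread):
    # Accepts and drops TCP connections so heartbeats have an endpoint.
    def __init__(self, port):
        threading.Thread.__init__(self)
        self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self._sock.bind(('127.0.0.1', port))
        self._sock.listen(5)
        self._sock.settimeout(1)
        self._running = True

    def run(self):
        while self._running:
            try:
                conn, _ = self._sock.accept()
                conn.close()
            except socket.error:
                continue

    def stop(self):
        self._running = False
        self._sock.close()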
Example #5
    def startup_recovery(self, __get_node_ip=None, __port=None,
                         __ll_port=None, __serv_id=None, recovery_flag=False):
        '''
        Start the startup recovery procedure.
        '''
        self._logger.info("Starting startup recovery process for "
                          "object server: %s" % self.__object_service_id)
        try:
            create_recovery_file('object-server')
        except Exception as err:
            self._logger.error('Failed to create recovery file')
            return False

        # jai: to recover data-directory-wise, this section needs to stay
        # commented out.
        #try:
        #    self._move_data()
        #except (OSError, Exception):
        #    return False
        tmp_dir_list_to_recover = self._get_tmp_dir_list()
        try:
            for tmp_dir in tmp_dir_list_to_recover:
                try:
                    self.__directory_iterator_obj.recover(tmp_dir)
                except Exception as err:
                    self._logger.error(__(
                        'ERROR Could not complete startup recovery process '
                        'for tmp directory: %(tmp_dir)s'
                        ' close failure: %(exc)s : %(stack)s'),
                        {'exc': err, 'stack': ''.join(traceback.format_stack()),
                         'tmp_dir': tmp_dir})
                    return False
        except Exception as err:
            self._logger.error(__(
                'ERROR Could not complete startup recovery process for '
                'object server: %(object_service_id)s'
                ' close failure: %(exc)s : %(stack)s'),
                {'exc': err, 'stack': ''.join(traceback.format_stack()),
                 'object_service_id': self.__object_service_id})
            return False
        finally:
            try:
                # Start sending health to the local leader
                if not recovery_flag:
                    self.health = healthMonitoring(
                        __get_node_ip, __port, __ll_port, __serv_id)
                    self._logger.debug("Health monitoring instance created")
                status = None
                remove_recovery_file('object-server')
            except Exception as err:
                self._logger.error('Failed to remove recovery file %s' % err)
                status = True
        
        if status:
            return False
        return True
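
create_recovery_file and remove_recovery_file appear in every constructor and recovery path in this section but are never defined here. A plausible marker-file sketch, with the recovery directory invented for illustration:

import os

RECOVERY_DIR = '/tmp/osd_recovery'  # hypothetical path, not from the source

def create_recovery_file(service_name):
    # Drop a marker so a crash during startup/recovery is detectable.
    if not os.path.exists(RECOVERY_DIR):
        os.makedirs(RECOVERY_DIR)
    open(os.path.join(RECOVERY_DIR, service_name), 'w').close()

def remove_recovery_file(service_name):
    # Remove the marker once startup/recovery finishes cleanly.
    path = os.path.join(RECOVERY_DIR, service_name)
    if os.path.exists(path):
        os.remove(path)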
Example #6
    def __init__(self,
                 conf,
                 memcache=None,
                 logger=None,
                 account_ring=None,
                 container_ring=None,
                 object_ring=None):
        if conf is None:
            conf = {}
        if logger is None:
            self.logger = get_logger(conf, log_route='proxy-server')
        else:
            self.logger = logger
        libraryUtils.OSDLoggerImpl("proxy-monitoring").initialize_logger()
        create_recovery_file('proxy-server')
        self.ongoing_operation_list = []
        self.stop_service_flag = False
        osd_dir = conf.get('osd_dir', '/export/.osd_meta_config')
        static_config_file = conf.get(
            'static_config_file',
            '/opt/HYDRAstor/objectStorage/configFiles/static_proxy-server.conf')
        #swift_dir = conf.get('swift_dir', '/etc/swift')
        static_conf = self.readconf(static_config_file)
        self.logger.debug("Static config parameters:%s" % (static_conf))
        self.node_timeout = int(conf.get('node_timeout', 10))
        self.recoverable_node_timeout = int(
            conf.get('recoverable_node_timeout', self.node_timeout))
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        self.client_timeout = int(conf.get('client_timeout', 60))
        self.put_queue_depth = int(static_conf.get('put_queue_depth', 5))
        self.retry_count = int(static_conf.get('retry_count', 3))
        self.request_retry_time_service_unavailable = int(
            static_conf.get('request_retry_time_service_unavailable', 100))
        self.request_retry_time_component_transfer = int(
            static_conf.get('request_retry_time_component_transfer', 50))
        self.object_chunk_size = int(conf.get('object_chunk_size', 65536))
        self.client_chunk_size = int(conf.get('client_chunk_size', 65536))
        self.trans_id_suffix = conf.get('trans_id_suffix', '')
        self.post_quorum_timeout = float(conf.get('post_quorum_timeout', 0.5))
        #self.error_suppression_interval = \
        #    int(conf.get('error_suppression_interval', 60))
        #self.error_suppression_limit = \
        #    int(conf.get('error_suppression_limit', 10))
        self.recheck_container_existence = \
            int(conf.get('recheck_container_existence', 60))
        self.recheck_account_existence = \
            int(conf.get('recheck_account_existence', 60))
        self.allow_account_management = \
            config_true_value(static_conf.get('allow_account_management', 'no'))
        self.object_post_as_copy = \
            config_true_value(conf.get('object_post_as_copy', 'false'))
        self.object_ring = object_ring or ObjectRing(
            osd_dir, self.logger, self.node_timeout)
        self.container_ring = container_ring or ContainerRing(
            osd_dir, self.logger, self.node_timeout)
        self.account_ring = account_ring or AccountRing(
            osd_dir, self.logger, self.node_timeout)
        self.memcache = memcache
        mimetypes.init(mimetypes.knownfiles +
                       [os.path.join(osd_dir, 'mime.types')])
        self.account_autocreate = \
            config_true_value(static_conf.get('account_autocreate', 'yes'))
        #self.expiring_objects_account = \
        #    (conf.get('auto_create_account_prefix') or '.') + \
        #    (conf.get('expiring_objects_account_name') or 'expiring_objects')
        #self.expiring_objects_container_divisor = \
        #    int(conf.get('expiring_objects_container_divisor') or 86400)
        self.max_containers_per_account = \
            int(conf.get('max_containers_per_account') or 10000000)
        self.max_containers_whitelist = [
            a.strip()
            for a in conf.get('max_containers_whitelist', '').split(',')
            if a.strip()
        ]
        self.deny_host_headers = [
            host.strip()
            for host in conf.get('deny_host_headers', '').split(',')
            if host.strip()
        ]
        #self.rate_limit_after_segment = \
        #    int(conf.get('rate_limit_after_segment', 10))
        #self.rate_limit_segments_per_sec = \
        #    int(conf.get('rate_limit_segments_per_sec', 1))
        #self.log_handoffs = config_true_value(conf.get('log_handoffs', 'true'))
        #self.cors_allow_origin = [
        #    a.strip()
        #    for a in conf.get('cors_allow_origin', '').split(',')
        #    if a.strip()]
        #self.strict_cors_mode = config_true_value(
        #    conf.get('strict_cors_mode', 't'))
        self.node_timings = {}
        self.timing_expiry = int(conf.get('timing_expiry', 300))
        #self.sorting_method = conf.get('sorting_method', 'shuffle').lower()
        #self.max_large_object_get_time = float(
        #    conf.get('max_large_object_get_time', '86400'))
        #value = conf.get('request_node_count', '2 * replicas').lower().split()
        #if len(value) == 1:
        #    value = int(value[0])
        #    self.request_node_count = lambda replicas: value
        #elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas':
        #    value = int(value[0])
        #    self.request_node_count = lambda replicas: value * replicas
        #else:
        #    raise ValueError(
        #        'Invalid request_node_count value: %r' % ''.join(value))
        #try:
        #    self._read_affinity = read_affinity = conf.get('read_affinity', '')
        #    self.read_affinity_sort_key = affinity_key_function(read_affinity)
        #except ValueError as err:
        #    # make the message a little more useful
        #    raise ValueError("Invalid read_affinity value: %r (%s)" %
        #                     (read_affinity, err.message))
        #try:
        #    write_affinity = conf.get('write_affinity', '')
        #    self.write_affinity_is_local_fn \
        #        = affinity_locality_predicate(write_affinity)
        #except ValueError as err:
        #    # make the message a little more useful
        #    raise ValueError("Invalid write_affinity value: %r (%s)" %
        #                     (write_affinity, err.message))
        #value = conf.get('write_affinity_node_count',
        #                 '2 * replicas').lower().split()
        #if len(value) == 1:
        #    value = int(value[0])
        #    self.write_affinity_node_count = lambda replicas: value
        #elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas':
        #    value = int(value[0])
        #    self.write_affinity_node_count = lambda replicas: value * replicas
        #else:
        #    raise ValueError(
        #        'Invalid write_affinity_node_count value: %r' % ''.join(value))
        # swift_owner_headers are stripped by the account and container
        # controllers; we should extend header stripping to object controller
        # when a privileged object header is implemented.
        swift_owner_headers = conf.get(
            'swift_owner_headers', 'x-container-read, x-container-write, '
            'x-container-sync-key, x-container-sync-to, '
            'x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, '
            'x-account-access-control')
        self.swift_owner_headers = [
            name.strip().title() for name in swift_owner_headers.split(',')
            if name.strip()
        ]
        # Initialization was successful, so now apply the client chunk size
        # parameter as the default read / write buffer size for the network
        # sockets.
        #
        # NOTE WELL: This is a class setting, so until we get set this on a
        # per-connection basis, this affects reading and writing on ALL
        # sockets, those between the proxy servers and external clients, and
        # those between the proxy servers and the other internal servers.
        #
        # ** Because it affects the client as well, we currently use the
        # client chunk size as the governor, not the object chunk size.
        socket._fileobject.default_bufsize = self.client_chunk_size
        self.expose_info = config_true_value(conf.get('expose_info', 'yes'))
        self.disallowed_sections = list_from_csv(
            conf.get('disallowed_sections'))
        self.admin_key = conf.get('admin_key', None)
        register_osd_info(
            max_file_size=constraints.MAX_FILE_SIZE,
            max_meta_name_length=constraints.MAX_META_NAME_LENGTH,
            max_meta_value_length=constraints.MAX_META_VALUE_LENGTH,
            max_meta_count=constraints.MAX_META_COUNT,
            account_listing_limit=constraints.ACCOUNT_LISTING_LIMIT,
            container_listing_limit=constraints.CONTAINER_LISTING_LIMIT,
            max_account_name_length=constraints.MAX_ACCOUNT_NAME_LENGTH,
            max_container_name_length=constraints.MAX_CONTAINER_NAME_LENGTH,
            max_object_name_length=constraints.MAX_OBJECT_NAME_LENGTH,
            non_allowed_headers=constraints.NON_ALLOWED_HEADERS)

        self.proxy_port = int(static_conf.get('bind_port', 61005))
        self.__ll_port = int(conf.get('llport', 61014))

        self.max_bulk_delete_entries = int(
            conf.get('max_bulk_delete_entries', 1000))

        # Unblock new requests that were blocked while the proxy service
        # was stopped
        self.__request_unblock()

        hostname = socket.gethostname()
        self.__server_id = hostname + "_" + str(
            self.__ll_port) + "_proxy-server"

        # Start sending health to local leader
        self.logger.info("Loading health monitoring library")
        self.health_instance = healthMonitoring(
            self.__get_node_ip(hostname), self.proxy_port,
            self.__ll_port, self.__server_id, True)
        self.logger.info("Loaded health monitoring library")
        remove_recovery_file('proxy-server')
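
Examples #1 and #6 build their service ids the same way before registering with health monitoring. A small helper capturing that shared shape; the helper name is invented, and the id format is taken from the two constructors above:

from socket import gethostname

def build_service_id(ll_port, role):
    # "<hostname>_<ll_port>_<role>", as used by both the account
    # updater and the proxy server.
    return "%s_%s_%s" % (gethostname(), ll_port, role)

# e.g. build_service_id(61014, 'proxy-server') on host HN0101
# yields "HN0101_61014_proxy-server"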