Code example #1
    def __init__(self):
        self._conf_reader = ConfigReader()
        self._max_size = int(
            self._conf_reader._get_value_with_default(self.RABBITMQPROCESSOR,
                                                      self.LIMIT_CONSUL_MEMORY,
                                                      50000000))
        self._current_size = store.get("SSPL_MEMORY_USAGE")
        if self._current_size is None:
            store.put(0, "SSPL_MEMORY_USAGE")

        self._head = store.get("SSPL_MESSAGE_HEAD_INDEX")
        if self._head is None:
            store.put(0, "SSPL_MESSAGE_HEAD_INDEX")

        self._tail = store.get("SSPL_MESSAGE_TAIL_INDEX")
        if self._tail is None:
            store.put(0, "SSPL_MESSAGE_TAIL_INDEX")
Code example #2
    def __init__(self):

        # Validate configuration file for required valid values
        try:
            self.conf_reader = ConfigReader()

        except (IOError, ConfigReader.Error) as err:
            logger.error("[ Error ] when validating the config file {0} - {1}"\
                 .format(self.CONF_FILE, err))

        self.vol_ras = self.conf_reader._get_value_with_default(\
            self.SYSINFO, COMMON_CONFIGS.get(self.SYSINFO).get("data_path"), self.DEFAULT_RAS_VOL)

        self.encl_cache = self.vol_ras + "encl/"
        self.frus = self.encl_cache + "frus/"

        self.encl.update({"frus":self.memcache_frus})
        self.encl.update({"system":self.memcache_system})

        self._check_ras_vol()
Code example #3
    def __init__(self):
        super(NodeData, self).__init__()

        self.host_id = socket.getfqdn()
        self._epoch_time = str(int(time.time()))
        # Total number of CPUs
        self.cpus = psutil.cpu_count()

        # Calculate the load averages on separate blocking threads
        self.load_1min_average  = []
        self.load_5min_average  = []
        self.load_15min_average = []
        self.prev_bmcip = None
        # Thread.start() returns None, so assigning its result is misleading;
        # start the worker threads directly.
        threading.Thread(target=self._load_1min_avg).start()
        threading.Thread(target=self._load_5min_avg).start()
        threading.Thread(target=self._load_15min_avg).start()

        self.conf_reader = ConfigReader()

        nw_fault_utility = self.conf_reader._get_value_with_default(
                                              self.name().capitalize(),
                                              self.PROBE,
                                              "sysfs")

        self._utility_instance = None

        try:
            # Creating the instance of ToolFactory class
            self.tool_factory = ToolFactory()
            # Get the instance of the utility using ToolFactory
            self._utility_instance = self._utility_instance or \
                                self.tool_factory.get_instance(nw_fault_utility)
            if self._utility_instance:
                # Initialize the path as /sys/class/net/
                self.nw_interface_path = self._utility_instance.get_sys_dir_path('net')
        except KeyError:
            logger.error(f'NodeData, Unable to get the instance of {nw_fault_utility} Utility')
        except Exception as err:
            logger.error(f'NodeData, Problem occurred while getting the instance of {nw_fault_utility}: {err}')
Code example #4
    def __init__(self):

        # Validate configuration file for required valid values
        try:
            self.conf_reader = ConfigReader()

        except (IOError, ConfigReader.Error) as err:
            logger.error("[ Error ] when validating the config file {0} - {1}"\
                 .format(self.CONF_FILE, err))

        self.vol_ras = Conf.get(SSPL_CONF, f"{self.SYSINFO}>{DATA_PATH_KEY}",
                                self.DEFAULT_RAS_VOL)

        self.encl_cache = self.vol_ras + "encl/"
        self.frus = self.encl_cache + "frus/"

        self.encl.update({"frus": self.memcache_frus})
        self.encl.update({"system": self.memcache_system})

        self._check_ras_vol()
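
This version reads the data path through the newer Conf utility instead of ConfigReader (compare code example #2). A short sketch of the ">"-separated key path the call above builds; the DATA_PATH_KEY value here is an assumption modelled on the "data_path" key used in example #2:

SYSINFO = "SYSTEM_INFORMATION"
DATA_PATH_KEY = "data_path"              # assumed value of the constant
key_path = f"{SYSINFO}>{DATA_PATH_KEY}"  # -> "SYSTEM_INFORMATION>data_path"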
Code example #5
    def confReader(self):
        path_to_conf_file = "/etc/sspl.conf"

        try:
            conf_reader = ConfigReader(is_test=True,
                                       test_config_path=path_to_conf_file)

        except (IOError, ConfigReader.Error) as err:
            # We don't have logger yet, need to find log_level from conf file first
            print("[ Error ] when validating the configuration file %s :" % \
                path_to_conf_file)
            print(err)
            print("Exiting ...")
            exit(os.EX_USAGE)

        self._virtualhost = conf_reader._get_value_with_default(
            self.module_name, self.VIRTUALHOST, 'SSPL')
        # Need to keep cluster_id string here to generate decryption key
        self.cluster_id = conf_reader._get_value_with_default(
            self.SYSTEM_INFORMATION, self.CLUSTER_ID, 'CC01')

        # Ingress configuration
        if self.module_name == "RABBITMQINGRESSPROCESSOR":
            # Configuration to send message
            self._ingress_queue = conf_reader._get_value_with_default(
                self.module_name, self.QUEUE_NAME, 'actuator-req-queue')
            self._ingress_exchange = conf_reader._get_value_with_default(
                self.module_name, self.EXCHANGE_NAME, 'sspl-in')
            self._ingress_key = conf_reader._get_value_with_default(
                self.module_name, self.ROUTINGKEY, 'actuator-req-key')
            # Configuration to receive sensor messages

            self._egress_queue = conf_reader._get_value_with_default(
                'RABBITMQEGRESSPROCESSOR', self.QUEUE_NAME, 'sensor-queue')
            self._egress_exchange = conf_reader._get_value_with_default(
                'RABBITMQEGRESSPROCESSOR', self.EXCHANGE_NAME, 'sspl-out')
            self._egress_key = conf_reader._get_value_with_default(
                'RABBITMQEGRESSPROCESSOR', self.ROUTINGKEY, 'sensor-key')

        elif self.module_name == "PLANECNTRLRMQINGRESSPROCESSOR":
            self._ingress_queue = conf_reader._get_value_with_default(
                self.module_name, self.QUEUE_NAME, 'ras_status')
        # Egress Queue
        if self.module_name == "RABBITMQEGRESSPROCESSOR":
            self._egress_queue = conf_reader._get_value_with_default(
                self.module_name, self.QUEUE_NAME, 'sensor-queue')
        elif self.module_name == "PLANECNTRLRMQEGRESSPROCESSOR":
            self._egress_queue = conf_reader._get_value_with_default(
                self.module_name, self.QUEUE_NAME, 'ras_control')

        self._ackexchangename = conf_reader._get_value_with_default(
            'RABBITMQEGRESSPROCESSOR', self.ACKEXCHANGE_NAME, 'sspl-out')
        self._ackqueuename = conf_reader._get_value_with_default(
            'RABBITMQEGRESSPROCESSOR', self.ACKQUEUE_NAME,
            'actuator-resp-queue')
        self._ackroutingkey = conf_reader._get_value_with_default(
            'RABBITMQEGRESSPROCESSOR', self.ACKROUTINGKEY, 'actuator-resp-key')
        self._exchangename = conf_reader._get_value_with_default(
            self.module_name, self.EXCHANGE_NAME, 'sspl-in')
        self._routingkey = conf_reader._get_value_with_default(
            self.module_name, self.ROUTINGKEY, 'sensor-key')
        self._username = conf_reader._get_value_with_default(
            self.module_name, self.USERNAME, 'sspluser')
        self._password = conf_reader._get_value_with_default(
            self.module_name, self.PASSWORD, 'sspl4ever')
        self._signature_user = conf_reader._get_value_with_default(
            'RABBITMQEGRESSPROCESSOR', self.SIGNATURE_USERNAME, 'sspl-ll')
        self._signature_token = conf_reader._get_value_with_default(
            'RABBITMQEGRESSPROCESSOR', self.SIGNATURE_TOKEN,
            'ALOIUD986798df69a8koDISLKJ282983')
        self._signature_expires = conf_reader._get_value_with_default(
            'RABBITMQEGRESSPROCESSOR', self.SIGNATURE_EXPIRES, "3600")
        if self.module_name == "PLANECNTRLRMQEGRESSPROCESSOR":
            self._primary_rabbitmq_server = conf_reader._get_value_with_default(
                self.module_name, self.PRIMARY_RABBITMQ, 'localhost')
            self._secondary_rabbitmq_server = conf_reader._get_value_with_default(
                self.module_name, self.SECONDARY_RABBITMQ, 'localhost')
        else:
            self._primary_rabbitmq_server = "localhost"
            self._secondary_rabbitmq_server = "localhost"

        decryption_key = encryptor.gen_key(self.cluster_id,
                                           ServiceTypes.RABBITMQ.value)
        self._password = encryptor.decrypt(decryption_key,
                                           self._password.encode('ascii'),
                                           "TestRabbitmqEgressProcessor")

        self._current_rabbitmq_server = self._primary_rabbitmq_server
Code example #6
class NodeData(Debug):
    """Obtains data about the node and makes it available"""


    SENSOR_NAME = "NodeData"

    # conf attribute initialization
    PROBE = 'probe'

    @staticmethod
    def name():
        """@return: name of the module."""
        return NodeData.SENSOR_NAME

    def __init__(self):
        super(NodeData, self).__init__()

        self.host_id = socket.getfqdn()
        self._epoch_time = str(int(time.time()))
        # Total number of CPUs
        self.cpus = psutil.cpu_count()

        # Calculate the load averages on separate blocking threads
        self.load_1min_average  = []
        self.load_5min_average  = []
        self.load_15min_average = []
        self.prev_bmcip = None
        # Thread.start() returns None, so assigning its result is misleading;
        # start the worker threads directly.
        threading.Thread(target=self._load_1min_avg).start()
        threading.Thread(target=self._load_5min_avg).start()
        threading.Thread(target=self._load_15min_avg).start()

        self.conf_reader = ConfigReader()

        nw_fault_utility = self.conf_reader._get_value_with_default(
                                              self.name().capitalize(),
                                              self.PROBE,
                                              "sysfs")

        self._utility_instance = None

        try:
            # Creating the instance of ToolFactory class
            self.tool_factory = ToolFactory()
            # Get the instance of the utility using ToolFactory
            self._utility_instance = self._utility_instance or \
                                self.tool_factory.get_instance(nw_fault_utility)
            if self._utility_instance:
                # Initialize the path as /sys/class/net/
                self.nw_interface_path = self._utility_instance.get_sys_dir_path('net')
        except KeyError:
            logger.error(f'NodeData, Unable to get the instance of {nw_fault_utility} Utility')
        except Exception as err:
            logger.error(f'NodeData, Problem occurred while getting the instance of {nw_fault_utility}: {err}')

    def read_data(self, subset, debug, units="MB"):
        """Updates data based on a subset"""
        self._set_debug(debug)
        self._log_debug("read_data, subset: %s, units: %s" % (subset, units))

        try:
            # Determine the units factor value
            self.units_factor = 1
            if units == "GB":
                self.units_factor = 1000000000
            elif units == "MB":
                self.units_factor = 1000000
            elif units == "KB":
                self.units_factor = 1000

            # Find a meaningful hostname to be used.
            self.host_id = socket.getfqdn()
            # socket.getfqdn() first checks socket.gethostname(); if that does
            # not return a fully qualified name, it falls back to
            # socket.gethostbyaddr(socket.gethostname())[0]. Those two lookups
            # were previously done explicitly here; getfqdn() performs the same
            # checks implicitly, so the explicit code was removed.

            self.local_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S %Z')

            # Branch off and gather data based upon value sent into subset
            if subset == "host_update":
                self._get_host_update_data()

            elif subset == "local_mount_data":
                self._get_local_mount_data()

            elif subset == "cpu_data":
                self._get_cpu_data()

            elif subset == "if_data":
                self._get_if_data()

            elif subset == "disk_space_alert":
                self._get_disk_space_alert_data()

        except Exception as e:
            logger.exception(e)
            return False

        return True

    def _get_host_update_data(self):
        """Retrieves node information for the host_update json message"""
        logged_in_users = []
        uname_keys = ("sysname", "nodename", "version", "release", "machine")
        self.up_time         = int(psutil.boot_time())
        self.boot_time       = self._epoch_time
        self.uname           = dict(zip(uname_keys, os.uname()))
        self.total_memory = dict(psutil.virtual_memory()._asdict())
        self.process_count   = len(psutil.pids())
        for users in psutil.users():
            logged_in_users.append(dict(users._asdict()))
        self.logged_in_users = logged_in_users
        # Calculate the current number of running processes at this moment
        total_running_proc = 0
        for proc in psutil.process_iter():
            pinfo = proc.as_dict(attrs=['status'])
            if pinfo['status'] not in (psutil.STATUS_ZOMBIE, psutil.STATUS_DEAD,
                                       psutil.STATUS_STOPPED, psutil.STATUS_IDLE,
                                       psutil.STATUS_SLEEPING):
                total_running_proc += 1
        self.running_process_count = total_running_proc

    def _get_local_mount_data(self):
        """Retrieves node information for the local_mount_data json message"""
        self.total_space = int(psutil.disk_usage("/")[0])//int(self.units_factor)
        self.free_space  = int(psutil.disk_usage("/")[2])//int(self.units_factor)
        self.total_swap  = int(psutil.swap_memory()[0])//int(self.units_factor)
        self.free_swap   = int(psutil.swap_memory()[2])//int(self.units_factor)
        self.free_inodes = int(100 - math.ceil((float(os.statvfs("/").f_files - os.statvfs("/").f_ffree) \
                             / os.statvfs("/").f_files) * 100))

    def _get_cpu_data(self):
        """Retrieves node information for the cpu_data json message"""
        cpu_core_usage_dict = dict()
        cpu_data = psutil.cpu_times_percent()
        self._log_debug("_get_cpu_data, cpu_data: %s %s %s %s %s %s %s %s %s %s" % cpu_data)

        self.csps           = 0  # What the hell is csps - cycles per second?
        self.user_time      = int(cpu_data[0])
        self.nice_time      = int(cpu_data[1])
        self.system_time    = int(cpu_data[2])
        self.idle_time      = int(cpu_data[3])
        self.iowait_time    = int(cpu_data[4])
        self.interrupt_time = int(cpu_data[5])
        self.softirq_time   = int(cpu_data[6])
        self.steal_time     = int(cpu_data[7])

        self.cpu_usage = psutil.cpu_percent(interval=1, percpu=False)
        # Array to hold data about each CPU core
        self.cpu_core_data = []
        index = 0
        while index < self.cpus:
            self._log_debug("_get_cpu_data, index: %s, 1 min: %s, 5 min: %s, 15 min: %s" %
                            (index,
                            self.load_1min_average[index],
                            self.load_5min_average[index],
                            self.load_15min_average[index]))

            cpu_core_data = {"coreId"      : index,
                             "load1MinAvg" : int(self.load_1min_average[index]),
                             "load5MinAvg" : int(self.load_5min_average[index]),
                             "load15MinAvg": int(self.load_15min_average[index]),
                             "ips" : 0
                             }
            self.cpu_core_data.append(cpu_core_data)
            index += 1

    def _get_if_data(self):
        """Retrieves node information for the if_data json message"""
        net_data = psutil.net_io_counters(pernic=True)
        # Array to hold data about each network interface
        self.if_data = []
        bmc_data = self._get_bmc_info()
        for interface, if_data in net_data.items():
            self._log_debug("_get_if_data, interface: %s %s" % (interface, net_data))
            nw_status = self._fetch_nw_status()
            nw_cable_conn_status = self.fetch_nw_cable_conn_status(interface)
            if_data = {"ifId" : interface,
                       "networkErrors"      : (net_data[interface].errin +
                                               net_data[interface].errout),
                       "droppedPacketsIn"   : net_data[interface].dropin,
                       "packetsIn"          : net_data[interface].packets_recv,
                       "trafficIn"          : net_data[interface].bytes_recv,
                       "droppedPacketsOut"  : net_data[interface].dropout,
                       "packetsOut"         : net_data[interface].packets_sent,
                       "trafficOut"         : net_data[interface].bytes_sent,
                       "nwStatus"           : nw_status[interface][0],
                       "ipV4"               : nw_status[interface][1],
                       "nwCableConnStatus"  : nw_cable_conn_status
                       }
            self.if_data.append(if_data)
        self.if_data.append(bmc_data)

    def _fetch_nw_status(self):
        nw_dict = {}
        nws = os.popen("ip --br a | awk '{print $1, $2, $3}'").read().split('\n')[:-1]
        for nw in nws:
            if nw.split(' ')[2]:
                ip = nw.split(' ')[2].split("/")[0]
            else:
                ip = ""
            nw_dict[nw.split(' ')[0]] = [nw.split(' ')[1], ip]
        logger.debug("network info going is : {}".format(nw_dict))
        return nw_dict

    def fetch_nw_cable_conn_status(self, interface):
        carrier_status = None
        try:
            carrier_status = self._utility_instance.fetch_nw_cable_status(self.nw_interface_path, interface)
        except Exception as e:
            # Only OSError subclasses carry an errno attribute; getattr keeps
            # the comparison safe for other exception types.
            err_no = getattr(e, "errno", None)
            if err_no == errno.ENOENT:
                logger.error(
                    "Problem occurred while reading from nw carrier file:"
                    f" {self.nw_interface_path}/{interface}/carrier."
                    " File path doesn't exist.")
            elif err_no == errno.EACCES:
                logger.error(
                    "Problem occurred while reading from nw carrier file:"
                    f" {self.nw_interface_path}/{interface}/carrier."
                    " Not enough permission to read from the file.")
            elif err_no == errno.EPERM:
                logger.error(
                    "Problem occurred while reading from nw carrier file:"
                    f" {self.nw_interface_path}/{interface}/carrier."
                    " Operation is not permitted.")
            else:
                logger.error(
                    "Problem occurred while reading from nw carrier file:"
                    f" {self.nw_interface_path}/{interface}/carrier. Error: {e}")
        return carrier_status

    def _get_bmc_info(self):
        """
        nwCableConnStatus defaults to UNKNOWN until a way to determine the
        BMC eth port cable connection status is found.
        """
        try:
            bmcdata = {'ifId': 'ebmc0', 'ipV4Prev': "", 'ipV4': "", 'nwStatus': "DOWN", 'nwCableConnStatus': 'UNKNOWN'}
            ipdata = sp.Popen("sudo ipmitool lan print", shell=True, stdout=sp.PIPE, stderr=sp.PIPE).communicate()[0].decode().strip()
            # Raw string avoids an invalid escape sequence warning; an empty
            # match list raises IndexError, which the except below reports.
            bmcip = re.findall(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", ipdata)[0]
            if bmcip:
                pingbmchost = "ping -c1 -W1 -q "+bmcip
                child = sp.Popen(pingbmchost.split(), stdout=sp.PIPE)
                streamdata = child.communicate()[0]  # child must be communicated before fetching return code
                retcode = child.returncode
                if self.prev_bmcip is not None and self.prev_bmcip != bmcip:
                    bmcdata['ipV4Prev'] = self.prev_bmcip
                    bmcdata['ipV4'] = bmcip
                    self.prev_bmcip = bmcip
                else:
                    self.prev_bmcip = bmcdata['ipV4Prev'] = bmcdata['ipV4'] = bmcip
                if retcode == 0:
                    bmcdata['nwStatus'] = "UP"
                else:
                    logger.warning("BMC Host:{0} is not reachable".format(bmcip))
        except Exception as e:
            logger.error("Exception occurs while fetching bmc_info:{}".format(e))
        return bmcdata

    def _get_disk_space_alert_data(self):
        """Retrieves node information for the disk_space_alert_data json message"""
        self.total_space = int(psutil.disk_usage("/")[0])//int(self.units_factor)
        self.free_space  = int(psutil.disk_usage("/")[2])//int(self.units_factor)
        self.disk_used_percentage  = psutil.disk_usage("/")[3]

    def _load_1min_avg(self):
        """Loop forever calculating the one minute average load"""
        # Initialize list to -1 indicating the time interval has not occurred yet
        index = 0
        while index < self.cpus:
            self.load_1min_average.append(-1)
            index += 1

        while True:
            # cpu_percent blocks for the one-second interval, then returns per-CPU usage
            self.load_1min_average = psutil.cpu_percent(interval=1, percpu=True)

    def _load_5min_avg(self):
        """Loop forever calculating the five minute average load"""
        # Initialize list to -1 indicating the time interval has not occurred yet
        index = 0
        while index < self.cpus:
            self.load_5min_average.append(-1)
            index += 1

        while True:
            # cpu_percent blocks for the five-second interval, then returns per-CPU usage
            self.load_5min_average = psutil.cpu_percent(interval=5, percpu=True)

    def _load_15min_avg(self):
        """Loop forever calculating the fifteen minute average load"""
        # Initialize list to -1 indicating the time interval has not occurred yet
        index = 0
        while index < self.cpus:
            self.load_15min_average.append(-1)
            index += 1

        while True:
            # cpu_percent blocks for the fifteen-second interval, then returns per-CPU usage
            self.load_15min_average = psutil.cpu_percent(interval=15, percpu=True)
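
A minimal usage sketch for the subset dispatch in read_data above, assuming the imports and configuration environment NodeData expects are in place; "cpu_data" is one of the subsets the method handles:

node = NodeData()
if node.read_data("cpu_data", debug=False, units="MB"):
    print(node.cpu_usage)       # overall percentage from psutil.cpu_percent
    print(node.cpu_core_data)   # per-core dicts built by _get_cpu_data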
Code example #7
def init_rabbitMQ_msg_processors():
    """The main bootstrap for sspl automated tests"""

    # Retrieve configuration file for sspl-ll service
    conf_directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    path_to_conf_file = os.path.join(conf_directory, "sspl_tests.conf")
    try:
        conf_reader = ConfigReader(is_test=True, test_config_path=path_to_conf_file)
    except (IOError, ConfigReader.Error) as err:
        # We don't have logger yet, need to find log_level from conf file first
        print("[ Error ] when validating the configuration file %s :" % \
            path_to_conf_file)
        print(err)
        print("Exiting ...")
        exit(os.EX_USAGE)

    # Initialize logging
    try:
        init_logging("SSPL-Tests", "DEBUG")

    except Exception as err:
        # We don't have logger since it threw an exception, use generic 'print'
        print("[ Error ] when initializing logging :")
        print(err)
        print("Exiting ...")
        exit(os.EX_USAGE)

    # Modules to be used for testing
    conf_modules = conf_reader._get_value_list(SSPL_SETTING, MODULES)

    # Create a map of references to all the module's message queues.  Each module
    #  is passed this mapping so that it can send messages to other modules.
    msgQlist = {}

    # Create a mapping of all the instantiated modules to their names
    world.sspl_modules = {}

    # Read in product value from configuration file
    product = conf_reader._get_value(SYS_INFORMATION, PRODUCT_NAME)
    logger.info("SSPL Bootstrap: product name supported: %s" % product)

    # Use reflection to instantiate the class based upon its class name in config file
    for conf_thread in conf_modules:
        klass = globals()[conf_thread]

        # Create mappings of modules and their message queues
        world.sspl_modules[klass.name()] = klass()
        msgQlist[klass.name()] = queue.Queue()

    # Convert to a dict
    # TODO: Check use of this
    world.diskmonitor_file = json.loads("{}")

    try:
        # Loop through the list of instanced modules and start them on threads
        threads = []
        for name, curr_module in list(world.sspl_modules.items()):
            logger.info("SSPL Tests Starting %s" % curr_module.name())
            curr_module._set_debug(True)
            thread = Thread(target=_run_thread_capture_errors,
                            args=(curr_module, msgQlist, conf_reader, product))
            thread.start()
            threads.append(thread)

        # Allow threads to startup before running tests
        time.sleep(2)

        # Clear the message queue buffer out from msgs sent at startup
        while not world.sspl_modules[RabbitMQingressProcessorTests.name()]._is_my_msgQ_empty():
            world.sspl_modules[RabbitMQingressProcessorTests.name()]._read_my_msgQ()

    except Exception as ex:
        logger.exception(ex)
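
The bootstrap above instantiates each configured module by looking its class name up with globals(). A self-contained sketch of that reflection pattern, with a hypothetical stub class standing in for the configured test modules:

# Hypothetical illustration of resolving and instantiating a class by name.
class DiskMonitorStub:
    @staticmethod
    def name():
        return "DiskMonitorStub"

conf_modules = ["DiskMonitorStub"]   # names as they would appear in config
sspl_modules = {}
for conf_thread in conf_modules:
    klass = globals()[conf_thread]   # resolve the class object by its name
    sspl_modules[klass.name()] = klass()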
Code example #8
class StorageEnclosure(object):

    ENCL_FAMILY = "enclosure-family"
    LDR_R1_ENCL = "Realstor Storage_enclosure"

    EXTENDED_INFO = "extended_info"

    # SSPL Data path
    SYSINFO = "SYSTEM_INFORMATION"
    DEFAULT_RAS_VOL = f"/var/{PRODUCT_FAMILY}/sspl/data/"

    # RAS FRU alert types
    FRU_MISSING = "missing"
    FRU_INSERTION = "insertion"
    FRU_FAULT = "fault"
    FRU_FAULT_RESOLVED = "fault_resolved"

    fru_alerts = [FRU_MISSING, FRU_INSERTION, FRU_FAULT, FRU_FAULT_RESOLVED]

    # Management user & passwd
    user = ""
    passwd = ""

    encl = {}
    enclosures = {}
    memcache_frus = {}
    memcache_system = {}
    memcache_faults = {}

    def __init__(self):

        # Validate configuration file for required valid values
        try:
            self.conf_reader = ConfigReader()

        except (IOError, ConfigReader.Error) as err:
            logger.error("[ Error ] when validating the config file {0} - {1}"\
                 .format(self.CONF_FILE, err))

        self.vol_ras = self.conf_reader._get_value_with_default(\
            self.SYSINFO, COMMON_CONFIGS.get(self.SYSINFO).get("data_path"), self.DEFAULT_RAS_VOL)

        self.encl_cache = self.vol_ras + "encl/"
        self.frus = self.encl_cache + "frus/"

        self.encl.update({"frus":self.memcache_frus})
        self.encl.update({"system":self.memcache_system})

        self._check_ras_vol()

    def _check_ras_vol(self):
        """ Check for RAS volume """
        available = os.path.exists(self.vol_ras)

        if not available:
            logger.warn("Missing RAS volume, creating ...")

            try:
                orig_umask = os.umask(0)
                os.makedirs(self.vol_ras)
            except OSError as exc:
                if exc.errno == errno.EACCES:
                    logger.warn("Permission denied to create configured sspl"
                    " datapath {0}, defaulting to {1}".format(self.vol_ras,\
                    self.DEFAULT_RAS_VOL))

                    # Configured sspl data path creation failed;
                    # defaulting data path to the available default dir
                    self.vol_ras = self.DEFAULT_RAS_VOL

                elif exc.errno != errno.EEXIST:
                    logger.warn("%s creation failed, alerts may get missed on "
                    "sspl restart or failover!!" % (self.vol_ras))
            except Exception as err:
                logger.error("makedirs {0} failed with error {1}".format(
                    self.vol_ras, err))
            finally:
                os.umask(orig_umask)
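
The try/finally in _check_ras_vol above saves and restores the process-wide umask around makedirs. A minimal self-contained sketch of that pattern (the path is hypothetical):

import os

orig_umask = os.umask(0)   # clear the umask so makedirs applies its full mode
try:
    os.makedirs("/tmp/ras-demo", exist_ok=True)   # hypothetical path
finally:
    os.umask(orig_umask)   # always restore the previous umask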
Code example #9
def conf_skipped_prefixes():
    conf_reader = ConfigReader()
    for group, prefix in skip_group_prefixes.items():
        monitor = conf_reader._get_value_with_default(group, 'monitor', 'true')
        if monitor != 'true':
            yield prefix
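
A brief usage sketch for the generator above, assuming skip_group_prefixes maps config group names to message-type prefixes (the mapping is defined outside this snippet); the filtering helper is hypothetical:

# Hypothetical consumer: drop messages whose type starts with a skipped prefix.
skipped = tuple(conf_skipped_prefixes())

def is_skipped(msg_type):
    return bool(skipped) and msg_type.startswith(skipped)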
Code example #10
import os

import pika
import pika.exceptions

from framework.utils.service_logging import logger
from framework.utils.config_reader import ConfigReader
from framework.base.sspl_constants import COMMON_CONFIGS, component, CONSUL_HOST, CONSUL_PORT, SSPL_STORE_TYPE
from framework.utils.conf_utils import SSPL_CONF, Conf

RABBITMQ_CLUSTER_SECTION = 'RABBITMQCLUSTER'
RABBITMQ_CLUSTER_HOSTS_KEY = 'cluster_nodes'

# Onward LDR_R2, consul will be abstracted out and won't exist as hard dependency for SSPL
if SSPL_STORE_TYPE == 'consul':
    import consul
    host = os.getenv('CONSUL_HOST', CONSUL_HOST)
    port = os.getenv('CONSUL_PORT', CONSUL_PORT)
    consul_conn = consul.Consul(host=host, port=port)

config_reader = ConfigReader()
connection_exceptions = (pika.exceptions.AMQPConnectionError,
                         pika.exceptions.ChannelClosedByBroker,
                         pika.exceptions.ChannelWrongStateError,
                         AttributeError)
connection_error_msg = (
    'RabbitMQ channel closed with error {}. Retrying with another host...')


def get_cluster_connection(username, password, virtual_host):
    """Makes connection with one of the rabbitmq node.
    """
    hosts = ""
    if SSPL_STORE_TYPE == 'consul':
        consul_key = component + '/' + RABBITMQ_CLUSTER_SECTION + '/' + RABBITMQ_CLUSTER_HOSTS_KEY
        hosts = consul_conn.kv.get(consul_key)[1]["Value"].decode()
Code example #11
import os

import consul
import pika
import pika.exceptions
import encodings.idna  # noqa

from framework.utils.service_logging import logger
from framework.utils.config_reader import ConfigReader
from framework.base.sspl_constants import COMMON_CONFIGS, component, CONSUL_HOST, CONSUL_PORT

RABBITMQ_CLUSTER_SECTION = 'RABBITMQCLUSTER'
RABBITMQ_CLUSTER_HOSTS_KEY = 'cluster_nodes'

host = os.getenv('CONSUL_HOST', CONSUL_HOST)
port = os.getenv('CONSUL_PORT', CONSUL_PORT)
consul_conn = consul.Consul(host=host, port=port)

config = ConfigReader()
connection_exceptions = (pika.exceptions.AMQPConnectionError,
                         pika.exceptions.ChannelClosedByBroker,
                         pika.exceptions.ChannelWrongStateError,
                         AttributeError)
connection_error_msg = (
    'RabbitMQ channel closed with error {}. Retrying with another host...')


def get_cluster_connection(username, password, virtual_host):
    """Makes connection with one of the rabbitmq node.
    """
    consul_key = component + '/' + RABBITMQ_CLUSTER_SECTION + '/' + RABBITMQ_CLUSTER_HOSTS_KEY
    hosts = consul_conn.kv.get(consul_key)[1]["Value"].decode()
    if isinstance(hosts, str):
        hosts = hosts.strip().split(",")
Code example #12
class StoreQueue:
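    """FIFO queue whose state (head/tail indexes, current memory usage and
    the queued messages themselves) lives in the persistent key-value store,
    so unsent messages survive a process restart."""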

    RABBITMQPROCESSOR = 'RABBITMQEGRESSPROCESSOR'
    LIMIT_CONSUL_MEMORY = 'limit_consul_memory'

    def __init__(self):
        self._conf_reader = ConfigReader()
        self._max_size = int(
            self._conf_reader._get_value_with_default(self.RABBITMQPROCESSOR,
                                                      self.LIMIT_CONSUL_MEMORY,
                                                      50000000))
        self._current_size = store.get("SSPL_MEMORY_USAGE")
        if self._current_size is None:
            store.put(0, "SSPL_MEMORY_USAGE")

        self._head = store.get("SSPL_MESSAGE_HEAD_INDEX")
        if self._head is None:
            store.put(0, "SSPL_MESSAGE_HEAD_INDEX")

        self._tail = store.get("SSPL_MESSAGE_TAIL_INDEX")
        if self._tail is None:
            store.put(0, "SSPL_MESSAGE_TAIL_INDEX")

    @property
    def current_size(self):
        return store.get("SSPL_MEMORY_USAGE")

    @current_size.setter
    def current_size(self, size):
        store.put(size, "SSPL_MEMORY_USAGE")

    @property
    def head(self):
        return store.get("SSPL_MESSAGE_HEAD_INDEX")

    @head.setter
    def head(self, index):
        store.put(index, "SSPL_MESSAGE_HEAD_INDEX")

    @property
    def tail(self):
        return store.get("SSPL_MESSAGE_TAIL_INDEX")

    @tail.setter
    def tail(self, index):
        store.put(index, "SSPL_MESSAGE_TAIL_INDEX")

    def is_empty(self):
        if self.tail == self.head:
            self.head = 0
            self.tail = 0
            self.current_size = 0
            return True
        else:
            return False

    def is_full(self, size_of_item):
        return (self.current_size + size_of_item) >= self._max_size

    def _create_space(self, size_of_item, reclaimed_space=0):
        if (self.current_size - reclaimed_space +
                size_of_item) >= self._max_size:
            reclaimed_space += sys.getsizeof(self.get())
            self._create_space(size_of_item, reclaimed_space)
        else:
            return

    def get(self):
        if self.is_empty():
            return
        item = store.get(f"SSPL_UNSENT_MESSAGES/{self.head}")
        store.delete(f"SSPL_UNSENT_MESSAGES/{self.head}")
        self.head += 1
        self.current_size -= sys.getsizeof(item)
        return item

    def put(self, item):
        size_of_item = sys.getsizeof(item)
        if self.is_full(size_of_item):
            logger.debug("StoreQueue, put, consul memory usage exceded limit, \
                removing old message")
            self._create_space(size_of_item)
        store.put(item, f"SSPL_UNSENT_MESSAGES/{self.tail}", pickled=False)
        self.tail += 1
        self.current_size += size_of_item
        logger.debug("StoreQueue, put, current memory usage %s" %
                     self.current_size)
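
For reference, a minimal in-memory stand-in matching the store.get/put/delete calls StoreQueue relies on; this stub is an illustrative assumption, while the real backend persists to Consul or a file depending on SSPL_STORE_TYPE:

# Hypothetical in-memory store mirroring the interface used above.
class DictStore:
    def __init__(self):
        self._kv = {}

    def get(self, key):
        return self._kv.get(key)              # None when the key is absent

    def put(self, value, key, pickled=True):  # note: value first, then key
        self._kv[key] = value

    def delete(self, key):
        self._kv.pop(key, None)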