def test_setup_logging_with_log_file(self, mock_logging):
    """A rotating handler must be created for the log file and its error twin."""
    log.setup_logging(self.logger_name, log_file=self.log_file)
    # both files share the same rotation policy: 10 MiB, 10 backups
    for expected_file in (self.log_file, self.error_file):
        mock_logging.handlers.RotatingFileHandler.assert_any_call(
            expected_file, maxBytes=10485760, backupCount=10)
def test_setup_logging_with_root_logger(self, mock_logging):
    """With root=True only the nameless root logger may be requested."""
    log.setup_logging(self.logger_name, root=True)
    # neither the suite logger nor the common library logger may be fetched
    for unexpected_name in (self.logger_name, self.common_library):
        self.assertRaises(
            AssertionError,
            mock_logging.getLogger.assert_called_with,
            unexpected_name)
    # getLogger() with no argument returns the root logger
    mock_logging.getLogger.assert_called_with()
def __init__(self, args):
    """Store the CLI arguments and the API automation settings.

    :param args: parsed command-line namespace; only ``args.attempts``
        is read here.
    """
    # initialize the logger
    self.log_filename = 'api.log'
    self.log_path = '/home/shared/logs/api'
    # fix: the format template now uses the filename argument; before, the
    # literal '(unknown)' ended up in the log path and filename was ignored
    self.log = log.setup_logging(
        name=self.log_filename, level='debug',
        log_file='{path}/{filename}'.format(
            path=self.log_path, filename=self.log_filename))
    self.attempts = args.attempts
    # remote host / tmux session used to drive the API automation script
    self.server = 'bifrost.intel.com'
    self.tmux_session_name = 'api-automation'
    self.api_script = 'automation.js'
    self.path_to_api = '/home/gfx/apiAutomation'
def test_setup_logging_with_different_levels(self, mock_logging):
    """Each supported level string maps to the matching logging constant."""
    level_map = [
        ('debug', mock_logging.DEBUG),
        ('info', mock_logging.INFO),
        ('warn', mock_logging.WARN),
        ('error', mock_logging.ERROR),
        ('critical', mock_logging.CRITICAL),
        ('notset', mock_logging.NOTSET),
        # an empty level string falls back to INFO
        ('', mock_logging.INFO),
    ]
    for level_name, expected_constant in level_map:
        logger = log.setup_logging(self.logger_name, level=level_name)
        logger.setLevel.assert_called_with(expected_constant)
def test_setup_logging_with_default_values(self, mock_logging):
    """Defaults: console handler, two rotating file handlers, INFO level."""
    # call the function under test
    logger = log.setup_logging(self.logger_name)
    # one console handler plus one rotating handler per file must be added
    logger.addHandler.assert_any_call(mock_logging.StreamHandler())
    logger.addHandler.assert_any_call(
        mock_logging.handlers.RotatingFileHandler())
    logger.addHandler.assert_any_call(
        mock_logging.handlers.RotatingFileHandler())
    # default file names with the standard rotation policy
    for default_file in ('gfx.log', 'gfx.error.log'):
        mock_logging.handlers.RotatingFileHandler.assert_any_call(
            default_file, maxBytes=10485760, backupCount=10)
    # named loggers only -- the root logger must not be configured
    for expected_name in (self.logger_name, self.common_library,
                          self.stable_package):
        mock_logging.getLogger.assert_any_call(expected_name)
    # verify log level defaults to INFO
    logger.setLevel.assert_called_with(mock_logging.INFO)
def __init__(self):
    """Load the rendercheck configuration and initialize the logger.

    Exits with status 1 when the configuration file is missing and with
    status 0 when the control file signals that a previous run finished.
    """
    self.config_file = '/home/custom/config.yml'
    if not os.path.isfile(self.config_file):
        bash.message('err', '{0}: does not exists'.format(self.config_file))
        sys.exit(1)
    # fix: reuse self.config_file (the path literal was duplicated here)
    # and close the handle instead of leaking it
    with open(self.config_file) as config_handle:
        self.data = yaml.load(config_handle)
    self.dut_user = self.data['dut_conf']['dut_user']
    self.dut_hostname = self.data['dut_conf']['dut_hostname']
    self.dut_static_ip = self.data['dut_conf']['dut_static_ip']
    self.raspberry_number = self.data['raspberry_conf']['raspberry_number']
    self.raspberry_power_switch = self.data['raspberry_conf'][
        'raspberry_power_switch']
    self.log_folder_path = os.path.join(
        '/home', self.dut_user, 'rendercheck_logs')
    # the control file marks an already-finished rendercheck run
    self.control_file = os.path.join(self.log_folder_path, 'control')
    if os.path.isfile(self.control_file):
        bash.message('info', 'rendercheck has finished')
        bash.message('info', 'nothing to do')
        sys.exit(0)
    # initialize the logger (start from an empty log file every run)
    os.system('mkdir -p {0}'.format(self.log_folder_path))
    self.log_file = os.path.join(self.log_folder_path, 'rendercheck.log')
    if os.path.isfile(self.log_file):
        os.remove(self.log_file)
    self.log = log.setup_logging(
        name='launcher', level='debug',
        log_file='{0}'.format(self.log_file))
    self.log.info('initialize the logger for ({0}) rendercheck'.format(
        self.log_file))
def __init__(self, **kwargs):
    """Class constructor

    :param kwargs:
        - tag: build drm-tip kernel and create a QA-tag.
          The accepted value is : True
        - daily: build drm-tip kernel. The accepted value is : True
        - commit: build drm-tip kernel with a specific commit.
          The commit must have at least 7 digits to be recognized by git.
    """
    self.tag = kwargs.get('tag', None)
    self.daily = kwargs.get('daily', None)
    self.specific_commit = kwargs.get('commit', None)
    # exactly one of the three build modes must be selected
    list_to_validate = [self.tag, self.daily, self.specific_commit]
    if list_to_validate.count(None) != 2:
        raise RuntimeError('please set one value')
    self.this_path = os.path.dirname(os.path.abspath(__file__))
    self.data = yaml.load(open(os.path.join(self.this_path, 'kernel.yml')))
    # drm-tip defaults; the tag flow below overrides with drm-intel-qa
    self.kernel_keys = self.data['drm-tip']['kernel_keys']
    self.kernel_name = self.data['drm-tip']['kernel_name'].lower()
    self.debian_packages_local_path = '/home/shared/kernels_mx/drm-tip'
    self.kernel_folder_nickname = self.data['drm-tip']['kernel_folder_nickname']
    self.kernel_id = 'drm-tip'
    if self.tag:
        self.log_path = '/home/shared/logs/kernel/drm-intel-qa'
        self.kernel_keys = self.data['drm-intel-qa']['kernel_keys']
        self.kernel_name = self.data['drm-intel-qa']['kernel_name'].lower()
        self.debian_packages_local_path = '/home/shared/kernels_mx/drm-intel-qa'
        self.kernel_folder_nickname = self.data['drm-intel-qa'][
            'kernel_folder_nickname']
        self.kernel_id = 'drm-intel-qa'
    elif self.daily:
        self.log_path = '/home/shared/logs/kernel/daily'
    elif self.specific_commit:
        self.log_path = '/home/shared/logs/kernel/specific_commit'
    else:
        self.log_path = '/home/shared/logs/kernel/drm-tip'
    # initialize the logger
    self.log_filename = 'kernel.log'
    # fix: the format template now uses the filename argument; before, the
    # literal '(unknown)' ended up in the log path and filename was ignored
    self.log = log.setup_logging(
        name=self.log_filename, level='debug',
        log_file='{path}/{filename}'.format(
            path=self.log_path, filename=self.log_filename))
    self.log.info('saving the log in: {log_file}'.format(
        log_file=os.path.join(self.log_path, self.log_filename)))
    # check if self.kernel_name does not exceeds 33 characters
    if len(self.kernel_name) > 33:
        self.log.error(
            '{0} : exceeds 33 characters, please make it smaller'.format(
                self.kernel_name))
        sys.exit(1)
    # check for characters not allowed in kernel name
    # reference : https://www.debian.org/doc/debian-policy/#s-f-source
    rule = 'package names must consist only of lower case letters (a-z), ' \
        'digits (0-9), plus (+) and minus (-) signs, and periods (.)\n ' \
        'They must be at least two characters long and must start with an ' \
        'alphanumeric character'
    characters_not_allowed = ['_', '~']
    for character in characters_not_allowed:
        if character in self.kernel_name:
            # fix: added the missing space after the quoted character
            self.log.error("character '{0}' not allowed in : {name}".format(
                character, name=self.kernel_name))
            self.log.info(rule)
            sys.exit(1)
    self.mailing_list = self.data['miscellaneous']['mailing_list']
    # linuxgraphics.intel.com configuration
    self.server_for_upload_package = self.data[
        'miscellaneous']['server_for_upload_package']
    self.server_user = self.data['miscellaneous']['server_user']
    # timestamps used to tag builds and logs
    self.week_number = bash.get_output('date +"%-V"')
    self.month = bash.get_output('month=`date +"%b"`; echo ${month^^}')
    self.week_day = bash.get_output('date +%A').lower()
    self.year = bash.get_output('date +%G')
    self.hour = bash.get_output('date +"%I-%M-%S %p"')
    self.enforce = self.data['miscellaneous']['enforce']
    # getting the ip
    self.ip = network.get_ip()
    # environment variables
    os.environ['GIT_SSL_NO_VERIFY'] = '1'
    # this variable will change if there is a new commit for the kernel.
    self.kernel_commit_built = None
from gfx_qa_tools.common import bash
from gfx_qa_tools.common import log

# getting the username from config.yml
data = yaml.load(open('/home/custom/config.yml'))
dut_user = data['dut_conf']['dut_user']

# initializing the logger
log_filename = 'sentinel.log'
log_path = '/home/{user}/logs'.format(user=dut_user)
if not os.path.exists(log_path):
    os.makedirs(log_path)
# NOTE(review): the format template writes the literal '(unknown)' into the
# log path and silently ignores the filename argument -- it most likely
# should be '{path}/{filename}'; confirm before changing the log location.
logger = log.setup_logging(
    'sentinel', level='debug',
    log_file='{path}/(unknown)'.format(
        path=log_path, filename=log_filename))

path_to_files = '/var/log'
# the files to check can have rotatory files.
files_to_check = ['syslog', 'kern.log']
# size in bytes
maximum_size_allowed = 15360

# watch the log files forever; each pass re-lists any rotated siblings
while True:
    for archive in files_to_check:
        # getting a list for the rotatory files (if any)
        rotatory_files = bash.get_output('ls {0} | grep {1}'.format(
            path_to_files, archive)).split()
"""Create htmls and generate all stuff require for visualization pages""" import argparse import os import re from gfx_qa_tools.common import log from visualization import base_visualization as vis # currently this source is exe as script without logger = log.setup_logging('create_visualization', level='debug', log_file='create_vis.log', root=True) # Path's where visualization pages will save VIS_PATH_HTML = os.path.join(vis.VIS_PATH_MAIN, 'html') VIS_PATH_HTML_FF = os.path.join(VIS_PATH_HTML, vis.IGT_TEST_SUITE_FF) VIS_PATH_HTML_ALL = os.path.join(VIS_PATH_HTML, vis.IGT_TEST_SUITE_ALL) # Path's/File's that scripts to creates visualization from extern repo VIS_GEN_MAIN = os.path.join(vis.VIS_GEN_EXTERN_REPO, 'vis.py') VIS_GEN_TEST = os.path.join(vis.VIS_GEN_EXTERN_REPO, 'vis-test-results.py') VIS_GEN_HISTORY = os.path.join(vis.VIS_GEN_EXTERN_REPO, 'vis-history.py') # Constant for default amount builds (columns) on pages VIS_DEF_AMOUNT_MAIN = 5 VIS_DEF_AMOUNT_TEST = 20 VIS_DEF_AMOUNT_HIS = 20
from gfx_qa_tools.common import log
import yaml

# configuration values read from the shared config.yml
data = yaml.load(open('/home/custom/config.yml'))
default_mailing_list = data['suite_conf']['default_mailing_list']
dut_hostname = data['dut_conf']['dut_hostname']
dut_static_ip = data['dut_conf']['dut_static_ip']
dut_user = data['dut_conf']['dut_user']
sender = '*****@*****.**'
# control file path under the DUT user's home directory
control_file = os.path.join('/home', dut_user, '.xserver.ctrl')

# initialize the logger
log_file = os.path.join('/home', dut_user, 'xserver.log')
logger = log.setup_logging(
    name='launcher', level='debug', log_file='{0}'.format(log_file))
# NOTE(review): the message says 'rendercheck' but this module handles the
# xserver -- looks like a copy/paste leftover; confirm before changing.
logger.info('initialize the logger for ({0}) rendercheck'.format(log_file))


def check_xserver_xorg():
    """Check the name of the configuration file xorg.conf in the system

    After a debian package is setup through clonezilla environment when
    Ubuntu starts for some reason the name of this file is changed to
    xorg.conf<random_number> and if the file has not the correct name
    TTY7 (X) will not be enabled.
    """
    # validating if xserver is on graphic stack installed in the system
    path_graphic_stack = '/home/custom/graphic_stack/packages'
def test_setup_logging_with_no_console(self, mock_logging):
    """console_log=False must keep the stream handler off the logger."""
    logger = log.setup_logging(self.logger_name, console_log=False)
    stream_handler = mock_logging.StreamHandler()
    # adding the console handler would make this assertion succeed,
    # so the AssertionError proves it was never attached
    self.assertRaises(
        AssertionError, logger.addHandler.assert_any_call, stream_handler)
def test_setup_logging_formatter(self, mock_logging):
    """Exactly one formatter with the expected layout must be created."""
    expected_layout = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    log.setup_logging(self.logger_name)
    mock_logging.Formatter.assert_called_once_with(expected_layout)
# local staging area and artifact names for the IGT results upload
OUTPUT_DIR = '/tmp'
JSON_UNCOMPRESSED_NAME = 'results.json'
JSON_COMPRESSED_NAME = 'results.json.tar.gz'
LINUXGRAPHICS_REPORTS_PATH = '/var/www/html/reports/intel-gpu-tools'
LINUXGRAPHICS_BASE_URL = 'http://linuxgraphics.intel.com/igt-reports'

# getting configurations from config.ini of linuxgraphics.
LINUXGRAPHICS_USER = config.get('linuxgraphics', 'user')
LINUXGRAPHICS_IP = config.get('linuxgraphics', 'ip')
LINUXGRAPHICS_CNAME = config.get('linuxgraphics', 'cname')

# logger setup
LOG_FILENAME = 'synchronize_linuxgraphics.log'
LOG_PATH = config.get('igt', 'log_path')
# NOTE(review): the format template writes the literal '(unknown)' into the
# log path and silently ignores the filename argument -- it most likely
# should be '{path}/{filename}'; confirm before changing the log location.
LOGGER = log.setup_logging(
    'synchronize_linuxgraphics', level='debug',
    log_file='{path}/(unknown)'.format(
        path=LOG_PATH, filename=LOG_FILENAME))


def check_an_url_from_linuxgraphics(url):
    """Check if an url exists.

    The aim of this function is to check if an a specified url exists.

    :param url: this url must correspond to one from
        http://linuxgraphics.intel.com/igt-reports.
    """
    try:
        requests.get(url, timeout=5)
    except (requests.exceptions.ConnectionError,
            requests.exceptions.Timeout):
def __init__(self, **kwargs):
    """Class constructor

    :param kwargs:
        - stack: build a new graphic stack (boolean value).
        - kernel: build a new kernel (boolean value).
        - firmware: set the configurations files with firmware (if any)
          (boolean value).
        - dryrun: simulate the run of this script, do not setup any DUT
          (boolean value).
        - grabsystemsbusy: grab the systems in (platforms_not_launched.yml)
          and launch them with latest configuration.
        - visualization: enabling reporting in visualization.
        - report: enabling reporting to Test Report Center.
    """
    # initialize the logger
    log_name = 'orchestrator'
    log_path = '/home/shared/logs/orchestrator/fastfeedback'
    # fix: the format template now uses the filename argument; before, the
    # literal '(unknown)' ended up in the log name and filename was ignored
    self.log = log.setup_logging(
        name=log_name, level='info',
        log_file='{path}/{filename}.log'.format(
            path=log_path, filename=log_name))
    self.stack = kwargs.get('stack', None)
    self.kernel = kwargs.get('kernel', None)
    self.firmware = kwargs.get('firmware', None)
    self.dryrun = kwargs.get('dryrun', None)
    self.grab_systems_busy = kwargs.get('grabsystemsbusy', None)
    self.visualization = kwargs.get('visualization', None)
    self.report = kwargs.get('report', 'sand')
    # environment variables
    os.environ['GIT_SSL_NO_VERIFY'] = '1'
    self.this_path = os.path.dirname(os.path.abspath(__file__))
    self.main_path = '/home/shared/build'
    self.gfx_qa_repo = 'https://github.intel.com/linuxgraphics/gfx-qa-tools.git'
    self.suite = 'intel-gpu-tools'
    # graphic stack variables
    self.gfx_stack_path = os.path.join(self.main_path, 'gfx_stack', self.suite)
    self.gfx_stack_config = os.path.join(
        self.gfx_stack_path, 'gfx-qa-tools', 'gfx_stack', 'config.yml')
    self.debian_packages_path = '/home/shared/gfx_stack/packages'
    self.kernel_packages_path = '/home/shared/kernels_mx/drm-intel-qa'
    # timestamps used to tag builds and logs
    self.week_number = bash.get_output('date +"%-V"')
    self.week_day = bash.get_output('date +%A')
    self.year = bash.get_output('date +%G')
    self.hour = bash.get_output('date +"%I-%M-%S"')
    self.sender = '*****@*****.**'
    self.mailing_list = [
        '*****@*****.**',
        '*****@*****.**'
    ]
    # inventory of raspberries/switches from the central service
    get_raspberries = 'http://10.219.106.111:2020/getraspberries'
    self.raspberries_json_data = requests.get(
        get_raspberries, timeout=20).json()
    # fix: '([], ) * 2' bound BOTH attributes to the SAME list object, so
    # appending to one silently mutated the other; give each its own list
    self.table_content_launch, self.table_content_not_launch = [], []
    self.systems_to_launch = {}
    self.systems_to_not_launch = {}
class ElectricalControlManager(object):
    """ElectricalControlManager class helps to handle the power control.

    The purpose of this class is to help with the power control of the
    DUTs connected to the automated system.
    """

    # class-level logger shared by every instance; console output is
    # suppressed so automated executions stay quiet
    logger = log.setup_logging(
        'raspberry',
        log_file='{0}/raspberry.log'.format(
            config.get('raspberry', 'log_path')),
        console_log=False)

    def __init__(self, **kwargs):
        """Class builder

        This class builder receives parameters in order to make it portable
        and be able to use it as a module inside of others scripts, the
        possible parameters that this class receives are:

        :param kwargs:
            - raspberry: which is the raspberry number, the possible values
              are: 1-4 (int values). (mandatory value!)
            - switch: which is the switch number in the automated system,
              the possible values are: 1-8 for a single switch and 9 for
              all switches (int values).
            - coldreset: select a single switch or all switches to apply a
              cold reset (turn off and turn on) from a raspberry system,
              the possible values are: 1-8 for a single switch and 9 for
              all switches (int values).
            - down: select a single switch or all switches to turn off from
              a raspberry system the possible values are : 1-8 for a single
              switch and 9 for all switches (int values).
            - up: select a single switch or all switches to turn on from a
              raspberry system the possible values are : 1-8 for a single
              switch and 9 for all switches (int values).
            - console: suppress/enable console output, the possible values
              are:
                True: for enabling it
                False: for disabling it (useful for automated executions)
              If this option is not send, the default value is None.
            - cutter: turn on and off a usb cutter, the possible values are:
                on: For turning on the USB-Cutter
                off: This will not apply any power action in the USB-Cutter
              If this option is not send, the default value is None.
        """
        self.raspberry = kwargs.get('raspberry', None)
        # the raspberry number is the only mandatory argument
        if self.raspberry is None:
            raise KeyError('raspberry value can not be empty')
        self.switch = kwargs.get('switch', None)
        self.coldreset = kwargs.get('coldreset', None)
        self.down = kwargs.get('down', None)
        self.up = kwargs.get('up', None)
        self.console = kwargs.get('console', None)
        self.cutter = kwargs.get('cutter', None)
        # raspberry/switch inventory fetched from the central service
        self.data = requests.get(
            'http://10.219.106.111:2020/getraspberries').json()

    @staticmethod
    def check_raspberry_connection(raspberry_number, raspberry_ip,
                                   console=True):
        """Check the raspberry connection through ping command.

        :param raspberry_number: the raspberry to check.
        :param raspberry_ip: the raspberry ip.
        :param console: print messages in the console.
        """
        if console:
            bash.message(
                'info',
                'testing connection with (raspberry {number}) ({ip})'.format(
                    number=raspberry_number, ip=raspberry_ip), '')
        # a single ping decides reachability; output goes to /dev/null
        bash.return_command_status(
            'ping -c 1 {ip} &> /dev/null'.format(ip=raspberry_ip),
            print_messages=console)

    @staticmethod
    def perform_ssh_cmd(raspberry_ip, cmd, **kwargs):
        """Managing power options in a raspberry.

        :param raspberry_ip: the current raspberry ip address.
        :param cmd: the command to execute in the raspberry.
        :param kwargs: some of the possible values are:
            - gpio: the current gpio of the raspberry.
            - timeout: the timeout for ssh connection.
            - console_output: the possible values are:
                True: enable console output (default option)
                False: suppress console output (optional)
        """
        raspberry_user = '******'
        raspberry_password = '******'
        timeout = kwargs['timeout'] if 'timeout' in kwargs else 10
        console_output = kwargs.get('console_output', True)
        # NOTE(review): timeout is passed to str.format() below, where it is
        # silently ignored (no {timeout} placeholder) -- it never reaches
        # run_command(); confirm whether RemoteClient.run_command should
        # receive it instead.
        exit_status, stdout = RemoteClient(
            raspberry_ip,
            user=raspberry_user,
            password=raspberry_password).run_command(
                '{sudo}{cmd} {gpio}'.format(
                    sudo='sudo ' if 'gpio' in kwargs else '',
                    cmd=cmd,
                    gpio=kwargs.get('gpio', ''),
                    timeout=timeout))
        if console_output:
            print('>>> (info) stdout ({stdout})'.format(stdout=stdout))
            # translate the ssh exit status into the suite's verdict words
            if exit_status == 0:
                print('DONE')
            elif exit_status == 77:
                print('SKIP')
            else:
                print('FAIL')

    @staticmethod
    def read_clonezilla_file(clonezilla_file):
        """Read continuously a file

        The aim of this function is to read continuously a clonezilla file
        in order to show the current clonezilla progress from the DUT
        through IEM/terminal.

        :param clonezilla_file: which is the file to read
        """
        logger = ElectricalControlManager.logger
        logger.info('Reading clonezilla file: {0}'.format(clonezilla_file))
        # start from a clean state: a stale file would replay old progress
        if os.path.isfile(clonezilla_file):
            os.remove(clonezilla_file)
            logger.info('An old clonezilla file was found and was deleted')
        end_line = 'clonezilla has finished'
        minutes_allowed_to_wait = config.getint(
            'raspberry', 'clonezilla_file_timeout')
        # wait for the file to be created
        print('waiting to read: {0}'.format(clonezilla_file))
        logger.info('waiting to read: {0}'.format(clonezilla_file))
        while True:
            start_time = time.time()
            while not os.path.isfile(clonezilla_file):
                logger.debug(
                    'waiting for clonezilla file to exist ({0}s)'.format(
                        time.time() - start_time))
                time.sleep(1)
                # give up after the configured number of minutes
                if time.time() > start_time + (minutes_allowed_to_wait * 60):
                    exit_msg = (
                        'unable to read {cz_file} after {min} minutes'.format(
                            cz_file=clonezilla_file,
                            min=minutes_allowed_to_wait))
                    logger.warning(exit_msg)
                    sys.exit(exit_msg)
            logger.info(
                'the clonezilla file was found, continuing with the reading')
            # NOTE(review): buffering=0 on a text-mode file is Python 2 only
            with open(clonezilla_file, 'r', buffering=0) as cf:
                file_size = 0
                while True:
                    # we need a way to refresh the reference to the file in
                    # case it changes (file deleted then re-created), so if
                    # at any point this file becomes smaller than it was, in
                    # this case means it was deleted and re-created, so we
                    # need to start reading from the beginning of the file
                    if not os.path.isfile(clonezilla_file):
                        # if at one point we cannot read the file break out
                        # of this cycle to go to the outer cycle
                        break
                    logger.debug('file size: {0}'.format(
                        os.stat(clonezilla_file).st_size))
                    logger.debug('previous size: {0}'.format(file_size))
                    if os.stat(clonezilla_file).st_size < file_size:
                        cf.seek(0)
                        logger.info(
                            'the clonezilla file was re-created, reading from the top'
                        )
                    where = cf.tell()
                    line = cf.readline()
                    logger.debug('file pointer: {0}'.format(where))
                    file_size = os.stat(clonezilla_file).st_size
                    if not line:
                        # nothing new yet: wait, then rewind to retry here
                        time.sleep(1)
                        cf.seek(where)
                    else:
                        logger.debug('printing line: {0}'.format(line))
                        print line,
                        # if the printed line is the one we expect as the
                        # last line, exit
                        if end_line in line:
                            logger.info('end line was found, exiting file')
                            return

    def manager(self):
        """Perform specific actions.

        variables:
        - self.args.coldreset: this argument perform a cold reset in the
          specified switch. Cold reset mean turn off and then turn on
          the DUT.
        - self.args.down: this argument perform a power off in the
          specified switch.
        - self.args.up: this argument perform power on in the specified
          switch.
        """
        gpios = []
        usb_cutter = []
        raspberry_ip = None
        raspberry_python_path = '/home/pi/dev'
        raspberry_cleaware_path = '/home/pi/dev/raspberry/clewarecontrol/cutter.py'
        # locate this raspberry in the inventory and collect its GPIO and
        # usb-cutter ids (one entry per power switch)
        for element in self.data:
            if self.raspberry == int(element['name'].split()[1].encode()):
                raspberry_ip = element['ip'].encode()
                for item in element['powerSwitches']:
                    gpios.append(item['GPIO'].encode())
                    usb_cutter.append(item['usbCutter'].encode())
        if self.coldreset:
            self.check_raspberry_connection(
                self.raspberry, raspberry_ip, self.console)
            # cold reset = power off, then power on
            cmd = ['sudo power -off', 'sudo power -on']
            if self.coldreset != 9:
                # single switch: 1-based switch number -> 0-based gpio index
                gpio = int(self.coldreset) - 1
                gpio = gpios[gpio]
                for argument in cmd:
                    self.perform_ssh_cmd(raspberry_ip, argument, gpio=gpio,
                                         console_output=self.console)
                    if argument == cmd[0] and self.cutter:
                        # turn on the usb-cutter
                        command = 'PYTHONPATH={python_path} python {cleware} ' \
                            '-c {cutter_id} -a on'.format(
                                python_path=raspberry_python_path,
                                cleware=raspberry_cleaware_path,
                                cutter_id=usb_cutter[self.coldreset - 1])
                        self.perform_ssh_cmd(raspberry_ip, command,
                                             console_output=self.console)
                    time.sleep(5)
                if self.console and self.cutter:
                    self.read_clonezilla_file(
                        os.path.join(
                            '/home/shared/raspberry',
                            'raspberry-0{0}'.format(self.raspberry),
                            'switch-0{0}'.format(self.coldreset),
                            'clonezilla'))
            else:
                # 9 means every switch of this raspberry
                for gpio in gpios:
                    for argument in cmd:
                        self.perform_ssh_cmd(raspberry_ip, argument,
                                             gpio=gpio,
                                             console_output=self.console)
                        time.sleep(5)
        elif self.down:
            cmd = 'sudo power -off'
            if self.down != 9:
                gpio = int(self.down) - 1
                gpio = gpios[gpio]
                self.check_raspberry_connection(
                    self.raspberry, raspberry_ip, self.console)
                self.perform_ssh_cmd(raspberry_ip, cmd, gpio=gpio,
                                     console_output=self.console)
            else:
                for gpio in gpios:
                    self.perform_ssh_cmd(raspberry_ip, cmd, gpio=gpio,
                                         console_output=self.console)
                    time.sleep(5)
        elif self.up:
            cmd = 'sudo power -on'
            if self.up != 9:
                gpio = int(self.up) - 1
                gpio = gpios[gpio]
                self.check_raspberry_connection(
                    self.raspberry, raspberry_ip, self.console)
                self.perform_ssh_cmd(raspberry_ip, cmd, gpio=gpio,
                                     console_output=self.console)
            else:
                for gpio in gpios:
                    self.perform_ssh_cmd(raspberry_ip, cmd, gpio=gpio,
                                         console_output=self.console)
                    time.sleep(5)
        elif self.cutter and self.switch:
            # this part of the code turn on/off the USB-Cutters in the
            # system.
            # NOTE(review): timeout=80 below is consumed by str.format()
            # (no {timeout} placeholder) and therefore has no effect.
            if self.switch != 9:
                command = 'PYTHONPATH={python_path} python {cleware} ' \
                    '-c {cutter_id} -a {cutter_action}'.format(
                        python_path=raspberry_python_path,
                        cleware=raspberry_cleaware_path,
                        cutter_id=usb_cutter[self.switch - 1],
                        cutter_action=self.cutter,
                        timeout=80)
                self.perform_ssh_cmd(raspberry_ip, command,
                                     console_output=self.console)
            else:
                for cutter in usb_cutter:
                    command = 'PYTHONPATH={python_path} python {cleware} ' \
                        '-c {cutter_id} -a {cutter_action}'.format(
                            python_path=raspberry_python_path,
                            cleware=raspberry_cleaware_path,
                            cutter_id=cutter,
                            cutter_action=self.cutter,
                            timeout=80)
                    self.perform_ssh_cmd(raspberry_ip, command,
                                         console_output=self.console)