def _get_value(self, cell):
    """Return the value of the xlrd Cell as unicode text, based on its type.

    :param cell: xlrd Cell instance.
    :returns: unicode representation of the cell value ('' for empty cells).
    """
    # String
    if cell.ctype == xlrd.XL_CELL_TEXT:
        return text(cell.value)

    # Number: xlrd makes no separation between integers and other
    # numbers (everything is a float). Show it as an integer when the
    # value is integral, to avoid a trailing '.0'.
    # NOTE: float.is_integer is available only in python 2.6 and above
    if cell.ctype == xlrd.XL_CELL_NUMBER:
        if int(cell.value) == cell.value:
            return u'%s' % int(cell.value)
        return u'%s' % cell.value

    # Date type
    if cell.ctype == xlrd.XL_CELL_DATE:
        value = xlrd.xldate_as_tuple(cell.value, 0)
        # BUGFIX: check for a missing month *before* building the
        # datetime: time-only cells yield (0, 0, 0, h, m, s) and
        # datetime(year=0, ...) raises ValueError.
        if not value[1]:
            return u'%s' % value[0]
        date = datetime(
            year=value[0],
            month=value[1],
            day=value[2],
            hour=value[3],
            minute=value[4],
            second=value[5],
        )
        # BUGFIX: show the time part when ANY of hour/minute/second is
        # set; the original required all three to be non-zero, so e.g.
        # 13:00:00 was rendered as a bare date.
        if value[3] or value[4] or value[5]:
            return text(date)
        # TODO: provide a way to define this format
        return text(date.strftime(u'%Y-%m-%d'))

    # Boolean
    if cell.ctype == xlrd.XL_CELL_BOOLEAN:
        if cell.value:
            return _(u'True')
        return _(u'False')

    # Error
    if cell.ctype == xlrd.XL_CELL_ERROR:
        return _(u'Error')

    return u''
def test_enqueuing_fail_reload(workspace, monkeypatch, tmpdir):
    """Test failing when reloading the measure after saving.
    """
    measure = workspace.plugin.edited_measures.measures[0]
    measure.root_task.default_path = text(tmpdir)
    from ecpy.measure.measure import Measure

    # Record every call made to the patched Measure.load.
    calls = []

    @classmethod
    def fake_load(cls, measure_plugin, path, build_dep=None):
        calls.append(None)
        return None, {'r': 't'}

    monkeypatch.setattr(Measure, 'load', fake_load)

    with handle_dialog():
        workspace.enqueue_measure(measure)

    # Check dependencies are cleaned up
    assert_dependencies_released(workspace, measure)

    # The reload failure must leave the queue empty, and the stub must
    # actually have been invoked.
    assert not workspace.plugin.enqueued_measures.measures
    assert calls
def test_enqueueing_abort_warning(workspace, monkeypatch, tmpdir):
    """Test aborting enqueueing because some checks raised warnings.
    """
    measure = workspace.plugin.edited_measures.measures[0]
    measure.root_task.default_path = text(tmpdir)
    from ecpy.measure.measure import Measure

    # Record every call made to the patched run_checks.
    calls = []

    def fake_checks(*args, **kwargs):
        calls.append(None)
        return True, {'r': {'t': 's'}}

    monkeypatch.setattr(Measure, 'run_checks', fake_checks)

    # The warning dialog is rejected, aborting the enqueueing.
    with handle_dialog('reject'):
        workspace.enqueue_measure(measure)

    # Check dependencies are cleaned up
    assert_dependencies_released(workspace, measure)

    assert not workspace.plugin.enqueued_measures.measures
    assert calls
def exec_infos(measure_workbench, measure, tmpdir, process_engine,
               sync_server):
    """Fixture building an ExecutionInfos whose root task holds two
    WaitingTask children synchronised through the sync_server fixture.

    The WaitingTask class is registered on the task plugin so that the
    dependency collection below can resolve it.
    """
    tp = measure_workbench.get_plugin('ecpy.tasks')
    # Register the test task class directly in the private contributions
    # mapping (bypasses the normal declaration mechanism).
    tp._tasks.contributions['tests.WaitingTask'] = TaskInfos(cls=WaitingTask)

    r = RootTask(default_path=text(tmpdir))
    r.add_child_task(0, WaitingTask(name='test1', sock_id='test1',
                                    sync_port=sync_server.port))
    r.add_child_task(1, WaitingTask(name='test2', sock_id='test2',
                                    sync_port=sync_server.port))

    measure.root_task = r
    deps = measure.dependencies
    # Runtime dependencies must be collectable for the fixture to be usable.
    res, msg, errors = deps.collect_runtimes()
    assert res

    return ExecutionInfos(
        id='test',
        task=r,
        build_deps=deps.get_build_dependencies().dependencies,
        runtime_deps=deps.get_runtime_dependencies('main'),
        observed_entries=['test'],
        # Checks are skipped when the measure was force-enqueued.
        checks=not measure.forced_enqueued,
    )
def test_enqueuing_fail_reload(workspace, monkeypatch, tmpdir):
    """Test failing when reloading the measure after saving.
    """
    measure = workspace.plugin.edited_measures.measures[0]
    measure.root_task.default_path = text(tmpdir)
    from ecpy.measure.measure import Measure

    # Record every call made to the patched Measure.load.
    calls = []

    @classmethod
    def fake_load(cls, measure_plugin, path, build_dep=None):
        calls.append(None)
        return None, {'r': 't'}

    monkeypatch.setattr(Measure, 'load', fake_load)

    with handle_dialog():
        workspace.enqueue_measure(measure)

    # Make sure runtimes are always released.
    manifest = workspace.plugin.workbench.get_manifest('test.measure')
    assert not manifest.find('runtime_dummy1').collected
    assert not manifest.find('runtime_dummy2').collected

    assert not workspace.plugin.enqueued_measures.measures
    assert calls
def _write_infos_in_task(self):
    """Write all the measure metadata in the root_task database.

    Stores the measure name, id and today's date under the
    'meas_name'/'meas_id'/'meas_date' entries.
    """
    entries = (('meas_name', self.name),
               ('meas_id', self.id),
               ('meas_date', text(date.today())))
    for key, value in entries:
        self.root_task.write_in_database(key, value)
def test_enqueueing_abort_warning(workspace, monkeypatch, tmpdir):
    """Test aborting enqueueing because some checks raised warnings.
    """
    measure = workspace.plugin.edited_measures.measures[0]
    measure.root_task.default_path = text(tmpdir)
    from ecpy.measure.measure import Measure

    # Record every call made to the patched run_checks.
    calls = []

    def fake_checks(*args, **kwargs):
        calls.append(None)
        return True, {'r': {'t': 's'}}

    monkeypatch.setattr(Measure, 'run_checks', fake_checks)

    # The warning dialog is rejected, aborting the enqueueing.
    with handle_dialog('reject'):
        workspace.enqueue_measure(measure)

    # Make sure runtimes are always released.
    manifest = workspace.plugin.workbench.get_manifest('test.measure')
    assert not manifest.find('runtime_dummy1').collected
    assert not manifest.find('runtime_dummy2').collected

    assert not workspace.plugin.enqueued_measures.measures
    assert calls
def test_enqueueing_after_warning(workspace, monkeypatch, tmpdir):
    """Test enqueueing after some checks raised warnings.
    """
    measure = workspace.plugin.edited_measures.measures[0]
    measure.root_task.default_path = text(tmpdir)
    from ecpy.measure.measure import Measure

    # Record every call made to the patched run_checks.
    calls = []

    def fake_checks(*args, **kwargs):
        calls.append(None)
        return True, {'r': {'t': 's'}}

    monkeypatch.setattr(Measure, 'run_checks', fake_checks)

    # Accepting the warning dialog lets the enqueueing proceed.
    with handle_dialog():
        assert workspace.enqueue_measure(measure)

    # Make sure runtimes are always released.
    manifest = workspace.plugin.workbench.get_manifest('test.measure')
    assert not manifest.find('runtime_dummy1').collected
    assert not manifest.find('runtime_dummy2').collected

    assert calls
def slugify(value, max_filename_length=200):
    """Create a valid filename from a bookmark title by:

    - Normalizing the string (see http://unicode.org/reports/tr15/)
    - Converting it to lowercase
    - Removing non-alpha characters
    - Converting spaces to hyphens

    Adapted from:
    - http://stackoverflow.com/questions/5574042/string-slugification-in-python
    - http://stackoverflow.com/questions/295135/turn-a-string-into-a-valid-filename-in-python

    See too: http://en.wikipedia.org/wiki/Comparison_of_file_systems#Limits.

    :param value: title to slugify.
    :param max_filename_length: maximum length of the result, or None for
        no truncation.
    :returns: slugified text value.
    """
    value = text(value)
    # BUGFIX: decode back to text after dropping non-ASCII characters.
    # encode() returns bytes, and re.sub with a str pattern raises
    # TypeError on bytes under Python 3.
    value = unicodedata.normalize('NFKD', value) \
                       .encode('ascii', 'ignore').decode('ascii')
    # Raw strings so \w and \s are not treated as (invalid) string escapes.
    value = text(re.sub(r'[^\w\s-]', '', value).strip().lower())
    value = text(re.sub(r'[-\s]+', '-', value))
    if max_filename_length is not None and len(value) > max_filename_length:
        return value[:max_filename_length]
    return value
def app_dir(tmpdir):
    """Fixture setting the app_directory.ini file for each test.
    """
    # Create a trash app_directory.ini file. The global fixture ensure
    # that it cannot be a user file.
    pref_path = os.path.join(ecpy_path(), APP_PREFERENCES, APP_DIR_CONFIG)
    directory = text(tmpdir)

    config = ConfigObj(encoding='utf-8', indent_type=' ')
    config.filename = pref_path
    config['app_path'] = directory
    config.write()

    yield directory

    # Clean up the preference file once the test is done.
    remove(pref_path)
def app_dir(tmpdir):
    """Fixture setting the app_directory.ini file for each test.
    """
    # Create a trash app_directory.ini file. The global fixture ensure
    # that it cannot be a user file.
    app_pref = os.path.join(ecpy_path(), APP_PREFERENCES, APP_DIR_CONFIG)
    app_dir = text(tmpdir)
    # CONSISTENCY/BUGFIX: pass an explicit encoding (as the sibling
    # fixture does) so writing a non-ASCII tmpdir path cannot fail with
    # the platform default encoding.
    conf = ConfigObj(encoding='utf-8', indent_type=' ')
    conf.filename = app_pref
    conf['app_path'] = app_dir
    conf.write()
    yield app_dir
    remove(app_pref)
def export_job():
    """Flask endpoint: download the serialized job named in the
    'job_name' query argument as a JSON attachment.

    Aborts with 400 when the argument is missing or invalid.
    """
    from io import BytesIO

    args = dict(request.args)
    if not validate_dict(args, required=['job_name'], job_name=str):
        abort(400)
    job = dagobah.get_job(args['job_name'])
    # BUGFIX: send_file must be given a binary stream -- WSGI sends
    # bytes, so a text StringIO fails under Python 3. Encode the JSON
    # payload explicitly instead.
    payload = u'%s\n' % text(json.dumps(job._serialize(strict_json=True)))
    to_send = BytesIO(payload.encode('utf-8'))
    return send_file(to_send,
                     attachment_filename='%s.json' % job.name,
                     as_attachment=True)
def check(self, *args, **kwargs):
    """Check that the default path is a valid directory.

    Also pushes the default path and measure infos into the database
    before delegating to the parent class check.
    """
    errors = {}
    valid = os.path.isdir(self.default_path)
    if not valid:
        errors[self.path + '/' + self.name] =\
            'The provided default path is not a valid directory'

    # Publish current values whether or not the path is valid.
    self.write_in_database('default_path', self.default_path)
    self.write_in_database('meas_name', self.meas_name)
    self.write_in_database('meas_id', self.meas_id)
    self.write_in_database('meas_date', text(date.today()))

    parent_res = super(RootTask, self).check(*args, **kwargs)
    errors.update(parent_res[1])

    return valid and parent_res[0], errors
def test_enqueueing_and_reenqueueing_measure(workspace, monkeypatch, tmpdir):
    """Test enqueue a measure and re-enqueue it.
    """
    measure = workspace.plugin.edited_measures.measures[0]
    measure.root_task.default_path = text(tmpdir)
    from ecpy.measure.workspace.workspace import os
    measure.add_tool('pre-hook', 'dummy')
    monkeypatch.setattr(Flags, 'RUNTIME2_UNAVAILABLE', True)

    # Fail remove temp file. Seen in coverage.
    def fail_remove(f):
        raise OSError()

    monkeypatch.setattr(os, 'remove', fail_remove)
    old_path = measure.path

    assert workspace.enqueue_measure(measure)

    # Make sure we do not alter the saving path
    assert measure.path == old_path

    # Check dependencies are cleaned up
    assert_dependencies_released(workspace, measure)

    # Check enqueued, status
    assert workspace.plugin.enqueued_measures.measures
    queued = workspace.plugin.enqueued_measures.measures[0]
    assert queued.status == 'READY'

    # Test re-enqueuing
    queued.status = 'COMPLETED'
    from ecpy.measure.measure import Measure

    def fake_edition(m):
        m.name = 'R'

    monkeypatch.setattr(Measure, 'enter_edition_state', fake_edition)
    workspace.reenqueue_measure(queued)
    assert queued.name == 'R'
    assert queued.status == 'READY'
def test_enqueueing_and_reenqueueing_measure(workspace, monkeypatch, tmpdir):
    """Test enqueue a measure and re-enqueue it.
    """
    measure = workspace.plugin.edited_measures.measures[0]
    measure.root_task.default_path = text(tmpdir)
    from ecpy.measure.workspace.workspace import os
    measure.add_tool('pre-hook', 'dummy')
    monkeypatch.setattr(Flags, 'RUNTIME2_UNAVAILABLE', True)

    # Fail remove temp file. Seen in coverage.
    def fail_remove(f):
        raise OSError()

    monkeypatch.setattr(os, 'remove', fail_remove)

    assert workspace.enqueue_measure(measure)

    # Make sure runtimes are always released.
    manifest = workspace.plugin.workbench.get_manifest('test.measure')
    assert not manifest.find('runtime_dummy1').collected
    assert not manifest.find('runtime_dummy2').collected

    # Check enqueued, status
    assert workspace.plugin.enqueued_measures.measures
    queued = workspace.plugin.enqueued_measures.measures[0]
    assert queued.status == 'READY'

    # Test re-enqueuing
    queued.status = 'COMPLETED'
    from ecpy.measure.measure import Measure

    def fake_edition(m):
        m.name = 'R'

    monkeypatch.setattr(Measure, 'enter_edition_state', fake_edition)
    workspace.reenqueue_measure(queued)
    assert queued.name == 'R'
    assert queued.status == 'READY'
def run(self):
    """
    Implements the directive.

    Reads the directive options (file, selection, sheet, header, widths),
    builds the cell data with ExcelTable and returns a docutils table
    node (plus any title/system messages).
    """
    # Get content and options
    file_path = self.options.get('file', None)
    selection = self.options.get('selection', 'A1:')
    sheet = self.options.get('sheet', '0')
    header = self.options.get('header', '0')
    col_widths = self.options.get('widths', None)

    # Divide the selection into from and to values
    if u':' not in selection:
        selection += u':'
    fromcell, tocell = selection.split(u':')

    if not fromcell:
        fromcell = u'A1'

    if not tocell:
        tocell = None

    #print selection, fromcell, tocell

    if not file_path:
        return [self._report(u'file_path -option missing')]

    # Header option: a numeric value means "use N rows as header".
    header_rows = 0
    if header and header.isdigit():
        header_rows = int(header)

    # Transform the path suitable for processing
    file_path = self._get_directive_path(file_path)

    print(u'file path: {0}'.format(file_path))

    # NOTE(review): the file handle passed to ExcelTable is never
    # explicitly closed; confirm whether ExcelTable takes ownership.
    #try:
    et = ExcelTable(open(file_path))
    table = et.create_table(fromcell=fromcell, tocell=tocell,
                            nheader=header_rows, sheet=sheet)
    #except Exception as e:
    #raise e.with_traceback()
    #return [msgr.error(u'Error occured while creating table: %s' % e)]
    #pass

    #print table

    title, messages = self.make_title()
    #node = nodes.Element() # anonymous container for parsing
    #self.state.nested_parse(self.content, self.content_offset, node)

    # If empty table is created
    if not table:
        self._report('The table generated from queries is empty')
        return [nodes.paragraph(text='')]

    try:
        table_data = []

        # If there is header defined, set the header-rows param and
        # append the data in row => build_table_from_list handles the
        # header generation
        if header and not header.isdigit():
            # Otherwise expect the header to be string with column names
            # defined in it, separating the values with comma
            header_rows = 1
            table_data.append([
                nodes.paragraph(text=hcell.strip())
                for hcell in header.split(',')
            ])

        # Put the given data in rst elements: paragraph
        for row in table['headers']:
            table_data.append(
                [nodes.paragraph(text=cell['value']) for cell in row])

        # Iterates rows: put the given data in rst elements
        for row in table['rows']:
            row_data = []
            for cell in row:
                class_data = ['']
                # Node based on formatting rules
                # NOTE: rst does not support nested, use class attribute instead
                if cell['italic']:
                    class_data.append('italic')
                if cell['bold']:
                    node = nodes.strong(text=cell['value'])
                else:
                    node = nodes.paragraph(text=cell['value'])

                # Add additional formatting as class attributes
                node['classes'] = class_data
                row_data.append([node])

                # FIXME: style attribute does not get into writer
                if cell['bgcolor']:
                    rgb = [text(val) for val in cell['bgcolor']]
                    node.attributes[
                        'style'] = 'background-color: rgb(%s);' % ','.join(
                            rgb)

                #print node
            table_data.append(row_data)

        # If there is no data at this point, throw an error
        if not table_data:
            return [msgr.error('Selection did not return any data')]

        # Get params from data
        num_cols = len(table_data[0])

        # Get the widths for the columns:
        # 1. Use provided info, if available
        # 2. Use widths from the excelsheet
        # 3. Use default widths (equal to all)
        #
        # Get content widths from the first row of the table
        # if it fails, calculate default column widths
        if col_widths:
            col_widths = [int(width) for width in col_widths.split(',')]
        else:
            col_widths = [int(col['width']) for col in table['rows'][0]]
            col_width_total = sum(col_widths)
            # Normalize the sheet widths into percentages.
            col_widths = [
                int(width * 100 / col_width_total) for width in col_widths
            ]

        # If still empty for some reason, use default widths
        if not col_widths:
            col_widths = self.get_column_widths(num_cols)

        stub_columns = 0

        # Sanity checks
        # Different amount of cells in first and second row (possibly header and 1 row)
        if type(header) is not int:
            if len(table_data) > 1 and len(table_data[0]) != len(
                    table_data[1]):
                error = msgr.error(
                    'Data amount mismatch: check the directive data and params'
                )
                return [error]

        self.check_table_dimensions(table_data, header_rows, stub_columns)

    except SystemMessagePropagation as detail:
        return [detail.args[0]]

    # Generate the table node from the given list of elements
    table_node = self.build_table_from_list(table_data, col_widths,
                                            header_rows, stub_columns)

    # Optional class parameter
    table_node['classes'] += self.options.get('class', [])

    if title:
        table_node.insert(0, title)

    #print table_node
    return [table_node] + messages
def get(*args, **kwargs):
    # Stub for a file-dialog getter: always answer with the path `f`
    # taken from the enclosing scope, converted to text.
    # NOTE(review): sibling stubs of this shape are decorated with
    # @classmethod before being monkeypatched onto FileDialogEx --
    # confirm whether this one is missing the decorator.
    return text(f)
def test_creating_saving_loading_measure(workspace, monkeypatch, tmpdir):
    """Test creating, saving, loading a measure.

    Walks through the whole save/load round-trip, including empty dialog
    answers, save-as, save errors and load errors.
    """
    workspace.new_measure()
    measure = workspace.plugin.edited_measures.measures[-1]
    assert len(workspace.plugin.edited_measures.measures) == 2
    assert measure.monitors

    d = tmpdir.mkdir('measure_save')
    f = d.join('test')

    from ecpy.measure.workspace.workspace import FileDialogEx

    # Test handling an empty answer.
    @classmethod
    def get(*args, **kwargs):
        pass
    monkeypatch.setattr(FileDialogEx, 'get_save_file_name', get)
    workspace.save_measure(measure)

    # Nothing should have been written when the dialog is cancelled.
    assert not d.listdir()

    # Test saving.
    @classmethod
    def get(*args, **kwargs):
        return text(f)
    monkeypatch.setattr(FileDialogEx, 'get_save_file_name', get)
    workspace.save_measure(measure)
    # NOTE(review): saving appears to be asynchronous, hence the sleep;
    # confirm whether a proper synchronisation point exists.
    sleep(0.1)
    f += '.meas.ini'
    assert f in d.listdir()
    f.remove()

    # Test saving on previously used file.
    workspace.save_measure(measure)
    assert f in d.listdir()

    f = d.join('test2.meas.ini')

    # Test saving as in a new file.
    @classmethod
    def get(*args, **kwargs):
        return text(f)
    monkeypatch.setattr(FileDialogEx, 'get_save_file_name', get)
    workspace.save_measure(measure, False)
    assert f in d.listdir()

    # Test handling error in saving.
    from ecpy.measure.measure import Measure

    def r(s, m):
        raise Exception()
    monkeypatch.setattr(Measure, 'save', r)
    with handle_dialog():
        workspace.save_measure(measure)

    # Test loading and dialog reject.
    @classmethod
    def get(*args, **kwargs):
        pass
    monkeypatch.setattr(FileDialogEx, 'get_open_file_name', get)
    assert workspace.load_measure('file') is None

    # Test loading measure.
    @classmethod
    def get(*args, **kwargs):
        return text(f)
    monkeypatch.setattr(FileDialogEx, 'get_open_file_name', get)
    workspace.load_measure('file')

    assert len(workspace.plugin.edited_measures.measures) == 3
    m = workspace.plugin.edited_measures.measures[2]
    assert m.path == text(f)

    # Test handling loading error.
    @classmethod
    def r(cls, measure_plugin, path, build_dep=None):
        return None, {'r': 't'}
    monkeypatch.setattr(Measure, 'load', r)
    with handle_dialog(custom=lambda dial: dial.maximize()):
        workspace.load_measure('file')

    # Template loading is not implemented yet.
    with pytest.raises(NotImplementedError):
        workspace.load_measure('template')
def test_workspace_lifecycle(workspace, tmpdir):
    """Test the workspace life cycle.

    Covers UI creation, log/engine handling, measure creation and
    enqueueing, workspace stop, restart (dock-item restoration) and a
    second stop with a monitors window open.
    """
    process_app_events()

    workbench = workspace.plugin.workbench
    log = workbench.get_plugin('ecpy.app.logging')

    # Check UI creation
    assert workspace._selection_tracker._thread
    assert workspace.last_selected_measure
    assert workspace.content
    assert workspace.dock_area
    assert workbench.get_manifest('ecpy.measure.workspace.menus')

    # Check log handling
    assert 'ecpy.measure.workspace' in log.handler_ids

    # Check engine handling
    engine = workbench.get_manifest('test.measure').find('dummy_engine')
    assert engine.workspace_contributing

    # Check measure creation
    assert len(workspace.plugin.edited_measures.measures) == 1
    assert workspace.plugin.edited_measures.measures[0].monitors

    # Create a new measure and enqueue it
    workspace.new_measure()
    process_app_events()
    assert len(workspace.plugin.edited_measures.measures) == 2

    m = workspace.plugin.edited_measures.measures[1]
    m.root_task.default_path = text(tmpdir)
    assert workspace.enqueue_measure(m)
    process_app_events()

    # Create a tool edition window
    for d in workspace.dock_area.dock_items():
        if d.name == 'meas_0':
            edition_view = d
    ed = edition_view.dock_widget().widgets()[0]
    # NOTE(review): widgets()[4] is assumed to be the tools button --
    # confirm against the edition view definition.
    btn = ed.widgets()[4]
    btn.clicked = True
    process_app_events()

    # Check observance of engine selection.
    workspace.plugin.selected_engine = ''
    assert not engine.workspace_contributing
    workspace.plugin.selected_engine = 'dummy'
    process_app_events()
    assert engine.workspace_contributing

    # Test stopping the workspace
    core = workbench.get_plugin('enaml.workbench.core')
    cmd = 'enaml.workbench.ui.close_workspace'
    core.invoke_command(cmd, {'workspace': 'ecpy.measure.workspace'})

    assert workspace.plugin.workspace is None
    assert not engine.workspace_contributing
    assert workbench.get_manifest('ecpy.measure.workspace.menus') is None
    assert 'ecpy.measure.workspace' not in log.handler_ids
    assert not workspace._selection_tracker._thread.is_alive()

    # Test restarting now that we have two edited measure.
    cmd = 'enaml.workbench.ui.select_workspace'
    core.invoke_command(cmd, {'workspace': 'ecpy.measure.workspace'})
    assert len(workspace.plugin.edited_measures.measures) == 2

    # Check that all dock items have been restored.
    names = [d.name for d in workspace.dock_area.dock_items()]
    for n in ('meas_0', 'meas_1', 'meas_0_tools'):
        assert n in names

    # Create a false monitors_window
    workspace.plugin.processor.monitors_window = Window()
    workspace.plugin.processor.monitors_window.show()
    process_app_events()

    # Stop again
    core = workbench.get_plugin('enaml.workbench.core')
    cmd = 'enaml.workbench.ui.close_workspace'
    core.invoke_command(cmd, {'workspace': 'ecpy.measure.workspace'})
    process_app_events()

    # Closing the workspace must hide the monitors window.
    assert not workspace.plugin.processor.monitors_window.visible
def _prepare(self, *msgs):
    """Join the given messages into one space-separated unicode string.

    Each message is coerced to text before joining.
    """
    return u' '.join(text(msg) for msg in msgs)
def run(self):
    """
    Implements the directive.

    Reads the directive options (file, selection, sheet, header, widths),
    builds the cell data with ExcelTable and returns a docutils table
    node (plus any title/system messages).
    """
    # Get content and options
    file_path = self.options.get('file', None)
    selection = self.options.get('selection', 'A1:')
    sheet = self.options.get('sheet', '0')
    header = self.options.get('header', '0')
    col_widths = self.options.get('widths', None)

    # Divide the selection into from and to values
    if u':' not in selection:
        selection += u':'
    fromcell, tocell = selection.split(u':')

    if not fromcell:
        fromcell = u'A1'

    if not tocell:
        tocell = None

    #print selection, fromcell, tocell

    if not file_path:
        return [self._report(u'file_path -option missing')]

    # Header option: a numeric value means "use N rows as header".
    header_rows = 0
    if header and header.isdigit():
        header_rows = int(header)

    # Transform the path suitable for processing
    file_path = self._get_directive_path(file_path)

    print(u'file path: {0}'.format(file_path))

    # NOTE(review): the file handle passed to ExcelTable is never
    # explicitly closed; confirm whether ExcelTable takes ownership.
    #try:
    et = ExcelTable(open(file_path))
    table = et.create_table(fromcell=fromcell, tocell=tocell,
                            nheader=header_rows, sheet=sheet)
    #except Exception as e:
    #raise e.with_traceback()
    #return [msgr.error(u'Error occured while creating table: %s' % e)]
    #pass

    #print table

    title, messages = self.make_title()
    #node = nodes.Element() # anonymous container for parsing
    #self.state.nested_parse(self.content, self.content_offset, node)

    # If empty table is created
    if not table:
        self._report('The table generated from queries is empty')
        return [nodes.paragraph(text='')]

    try:
        table_data = []

        # If there is header defined, set the header-rows param and
        # append the data in row => build_table_from_list handles the
        # header generation
        if header and not header.isdigit():
            # Otherwise expect the header to be string with column names
            # defined in it, separating the values with comma
            header_rows = 1
            table_data.append([nodes.paragraph(text=hcell.strip())
                               for hcell in header.split(',')])

        # Put the given data in rst elements: paragraph
        for row in table['headers']:
            table_data.append([nodes.paragraph(text=cell['value'])
                               for cell in row])

        # Iterates rows: put the given data in rst elements
        for row in table['rows']:
            row_data = []
            for cell in row:
                class_data = ['']
                # Node based on formatting rules
                # NOTE: rst does not support nested, use class attribute instead
                if cell['italic']:
                    class_data.append('italic')
                if cell['bold']:
                    node = nodes.strong(text=cell['value'])
                else:
                    node = nodes.paragraph(text=cell['value'])

                # Add additional formatting as class attributes
                node['classes'] = class_data
                row_data.append([node])

                # FIXME: style attribute does not get into writer
                if cell['bgcolor']:
                    rgb = [text(val) for val in cell['bgcolor']]
                    node.attributes['style'] = 'background-color: rgb(%s);' % ','.join(rgb)

                #print node
            table_data.append(row_data)

        # If there is no data at this point, throw an error
        if not table_data:
            return [msgr.error('Selection did not return any data')]

        # Get params from data
        num_cols = len(table_data[0])

        # Get the widths for the columns:
        # 1. Use provided info, if available
        # 2. Use widths from the excelsheet
        # 3. Use default widths (equal to all)
        #
        # Get content widths from the first row of the table
        # if it fails, calculate default column widths
        if col_widths:
            col_widths = [int(width) for width in col_widths.split(',')]
        else:
            col_widths = [int(col['width']) for col in table['rows'][0]]
            col_width_total = sum(col_widths)
            # Normalize the sheet widths into percentages.
            col_widths = [int(width * 100/ col_width_total) for width in col_widths]

        # If still empty for some reason, use default widths
        if not col_widths:
            col_widths = self.get_column_widths(num_cols)

        stub_columns = 0

        # Sanity checks
        # Different amount of cells in first and second row (possibly header and 1 row)
        if type(header) is not int:
            if len(table_data) > 1 and len(table_data[0]) != len(table_data[1]):
                error = msgr.error('Data amount mismatch: check the directive data and params')
                return [error]

        self.check_table_dimensions(table_data, header_rows, stub_columns)

    except SystemMessagePropagation as detail:
        return [detail.args[0]]

    # Generate the table node from the given list of elements
    table_node = self.build_table_from_list(
        table_data, col_widths, header_rows, stub_columns)

    # Optional class parameter
    table_node['classes'] += self.options.get('class', [])

    if title:
        table_node.insert(0, title)

    #print table_node
    return [table_node] + messages
def get_all_logs_and_info(scenario, outline='', outline_failed=None):
    """Collect scalarizr logs, configs and (on failure) farm/domain/message
    dumps for every server of the current farm after a scenario run.

    :param scenario: lettuce scenario that just finished.
    :param outline: optional outline name used as a path suffix.
    :param outline_failed: truthy when the outline run failed (triggers the
        failure dumps even if ``scenario.failed`` is False).
    """
    # Azure platform is excluded entirely from log collection.
    if CONF.feature.platform.is_azure:
        return
    # Get Farm
    LOG.warning('Get scalarizr logs after scenario %s' % scenario.name)
    farm = getattr(world, 'farm', None)
    if not farm:
        LOG.error("Farm does not exists. Can't get logs. Exit from step.")
        return
    farm.servers.reload()
    # Get servers
    servers = farm.servers
    # Get test
    test_name = scenario.described_at.file.split('/')[-1].split('.')[0]
    LOG.debug('Test name: %s' % test_name)
    # Get path
    start_time = world.test_start_time
    path = (CONF.main.log_path / 'scalarizr' / test_name /
            start_time.strftime('%m%d-%H:%M') /
            scenario.name.replace('/', '-') / outline).resolve()
    LOG.debug('Path to save log: %s' % path)
    if not path.exists():
        path.mkdir(mode=0o755, parents=True)
    # Get logs && configs
    for server in servers:
        if not server.is_scalarized:
            continue
        logs = [
            # debug log
            {'file': str(path / '{}_scalarizr_debug.log'.format(server.id)),
             'log_type': 'debug',
             'compress': True},
            # update log
            {'file': str(path / '{}_scalarizr_update.log'.format(server.id)),
             'log_type': 'update',
             'compress': True}]
        if server.status in [ServerStatus.PENDING, ServerStatus.INIT,
                             ServerStatus.RUNNING]:
            try:
                #Get log from remote host
                for log in logs:
                    server.get_log_by_api(**log)
                    LOG.info('Save {log_type} log from server {server} to {file}'.format(server=server.id, **log))
                #Get configs and role behavior from remote host only for linux family
                if not Dist(server.role.dist).is_windows:
                    file = path / '{}_scalr_configs.tar.gz'.format(server.id)
                    server.get_configs(str(file), compress=True)
                    LOG.info('Download archive with scalr directory and behavior to: {}'.format(file))
            except BaseException as e:
                # Best-effort: a download failure must not abort the
                # collection for the remaining servers.
                LOG.error('Error in downloading configs: %s' % e)
                continue
        if server.status == ServerStatus.RUNNING and not CONF.feature.dist.is_windows:
            node = world.cloud.get_node(server)
            out = node.run("ps aux | grep 'bin/scal'").std_out
            for line in out.splitlines():
                # RSS column of `ps aux`; values longer than 3 digits are
                # reduced from KB to MB by dropping the last 3 digits.
                ram = line.split()[5]
                if len(ram) > 3:
                    ram = '%sMB' % ram[:-3]
                if 'bin/scalr-upd-client' in line:
                    LOG.info('Server %s use %s RAM for update-client' % (server.id, ram))
                    world.wrt(etree.Element('meta', name='szrupdateram', value=ram, serverid=server.id))
                elif 'bin/scalarizr' in line:
                    LOG.info('Server %s use %s RAM for scalarizr' % (server.id, ram))
                    world.wrt(etree.Element('meta', name='szrram', value=ram, serverid=server.id))
    # Save farm, domains and messages info if scenario has failed
    if scenario.failed or outline_failed:
        domains = None
        try:
            domains = IMPL.domain.list(farm_id=farm.id)
        except Exception as e:
            # Permission errors are expected for some accounts; anything
            # else is re-raised.
            if not 'You do not have permission to view this component' in str(e):
                raise
        LOG.warning("Get farm settings after test failure")
        farm_settings = IMPL.farm.get_settings(farm_id=farm.id)
        if servers:
            LOG.warning("Get scalarizr messages for every server after test failure")
            # NOTE(review): bare except silently drops any error while
            # dumping messages -- confirm this best-effort is intended.
            try:
                for server in servers:
                    server.messages.reload()
                    server_messages = []
                    for msg in server.messages:
                        server_messages.append({msg.name: {'message': msg.message,
                                                           'date': str(msg.date),
                                                           'delivered': msg.delivered,
                                                           'status': msg.status,
                                                           'type': msg.type,
                                                           'id': msg.id}})
                    # Save server messages
                    (path / '{}_messages.json'.format(server.id)).write_text(text(json.dumps(server_messages, indent=2)))
            except:
                pass
        # Save farm settings
        (path / 'farm_settings.json').write_text(text(json.dumps(farm_settings, indent=2)))
        # Save domains list
        if domains:
            (path / 'domains.json').write_text(text(json.dumps(domains, indent=2)))