def upgrade_environment(self, db):
    """Create or upgrade the plugin's database tables and default config.

    Called by Trac during environment upgrade.  Creates/updates the schema
    for each realm, seeds the ID counters, creates the root "TC" wiki page
    and, on first upgrade, registers custom ticket fields and the default
    test-outcome configuration.
    """
    # Create or update db: each realm gets its own table set.
    for realm in self.SCHEMA:
        realm_metadata = self.SCHEMA[realm]

        if need_db_create_for_realm(self.env, realm, realm_metadata, db):
            create_db_for_realm(self.env, realm, realm_metadata, db)
        elif need_db_upgrade_for_realm(self.env, realm, realm_metadata, db):
            upgrade_db_for_realm(self.env, 'testmanager.upgrades', realm,
                                 realm_metadata, db)

    # Create default values for configuration properties and initialize
    # counters.  db_insert_or_ignore leaves existing values untouched.
    db_insert_or_ignore(self.env, 'testconfig', 'NEXT_CATALOG_ID', '0')
    db_insert_or_ignore(self.env, 'testconfig', 'NEXT_TESTCASE_ID', '0')
    db_insert_or_ignore(self.env, 'testconfig', 'NEXT_PLAN_ID', '0')

    # Create the basic "TC" Wiki page, used as the root test catalog.
    tc_page = WikiPage(self.env, 'TC')
    if not tc_page.exists:
        tc_page.text = ' '
        tc_page.save('System', '', '127.0.0.1')

    if self._need_upgrade(db):
        # Set custom ticket field to hold related test case.
        custom = self.config['ticket-custom']
        config_dirty = False
        if 'testcaseid' not in custom:
            custom.set('testcaseid', 'text')
            custom.set('testcaseid.label', _("Test Case"))
            config_dirty = True
        if 'planid' not in custom:
            custom.set('planid', 'text')
            custom.set('planid.label', _("Test Plan"))
            config_dirty = True

        # Set config section for test case outcomes, only if absent.
        if 'test-outcomes' not in self.config:
            self.config.set('test-outcomes', 'green.SUCCESSFUL', _("Successful"))
            self.config.set('test-outcomes', 'yellow.TO_BE_TESTED', _("Untested"))
            self.config.set('test-outcomes', 'red.FAILED', _("Failed"))
            self.config.set('test-outcomes', 'default', 'TO_BE_TESTED')
            config_dirty = True

        # Set config section for default visible columns in tabular view.
        if self.config.get('testmanager', 'testcatalog.visible_description') == '':
            self.config.set('testmanager', 'testcatalog.visible_description', 'False')
            config_dirty = True

        # Only touch trac.ini on disk if something actually changed.
        if config_dirty:
            self.config.save()
def _render_settings(self, req, cat, page, component):
    """Render the Test Manager "Settings" admin panel.

    On POST, persists the submitted settings into trac.ini; always
    re-reads the current values to populate the page.  Requires
    TRAC_ADMIN.  Returns the template name and its data dict.
    """
    req.perm.assert_permission('TRAC_ADMIN')

    data = {}
    try:
        if req.method == 'POST':
            default_days_back = req.args.get('default_days_back', '90')
            default_interval = req.args.get('default_interval', '7')
            testplan_sortby = req.args.get('testplan_sortby', 'name')
            open_new_window = req.args.get('open_new_window', 'False')
            testcatalog_default_view = req.args.get('testcatalog_default_view', 'tree')
            testplan_default_view = req.args.get('testplan_default_view', 'tree')

            self.env.config.set('testmanager', 'default_days_back', default_days_back)
            self.env.config.set('testmanager', 'default_interval', default_interval)
            self.env.config.set('testmanager', 'testplan.sortby', testplan_sortby)
            # The checkbox arrives as 'on' when checked; store a bool string.
            self.env.config.set('testmanager', 'testcase.open_new_window',
                                ('False', 'True')[open_new_window == 'on'])
            self.env.config.set('testmanager', 'testcatalog.default_view', testcatalog_default_view)
            self.env.config.set('testmanager', 'testplan.default_view', testplan_default_view)

            _set_columns_visible(self.env, 'testcatalog', req.args, self.env.config)
            _set_columns_visible(self.env, 'testplan', req.args, self.env.config)

            _set_columns_total_operation(self.env, 'testcatalog', req.args, self.env.config)
            _set_columns_total_operation(self.env, 'testplan', req.args, self.env.config)

            self.env.config.save()
            add_notice(req, _("Settings saved"))
    except Exception:
        # Fixed: was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt.  Best-effort behavior is kept: log and warn.
        self.env.log.error(formatExceptionInfo())
        add_warning(req, _("Error saving the settings"))

    # Re-read the (possibly just-saved) values for display.
    data['default_days_back'] = self.env.config.get('testmanager', 'default_days_back', '90')
    data['default_interval'] = self.env.config.get('testmanager', 'default_interval', '7')
    data['testplan_sortby'] = self.env.config.get('testmanager', 'testplan.sortby', 'name')
    data['open_new_window'] = self.env.config.get('testmanager', 'testcase.open_new_window', 'False')
    data['testcatalog_default_view'] = self.env.config.get('testmanager', 'testcatalog.default_view', 'tree')
    data['testplan_default_view'] = self.env.config.get('testmanager', 'testplan.default_view', 'tree')

    testcatalog_columns, foo, bar = get_all_table_columns_for_object(self.env, 'testcatalog', self.env.config)
    testplan_columns, foo, bar = get_all_table_columns_for_object(self.env, 'testplan', self.env.config)

    data['testcatalog_columns'] = testcatalog_columns
    data['testplan_columns'] = testplan_columns

    return 'admin_settings.html', data
def _render_settings(self, req, cat, page, component):
    """Render the Test Manager "Settings" admin panel.

    On POST, persists the submitted settings into trac.ini; always
    re-reads the current values to populate the page.  Requires
    TRAC_ADMIN.  Returns the template name and its data dict.
    """
    req.perm.assert_permission('TRAC_ADMIN')

    data = {}
    try:
        if req.method == 'POST':
            default_days_back = req.args.get('default_days_back', '90')
            default_interval = req.args.get('default_interval', '7')
            testplan_sortby = req.args.get('testplan_sortby', 'custom')
            open_new_window = req.args.get('open_new_window', 'False')
            testcatalog_default_view = req.args.get('testcatalog_default_view', 'tree')
            testplan_default_view = req.args.get('testplan_default_view', 'tree')

            self.env.config.set('testmanager', 'default_days_back', default_days_back)
            self.env.config.set('testmanager', 'default_interval', default_interval)
            self.env.config.set('testmanager', 'testplan.sortby', testplan_sortby)
            # The checkbox arrives as 'on' when checked; store a bool string.
            self.env.config.set('testmanager', 'testcase.open_new_window',
                                ('False', 'True')[open_new_window == 'on'])
            self.env.config.set('testmanager', 'testcatalog.default_view', testcatalog_default_view)
            self.env.config.set('testmanager', 'testplan.default_view', testplan_default_view)

            _set_columns_visible(self.env, 'testcatalog', req.args, self.env.config)
            _set_columns_visible(self.env, 'testplan', req.args, self.env.config)

            _set_columns_total_operation(self.env, 'testcatalog', req.args, self.env.config)
            _set_columns_total_operation(self.env, 'testplan', req.args, self.env.config)

            self.env.config.save()
            add_notice(req, _("Settings saved"))
    except Exception:
        # Fixed: was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt.  Best-effort behavior is kept: log and warn.
        self.env.log.error(formatExceptionInfo())
        add_warning(req, _("Error saving the settings"))

    # Re-read the (possibly just-saved) values for display.
    data['default_days_back'] = self.env.config.get('testmanager', 'default_days_back', '90')
    data['default_interval'] = self.env.config.get('testmanager', 'default_interval', '7')
    data['testplan_sortby'] = self.env.config.get('testmanager', 'testplan.sortby', 'custom')
    data['open_new_window'] = self.env.config.get('testmanager', 'testcase.open_new_window', 'False')
    data['testcatalog_default_view'] = self.env.config.get('testmanager', 'testcatalog.default_view', 'tree')
    data['testplan_default_view'] = self.env.config.get('testmanager', 'testplan.default_view', 'tree')

    testcatalog_columns, foo, bar = get_all_table_columns_for_object(self.env, 'testcatalog', self.env.config)
    testplan_columns, foo, bar = get_all_table_columns_for_object(self.env, 'testplan', self.env.config)

    data['testcatalog_columns'] = testcatalog_columns
    data['testplan_columns'] = testplan_columns

    return 'admin_settings.html', data
def do_upgrade_environment(db):
    """Create/upgrade the schema and seed defaults, committing as it goes.

    NOTE(review): this function takes only ``db`` but references ``self``
    throughout — presumably it is nested inside a method and closes over
    ``self``; confirm against the enclosing definition.
    """
    for realm in self.SCHEMA:
        realm_schema = self.SCHEMA[realm]

        if need_db_create_for_realm(self.env, realm, realm_schema, db):
            create_db_for_realm(self.env, realm, realm_schema, db)
        elif need_db_upgrade_for_realm(self.env, realm, realm_schema, db):
            upgrade_db_for_realm(self.env, 'testmanager.upgrades', realm,
                                 realm_schema, db)

    # Create default values for configuration properties and initialize
    # counters; existing values are left untouched.
    db_insert_or_ignore(self.env, 'testconfig', 'NEXT_CATALOG_ID', '0', db)
    db_insert_or_ignore(self.env, 'testconfig', 'NEXT_TESTCASE_ID', '0', db)
    db_insert_or_ignore(self.env, 'testconfig', 'NEXT_PLAN_ID', '0', db)
    db.commit()

    # Fix templates with a blank id.
    if self._check_blank_id_templates(db):
        self._fix_blank_id_templates(db)
        db.commit()

        self.env.log.info(_("""
            Test Manager templates with blank IDs have been fixed.\n
            Please go to the Administration panel, in the Test Manager Templates section, and check the associations between templates and Test Cases and Test Catalogs.\n
            You will have to manually fix any misconfiguration you should find.
            """))

    # Create the basic "TC" Wiki page, used as the root test catalog.
    tc_page = WikiPage(self.env, 'TC')
    if not tc_page.exists:
        tc_page.text = ' '
        tc_page.save('System', '', '127.0.0.1')
        db.commit()

    if self._need_upgrade(db):
        # Set custom ticket field to hold related test case.
        custom = self.config['ticket-custom']
        config_dirty = False
        if 'testcaseid' not in custom:
            custom.set('testcaseid', 'text')
            custom.set('testcaseid.label', _("Test Case"))
            config_dirty = True
        if 'planid' not in custom:
            custom.set('planid', 'text')
            custom.set('planid.label', _("Test Plan"))
            config_dirty = True

        # Set config section for test case outcomes, only if absent.
        if 'test-outcomes' not in self.config:
            self.config.set('test-outcomes', 'green.SUCCESSFUL', _("Successful"))
            self.config.set('test-outcomes', 'yellow.TO_BE_TESTED', _("Untested"))
            self.config.set('test-outcomes', 'red.FAILED', _("Failed"))
            self.config.set('test-outcomes', 'default', 'TO_BE_TESTED')
            config_dirty = True

        # Only rewrite trac.ini if something actually changed.
        if config_dirty:
            self.config.save()
def process_request(self, req):
    """Render the Test Stats page or serve one of its raw data feeds.

    The ``content`` request argument selects the output: 'piechartdata',
    'chartdata' and 'ticketchartdata' write JSON-like text straight to the
    response; 'downloadcsv' streams a CSV attachment; otherwise the
    'testmanagerstats.html' template is returned with its data dict.
    """
    testmanagersystem = TestManagerSystem(self.env)
    # Map of color key ('green'/'yellow'/'red') -> list of outcome names.
    tc_statuses = testmanagersystem.get_tc_statuses_by_color()

    if 'testmanager' in self.config:
        self.default_days_back = self.config.getint(
            'testmanager', 'default_days_back', TESTMANAGER_DEFAULT_DAYS_BACK)
        self.default_interval = self.config.getint(
            'testmanager', 'default_interval', TESTMANAGER_DEFAULT_INTERVAL)

    req_content = req.args.get('content')

    testplan = None
    catpath = None
    testplan_contains_all = True

    self.env.log.debug("Test Stats - process_request: %s" % req_content)

    # 'testplan' has the form "<planid>|<catpath>", or "__all" for all plans.
    grab_testplan = req.args.get('testplan')
    if grab_testplan and not grab_testplan == "__all":
        testplan = grab_testplan.partition('|')[0]
        catpath = grab_testplan.partition('|')[2]

        tp = TestPlan(self.env, testplan, catpath)
        testplan_contains_all = tp['contains_all']

    today = datetime.today()
    today = today.replace(tzinfo=req.tz) + timedelta(2)

    # Stats start from two years back
    beginning = today - timedelta(720)

    if (not req_content == None) and (req_content == "piechartdata"):
        num_successful = 0
        for tc_outcome in tc_statuses['green']:
            num_successful += self._get_num_tcs_by_status(
                beginning, today, tc_outcome, testplan, req)

        num_failed = 0
        for tc_outcome in tc_statuses['red']:
            num_failed += self._get_num_tcs_by_status(
                beginning, today, tc_outcome, testplan, req)

        num_to_be_tested = 0
        if testplan_contains_all:
            # Untested = all test cases minus successful and failed ones.
            num_to_be_tested = self._get_num_testcases(
                beginning, today, catpath, req) - num_successful - num_failed
        else:
            for tc_outcome in tc_statuses['yellow']:
                num_to_be_tested += self._get_num_tcs_by_status(
                    beginning, today, tc_outcome, testplan, req)

        jsdstr = """
        [
            {"response": "%s", "count": %s},
            {"response": "%s", "count": %s},
            {"response": "%s", "count": %s}
        ]
        """ % (_("Successful"), num_successful,
               _("Failed"), num_failed,
               _("To be tested"), num_to_be_tested)

        jsdstr = jsdstr.strip()

        if isinstance(jsdstr, unicode):
            jsdstr = jsdstr.encode('utf-8')

        req.send_header("Content-Length", len(jsdstr))
        req.write(jsdstr)
        return

    if not None in [req.args.get('end_date'), req.args.get('start_date'),
                    req.args.get('resolution')]:
        # form submit
        grab_at_date = req.args.get('end_date')
        grab_from_date = req.args.get('start_date')
        grab_resolution = req.args.get('resolution')

        # validate inputs
        if None in [grab_at_date, grab_from_date]:
            raise TracError('Please specify a valid range.')

        if None in [grab_resolution]:
            raise TracError('Please specify the graph interval.')

        if 0 in [len(grab_at_date), len(grab_from_date), len(grab_resolution)]:
            raise TracError('Please ensure that all fields have been filled in.')

        if not grab_resolution.isdigit():
            raise TracError('The graph interval field must be an integer, days.')

        at_date = parse_date(grab_at_date, req.tz) + timedelta(2)
        from_date = parse_date(grab_from_date, req.tz)

        graph_res = int(grab_resolution)
    else:
        # default data
        todays_date = datetime.today()
        at_date = todays_date  #+ timedelta(1) # datetime.combine(todays_date,time(23,59,59,0,req.tz))
        at_date = at_date.replace(tzinfo=req.tz) + timedelta(2)
        from_date = at_date - timedelta(self.default_days_back)
        graph_res = self.default_interval

    count = []

    # Calculate 0th point
    last_date = from_date - timedelta(graph_res)

    # Calculate remaining points: one bucket per graph_res-day interval.
    for cur_date in daterange(from_date, at_date, graph_res):
        datestr = format_date(cur_date)
        if graph_res != 1:
            datestr = "%s thru %s" % (format_date(last_date), datestr)

        if (not req_content == None) and (req_content == "ticketchartdata"):
            num_total = self._get_num_tickets_total(
                beginning, cur_date, testplan, req)
            num_closed = self._get_num_tickets_by_status(
                beginning, cur_date, 'closed', testplan, req)
            num_active = num_total - num_closed

            count.append({
                'from_date': format_date(last_date),
                'to_date': datestr,
                'date': datestr,
                'active_tickets': num_active,
                'closed_tickets': num_closed,
                'tot_tickets': num_total
            })
        else:
            # Handling custom test case outcomes here
            num_new = self._get_num_testcases(last_date, cur_date, catpath, req)

            # Per-interval counts (since last_date)...
            num_successful = 0
            for tc_outcome in tc_statuses['green']:
                num_successful += self._get_num_tcs_by_status(
                    last_date, cur_date, tc_outcome, testplan, req)

            num_failed = 0
            for tc_outcome in tc_statuses['red']:
                num_failed += self._get_num_tcs_by_status(
                    last_date, cur_date, tc_outcome, testplan, req)

            # ...and cumulative counts (since from_date).
            num_all_successful = 0
            for tc_outcome in tc_statuses['green']:
                num_all_successful += self._get_num_tcs_by_status(
                    from_date, cur_date, tc_outcome, testplan, req)

            num_all_failed = 0
            for tc_outcome in tc_statuses['red']:
                num_all_failed += self._get_num_tcs_by_status(
                    from_date, cur_date, tc_outcome, testplan, req)

            num_all = 0
            num_all_untested = 0
            if testplan_contains_all:
                num_all = self._get_num_testcases(None, cur_date, catpath, req)
                num_all_untested = num_all - num_all_successful - num_all_failed
            else:
                for tc_outcome in tc_statuses['yellow']:
                    num_all_untested += self._get_num_tcs_by_status(
                        from_date, cur_date, tc_outcome, testplan, req)
                num_all = num_all_untested + num_all_successful + num_all_failed

            count.append({
                'from_date': format_date(last_date),
                'to_date': datestr,
                'date': datestr,
                'new_tcs': num_new,
                'successful': num_successful,
                'failed': num_failed,
                'all_tcs': num_all,
                'all_successful': num_all_successful,
                'all_untested': num_all_untested,
                'all_failed': num_all_failed
            })

        last_date = cur_date

    # if chartdata is requested, raw text is returned rather than data object
    # for templating
    if (not req_content == None) and (req_content == "chartdata"):
        jsdstr = '{"chartdata": [\n'

        for x in count:
            jsdstr += '{"date": "%s",' % x['date']
            jsdstr += ' "new_tcs": %s,' % x['new_tcs']
            jsdstr += ' "successful": %s,' % x['successful']
            jsdstr += ' "failed": %s,' % x['failed']
            jsdstr += ' "all_tcs": %s,' % x['all_tcs']
            jsdstr += ' "all_successful": %s,' % x['all_successful']
            jsdstr += ' "all_untested": %s,' % x['all_untested']
            jsdstr += ' "all_failed": %s},\n' % x['all_failed']

        # Drop the trailing ",\n" before closing the array.
        jsdstr = jsdstr[:-2] + '\n]}'

        if isinstance(jsdstr, unicode):
            jsdstr = jsdstr.encode('utf-8')

        req.send_header("Content-Length", len(jsdstr))
        req.write(jsdstr)
        return
    elif (not req_content == None) and (req_content == "downloadcsv"):
        csvstr = "Date from;Date to;New Test Cases;Successful;Failed;Total Test Cases;Total Successful;Total Untested;Total Failed\r\n"

        for x in count:
            csvstr += '%s;' % x['from_date']
            csvstr += '%s;' % x['to_date']
            csvstr += '%s;' % x['new_tcs']
            csvstr += '%s;' % x['successful']
            csvstr += '%s;' % x['failed']
            csvstr += '%s;' % x['all_tcs']
            csvstr += '%s;' % x['all_successful']
            csvstr += '%s;' % x['all_untested']
            csvstr += '%s\r\n' % x['all_failed']

        if isinstance(csvstr, unicode):
            csvstr = csvstr.encode('utf-8')

        req.send_header("Content-Length", len(csvstr))
        req.send_header("Content-Disposition",
                        "attachment;filename=Test_stats.csv")
        req.write(csvstr)
        return
    elif (not req_content == None) and (req_content == "ticketchartdata"):
        jsdstr = '{"ticketchartdata": [\n'

        for x in count:
            jsdstr += '{"date": "%s",' % x['date']
            jsdstr += ' "tot_tickets": %s,' % x['tot_tickets']
            jsdstr += ' "active_tickets": %s,' % x['active_tickets']
            jsdstr += ' "closed_tickets": %s},\n' % x['closed_tickets']

        jsdstr = jsdstr[:-2] + '\n]}'

        if isinstance(jsdstr, unicode):
            jsdstr = jsdstr.encode('utf-8')

        req.send_header("Content-Length", len(jsdstr))
        req.write(jsdstr)
        return
    else:
        # Normal rendering of first chart
        db = self.env.get_db_cnx()
        showall = req.args.get('show') == 'all'

        testplan_list = []
        for planid, catid, catpath, name, author, ts_str in testmanagersystem.list_all_testplans():
            testplan_list.append({
                'planid': planid,
                'catpath': catpath,
                'name': name
            })

        data = {}
        data['testcase_data'] = count
        data['start_date'] = format_date(from_date)
        data['end_date'] = format_date(at_date)
        data['resolution'] = str(graph_res)
        data['baseurl'] = req.base_url
        data['testplans'] = testplan_list
        data['ctestplan'] = testplan
        return 'testmanagerstats.html', data, None
def get_admin_panels(self, req):
    """Yield the admin panel entries contributed by the Test Manager plugin.

    Only administrators see the panels; non-admins get an empty iterator.
    """
    if not req.perm.has_permission('TRAC_ADMIN'):
        return
    yield ('testmanager', 'Test Manager', 'settings', _("Settings"))
    yield ('testmanager', 'Test Manager', 'templates', _("Templates"))
def get_all_table_columns_for_object(env, objtype, settings):
    """Build the column descriptors for the tabular view of *objtype*.

    :param env: the Trac environment
    :param objtype: 'testcatalog' or 'testplan'
    :param settings: the configuration holding visibility/totals options
    :return: a ``(result, result_map, custom_ctx)`` triple — the ordered
        list of column dicts, the same columns keyed by name, and a
        per-realm map of ``[has_custom_fields, fields]``.
    """
    genericClassModelProvider = GenericClassModelProvider(env)

    tcat_fields = genericClassModelProvider.get_custom_fields_for_realm('testcatalog')
    tcat_has_custom = tcat_fields is not None and len(tcat_fields) > 0

    tc_fields = genericClassModelProvider.get_custom_fields_for_realm('testcase')
    tc_has_custom = tc_fields is not None and len(tc_fields) > 0

    if objtype == 'testplan':
        tcip_fields = genericClassModelProvider.get_custom_fields_for_realm('testcaseinplan')
        tcip_has_custom = tcip_fields is not None and len(tcip_fields) > 0
    else:
        # Fixed: the two sentinels were swapped (fields=False, flag=None).
        # Keep the flag a bool and the field list None, consistent with the
        # other realms.  Both values are falsy, so behavior is unchanged.
        tcip_fields = None
        tcip_has_custom = False

    custom_ctx = {
        'testcatalog': [tcat_has_custom, tcat_fields],
        'testcase': [tc_has_custom, tc_fields],
        'testcaseinplan': [tcip_has_custom, tcip_fields]
        }

    result = []
    result_map = {}

    # Common columns
    result.append({'name': 'title', 'label': _("Name"),
                   'visible': _is_column_visible(objtype, 'title', settings),
                   'totals': _get_column_total_operation(objtype, 'title', settings)})

    # Custom testcatalog columns (only 'text' fields are shown)
    if tcat_has_custom:
        for f in tcat_fields:
            if f['type'] == 'text':
                result.append(_get_column_settings(objtype, f, settings))

    # Base testcase columns
    result.append({'name': 'id', 'label': _("ID"),
                   'visible': _is_column_visible(objtype, 'id', settings),
                   'totals': _get_column_total_operation(objtype, 'id', settings)})

    # Custom testcase columns
    if tc_has_custom:
        for f in tc_fields:
            if f['type'] == 'text':
                result.append(_get_column_settings(objtype, f, settings))

    if objtype == 'testplan':
        # Base testcaseinplan columns
        result.append({'name': 'status', 'label': _("Status"),
                       'visible': _is_column_visible(objtype, 'status', settings),
                       'totals': _get_column_total_operation(objtype, 'status', settings)})
        result.append({'name': 'author', 'label': _("Author"),
                       'visible': _is_column_visible(objtype, 'author', settings),
                       'totals': _get_column_total_operation(objtype, 'author', settings)})
        result.append({'name': 'time', 'label': _("Last Change"),
                       'visible': _is_column_visible(objtype, 'time', settings),
                       'totals': _get_column_total_operation(objtype, 'time', settings)})

        # Custom testcaseinplan columns
        if tcip_has_custom:
            for f in tcip_fields:
                if f['type'] == 'text':
                    result.append(_get_column_settings(objtype, f, settings))

    # Full test case description
    result.append({'name': 'description', 'label': _("Description"),
                   'visible': _is_column_visible(objtype, 'description', settings),
                   'totals': _get_column_total_operation(objtype, 'description', settings)})

    for r in result:
        result_map[r['name']] = r

    return result, result_map, custom_ctx
def _render_templates(self, req, cat, page, component):
    """Render the Test Manager "Templates" admin panel.

    Handles listing, adding, deleting, previewing, editing and saving of
    Test Case / Test Catalog templates, plus the default-template
    associations.  Requires TRAC_ADMIN.  Returns the template name and
    its data dict.
    """
    req.perm.assert_permission('TRAC_ADMIN')

    for key, value in req.args.items():
        self.env.log.debug("Key: %s, Value: %s", key, value)

    testmanagersystem = TestManagerSystem(self.env)

    context = Context.from_request(req)

    # Default page state: overview list, no editor open.
    data = {}
    data['template_overview'] = True
    data['edit_template'] = False

    data['tc_templates'] = testmanagersystem.get_templates(testmanagersystem.TEMPLATE_TYPE_TESTCASE)
    data['tcat_templates'] = testmanagersystem.get_templates(testmanagersystem.TEMPLATE_TYPE_TESTCATALOG)
    data['tcat_list'] = testmanagersystem.get_testcatalogs()
    data['tcat_selected'] = testmanagersystem.get_default_tcat_template_id()

    if req.method == 'POST':
        # add a Test Case template?
        if req.args.get('tc_add'):
            tc_name = req.args.get('tc_add_name')
            self.env.log.debug("Add new TC-template: %s" % tc_name)
            if len(tc_name) > 0:
                if testmanagersystem.template_exists(tc_name, testmanagersystem.TEMPLATE_TYPE_TESTCASE):
                    data['tc_add_name'] = tc_name
                    add_warning(req, _("A Test Case template with that name already exists"))
                else:
                    # Switch the page into edit mode for the new template.
                    data['template_overview'] = False
                    data['edit_template'] = True
                    data['t_edit_type'] = testmanagersystem.TEMPLATE_TYPE_TESTCASE
                    data['t_edit_name'] = tc_name
                    data['t_edit_action'] = 'ADD'
            else:
                add_warning(req, _("Please enter a Template name first"))

        # add a Test Catalog template?
        if req.args.get('tcat_add'):
            tcat_name = req.args.get('tcat_add_name')
            self.env.log.debug("Add new TCat-template: %s" % tcat_name)
            if len(tcat_name) > 0:
                if testmanagersystem.template_exists(tcat_name, testmanagersystem.TEMPLATE_TYPE_TESTCATALOG):
                    data['tcat_add_name'] = tcat_name
                    add_warning(req, _("A Test Catalog template with that name already exists"))
                else:
                    data['template_overview'] = False
                    data['edit_template'] = True
                    data['t_edit_type'] = testmanagersystem.TEMPLATE_TYPE_TESTCATALOG
                    data['t_edit_name'] = tcat_name
                    data['t_edit_action'] = 'ADD'
            else:
                add_warning(req, _("Please enter a Template name first"))

        # delete a Test Case template?
        if req.args.get('tc_del'):
            tc_sel = req.args.get('tc_sel')
            for t_id in tc_sel:
                t = testmanagersystem.get_template_by_id(t_id)
                # Templates still referenced by a catalog must not be removed.
                if testmanagersystem.template_in_use(t_id):
                    add_warning(req, _("Template '%s' not removed as it is in use for a Test Catalog") % t['name'])
                    continue
                self.env.log.debug("remove test case template with id: " + t_id)
                if not testmanagersystem.remove_template(t_id):
                    add_warning(req, _("Error deleting Test Case template '%s'") % t['name'])
                else:
                    add_notice(req, _("Test Case template '%s' deleted") % t['name'])
            # Refresh the lists after the deletions.
            data['tc_templates'] = testmanagersystem.get_templates(testmanagersystem.TEMPLATE_TYPE_TESTCASE)
            data['tcat_templates'] = testmanagersystem.get_templates(testmanagersystem.TEMPLATE_TYPE_TESTCATALOG)

        # delete a Test Catalog template?
        if req.args.get('tcat_del'):
            tcat_sel = req.args.get('tcat_sel')
            tcat_default = testmanagersystem.get_default_tcat_template_id()
            for t_id in tcat_sel:
                t = testmanagersystem.get_template_by_id(t_id)
                # The current default template cannot be deleted.
                if t_id == tcat_default:
                    add_warning(req, _("Template '%s' not removed as it is currently the default template") % t['name'])
                    continue
                self.env.log.debug("remove test catalog template with id: " + t_id)
                if not testmanagersystem.remove_template(t_id):
                    add_warning(req, _("Error deleting Test Catalog template '%s'") % t['name'])
                else:
                    add_notice(req, _("Test Catalog template '%s' deleted") % t['name'])
            data['tc_templates'] = testmanagersystem.get_templates(testmanagersystem.TEMPLATE_TYPE_TESTCASE)
            data['tcat_templates'] = testmanagersystem.get_templates(testmanagersystem.TEMPLATE_TYPE_TESTCATALOG)

        # save default Test Catalog template
        if req.args.get('tcat_default_save'):
            tcat_default = req.args.get('tcat_default')
            if testmanagersystem.set_config_property('TEST_CATALOG_DEFAULT_TEMPLATE', tcat_default):
                add_notice(req, _("Default Test Catalog template updated"))
                data['tcat_selected'] = tcat_default
            else:
                add_warning(req, _("Failed to update default Test Catalog template"))

        # save templates for TestCatalogs
        if req.args.get('tc_templates_save'):
            warning = False
            for key, value in req.args.items():
                self.env.log.debug("checking key: " + key)
                # Per-catalog associations arrive as TC_TEMPLATE_FOR_TCAT_* keys.
                if 'TC_TEMPLATE_FOR_TCAT_' in key:
                    self.env.log.debug("saving tc-template for: %s, value: %s" % (key, value))
                    if not testmanagersystem.set_config_property(key, value):
                        warning = True
            if warning:
                add_warning(req, _("Failed to update Test Case templates"))
            else:
                add_notice(req, _("Default Test Case templates updated"))
            data['tcat_list'] = testmanagersystem.get_testcatalogs()

        # preview template
        if req.args.get('t_edit_preview'):
            data['template_overview'] = False
            data['edit_template'] = True
            data['t_edit_id'] = req.args.get('t_edit_id')
            data['t_edit_type'] = req.args.get('t_edit_type')
            data['t_edit_name'] = req.args.get('t_edit_name')
            data['t_edit_description'] = req.args.get('t_edit_description')
            data['t_edit_content'] = req.args.get('t_edit_content')
            data['t_edit_action'] = req.args.get('t_edit_action')
            data['t_show_preview'] = True
            # Render the wiki-formatted template body for the preview pane.
            data['t_preview_content'] = format_to_html(self.env, context, req.args.get('t_edit_content'))

        # save an edited template?
        if req.args.get('t_edit_save'):
            t_id = req.args.get('t_edit_id')
            t_type = req.args.get('t_edit_type')
            t_name = req.args.get('t_edit_name')
            t_desc = req.args.get('t_edit_description')
            t_cont = req.args.get('t_edit_content')
            t_action = req.args.get('t_edit_action')

            testmanagersystem.save_template(t_id, t_name, t_type, t_desc, t_cont, t_action)

            # Back to the overview with refreshed lists.
            data['template_overview'] = True
            data['edit_template'] = False
            data['tc_templates'] = testmanagersystem.get_templates(testmanagersystem.TEMPLATE_TYPE_TESTCASE)
            data['tcat_templates'] = testmanagersystem.get_templates(testmanagersystem.TEMPLATE_TYPE_TESTCATALOG)
            add_notice(req, _("Template saved"))
    else:
        # method 'GET' (template selected for 'edit')
        if component:
            t_type = req.args.get('t_type')
            t_id = component
            self.env.log.debug("component: " + component)
            template = testmanagersystem.get_template_by_id(t_id)
            data['t_edit_id'] = template['id']
            data['t_edit_type'] = template['type']
            data['t_edit_name'] = template['name']
            data['t_edit_description'] = template['description']
            data['t_edit_content'] = template['content']
            data['t_edit_action'] = 'EDIT'
            data['template_overview'] = False
            data['edit_template'] = True

    add_stylesheet(req, 'common/css/wiki.css')
    add_stylesheet(req, 'testmanager/css/admin.css')
    return 'admin_templates.html', data
def process_request(self, req):
    """Render the Test Stats page or serve one of its raw data feeds.

    The ``content`` request argument selects the output: 'piechartdata',
    'chartdata' and 'ticketchartdata' write JSON-like text straight to the
    response; 'downloadcsv' streams a CSV attachment; otherwise the
    'testmanagerstats.html' template is returned with its data dict.
    """
    testmanagersystem = TestManagerSystem(self.env)
    # Map of color key ('green'/'yellow'/'red') -> list of outcome names.
    tc_statuses = testmanagersystem.get_tc_statuses_by_color()

    if 'testmanager' in self.config:
        self.default_days_back = self.config.getint('testmanager', 'default_days_back', TESTMANAGER_DEFAULT_DAYS_BACK)
        self.default_interval = self.config.getint('testmanager', 'default_interval', TESTMANAGER_DEFAULT_INTERVAL)

    req_content = req.args.get('content')

    testplan = None
    catpath = None
    testplan_contains_all = True

    self.env.log.debug("Test Stats - process_request: %s" % req_content)

    # 'testplan' has the form "<planid>|<catpath>", or "__all" for all plans.
    grab_testplan = req.args.get('testplan')
    if grab_testplan and not grab_testplan == "__all":
        testplan = grab_testplan.partition('|')[0]
        catpath = grab_testplan.partition('|')[2]

        tp = TestPlan(self.env, testplan, catpath)
        testplan_contains_all = tp['contains_all']

    today = datetime.today()
    today = today.replace(tzinfo = req.tz)+timedelta(2)

    # Stats start from two years back
    beginning = today - timedelta(720)

    if (not req_content == None) and (req_content == "piechartdata"):
        num_successful = 0
        for tc_outcome in tc_statuses['green']:
            num_successful += self._get_num_tcs_by_status(beginning, today, tc_outcome, testplan, req)

        num_failed = 0
        for tc_outcome in tc_statuses['red']:
            num_failed += self._get_num_tcs_by_status(beginning, today, tc_outcome, testplan, req)

        num_to_be_tested = 0
        if testplan_contains_all:
            # Untested = all test cases minus successful and failed ones.
            num_to_be_tested = self._get_num_testcases(beginning, today, catpath, req) - num_successful - num_failed
        else:
            for tc_outcome in tc_statuses['yellow']:
                num_to_be_tested += self._get_num_tcs_by_status(beginning, today, tc_outcome, testplan, req)

        jsdstr = """
        [
            {"response": "%s", "count": %s},
            {"response": "%s", "count": %s},
            {"response": "%s", "count": %s}
        ]
        """ % (_("Successful"), num_successful,
               _("Failed"), num_failed,
               _("To be tested"), num_to_be_tested)

        jsdstr = jsdstr.strip()

        if isinstance(jsdstr, unicode):
            jsdstr = jsdstr.encode('utf-8')

        req.send_header("Content-Length", len(jsdstr))
        req.write(jsdstr)
        return

    if not None in [req.args.get('end_date'), req.args.get('start_date'), req.args.get('resolution')]:
        # form submit
        grab_at_date = req.args.get('end_date')
        grab_from_date = req.args.get('start_date')
        grab_resolution = req.args.get('resolution')

        # validate inputs
        if None in [grab_at_date, grab_from_date]:
            raise TracError('Please specify a valid range.')

        if None in [grab_resolution]:
            raise TracError('Please specify the graph interval.')

        if 0 in [len(grab_at_date), len(grab_from_date), len(grab_resolution)]:
            raise TracError('Please ensure that all fields have been filled in.')

        if not grab_resolution.isdigit():
            raise TracError('The graph interval field must be an integer, days.')

        at_date = parse_date(grab_at_date, req.tz)+timedelta(2)
        from_date = parse_date(grab_from_date, req.tz)

        graph_res = int(grab_resolution)
    else:
        # default data
        todays_date = datetime.today()
        at_date = todays_date  #+ timedelta(1) # datetime.combine(todays_date,time(23,59,59,0,req.tz))
        at_date = at_date.replace(tzinfo = req.tz)+timedelta(2)
        from_date = at_date - timedelta(self.default_days_back)
        graph_res = self.default_interval

    count = []

    # Calculate 0th point
    last_date = from_date - timedelta(graph_res)

    # Calculate remaining points: one bucket per graph_res-day interval.
    for cur_date in daterange(from_date, at_date, graph_res):
        datestr = format_date(cur_date)
        if graph_res != 1:
            datestr = "%s thru %s" % (format_date(last_date), datestr)

        if (not req_content == None) and (req_content == "ticketchartdata"):
            num_total = self._get_num_tickets_total(beginning, cur_date, testplan, req)
            num_closed = self._get_num_tickets_by_status(beginning, cur_date, 'closed', testplan, req)
            num_active = num_total - num_closed

            count.append( {'from_date': format_date(last_date),
                           'to_date': datestr,
                           'date' : datestr,
                           'active_tickets' : num_active,
                           'closed_tickets': num_closed,
                           'tot_tickets' : num_total} )
        else:
            # Handling custom test case outcomes here
            num_new = self._get_num_testcases(last_date, cur_date, catpath, req)

            # Per-interval counts (since last_date)...
            num_successful = 0
            for tc_outcome in tc_statuses['green']:
                num_successful += self._get_num_tcs_by_status(last_date, cur_date, tc_outcome, testplan, req)

            num_failed = 0
            for tc_outcome in tc_statuses['red']:
                num_failed += self._get_num_tcs_by_status(last_date, cur_date, tc_outcome, testplan, req)

            # ...and cumulative counts (since from_date).
            num_all_successful = 0
            for tc_outcome in tc_statuses['green']:
                num_all_successful += self._get_num_tcs_by_status(from_date, cur_date, tc_outcome, testplan, req)

            num_all_failed = 0
            for tc_outcome in tc_statuses['red']:
                num_all_failed += self._get_num_tcs_by_status(from_date, cur_date, tc_outcome, testplan, req)

            num_all = 0
            num_all_untested = 0
            if testplan_contains_all:
                num_all = self._get_num_testcases(None, cur_date, catpath, req)
                num_all_untested = num_all - num_all_successful - num_all_failed
            else:
                for tc_outcome in tc_statuses['yellow']:
                    num_all_untested += self._get_num_tcs_by_status(from_date, cur_date, tc_outcome, testplan, req)
                num_all = num_all_untested + num_all_successful + num_all_failed

            count.append( {'from_date': format_date(last_date),
                           'to_date': datestr,
                           'date' : datestr,
                           'new_tcs' : num_new,
                           'successful': num_successful,
                           'failed': num_failed,
                           'all_tcs' : num_all,
                           'all_successful': num_all_successful,
                           'all_untested': num_all_untested,
                           'all_failed': num_all_failed })

        last_date = cur_date

    # if chartdata is requested, raw text is returned rather than data object
    # for templating
    if (not req_content == None) and (req_content == "chartdata"):
        jsdstr = '{"chartdata": [\n'

        for x in count:
            jsdstr += '{"date": "%s",' % x['date']
            jsdstr += ' "new_tcs": %s,' % x['new_tcs']
            jsdstr += ' "successful": %s,' % x['successful']
            jsdstr += ' "failed": %s,' % x['failed']
            jsdstr += ' "all_tcs": %s,' % x['all_tcs']
            jsdstr += ' "all_successful": %s,' % x['all_successful']
            jsdstr += ' "all_untested": %s,' % x['all_untested']
            jsdstr += ' "all_failed": %s},\n' % x['all_failed']

        # Drop the trailing ",\n" before closing the array.
        jsdstr = jsdstr[:-2] +'\n]}'

        if isinstance(jsdstr, unicode):
            jsdstr = jsdstr.encode('utf-8')

        req.send_header("Content-Length", len(jsdstr))
        req.write(jsdstr)
        return
    elif (not req_content == None) and (req_content == "downloadcsv"):
        csvstr = "Date from;Date to;New Test Cases;Successful;Failed;Total Test Cases;Total Successful;Total Untested;Total Failed\r\n"

        for x in count:
            csvstr += '%s;' % x['from_date']
            csvstr += '%s;' % x['to_date']
            csvstr += '%s;' % x['new_tcs']
            csvstr += '%s;' % x['successful']
            csvstr += '%s;' % x['failed']
            csvstr += '%s;' % x['all_tcs']
            csvstr += '%s;' % x['all_successful']
            csvstr += '%s;' % x['all_untested']
            csvstr += '%s\r\n' % x['all_failed']

        if isinstance(csvstr, unicode):
            csvstr = csvstr.encode('utf-8')

        req.send_header("Content-Length", len(csvstr))
        req.send_header("Content-Disposition", "attachment;filename=Test_stats.csv")
        req.write(csvstr)
        return
    elif (not req_content == None) and (req_content == "ticketchartdata"):
        jsdstr = '{"ticketchartdata": [\n'

        for x in count:
            jsdstr += '{"date": "%s",' % x['date']
            jsdstr += ' "tot_tickets": %s,' % x['tot_tickets']
            jsdstr += ' "active_tickets": %s,' % x['active_tickets']
            jsdstr += ' "closed_tickets": %s},\n' % x['closed_tickets']

        jsdstr = jsdstr[:-2] +'\n]}'

        if isinstance(jsdstr, unicode):
            jsdstr = jsdstr.encode('utf-8')

        req.send_header("Content-Length", len(jsdstr))
        req.write(jsdstr)
        return
    else:
        # Normal rendering of first chart
        db = self.env.get_db_cnx()
        showall = req.args.get('show') == 'all'

        testplan_list = []
        for planid, catid, catpath, name, author, ts_str in testmanagersystem.list_all_testplans():
            testplan_list.append({'planid': planid, 'catpath': catpath, 'name': name})

        data = {}
        data['testcase_data'] = count
        data['start_date'] = format_date(from_date)
        data['end_date'] = format_date(at_date)
        data['resolution'] = str(graph_res)
        data['baseurl'] = req.base_url
        data['testplans'] = testplan_list
        data['ctestplan'] = testplan
        return 'testmanagerstats.html', data, None
def get_navigation_items(self, req):
    """Contribute the 'Test Manager' entry to Trac's main navigation bar.

    Yields nothing for users lacking the TEST_VIEW permission.
    """
    if "TEST_VIEW" not in req.perm:
        return
    link = tag.a(_("Test Manager"),
                 href=req.href.wiki() + "/TC",
                 accesskey="M")
    yield ("mainnav", "testmanager", link)
def do_upgrade_environment(db):
    """Create/upgrade the schema and seed defaults, committing as it goes.

    NOTE(review): this function takes only ``db`` but references ``self``
    throughout — presumably it is nested inside a method and closes over
    ``self``; confirm against the enclosing definition.
    """
    for realm in self.SCHEMA:
        realm_schema = self.SCHEMA[realm]

        if need_db_create_for_realm(self.env, realm, realm_schema, db):
            create_db_for_realm(self.env, realm, realm_schema, db)
        elif need_db_upgrade_for_realm(self.env, realm, realm_schema, db):
            upgrade_db_for_realm(self.env, 'testmanager.upgrades', realm,
                                 realm_schema, db)

    # Create default values for configuration properties and initialize
    # counters; existing values are left untouched.
    db_insert_or_ignore(self.env, 'testconfig', 'NEXT_CATALOG_ID', '0', db)
    db_insert_or_ignore(self.env, 'testconfig', 'NEXT_TESTCASE_ID', '0', db)
    db_insert_or_ignore(self.env, 'testconfig', 'NEXT_PLAN_ID', '0', db)
    db.commit()

    # Fix templates with a blank id.
    if self._check_blank_id_templates(db):
        self._fix_blank_id_templates(db)
        db.commit()

        self.env.log.info(
            _("""
            Test Manager templates with blank IDs have been fixed.\n
            Please go to the Administration panel, in the Test Manager Templates section, and check the associations between templates and Test Cases and Test Catalogs.\n
            You will have to manually fix any misconfiguration you should find.
            """))

    # Create the basic "TC" Wiki page, used as the root test catalog.
    tc_page = WikiPage(self.env, 'TC')
    if not tc_page.exists:
        tc_page.text = ' '
        tc_page.save('System', '', '127.0.0.1')
        db.commit()

    if self._need_upgrade(db):
        # Set custom ticket field to hold related test case.
        custom = self.config['ticket-custom']
        config_dirty = False
        if 'testcaseid' not in custom:
            custom.set('testcaseid', 'text')
            custom.set('testcaseid.label', _("Test Case"))
            config_dirty = True
        if 'planid' not in custom:
            custom.set('planid', 'text')
            custom.set('planid.label', _("Test Plan"))
            config_dirty = True

        # Set config section for test case outcomes, only if absent.
        if 'test-outcomes' not in self.config:
            self.config.set('test-outcomes', 'green.SUCCESSFUL', _("Successful"))
            self.config.set('test-outcomes', 'yellow.TO_BE_TESTED', _("Untested"))
            self.config.set('test-outcomes', 'red.FAILED', _("Failed"))
            self.config.set('test-outcomes', 'default', 'TO_BE_TESTED')
            config_dirty = True

        # Only rewrite trac.ini if something actually changed.
        if config_dirty:
            self.config.save()
def get_navigation_items(self, req):
    """Contribute the 'Test Manager' entry to Trac's main navigation bar.

    Yields nothing for users lacking the TEST_VIEW permission.
    """
    if 'TEST_VIEW' not in req.perm:
        return
    link = tag.a(_("Test Manager"),
                 href=req.href.wiki() + '/TC',
                 accesskey='M')
    yield ('mainnav', 'testmanager', link)