class TestRChecks(unittest.TestCase):

    def setUp(self):
        sys.argv = ['fedora-review', '-rpn', 'R-Rdummypkg']
        os.chdir('test-R')
        if os.path.exists('R-Rdummypkg'):
            shutil.rmtree('R-Rdummypkg')
        Settings.init(True)
        ReviewDirs.reset()

    @unittest.skipIf(no_net, 'No network available')
    def test_all_checks(self):
        ''' Run all automated review checks'''
        self.bug = NameBug('R-Rdummypkg')
        self.bug.find_urls()
        self.bug.download_files()
        self.checks = Checks(self.bug.spec_file, self.bug.srpm_file)
        self.checks.run_checks(writedown=False)
        for check in self.checks.checks:
            if check.is_applicable():
                self.assertTrue(check.header == 'Generic' or
                                check.header == 'R')
                result = check.get_result()
                self.assertTrue(result.result in
                                ['pass', 'pending', 'fail'])
        os.chdir('..')

def test_all_checks(self):
    ''' Run all automated review checks'''
    checks = Checks(self.spec, self.srpm)
    checks.run_checks(writedown=False)
    checkdict = checks.get_checks()
    for check in checkdict.itervalues():
        self.assertTrue(check.is_run)

class TestChecks(unittest.TestCase):

    def setUp(self):
        self.startdir = os.getcwd()
        sys.argv = ['test-checks', '-b', '1234']
        Settings.init(True)
        ReviewDirs.reset()
        ReviewDirs.workdir_setup('.', True)
        if not os.path.exists(TEST_WORK_DIR):
            os.makedirs(TEST_WORK_DIR)
        self.checks = None
        self.srpm = TEST_WORK_DIR + os.path.basename(TEST_SRPM)
        self.spec = TEST_WORK_DIR + os.path.basename(TEST_SPEC)
        self.source = TEST_WORK_DIR + os.path.basename(TEST_SRC)
        helper = Helpers()
        helper._get_file(TEST_SRPM, TEST_WORK_DIR)
        helper._get_file(TEST_SRC, TEST_WORK_DIR)
        helper._get_file(TEST_SPEC, TEST_WORK_DIR)
        del helper

    @unittest.skipIf(no_net, 'No network available')
    def test_all_checks(self):
        ''' Run all automated review checks'''
        self.checks = Checks(self.spec, self.srpm)
        self.checks.run_checks(writedown=False)
        # Automatic Checks
        checks = self.checks.checks
        for check in checks:
            result = check.get_result()
            self.assertNotEqual(result, None)
        os.chdir(self.startdir)

def test_exclude(self):
    """ test --exclude/-x option. """
    self.init_test(["fedora-review", "-n", "python-test",
                    "-x", "CheckRequires"])
    bug = NameBug(Settings.name)
    bug.find_urls()
    bug.download_files()
    checks = Checks(bug.spec_file, bug.srpm_file)
    self.assertFalse("CheckRequires" in checks.get_checks())

def run_single_check(self, bug, the_check):
    bug.find_urls()
    bug.download_files()
    checks = Checks(bug.spec_file, bug.srpm_file)
    checks.set_single_check(the_check)
    self.assertEqual(len(checks.checks), 1)
    check = checks.checks[0]
    check.run()
    return check

def run_single_check(self, bug, check_name, run_build=False):
    # Run a single check, return check.
    bug.find_urls()
    bug.download_files()
    checks = Checks(bug.spec_file, bug.srpm_file).get_checks()
    checks.set_single_check(check_name)
    if run_build:
        checks['CheckBuild'].run()
    check = checks[check_name]
    check.run()
    return check

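For context, a minimal sketch (not taken from the test suite itself) of how a helper like run_single_check above might be used from a test case; the test name, package and chosen check are assumptions:

def test_single_check_usage(self):
    # Hypothetical usage sketch: run one named check against a local
    # package and assert that it actually executed.
    self.init_test('test_misc', argv=['-n', 'python-test', '--cache'])
    bug = NameBug('python-test')
    check = self.run_single_check(bug, 'CheckRequires')
    self.assertTrue(check.is_run)
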
def test_md5sum_diff_fail(self):
    self.init_test('md5sum-diff-fail',
                   argv=['-rpn', 'python-test', '--cache', '--no-build'])
    ReviewDirs.reset(os.getcwd())
    bug = NameBug('python-test')
    bug.find_urls()
    checks = Checks(bug.spec_file, bug.srpm_file).get_checks()
    checks.set_single_check('CheckSourceMD5')
    check = checks['CheckSourceMD5']
    check.run()
    self.assertTrue(check.is_failed)
    expected = 'diff -r also reports differences'
    self.assertTrue(expected in check.result.attachments[0].text)

def test_sources(self):
    self.init_test('test_misc',
                   argv=['-n', 'python-test', '--cache', '--no-build'])
    bug = NameBug('python-test')
    bug.find_urls()
    bug.download_files()
    checks = Checks(bug.spec_file, bug.srpm_file).get_checks()
    checks.set_single_check('CheckSourceMD5')
    check = checks['CheckSourceMD5']
    check.run()
    result = check.result
    self.log.debug('test_source, result : ' + result.result)
    if result.output_extra:
        self.log.debug("Result extra text: " + result.output_extra)
    self.assertTrue(check.is_passed)

def __run_checks(self, spec, srpm):
    self.checks = Checks(spec, srpm)
    if Settings.no_report:
        self.outfile = '/dev/null'
    else:
        self.outfile = ReviewDirs.report_path(self.checks.spec.name)
    with open(self.outfile, "w") as output:
        if Settings.nobuild:
            self.checks.srpm.is_build = True
        self.log.info('Running checks and generate report\n')
        self.checks.run_checks(output=output,
                               writedown=not Settings.no_report)
        output.close()
    if not os.path.exists('BUILD'):
        os.symlink(Mock.get_builddir('BUILD'), 'BUILD')
    if not Settings.no_report:
        print "Review in: " + self.outfile

def test_md5sum_diff_fail(self):
    os.chdir('md5sum-diff-fail')
    sys.argv = ['fedora-review', '-rpn', 'python-test']
    Settings.init(True)
    ReviewDirs.reset()
    if os.path.exists('python-test'):
        shutil.rmtree('python-test')
    bug = NameBug('python-test')
    bug.find_urls()
    checks = Checks(bug.spec_file, bug.srpm_file)
    checks.set_single_check('CheckSourceMD5')
    self.assertEqual(len(checks.checks), 1)
    check = checks.checks[0]
    check.run()
    self.assertEqual(check.state, 'fail')
    expected = 'diff -r also reports differences'
    self.assertTrue(expected in check.attachments[0].text)
    os.chdir(self.startdir)

def test_dirty_resultdir(self):
    self.init_test('test_misc', argv=['-n', 'python-test', '--cache'])
    bug = NameBug('python-test')
    bug.find_urls()
    bug.download_files()
    checks = Checks(bug.spec_file, bug.srpm_file).get_checks()
    checks.set_single_check('CheckResultdir')
    check = checks['CheckResultdir']
    for dirt in glob.glob('results/*.*'):
        os.unlink(dirt)
    check.run()
    self.assertTrue(check.is_passed)
    subprocess.check_call('touch results/orvar.rpm', shell=True)
    self.assertRaises(ReviewError, check.run)
    Settings.nobuild = True
    check.run()
    self.assertTrue(check.is_passed)
    os.unlink('results/orvar.rpm')

def test_sources(self):
    if os.path.exists('python-test'):
        shutil.rmtree('python-test')
    sys.argv = ['fedora-review', '-n', 'python-test']
    Settings.init(True)
    ReviewDirs.reset()
    ReviewDirs.workdir_setup('.', True)
    bug = NameBug('python-test')
    bug.find_urls()
    bug.download_files()
    checks = Checks(bug.spec_file, bug.srpm_file)
    checks.set_single_check('CheckSourceMD5')
    self.assertEqual(len(checks.checks), 1)
    check = checks.checks[0]
    check.run()
    result = check.get_result()
    self.log.debug('result : ' + result.result)
    if result.output_extra:
        self.log.debug("Result extra text: " + result.output_extra)
    self.assertEqual(result.result, 'pass')
    os.chdir(self.startdir)

class TestRChecks(FR_TestCase):

    R_TEST_SRPM = 'https://fedorahosted.org/releases/F/e' \
                  '/FedoraReview/R-Rdummypkg-1.0-2.fc15.src.rpm'
    R_TEST_SPEC = FR_TestCase.BASE_URL + 'R-Rdummypkg.spec'
    R_TEST_SRC = FR_TestCase.BASE_URL + 'Rdummypkg_1.0.tar.gz'

    @unittest.skipIf(FAST_TEST, 'slow test disabled by REVIEW_FAST_TEST')
    def test_all_checks(self):
        ''' Run all automated review checks'''
        self.init_test('test-R', argv=['-rpn', 'R-Rdummypkg', '--no-build'])
        ReviewDirs.reset()
        self.bug = NameBug('R-Rdummypkg')
        self.bug.find_urls()
        self.bug.download_files()
        self.checks = Checks(self.bug.spec_file, self.bug.srpm_file)
        self.checks.run_checks(writedown=False)
        for check in self.checks.checkdict.itervalues():
            if check.is_passed or check.is_pending or check.is_failed:
                self.assertIn(check.group, ['Generic', 'R'])

def test_single(self):
    ''' test --single/-s option '''
    self.init_test(['fedora-review', '-n', 'python-test',
                    '-s', 'CheckRequires'])
    bug = NameBug(Settings.name)
    bug.find_urls()
    bug.download_files()
    checks = Checks(bug.spec_file, bug.srpm_file)
    self.assertEqual(len(checks.checks), 1)
    check = checks.checks[0]
    self.assertEqual(check.name, 'CheckRequires')

class ReviewHelper(object):
    ''' Does most of the actual work of the review. '''

    class HelperError(ReviewError):
        ''' Error while processing bug. '''
        def __init__(self, msg):
            ReviewError.__init__(self, msg)

    def __init__(self):
        self.bug = None
        self.checks = None
        self.log = Settings.get_logger()
        self.verbose = False
        self.outfile = None
        self.prebuilt = False

    def __download_sources(self):
        ''' Download and extract all upstream sources. '''
        self.sources.extract_all()
        return True

    def __do_report(self):
        ''' Create a review report'''
        self.log.info('Getting .spec and .srpm Urls from : ' +
                      self.bug.get_location())
        Settings.dump()
        if not self.bug.find_urls():
            raise self.HelperError('Cannot find .spec or .srpm URL(s)')
        if not ReviewDirs.is_inited:
            wd = self.bug.get_dirname()
            ReviewDirs.workdir_setup(wd)
        if not self.bug.download_files():
            raise self.HelperError('Cannot download .spec and .srpm')
        Settings.name = self.bug.get_name()
        self.__run_checks(self.bug.spec_file, self.bug.srpm_file)

    def __run_checks(self, spec, srpm):
        ''' Register and run all checks. '''
        self.checks = Checks(spec, srpm)
        if Settings.no_report:
            self.outfile = '/dev/null'
        else:
            self.outfile = ReviewDirs.report_path(self.checks.spec.name)
        with open(self.outfile, "w") as output:
            if Settings.nobuild:
                self.checks.srpm.is_build = True
            self.log.info('Running checks and generate report\n')
            self.checks.run_checks(output=output,
                                   writedown=not Settings.no_report)
            output.close()
        if not Settings.no_report:
            print "\033[92mReview template in: %s\033[0m" % self.outfile
            print "\033[91mfedora-review is an automated tool, but *YOU* " \
                  "are responsible for manually reviewing the results " \
                  "and finishing the review. Do not just copy-paste the " \
                  "results without understanding them.\033[0m"

    @staticmethod
    def _list_flags():
        ''' List all flags in simple, user-friendly format. '''
        checks_lister = ChecksLister()
        for flag in checks_lister.flags.itervalues():
            print flag.name + ': ' + flag.doc

    @staticmethod
    def _list_checks():
        """ List all the checks and flags available. """

        def list_data_by_file(files, checks_list):
            ''' print filename + flags and checks defined in it. '''
            for f in sorted(files):
                print 'File: ' + f
                flags_by_src = filter(lambda c: c.defined_in == f,
                                      checks_lister.flags.itervalues())
                for flag in flags_by_src:
                    print 'Flag: ' + flag.name
                files_per_src = filter(lambda c: c.defined_in == f,
                                       checks_list)
                groups = list(set([c.group for c in files_per_src]))
                for group in sorted(groups):

                    def check_match(c):
                        ''' check in correct group and file? '''
                        return c.group == group and c.defined_in == f

                    checks = filter(check_match, checks_list)
                    if checks == []:
                        continue
                    print 'Group: ' + group
                    for c in sorted(checks):
                        print '    %s: %s' % (c.name, c.text)
                print

        checks_lister = ChecksLister()
        checks_list = list(checks_lister.get_checks().itervalues())
        files = list(set([c.defined_in for c in checks_list]))
        list_data_by_file(files, checks_list)
        deps_list = filter(lambda c: c.needs != [] and
                           c.needs != ['CheckBuildCompleted'],
                           checks_list)
        for dep in deps_list:
            print 'Dependencies: ' + dep.name + ': ' + \
                os.path.basename(dep.defined_in)
            for needed in dep.needs:
                print '     ' + needed
        deprecators = filter(lambda c: c.deprecates != [], checks_list)
        for dep in deprecators:
            print 'Deprecations: ' + dep.name + ': ' + \
                os.path.basename(dep.defined_in)
            for victim in dep.deprecates:
                print '     ' + victim

    def _do_run(self):
        ''' Initiate, download url:s, run checks and write report. '''
        Settings.init()
        make_report = True
        if Settings.list_checks:
            self._list_checks()
            make_report = False
        elif Settings.list_flags:
            self._list_flags()
            make_report = False
        elif Settings.version:
            _print_version()
            make_report = False
        elif Settings.url:
            self.log.info("Processing bug on url: " + Settings.url)
            self.bug = UrlBug(Settings.url)
        elif Settings.bug:
            self.log.info("Processing bugzilla bug: " + Settings.bug)
            self.bug = BugzillaBug(Settings.bug)
        elif Settings.name:
            self.log.info("Processing local files: " + Settings.name)
            self.bug = NameBug(Settings.name)
        if make_report:
            self.__do_report()

    def run(self):
        ''' Load urls, run checks and make report. '''
        self.log.debug("Command line: " + ' '.join(sys.argv))
        try:
            rcode = 0
            self._do_run()
        except ReviewError as err:
            rcode = err.exitcode
            self.log.debug("ReviewError: " + str(err), exc_info=True)
            if not err.silent:
                msg = 'Error: ' + str(err)
                if err.show_logs:
                    msg += ' (logs in ~/.cache/fedora-review.log)'
                self.log.error(msg)
        except:
            self.log.debug("Exception down the road...", exc_info=True)
            self.log.error('Exception down the road...'
                           ' (logs in ~/.cache/fedora-review.log)')
            rcode = 1
        return rcode

class ReviewHelper(object):

    def __init__(self):
        self.bug = None
        self.checks = None
        self.log = Settings.get_logger()
        self.verbose = False
        self.outfile = None
        self.prebuilt = False

    def __download_sources(self):
        sources = self.checks.spec.get_sources('Source')
        self.sources = Sources(self.checks.spec)
        self.sources.extract_all()
        return True

    def __do_report(self):
        ''' Create a review report'''
        self.log.info('Getting .spec and .srpm Urls from : ' +
                      self.bug.get_location())
        Settings.dump()
        if not self.bug.find_urls():
            self.log.error('Cannot find .spec or .srpm URL(s)')
            raise HandledError()
        if not ReviewDirs.is_inited:
            wd = self.bug.get_dirname()
            ReviewDirs.workdir_setup(wd)
        if not self.bug.download_files():
            self.log.error('Cannot download .spec and .srpm')
            raise HandledError()
        Settings.name = self.bug.get_name()
        self.__run_checks(self.bug.spec_file, self.bug.srpm_file)

    def __list_checks(self):
        """ List all the checks available. """
        ChecksLister().list()

    def __print_version(self):
        print('fedora-review version ' + __version__ + ' ' + build_full)

    def __run_checks(self, spec, srpm):
        self.checks = Checks(spec, srpm)
        if Settings.no_report:
            self.outfile = '/dev/null'
        else:
            self.outfile = ReviewDirs.report_path(self.checks.spec.name)
        with open(self.outfile, "w") as output:
            if Settings.nobuild:
                self.checks.srpm.is_build = True
            self.log.info('Running checks and generate report\n')
            self.checks.run_checks(output=output,
                                   writedown=not Settings.no_report)
            output.close()
        if not os.path.exists('BUILD'):
            os.symlink(Mock.get_builddir('BUILD'), 'BUILD')
        if not Settings.no_report:
            print "Review in: " + self.outfile

    def run(self):
        self.log.debug("Command line: " + ' '.join(sys.argv))
        try:
            Settings.init()
            make_report = True
            if Settings.list_checks:
                self.__list_checks()
                make_report = False
            elif Settings.version:
                self.__print_version()
                make_report = False
            elif Settings.url:
                self.log.info("Processing bug on url: " + Settings.url)
                self.bug = UrlBug(Settings.url)
            elif Settings.bug:
                self.log.info("Processing bugzilla bug: " + Settings.bug)
                self.bug = BugzillaBug(Settings.bug, user=Settings.user)
            elif Settings.name:
                self.log.info("Processing local files: " + Settings.name)
                self.bug = NameBug(Settings.name)
            if make_report:
                self.__do_report()
            return 0
        except BugException as err:
            print str(err)
            return 2
        except HandledError as err:
            print str(err)
            return 2
        except SettingsError as err:
            self.log.error("Incompatible settings: " + str(err))
            return 2
        except ReviewDirExistsError as err:
            print("The directory %s is in the way, please remove."
                  % err.value)
            return 4
        except CleanExitError as err:
            self.log.debug('Processing CleanExit')
            return 2
        except:
            self.log.debug("Exception down the road...", exc_info=True)
            self.log.error("Exception down the road...")
            return 1
        return 0

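For completeness, a minimal sketch of how a ReviewHelper like the classes above might be driven from a command-line entry point; the main() wrapper below is an assumption for illustration, not code from the project:

import sys

def main():
    # Hypothetical entry point: build the helper, run the review and
    # propagate its integer return code as the process exit status.
    helper = ReviewHelper()
    sys.exit(helper.run())

if __name__ == '__main__':
    main()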