Exemplo n.º 1
0
    def __init__(self, **kwargs):
        """Configure log filtering and a default runner, then delegate to
        unittest.TestProgram.

        Keyword Args:
            verbosity: verbosity level (default 1); DEBUG logging is kept
                only at level 2.
            testRunner: optional runner instance; a TestRunner at the
                chosen verbosity is installed when absent.
        """
        level = kwargs.pop('verbosity', 1)
        # keep DEBUG output only when running at maximum verbosity
        if level != 2:
            logger.disable('DEBUG')
        # install a default runner unless the caller supplied one
        if kwargs.get('testRunner') is None:
            kwargs['testRunner'] = TestRunner(verbosity=level)
        unittest.TestProgram.__init__(self, **kwargs)
    def __init__(self, **kwargs):
        """Set up log filtering and a default test runner before handing
        off to the stock unittest.TestProgram initializer."""
        v = kwargs.pop('verbosity', 1)
        if v != 2:
            # anything below full verbosity suppresses DEBUG messages
            logger.disable('DEBUG')
        if kwargs.get('testRunner') is None:
            # fall back to the project's TestRunner at the same verbosity
            kwargs['testRunner'] = TestRunner(verbosity=v)
        unittest.TestProgram.__init__(self, **kwargs)
Exemplo n.º 3
0
def run(targets, options):
    """Import and run each test module named in 'targets'.

    Returns a (success, errors, skipped) tuple of aggregate counts.
    """
    success = errors = skipped = 0

    # modules that build their own suite instead of using the loader
    import_modules = ['doc_tests']

    for name in targets:
        try:
            if name in import_modules:
                # the module provides its own suite factory
                suite = __import__(name).get_suite()
            else:
                suite = unittest.TestLoader().loadTestsFromName(name)

            results = TestRunner(verbosity=options.verbosity,
                                 descriptions=0).run(suite)

            # tally passes, failures and skips for this suite
            fail_count = len(results.errors) + len(results.failures)
            skip_count = len(results.skipped)
            success += results.testsRun - fail_count - skip_count
            errors += fail_count
            skipped += skip_count

            # strict mode aborts at the first failing suite
            if options.strict and errors:
                testutil.error( "strict mode stops on errors" )
                break

        except ImportError:
            testutil.error( "unable to import module '%s'" % name )

    # enable the logger
    logger.disable(None)

    # summarize the run
    testutil.info('=' * 59)
    testutil.info('''%s tests passed, %s tests failed, %s tests skipped; %d total''' % \
        (success, errors, skipped, success + errors + skipped))

    return (success, errors, skipped)
Exemplo n.º 4
0
def run(targets, options):
    """Import each name in 'targets' and execute its test suite.

    Returns a (success, errors, skipped) tuple of test counts.
    """
    success = errors = skipped = 0

    # names handled by direct import instead of the unittest loader
    direct = ['doc_tests']

    for name in targets:
        try:
            if name in direct:
                mod = __import__(name)
                suite = mod.get_suite()
            else:
                suite = unittest.TestLoader().loadTestsFromName(name)

            runner = TestRunner(verbosity=options.verbosity, descriptions=0)
            results = runner.run(suite)

            failed = len(results.errors) + len(results.failures)
            skips = len(results.skipped)

            # passed tests are whatever ran minus failures and skips
            success += results.testsRun - failed - skips
            errors += failed
            skipped += skips

            # stop at the first error when running strictly
            if options.strict and errors:
                testutil.error("strict mode stops on errors")
                break

        except ImportError:
            testutil.error("unable to import module '%s'" % name)

    # re-enable all logging the test run may have silenced
    logger.disable(None)

    # print a one-line summary of the whole run
    testutil.info('=' * 59)
    testutil.info('''%s tests passed, %s tests failed, %s tests skipped; %d total''' % \
        (success, errors, skipped, success + errors + skipped))

    return (success, errors, skipped)
Exemplo n.º 5
0
        '-x', '--index', action="store_true", 
        dest="index", default=False,
        help="treat input file as binary index"
    )

    return parser


# Script entry point (Python 2 syntax: uses print statements).
if __name__ == '__main__':
    import optparse  # NOTE(review): appears unused here — presumably required by option_parser; confirm

    # build the command-line parser defined earlier in this file
    parser = option_parser()

    options, args = parser.parse_args()

    # apply the requested logging verbosity
    logger.disable(options.verbosity)

    from genetrack import conf

    # trigger test mode: substitute bundled test data for the file names
    if options.test:
        options.inpname = conf.testdata('test-hdflib-input.gtrack')
        options.outname = conf.testdata('predictions.bed')

    # missing input file name
    if not options.inpname and not options.outname:
        parser.print_help()
    else:
        print 'Sigma = %s' % options.sigma
        print 'Minimum peak = %s' % options.level
        print 'Peak-to-peak = %s' % options.exclude
Exemplo n.º 6
0
import os, unittest, random

import testlib
from genetrack import conf, util, logger

class Hdflib(unittest.TestCase):
    'basic sequence class tests'

    def test_all(self):
        "Testing sequence operations"
        # placeholder: no assertions are made yet

def get_suite():
    "Returns the testsuite"
    # only the Hdflib case belongs to this module's suite
    return testlib.make_suite([Hdflib])

if __name__ == '__main__':
    # build the suite, restore full logging, then run verbosely
    test_suite = get_suite()
    logger.disable(None)
    unittest.TextTestRunner(verbosity=2).run(test_suite)

    
Exemplo n.º 7
0
from genetrack.scripts import tabs2genetrack
from genetrack import logger

if __name__ == "__main__":
    import os
    import sys  # FIX: sys was used below (sys.stderr / sys.exit) but never imported
    # force a locale-independent sort/compare behavior for child tools
    os.environ['LC_ALL'] = 'C'

    parser = tabs2genetrack.option_parser()

    options, args = parser.parse_args()

    # uppercase the format
    options.format = options.format.upper()

    if options.format not in ('BED', 'GFF'):
        # route the help text to stderr and abort on unknown formats
        sys.stdout = sys.stderr
        parser.print_help()
        sys.exit(-1)

    logger.disable(options.verbosity)

    # missing file names
    if not (options.inpname and options.outname and options.format):
        parser.print_help()
        sys.exit(-1)
    else:
        tabs2genetrack.transform(inpname=options.inpname, outname=options.outname,
            format=options.format, shift=options.shift, index=options.index, options=options)
Exemplo n.º 8
0
        # NOTE(review): fragment of a test method — 'john', 'project' and
        # 'stream' are set up earlier, outside this excerpt.
        data = authorize.create_data(user=john,
                                     pid=project.id,
                                     stream=stream,
                                     name="Test data")

        # project counts update: creating data should raise data_count to 1
        project = authorize.get_project(user=john, pid=project.id)
        self.assertEqual(project.data_count, 1)

        # testing data deletion: the count should drop back to 0
        authorize.delete_data(user=john, pid=project.id, dids=[data.id])
        project = authorize.get_project(user=john, pid=project.id)
        self.assertEqual(project.data_count, 0)

    def test_two(self):
        # placeholder test; intentionally empty
        pass


def get_suite():
    "Returns the testsuite"
    # wrap the module's single test case into a suite
    return testlib.make_suite([AuthorizeTest])


if __name__ == '__main__':
    # build the suite, silence DEBUG chatter, then run verbosely
    test_suite = get_suite()
    logger.disable('DEBUG')
    unittest.TextTestRunner(verbosity=2).run(test_suite)
Exemplo n.º 9
0
import os, unittest, random

import testlib
from genetrack import conf, util, logger


class Hdflib(unittest.TestCase):
    'basic sequence class tests'

    def test_all(self):
        "Testing sequence operations"
        # no assertions yet; this test body is a placeholder


def get_suite():
    "Returns the testsuite"
    suite_members = [Hdflib]
    return testlib.make_suite(suite_members)


if __name__ == '__main__':
    # run this module's tests with all logging enabled, verbosely
    s = get_suite()
    logger.disable(None)
    unittest.TextTestRunner(verbosity=2).run(s)
Exemplo n.º 10
0
    # NOTE(review): fragment of a test-driver routine — 'parser', 'all_tests',
    # 'run' and the tail of the generate_coverage() call lie outside this excerpt.
    # parse the options
    options, args = parser.parse_args()

    # modules: from command line args or all modules
    targets = args or all_tests()

    # get rid of the .py ending in case full module names were
    # passed in the command line
    # NOTE(review): rstrip(".py") strips any trailing '.', 'p' or 'y'
    # characters (e.g. 'copy' -> 'co'), not just the '.py' suffix — likely a bug.
    targets = [t.rstrip(".py") for t in targets]

    # exclusion mode: run every test except the named ones
    if options.exclude:
        targets = [name for name in all_tests() if name not in targets]

    # higher verbosity leaves more logging enabled
    if options.verbosity == 0:
        logger.disable('INFO')
    elif options.verbosity == 1:
        logger.disable('DEBUG')
    elif options.verbosity >= 2:
        logger.disable(None)

    # cleans full entire test directory
    if options.reset:
        conf.reset_dir(conf.TEMP_DATA_DIR)

    # run all the tests, optionally under coverage analysis
    if options.coverage:
        coverdir = conf.path_join(conf.TEST_DIR, 'coverage')
        good, bad, skip = testutil.generate_coverage(run,
                                                     coverdir,
                                                     targets=targets,
Exemplo n.º 11
0
        # NOTE(review): fragment of a test method — 'self', 'User', 'File'
        # and 'conf' are provided by surrounding code outside this excerpt.
        from genetrack.server.web import authorize

        john = User.objects.get(username='******')
        project = authorize.create_project(user=john, name="Test project")
        # NOTE(review): the handle from open() is never closed — consider a with-block
        stream = File( open(conf.testdata('test-users.csv')) )
        data = authorize.create_data(user=john, pid=project.id, stream=stream, name="Test data")

        # project counts update: creating data should raise data_count to 1
        project = authorize.get_project(user=john, pid=project.id)
        self.assertEqual(project.data_count, 1)

        # testing data deletion: the count should drop back to 0
        authorize.delete_data(user=john, pid=project.id, dids=[data.id])
        project = authorize.get_project(user=john, pid=project.id)
        self.assertEqual(project.data_count, 0)

    def test_two(self):
        # placeholder test; intentionally empty
        pass

def get_suite():
    "Returns the testsuite"
    members = [AuthorizeTest]
    return testlib.make_suite(members)

if __name__ == '__main__':
    the_suite = get_suite()
    # drop DEBUG output, then run the suite verbosely
    logger.disable('DEBUG')
    unittest.TextTestRunner(verbosity=2).run(the_suite)
Exemplo n.º 12
0
    # NOTE(review): fragment of a test-driver routine — 'parser', 'all_tests',
    # 'run', 'conf', 'logger' and 'testutil' come from surrounding code.
    # parse the options
    options, args = parser.parse_args()

    # modules: from command line args or all modules
    targets = args or all_tests()

    # drop a trailing '.py' in case full file names were passed on the
    # command line. FIX: the previous rstrip(".py") stripped any trailing
    # '.', 'p' or 'y' characters (e.g. 'copy' -> 'co'), not the suffix.
    targets = [t[:-3] if t.endswith(".py") else t for t in targets]

    # exclusion mode: run every test except the named ones
    if options.exclude:
        targets = [name for name in all_tests() if name not in targets]

    # higher verbosity leaves more logging enabled
    if options.verbosity == 0:
        logger.disable('INFO')
    elif options.verbosity == 1:
        logger.disable('DEBUG')
    elif options.verbosity >= 2:
        logger.disable(None)

    # cleans the entire temporary test directory
    if options.reset:
        conf.reset_dir(conf.TEMP_DATA_DIR)

    # run all the tests, optionally under coverage analysis
    if options.coverage:
        coverdir = conf.path_join(conf.TEST_DIR, 'coverage')
        good, bad, skip = testutil.generate_coverage(run, coverdir,
                                                     targets=targets,
                                                     options=options)