Example #1
def multicheck(self, *k, **kw):
	# From waflib's c_config tool; relies on the module-level imports
	# (Logs, Options, Runner, Task) and the cfgtask task class.
	self.start_msg(kw.get('msg', 'Executing %d configuration tests' % len(k)), **kw)

	# minimal stand-in for a build context, just enough for Runner.Parallel
	class par(object):
		def __init__(self):
			self.keep = False
			self.returned_tasks = []
			self.task_sigs = {}
		def total(self):
			return len(tasks)
		def to_log(self, *k, **kw):
			return

	bld = par()
	tasks = []
	for dct in k:
		x = cfgtask(bld=bld)
		tasks.append(x)
		x.args = dct
		x.bld = bld
		x.conf = self

		# bind a logger that will keep the info in memory
		x.logger = Logs.make_mem_logger(str(id(x)), self.logger)

	def it():
		# one batch containing every test, then keep signalling "no more work"
		yield tasks
		while 1:
			yield []

	p = Runner.Parallel(bld, Options.options.jobs)
	p.biter = it()
	p.start()

	# flush the logs in order into the config.log
	for x in tasks:
		x.logger.memhandler.flush()

	for x in tasks:
		if x.hasrun != Task.SUCCESS:
			self.end_msg(kw.get('errmsg', 'no'), color='YELLOW', **kw)
			self.fatal(kw.get('fatalmsg', None) or 'One of the tests has failed, see the config.log for more information')

	self.end_msg('ok', **kw)
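A minimal usage sketch, assuming this function is bound to the configuration context as conf.multicheck and that each positional dict takes the same keywords as conf.check() ('header_name' matches the docstrings of the later variants; the wscript below is hypothetical):

def configure(conf):
	conf.load('compiler_c')
	# each dict becomes one cfgtask; all of them run in parallel
	conf.multicheck(
		{'header_name': 'stdio.h'},
		{'header_name': 'stdlib.h'},
		msg='Checking for standard headers in parallel',
		errmsg='a header is missing',
	)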
Example #2
def multicheck(self, *k, **kw):
    """
	Use tuples to perform parallel configuration tests
	"""
    self.start_msg(kw.get("msg", "Executing %d configuration tests" % len(k)), **kw)

    class par(object):
        def __init__(self):
            self.keep = False
            self.returned_tasks = []
            self.task_sigs = {}
            self.progress_bar = 0

        def total(self):
            return len(tasks)

        def to_log(self, *k, **kw):
            return

    bld = par()
    tasks = []
    for dct in k:
        x = cfgtask(bld=bld)
        tasks.append(x)
        x.args = dct
        x.bld = bld
        x.conf = self

        # bind a logger that will keep the info in memory
        x.logger = Logs.make_mem_logger(str(id(x)), self.logger)

    def it():
        yield tasks
        while 1:
            yield []

    p = Runner.Parallel(bld, Options.options.jobs)
    p.biter = it()
    p.start()

    # flush the logs in order into the config.log
    for x in tasks:
        x.logger.memhandler.flush()

    if p.error:
        for x in p.error:
            if getattr(x, "err_msg", None):
                self.to_log(x.err_msg)
                self.end_msg("fail", color="RED")
                raise Errors.WafError("There is an error in the library, read config.log for more information")

    for x in tasks:
        if x.hasrun != Task.SUCCESS:
            self.end_msg(kw.get("errmsg", "no"), color="YELLOW", **kw)
            self.fatal(kw.get("fatalmsg", None) or "One of the tests has failed, read config.log for more information")

    self.end_msg("ok", **kw)
Example #3
def multicheck(self, *k, **kw):
	"""
	Use tuples to perform parallel configuration tests
	"""
	self.start_msg(kw.get('msg', 'Executing %d configuration tests' % len(k)), **kw)

	class par(object):
		def __init__(self):
			self.keep = False
			self.returned_tasks = []
			self.task_sigs = {}
			self.progress_bar = 0
		def total(self):
			return len(tasks)
		def to_log(self, *k, **kw):
			return

	bld = par()
	tasks = []
	for dct in k:
		x = cfgtask(bld=bld)
		tasks.append(x)
		x.args = dct
		x.bld = bld
		x.conf = self

		# bind a logger that will keep the info in memory
		x.logger = Logs.make_mem_logger(str(id(x)), self.logger)

	def it():
		yield tasks
		while 1:
			yield []
	p = Runner.Parallel(bld, Options.options.jobs)
	p.biter = it()
	p.start()

	# flush the logs in order into the config.log
	for x in tasks:
		x.logger.memhandler.flush()

	if p.error:
		for x in p.error:
			if getattr(x, 'err_msg', None):
				self.to_log(x.err_msg)
				self.end_msg('fail', color='RED')
				raise Errors.WafError('There is an error in the library, read config.log for more information')

	for x in tasks:
		if x.hasrun != Task.SUCCESS:
			self.end_msg(kw.get('errmsg', 'no'), color='YELLOW', **kw)
			self.fatal(kw.get('fatalmsg', None) or 'One of the tests has failed, read config.log for more information')

	self.end_msg('ok', **kw)
Example #4
def multicheck(self, *k, **kw):
	"""
	Runs configuration tests in parallel. The results are printed sequentially at the end of the build.
	"""
	self.start_msg(kw.get('msg', 'Executing %d configuration tests' % len(k)), **kw)

	class par(object):
		def __init__(self):
			self.keep = False
			self.task_sigs = {}
			self.progress_bar = 0
		def total(self):
			return len(tasks)
		def to_log(self, *k, **kw):
			return

	bld = par()
	tasks = []
	for dct in k:
		x = cfgtask(bld=bld)
		tasks.append(x)
		x.args = dct
		x.bld = bld
		x.conf = self

		# bind a logger that will keep the info in memory
		x.logger = Logs.make_mem_logger(str(id(x)), self.logger)

	def it():
		yield tasks
		while 1:
			yield []
	bld.producer = p = Runner.Parallel(bld, Options.options.jobs)
	p.biter = it()
	p.start()

	# flush the logs in order into the config.log
	for x in tasks:
		x.logger.memhandler.flush()

	if p.error:
		for x in p.error:
			if getattr(x, 'err_msg', None):
				self.to_log(x.err_msg)
				self.end_msg('fail', color='RED')
				raise Errors.WafError('There is an error in the library, read config.log for more information')

	for x in tasks:
		if x.hasrun != Task.SUCCESS:
			self.end_msg(kw.get('errmsg', 'no'), color='YELLOW', **kw)
			self.fatal(kw.get('fatalmsg') or 'One of the tests has failed, read config.log for more information')

	self.end_msg('ok', **kw)
Example #5
def multicheck(self, *k, **kw):
    self.start_msg(kw.get('msg', 'Executing %d configuration tests' % len(k)),
                   **kw)
    for var in ('DEFINES', DEFKEYS):
        self.env.append_value(var, [])
    self.env.DEFINE_COMMENTS = self.env.DEFINE_COMMENTS or {}

    class par(object):
        def __init__(self):
            self.keep = False
            self.task_sigs = {}
            self.progress_bar = 0

        def total(self):
            return len(tasks)

        def to_log(self, *k, **kw):
            return

    bld = par()
    bld.keep = kw.get('run_all_tests', True)
    tasks = []
    id_to_task = {}
    for dct in k:
        x = Task.classes['cfgtask'](bld=bld)
        tasks.append(x)
        x.args = dct
        x.bld = bld
        x.conf = self
        x.logger = Logs.make_mem_logger(str(id(x)), self.logger)
        if 'id' in dct:
            id_to_task[dct['id']] = x
    for x in tasks:
        for key in Utils.to_list(x.args.get('before_tests', [])):
            # use .get() so an unknown id reaches the ValueError below
            # instead of raising a bare KeyError
            tsk = id_to_task.get(key)
            if not tsk:
                raise ValueError('No test named %r' % key)
            tsk.run_after.add(x)
        for key in Utils.to_list(x.args.get('after_tests', [])):
            tsk = id_to_task.get(key)
            if not tsk:
                raise ValueError('No test named %r' % key)
            x.run_after.add(tsk)

    def it():
        yield tasks
        while 1:
            yield []

    bld.producer = p = Runner.Parallel(bld, Options.options.jobs)
    bld.multicheck_lock = Utils.threading.Lock()
    p.biter = it()
    self.end_msg('started')
    p.start()
    for x in tasks:
        x.logger.memhandler.flush()
    self.start_msg('-> processing test results')
    if p.error:
        for x in p.error:
            if getattr(x, 'err_msg', None):
                self.to_log(x.err_msg)
                self.end_msg('fail', color='RED')
                raise Errors.WafError(
                    'There is an error in the library, read config.log for more information'
                )
    failure_count = 0
    for x in tasks:
        if x.hasrun not in (Task.SUCCESS, Task.NOT_RUN):
            failure_count += 1
    if failure_count:
        self.end_msg(kw.get('errmsg', '%s test failed' % failure_count),
                     color='YELLOW',
                     **kw)
    else:
        self.end_msg('all ok', **kw)
    for x in tasks:
        if x.hasrun != Task.SUCCESS:
            if x.args.get('mandatory', True):
                self.fatal(
                    kw.get('fatalmsg') or
                    'One of the tests has failed, read config.log for more information'
                )
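This variant adds explicit ordering between tests: a dict may declare an 'id', and other dicts can name it in 'after_tests' or 'before_tests' (a single name or a list, normalized by Utils.to_list) to constrain scheduling. A minimal sketch, with hypothetical header names:

conf.multicheck(
    {'id': 'foo', 'header_name': 'foo.h', 'mandatory': False},
    # scheduled only after the test whose id is 'foo' has finished
    {'header_name': 'foo_extra.h', 'after_tests': 'foo', 'mandatory': False},
    msg='Checking dependent headers',
)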
Example #6
def multicheck(self, *k, **kw):
    """
	Runs configuration tests in parallel; results are printed sequentially at the end of the build
	but each test must provide its own msg value to display a line::

		def test_build(ctx):
			ctx.in_msg = True # suppress console outputs
			ctx.check_large_file(mandatory=False)

		conf.multicheck(
			{'header_name':'stdio.h', 'msg':'... stdio', 'uselib_store':'STDIO', 'global_define':False},
			{'header_name':'xyztabcd.h', 'msg':'... optional xyztabcd.h', 'mandatory': False},
			{'header_name':'stdlib.h', 'msg':'... stdlib', 'okmsg': 'aye', 'errmsg': 'nope'},
			{'func': test_build, 'msg':'... testing an arbitrary build function', 'okmsg':'ok'},
			msg       = 'Checking for headers in parallel',
			mandatory = True, # mandatory tests raise an error at the end
			run_all_tests = True, # try running all tests
		)

	The configuration tests may modify the values in conf.env in any order, and the define
	values can affect configuration tests being executed. It is hence recommended
	to provide `uselib_store` values with `global_define=False` to prevent such issues.
	"""
    self.start_msg(kw.get('msg', 'Executing %d configuration tests' % len(k)),
                   **kw)

    # Force a copy so that threads append to the same list at least
    # no order is guaranteed, but the values should not disappear at least
    for var in ('DEFINES', DEFKEYS):
        self.env.append_value(var, [])
    self.env.DEFINE_COMMENTS = self.env.DEFINE_COMMENTS or {}

    # define a task object that will execute our tests
    class par(object):
        def __init__(self):
            self.keep = False
            self.task_sigs = {}
            self.progress_bar = 0

        def total(self):
            return len(tasks)

        def to_log(self, *k, **kw):
            return

    bld = par()
    bld.keep = kw.get('run_all_tests', True)
    tasks = []

    id_to_task = {}
    for dct in k:
        x = Task.classes['cfgtask'](bld=bld)
        tasks.append(x)
        x.args = dct
        x.bld = bld
        x.conf = self

        # bind a logger that will keep the info in memory
        x.logger = Logs.make_mem_logger(str(id(x)), self.logger)

        if 'id' in dct:
            id_to_task[dct['id']] = x

    # second pass to set dependencies with after_test/before_test
    for x in tasks:
        for key in Utils.to_list(x.args.get('before_tests', [])):
            # use .get() so an unknown id reaches the ValueError below
            # instead of raising a bare KeyError
            tsk = id_to_task.get(key)
            if not tsk:
                raise ValueError('No test named %r' % key)
            tsk.run_after.add(x)
        for key in Utils.to_list(x.args.get('after_tests', [])):
            tsk = id_to_task.get(key)
            if not tsk:
                raise ValueError('No test named %r' % key)
            x.run_after.add(tsk)

    def it():
        yield tasks
        while 1:
            yield []

    bld.producer = p = Runner.Parallel(bld, Options.options.jobs)
    bld.multicheck_lock = Utils.threading.Lock()
    p.biter = it()

    self.end_msg('started')
    p.start()

    # flush the logs in order into the config.log
    for x in tasks:
        x.logger.memhandler.flush()

    self.start_msg('-> processing test results')
    if p.error:
        for x in p.error:
            if getattr(x, 'err_msg', None):
                self.to_log(x.err_msg)
                self.end_msg('fail', color='RED')
                raise Errors.WafError(
                    'There is an error in the library, read config.log for more information'
                )

    failure_count = 0
    for x in tasks:
        if x.hasrun not in (Task.SUCCESS, Task.NOT_RUN):
            failure_count += 1

    if failure_count:
        self.end_msg(kw.get('errmsg', '%s test failed' % failure_count),
                     color='YELLOW',
                     **kw)
    else:
        self.end_msg('all ok', **kw)

    for x in tasks:
        if x.hasrun != Task.SUCCESS:
            if x.args.get('mandatory', True):
                self.fatal(
                    kw.get('fatalmsg') or
                    'One of the tests has failed, read config.log for more information'
                )
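Note the failure accounting in this variant: tasks left in Task.NOT_RUN are not counted as failures, yet any mandatory test that did not reach Task.SUCCESS still aborts configuration at the end. A sketch relying only on the documented keywords (header names are illustrative):

conf.multicheck(
    {'header_name': 'zlib.h', 'uselib_store': 'ZLIB', 'global_define': False},
    # allowed to fail; with run_all_tests=False it may also end up
    # cancelled (NOT_RUN) if an earlier test already failed
    {'header_name': 'xyztabcd.h', 'mandatory': False},
    msg='Checking optional headers',
    run_all_tests=False,
)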
Example #7
def multicheck(self, *k, **kw):
	"""
	Runs configuration tests in parallel; results are printed sequentially at the end of the build
	but each test must provide its own msg value to display a line::

		def test_build(ctx):
			ctx.in_msg = True # suppress console outputs
			ctx.check_large_file(mandatory=False)

		conf.multicheck(
			{'header_name':'stdio.h', 'msg':'... stdio', 'uselib_store':'STDIO', 'global_define':False},
			{'header_name':'xyztabcd.h', 'msg':'... optional xyztabcd.h', 'mandatory': False},
			{'header_name':'stdlib.h', 'msg':'... stdlib', 'okmsg': 'aye', 'errmsg': 'nope'},
			{'func': test_build, 'msg':'... testing an arbitrary build function', 'okmsg':'ok'},
			msg       = 'Checking for headers in parallel',
			mandatory = True, # mandatory tests raise an error at the end
			run_all_tests = True, # try running all tests
		)

	The configuration tests may modify the values in conf.env in any order, and the define
	values can affect configuration tests being executed. It is hence recommended
	to provide `uselib_store` values with `global_define=False` to prevent such issues.
	"""
	self.start_msg(kw.get('msg', 'Executing %d configuration tests' % len(k)), **kw)

	# Force a copy so that threads append to the same list at least
	# no order is guaranteed, but the values should not disappear at least
	for var in ('DEFINES', DEFKEYS):
		self.env.append_value(var, [])
	self.env.DEFINE_COMMENTS = self.env.DEFINE_COMMENTS or {}

	# define a task object that will execute our tests
	class par(object):
		def __init__(self):
			self.keep = False
			self.task_sigs = {}
			self.progress_bar = 0
		def total(self):
			return len(tasks)
		def to_log(self, *k, **kw):
			return

	bld = par()
	bld.keep = kw.get('run_all_tests', True)
	bld.imp_sigs = {}
	tasks = []

	id_to_task = {}
	for dct in k:
		x = Task.classes['cfgtask'](bld=bld, env=None)
		tasks.append(x)
		x.args = dct
		x.bld = bld
		x.conf = self

		# bind a logger that will keep the info in memory
		x.logger = Logs.make_mem_logger(str(id(x)), self.logger)

		if 'id' in dct:
			id_to_task[dct['id']] = x

	# second pass to set dependencies with after_test/before_test
	for x in tasks:
		for key in Utils.to_list(x.args.get('before_tests', [])):
			# use .get() so an unknown id reaches the ValueError below
			# instead of raising a bare KeyError
			tsk = id_to_task.get(key)
			if not tsk:
				raise ValueError('No test named %r' % key)
			tsk.run_after.add(x)
		for key in Utils.to_list(x.args.get('after_tests', [])):
			tsk = id_to_task.get(key)
			if not tsk:
				raise ValueError('No test named %r' % key)
			x.run_after.add(tsk)

	def it():
		yield tasks
		while 1:
			yield []
	bld.producer = p = Runner.Parallel(bld, Options.options.jobs)
	bld.multicheck_lock = Utils.threading.Lock()
	p.biter = it()

	self.end_msg('started')
	p.start()

	# flush the logs in order into the config.log
	for x in tasks:
		x.logger.memhandler.flush()

	self.start_msg('-> processing test results')
	if p.error:
		for x in p.error:
			if getattr(x, 'err_msg', None):
				self.to_log(x.err_msg)
				self.end_msg('fail', color='RED')
				raise Errors.WafError('There is an error in the library, read config.log for more information')

	failure_count = 0
	for x in tasks:
		if x.hasrun not in (Task.SUCCESS, Task.NOT_RUN):
			failure_count += 1

	if failure_count:
		self.end_msg(kw.get('errmsg', '%s test failed' % failure_count), color='YELLOW', **kw)
	else:
		self.end_msg('all ok', **kw)

	for x in tasks:
		if x.hasrun != Task.SUCCESS:
			if x.args.get('mandatory', True):
				self.fatal(kw.get('fatalmsg') or 'One of the tests has failed, read config.log for more information')
Example #8
def multicheck(self, *k, **kw):
    self.start_msg(kw.get('msg', 'Executing %d configuration tests' % len(k)),
                   **kw)
    for var in ('DEFINES', DEFKEYS):
        self.env.append_value(var, [])
    self.env.DEFINE_COMMENTS = self.env.DEFINE_COMMENTS or {}

    class par(object):
        def __init__(self):
            self.keep = False
            self.task_sigs = {}
            self.progress_bar = 0

        def total(self):
            return len(tasks)

        def to_log(self, *k, **kw):
            return

    bld = par()
    bld.keep = kw.get('run_all_tests', True)
    tasks = []
    for dct in k:
        x = Task.classes['cfgtask'](bld=bld)
        tasks.append(x)
        x.args = dct
        x.bld = bld
        x.conf = self
        x.logger = Logs.make_mem_logger(str(id(x)), self.logger)

    def it():
        yield tasks
        while 1:
            yield []

    bld.producer = p = Runner.Parallel(bld, Options.options.jobs)
    p.biter = it()
    p.start()
    for x in tasks:
        x.logger.memhandler.flush()
    if p.error:
        for x in p.error:
            if getattr(x, 'err_msg', None):
                self.to_log(x.err_msg)
                self.end_msg('fail', color='RED')
                raise Errors.WafError(
                    'There is an error in the library, read config.log for more information'
                )
    failure_count = 0
    for x in tasks:
        if x.hasrun not in (Task.SUCCESS, Task.NOT_RUN):
            failure_count += 1
    if failure_count:
        self.end_msg(kw.get('errmsg', '%s test failed' % failure_count),
                     color='YELLOW',
                     **kw)
    else:
        self.end_msg('all ok', **kw)
    for x in tasks:
        if 'msg' in x.args:
            self.start_msg(x.args['msg'])
            if x.hasrun == Task.NOT_RUN:
                self.end_msg('test cancelled', 'YELLOW')
            elif x.hasrun != Task.SUCCESS:
                self.end_msg(x.args.get('errmsg', 'no'), 'YELLOW')
            else:
                self.end_msg(x.args.get('okmsg', 'yes'), 'GREEN')
    for x in tasks:
        if x.hasrun != Task.SUCCESS:
            if x.args.get('mandatory', True):
                self.fatal(
                    kw.get('fatalmsg') or
                    'One of the tests has failed, read config.log for more information'
                )
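This variant finally reports each test on its own console line: after the parallel run, every dict that supplied a 'msg' prints green on success, yellow on failure or cancellation, with 'okmsg' and 'errmsg' overriding the default 'yes'/'no'. A usage sketch (header names are illustrative):

conf.multicheck(
    {'header_name': 'stdio.h', 'msg': '... stdio', 'okmsg': 'found'},
    {'header_name': 'xyztabcd.h', 'msg': '... optional xyztabcd.h',
     'errmsg': 'absent', 'mandatory': False},
    msg='Checking headers with per-test reports',
)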
Example #9
def multicheck(self, *k, **kw):
	self.start_msg(kw.get('msg', 'Executing %d configuration tests' % len(k)), **kw)

	# force a copy so that threads append to the same lists; order is
	# not guaranteed, but the values should not disappear
	for var in ('DEFINES', DEFKEYS):
		self.env.append_value(var, [])
	self.env.DEFINE_COMMENTS = self.env.DEFINE_COMMENTS or {}

	# minimal stand-in for a build context
	class par(object):
		def __init__(self):
			self.keep = False
			self.task_sigs = {}
			self.progress_bar = 0
		def total(self):
			return len(tasks)
		def to_log(self, *k, **kw):
			return

	bld = par()
	bld.keep = kw.get('run_all_tests', True)
	tasks = []
	for dct in k:
		x = Task.classes['cfgtask'](bld=bld)
		tasks.append(x)
		x.args = dct
		x.bld = bld
		x.conf = self

		# bind a logger that will keep the info in memory
		x.logger = Logs.make_mem_logger(str(id(x)), self.logger)

	def it():
		yield tasks
		while 1:
			yield []

	bld.producer = p = Runner.Parallel(bld, Options.options.jobs)
	p.biter = it()
	p.start()

	# flush the logs in order into the config.log
	for x in tasks:
		x.logger.memhandler.flush()

	if p.error:
		for x in p.error:
			if getattr(x, 'err_msg', None):
				self.to_log(x.err_msg)
				self.end_msg('fail', color='RED')
				raise Errors.WafError('There is an error in the library, read config.log for more information')

	failure_count = 0
	for x in tasks:
		if x.hasrun not in (Task.SUCCESS, Task.NOT_RUN):
			failure_count += 1

	if failure_count:
		self.end_msg(kw.get('errmsg', '%s test failed' % failure_count), color='YELLOW', **kw)
	else:
		self.end_msg('all ok', **kw)

	# sequential per-test report for every dict that provided a 'msg'
	for x in tasks:
		if 'msg' in x.args:
			self.start_msg(x.args['msg'])
			if x.hasrun == Task.NOT_RUN:
				self.end_msg('test cancelled', 'YELLOW')
			elif x.hasrun != Task.SUCCESS:
				self.end_msg(x.args.get('errmsg', 'no'), 'YELLOW')
			else:
				self.end_msg(x.args.get('okmsg', 'yes'), 'GREEN')

	for x in tasks:
		if x.hasrun != Task.SUCCESS:
			if x.args.get('mandatory', True):
				self.fatal(kw.get('fatalmsg') or 'One of the tests has failed, read config.log for more information')