def _handle_error(self, target, prefix='Target FAILED'):
    """Log the exception currently on the stack and return a list of
    strings to be appended to the global build errors list.

    target - should be a BaseTarget (not a BuildTarget)
    prefix - Prefix of exception, describing what we were doing at the time
    """
    err = sys.exc_info()[1]
    alreadyLogged = False

    if not isinstance(err, BuildException):
        if not isinstance(err, EnvironmentError):
            # most problems should be wrapped as BuildException already; let's make sure we always
            # get an ERROR-level message for things like syntax errors etc
            #log.exception('%s: unexpected (non-build) exception in %s'%(prefix, target))
            #alreadyLogged = True
            pass # this duplicates the stack trace we get at ERROR level from toMultiLineString
        # wrap it so the BuildException formatting helpers below can be used
        err = BuildException('%s due to %s' % (prefix, err.__class__.__name__), causedBy=True)

    if log.isEnabledFor(logging.DEBUG) and not alreadyLogged:
        # make sure the stack trace is at least available at debug
        log.debug('Handling error: %s', traceback.format_exc())

    # one-line summary (if in teamcity mode, we'd use teamcity syntax to log this)
    #log.error('%s: %s', prefix, err.toSingleLineString(target))
    # also useful to have the full stack trace, but only at INFO level
    #log.info('%s (details): %s\n', prefix, err.toMultiLineString(target, includeStack=True))

    # full multi-line report (including stack) at ERROR level
    log.error('%s: %s\n', prefix, err.toMultiLineString(target, includeStack=True), extra=err.getLoggerExtraArgDict(target))

    return [err.toSingleLineString(target)]
def _handle_error(self, target, prefix='Target FAILED'):
    """Perform logging for the exception currently on the stack, and return an
    array of strings to be appended to the global build errors list.

    target - should be a BaseTarget (not a BuildTarget)
    prefix - Prefix of exception, describing what we were doing at the time
    """
    e = sys.exc_info()[1]
    logged = False  # would be set True if the commented-out log.exception path were re-enabled
    if not isinstance(e, BuildException):
        if not isinstance(e, (EnvironmentError)):
            # most problems should be wrapped as BuildException already; let's make sure we always
            # get an ERROR-level message for things like syntax errors etc
            #log.exception('%s: unexpected (non-build) exception in %s'%(prefix, target))
            #logged = True
            pass # this duplicates the stack trace we get at ERROR level from toMultiLineString
        # wrap non-build exceptions so the BuildException formatting helpers below can be used;
        # causedBy=True presumably makes BuildException pick up the current exc_info - TODO confirm against BuildException
        e = BuildException('%s due to %s'%(prefix, e.__class__.__name__), causedBy=True)

    if not logged and log.isEnabledFor(logging.DEBUG):
        # make sure the stack trace is at least available at debug
        log.debug('Handling error: %s', traceback.format_exc())

    # one-line summary (if in teamcity mode, we'd use teamcity syntax to log this)
    #log.error('%s: %s', prefix, e.toSingleLineString(target))
    # also useful to have the full stack trace, but only at INFO level
    #log.info('%s (details): %s\n', prefix, e.toMultiLineString(target, includeStack=True))

    # full multi-line report (including stack) at ERROR level
    log.error('%s: %s\n', prefix, e.toMultiLineString(target, includeStack=True), extra=e.getLoggerExtraArgDict(target))

    return [e.toSingleLineString(target)]
def _deps_target(self, tname):
    """Function called by a worker to check the deps for a single target.

    tname - this is the canonical PATH of the target, not the name

    Returns a tuple (pending, errors, ok): pending is a list of
    (priority, dependencyPath) jobs to be scheduled next, errors is a list
    of error strings, and ok is True only when errors is empty.
    """
    errors = []
    pending = [] # list of new jobs to done as part of dependency resolution
    log.debug("Inspecting dependencies of target %s", tname)
    target = self.targets.get(tname, None) # a BuildTarget object

    # only log dependency status periodically since usually its very quick
    # and not worthwhile
    with self.lock:
        self.index += 1
        log.critical(self.progressFormat + "Resolving dependencies for %s", self.index, self.total, target)
    if not target:
        assert False # I'm not sure how we can get here, think it should actually be impossible
        # NOTE(review): the branch below is unreachable while the assert above is present
        if not exists(tname):
            errors.append("Unknown target %s" % tname)
        else:
            log.debug('Scheduler cannot find target in build file or on disk: %s', target) # is this a problem? maybe assert False here?
    elif self.options['ignore-deps'] and exists(target.path):
        # in this mode, any target that already exists should be treated as
        # a leaf with no deps which means it won't be built under any
        # circumstances (even if a target it depends on is rebuilt),
        # and allows us to avoid the time-consuming transitive resolution
        # of dependencies. Has to be implemented this way, since if we were
        # to allow ANY already-existing target to be re-built in the normal
        # way, we would have to resolve dependencies for all targets in
        # order to ensure we never rebuild a target at the same time as
        # a target that depends on it. We're essentially deleting the entire
        # dependency subtree for all nodes that exist already
        log.debug('Scheduler is treating existing target as a leaf and will not rebuild it: %s', target)
        self.leaves.append(target)
    elif not (self.options['ignore-deps'] and self.options['clean']):
        try:
            deps = target.resolveDependencies(self.context)
            if deps: log.debug('%s has %d dependencies', target.target, len(deps))
            targetDeps = [] # just for logging
            leaf = True # stays True while no dependency is itself a buildable target
            for dname in deps:
                #log.debug('Processing dependency: %s -> %s', tname, dname)
                dpath = toLongPathSafe(dname)
                dnameIsDirPath = isDirPath(dname)
                if dname in self.targets:
                    # dependency is itself a buildable target: record the reverse
                    # dependency and schedule it for resolution if not already pending
                    leaf = False
                    dtarget = self.targets[dname]
                    if dtarget in target.rdeps():
                        raise Exception('Circular dependency between targets: %s and %s' % (dtarget.name, target.name))
                    dtarget.rdep(target)
                    self._updatePriority(target)
                    target.increment()
                    if not dnameIsDirPath:
                        target.filedep( dname ) # might have an already built target dependency which is still newer
                    else:
                        # special case directory target deps - must use stamp file not dir, to avoid re-walking
                        # the directory needlessly, and possibly making a wrong decision if the dir pathset is
                        # from a filtered pathset
                        target.filedep(self.targets[dname].stampfile)
                    with self.lock:
                        if not dname in self.pending:
                            self.pending.append(dname)
                            pending.append((0, dname))
                    targetDeps.append(str(self.targets[dname]))
                else:
                    # dependency should be a plain file/directory on disk; check it exists
                    # with the right kind (dir path vs file path)
                    dstat = getstat(dpath)
                    if dstat and ((dnameIsDirPath and stat.S_ISDIR(dstat.st_mode)) or (not dnameIsDirPath and stat.S_ISREG(dstat.st_mode))):
                        target.filedep(dname)
                    else:
                        # in the specific case of a dependency error, build will definitely fail immediately so we should log line number
                        # at ERROR log level not just at info
                        ex = BuildException("Cannot find dependency %s" % dname)
                        log.error('FAILED during dependency resolution: %s', ex.toMultiLineString(target, includeStack=False), extra=ex.getLoggerExtraArgDict(target))
                        assert not os.path.exists(dpath), dname
                        errors.append(ex.toSingleLineString(target))
                        break
            if leaf:
                log.info('Target dependencies of %s (priority %s) are: <no dependencies>', target, -target.priority)
                self.leaves.append(target)
            else:
                log.info('Target dependencies of %s (priority %s) are: %s', target, -target.priority, ', '.join(targetDeps)) # this is important for debugging missing dependencies etc
        except Exception as e:
            errors.extend(self._handle_error(target.target, prefix="Target FAILED during dependency resolution"))
    else:
        # For clean ignoring deps we want to be as light-weight as possible
        self.leaves.append(target)

    if pending:
        # if we're adding some new jobs
        with self.lock:
            self.total += len(pending)

    # NB: the keep-going option does NOT apply to dependency failures
    return (pending, errors, 0 == len(errors))
def _deps_target(self, tname):
    """Function called by a worker to check the deps for a single target.

    tname - this is the canonical PATH of the target, not the name

    Returns a tuple (pending, errors, ok): pending is a list of
    (priority, dependencyPath) jobs to be scheduled next, errors is a list
    of error strings, and ok is True only when errors is empty.
    """
    errors = []
    pending = [] # list of new jobs to done as part of dependency resolution
    log.debug("Inspecting dependencies of target %s", tname)
    target = self.targets.get(tname, None)

    # only log dependency status periodically since usually its very quick
    # and not worthwhile
    with self.lock:
        self.index += 1
        log.critical(self.progressFormat+"Resolving dependencies for %s", self.index, self.total, target)
    if not target:
        assert False # I'm not sure how we can get here, think it should actually be impossible
        # NOTE(review): the branch below is unreachable while the assert above is present
        if not exists(tname):
            errors.append("Unknown target %s" % tname)
        else:
            log.debug('Scheduler cannot find target in build file or on disk: %s', target) # is this a problem? maybe assert False here?
    elif self.options['ignore-deps'] and exists(target.path):
        # in this mode, any target that already exists should be treated as
        # a leaf with no deps which means it won't be built under any
        # circumstances (even if a target it depends on is rebuilt),
        # and allows us to avoid the time-consuming transitive resolution
        # of dependencies. Has to be implemented this way, since if we were
        # to allow ANY already-existing target to be re-built in the normal
        # way, we would have to resolve dependencies for all targets in
        # order to ensure we never rebuild a target at the same time as
        # a target that depends on it. We're essentially deleting the entire
        # dependency subtree for all nodes that exist already
        log.debug('Scheduler is treating existing target as a leaf and will not rebuild it: %s', target)
        self.leaves.append(target)
    elif not (self.options['ignore-deps'] and self.options['clean']):
        try:
            deps = target.resolveDependencies(self.context)
            if deps: log.debug('%s has %d dependencies', target.target, len(deps))
            targetDeps = [] # just for logging
            leaf = True # stays True while no dependency is itself a buildable target
            for dname in deps:
                #log.debug('Processing dependency: %s -> %s', tname, dname)
                dpath = normLongPath(dname)
                if dname in self.targets:
                    # dependency is itself a buildable target: record the reverse
                    # dependency and schedule it for resolution if not already pending
                    leaf = False
                    dtarget = self.targets[dname]
                    if dtarget in target.rdeps(): raise Exception('Circular dependency between targets: %s and %s'%(dtarget.name, target.name))
                    dtarget.rdep(target)
                    self._updatePriority(target)
                    target.increment()
                    if not isDirPath(dname):
                        target.filedep(dname) # might have an already built target dependency which is still newer
                    else:
                        # special case directory target deps - must use stamp file not dir, to avoid re-walking
                        # the directory needlessly, and possibly making a wrong decision if the dir pathset is
                        # from a filtered pathset
                        target.filedep(self.targets[dname].stampfile)
                    with self.lock:
                        if not dname in self.pending:
                            self.pending.append(dname)
                            pending.append((0, dname))
                    targetDeps.append(str(self.targets[dname]))
                elif (isDirPath(dname) and isdir(dpath)) or (not isDirPath(dname) and isfile(dpath)):
                    # dependency is a plain file/directory on disk with the expected kind
                    target.filedep(dname)
                else:
                    # in the specific case of a dependency error, build will definitely fail immediately so we should log line number
                    # at ERROR log level not just at info
                    ex = BuildException("Cannot find dependency %s" % dname)
                    log.error('FAILED during dependency resolution: %s', ex.toMultiLineString(target, includeStack=False), extra=ex.getLoggerExtraArgDict(target))
                    assert not os.path.exists(dpath), dname
                    errors.append(ex.toSingleLineString(target))
                    break
            if leaf:
                log.info('Target dependencies of %s (priority %s) are: <no dependencies>', target, -target.priority)
                self.leaves.append(target)
            else:
                log.info('Target dependencies of %s (priority %s) are: %s', target, -target.priority, ', '.join(targetDeps)) # this is important for debugging missing dependencies etc
        except Exception as e:
            errors.extend(self._handle_error(target.target, prefix="Target FAILED during dependency resolution"))
    else:
        # For clean ignoring deps we want to be as light-weight as possible
        self.leaves.append(target)

    if pending:
        # if we're adding some new jobs
        with self.lock:
            self.total += len(pending)

    # NB: the keep-going option does NOT apply to dependency failures
    return (pending, errors, 0 == len(errors))