def download(self, ud, d):
    """Fetch the svk module described by *ud* and pack it into ud.localpath.

    Checks the module out into a freshly created temporary directory, tars
    the checkout up to ud.localpath, then removes the temporary tree.
    Raises FetchError if the temporary directory cannot be created.
    """
    svkroot = ud.host + ud.path

    # Default to a date-based checkout; an explicit revision wins.
    svkcmd = "svk co -r {%s} %s/%s" % (ud.date, svkroot, ud.module)
    if ud.revision:
        svkcmd = "svk co -r %s %s/%s" % (ud.revision, svkroot, ud.module)

    # create temp directory
    localdata = data.createCopy(d)
    data.update_data(localdata)
    logger.debug(2, "Fetch: creating temporary directory")
    bb.utils.mkdirhier(data.expand('${WORKDIR}', localdata))
    data.setVar('TMPBASE', data.expand('${WORKDIR}/oesvk.XXXXXX', localdata), localdata)
    # "false" fallback makes the run fail cleanly when MKTEMPDIRCMD is unset.
    tmpfile, errors = bb.process.run(data.getVar('MKTEMPDIRCMD', localdata, True) or "false")
    tmpfile = tmpfile.strip()
    if not tmpfile:
        # BUG FIX: logger.error() was called with no arguments, which raises
        # TypeError instead of logging; pass the message explicitly.
        logger.error("Fetch: unable to create temporary directory.. make sure 'mktemp' is in the PATH.")
        raise FetchError("Fetch: unable to create temporary directory.. make sure 'mktemp' is in the PATH.", ud.url)

    # check out sources there
    os.chdir(tmpfile)
    logger.info("Fetch " + ud.url)
    logger.debug(1, "Running %s", svkcmd)
    runfetchcmd(svkcmd, d, cleanup = [tmpfile])

    os.chdir(os.path.join(tmpfile, os.path.dirname(ud.module)))

    # tar them up to a defined filename
    runfetchcmd("tar -czf %s %s" % (ud.localpath, os.path.basename(ud.module)), d, cleanup = [ud.localpath])

    # cleanup
    bb.utils.prunedir(tmpfile)
def buildDepgraph(self):
    """Populate provider preferences and per-file priorities on self.status.

    Reads PREFERRED_PROVIDERS from the configuration metadata into
    self.status.preferred, then assigns a priority to every known .bb file
    via the configured BBFILE priority regexes.
    """
    all_depends = self.status.all_depends
    pn_provides = self.status.pn_provides

    localdata = data.createCopy(self.configuration.data)
    bb.data.update_data(localdata)
    bb.data.expandKeys(localdata)

    def calc_bbfile_priority(filename):
        # First matching configured regex wins; unmatched files get 0.
        for (regex, pri) in self.status.bbfile_config_priorities:
            if regex.match(filename):
                return pri
        return 0

    # Handle PREFERRED_PROVIDERS
    for p in (bb.data.getVar('PREFERRED_PROVIDERS', localdata, 1) or "").split():
        try:
            (providee, provider) = p.split(':')
        # BUG FIX: a bare "except:" also swallowed KeyboardInterrupt and
        # SystemExit; only a malformed entry (wrong number of ':'-separated
        # fields) raises ValueError here.
        except ValueError:
            bb.msg.fatal(bb.msg.domain.Provider, "Malformed option in PREFERRED_PROVIDERS variable: %s" % p)
            continue
        if providee in self.status.preferred and self.status.preferred[providee] != provider:
            bb.msg.error(bb.msg.domain.Provider, "conflicting preferences for %s: both %s and %s specified" % (providee, provider, self.status.preferred[providee]))
        # Later entries overwrite earlier ones.
        self.status.preferred[providee] = provider

    # Calculate priorities for each file
    for p in self.status.pkg_fn.keys():
        self.status.bbfile_priority[p] = calc_bbfile_priority(p)
def fileBuild( self, params, cmd = "build" ):
    """Parse and build a .bb file

    params[0] is the (possibly partial) path of the .bb file; *cmd* is the
    task name to run (without the "do_" prefix).  Errors are printed rather
    than raised; the last build exception is kept in the module-global
    last_exception for later inspection.
    """
    global last_exception
    name = params[0]
    bf = completeFilePath( name )
    print "SHELL: Calling '%s' on '%s'" % ( cmd, bf )
    # NOTE(review): oldcmd is saved but never restored in this function —
    # presumably a caller or later code resets cooker.configuration.cmd;
    # confirm before relying on it.
    oldcmd = cooker.configuration.cmd
    cooker.configuration.cmd = cmd

    # Parse against a private copy of the configuration metadata so the
    # shared configuration datastore is not polluted.
    thisdata = data.createCopy(cooker.configuration.data)
    data.update_data(thisdata)
    data.expandKeys(thisdata)

    try:
        bbfile_data = parse.handle( bf, thisdata )
    except parse.ParseError:
        print "ERROR: Unable to open or parse '%s'" % bf
    else:
        # Remove stamp for target if force mode active
        if cooker.configuration.force:
            bb.msg.note(2, bb.msg.domain.RunQueue, "Remove stamp %s, %s" % (cmd, bf))
            bb.build.del_stamp('do_%s' % cmd, bbfile_data)

        item = data.getVar('PN', bbfile_data, 1)
        # Clearing _task_cache forces all tasks to re-run.
        data.setVar( "_task_cache", [], bbfile_data ) # force
        try:
            cooker.tryBuildPackage( os.path.abspath( bf ), item, cmd, bbfile_data, True )
        except build.EventException, e:
            print "ERROR: Couldn't build '%s'" % name
            last_exception = e
def _get_layer_collections(layer_path, lconf=None, data=None):
    """Parse a layer's conf/layer.conf and return its collection metadata.

    Returns a dict mapping each collection name declared in
    BBFILE_COLLECTIONS (or the layer directory name if none are declared)
    to a dict with 'priority', 'pattern' and 'depends' entries.
    Raises LayerError if the layer.conf cannot be parsed.
    """
    import bb.parse
    import bb.data

    if lconf is None:
        lconf = os.path.join(layer_path, 'conf', 'layer.conf')

    # Start from the supplied datastore when given, otherwise a fresh one.
    if data is not None:
        ldata = data.createCopy()
    else:
        ldata = bb.data.init()
        bb.parse.init_parser(ldata)

    ldata.setVar('LAYERDIR', layer_path)
    try:
        ldata = bb.parse.handle(lconf, ldata, include=True)
    except BaseException as exc:
        raise LayerError(exc)
    ldata.expandVarref('LAYERDIR')

    names = (ldata.getVar('BBFILE_COLLECTIONS', True) or '').split()
    if not names:
        # Fall back to the directory name when the layer declares nothing.
        names = [os.path.basename(layer_path)]

    collections = {}
    for name in names:
        collections[name] = {
            'priority': ldata.getVar('BBFILE_PRIORITY_%s' % name, True),
            'pattern': ldata.getVar('BBFILE_PATTERN_%s' % name, True),
            'depends': ldata.getVar('LAYERDEPENDS_%s' % name, True),
        }
    return collections
def buildTargets(self, targets):
    """
    Attempt to build the targets specified

    Resolves providers for each target, builds a run queue for the
    configured command, executes it, and exits the process — this method
    never returns normally on the failure path.
    """
    buildname = bb.data.getVar("BUILDNAME", self.configuration.data)
    bb.event.fire(bb.event.BuildStarted(buildname, targets, self.configuration.event_data))

    # Work on a private, expanded copy of the configuration metadata.
    localdata = data.createCopy(self.configuration.data)
    bb.data.update_data(localdata)
    bb.data.expandKeys(localdata)

    taskdata = bb.taskdata.TaskData(self.configuration.abort)

    runlist = []
    try:
        for k in targets:
            taskdata.add_provider(localdata, self.status, k)
            # Each target runs the configured command as its task.
            runlist.append([k, "do_%s" % self.configuration.cmd])
        taskdata.add_unresolved(localdata, self.status)
    except bb.providers.NoProvider:
        # Provider resolution failure aborts the whole build.
        sys.exit(1)

    rq = bb.runqueue.RunQueue(self, self.configuration.data, self.status, taskdata, runlist)
    rq.prepare_runqueue()
    try:
        failures = rq.execute_runqueue()
    except runqueue.TaskFailure, fnids:
        # Count and report every failed recipe, then fire BuildCompleted
        # with the failure count and exit non-zero.
        # NOTE(review): on the success path BuildCompleted is never fired
        # and the method falls through — confirm whether that is intended.
        failures = 0
        for fnid in fnids:
            bb.msg.error(bb.msg.domain.Build, "'%s' failed" % taskdata.fn_index[fnid])
            failures = failures + 1
        bb.event.fire(bb.event.BuildCompleted(buildname, targets, self.configuration.event_data, failures))
        sys.exit(1)
def download(self, loc, ud, d):
    """Fetch the svk module described by *ud* (URL *loc*) into ud.localpath.

    Checks the module out into a freshly created temporary directory, tars
    the checkout up to ud.localpath, then removes the temporary tree.
    Raises FetchError if the temporary directory cannot be created.
    """
    svkroot = ud.host + ud.path

    # Default to a date-based checkout; an explicit revision wins.
    svkcmd = "svk co -r {%s} %s/%s" % (ud.date, svkroot, ud.module)
    if ud.revision:
        svkcmd = "svk co -r %s %s/%s" % (ud.revision, svkroot, ud.module)

    # create temp directory
    localdata = data.createCopy(d)
    data.update_data(localdata)
    logger.debug(2, "Fetch: creating temporary directory")
    bb.utils.mkdirhier(data.expand('${WORKDIR}', localdata))
    data.setVar('TMPBASE', data.expand('${WORKDIR}/oesvk.XXXXXX', localdata), localdata)
    # "false" fallback makes the run fail cleanly when MKTEMPDIRCMD is unset.
    tmpfile, errors = bb.process.run(data.getVar('MKTEMPDIRCMD', localdata, True) or "false")
    tmpfile = tmpfile.strip()
    if not tmpfile:
        # BUG FIX: logger.error() was called with no arguments, which raises
        # TypeError instead of logging; pass the message explicitly.
        logger.error("Fetch: unable to create temporary directory.. make sure 'mktemp' is in the PATH.")
        raise FetchError("Fetch: unable to create temporary directory.. make sure 'mktemp' is in the PATH.", loc)

    # check out sources there
    os.chdir(tmpfile)
    logger.info("Fetch " + loc)
    logger.debug(1, "Running %s", svkcmd)
    runfetchcmd(svkcmd, d, cleanup = [tmpfile])

    os.chdir(os.path.join(tmpfile, os.path.dirname(ud.module)))

    # tar them up to a defined filename
    runfetchcmd("tar -czf %s %s" % (ud.localpath, os.path.basename(ud.module)), d, cleanup = [ud.localpath])

    # cleanup
    bb.utils.prunedir(tmpfile)
def buildDepgraph( self ):
    """Populate provider preferences and per-file priorities on self.status.

    Reads PREFERRED_PROVIDERS from the configuration metadata into
    self.status.preferred, then assigns a priority to every known .bb file
    via the configured BBFILE priority regexes.
    """
    all_depends = self.status.all_depends
    pn_provides = self.status.pn_provides

    localdata = data.createCopy(self.configuration.data)
    bb.data.update_data(localdata)
    bb.data.expandKeys(localdata)

    def calc_bbfile_priority(filename):
        # First matching configured regex wins; unmatched files get 0.
        for (regex, pri) in self.status.bbfile_config_priorities:
            if regex.match(filename):
                return pri
        return 0

    # Handle PREFERRED_PROVIDERS
    for p in (bb.data.getVar('PREFERRED_PROVIDERS', localdata, 1) or "").split():
        try:
            (providee, provider) = p.split(':')
        # BUG FIX: a bare "except:" also swallowed KeyboardInterrupt and
        # SystemExit; only a malformed entry (wrong number of ':'-separated
        # fields) raises ValueError here.
        except ValueError:
            bb.msg.error(bb.msg.domain.Provider, "Malformed option in PREFERRED_PROVIDERS variable: %s" % p)
            continue
        if providee in self.status.preferred and self.status.preferred[providee] != provider:
            bb.msg.error(bb.msg.domain.Provider, "conflicting preferences for %s: both %s and %s specified" % (providee, provider, self.status.preferred[providee]))
        # Later entries overwrite earlier ones.
        self.status.preferred[providee] = provider

    # Calculate priorities for each file
    for p in self.status.pkg_fn.keys():
        self.status.bbfile_priority[p] = calc_bbfile_priority(p)
def exec_task(task, d): """Execute an BB 'task' The primary difference between executing a task versus executing a function is that a task exists in the task digraph, and therefore has dependencies amongst other tasks.""" # Check whther this is a valid task if not data.getVarFlag(task, 'task', d): raise EventException("No such task", InvalidTask(task, d)) try: bb.msg.debug(1, bb.msg.domain.Build, "Executing task %s" % task) old_overrides = data.getVar('OVERRIDES', d, 0) localdata = data.createCopy(d) data.setVar('OVERRIDES', 'task-%s:%s' % (task[3:], old_overrides), localdata) data.update_data(localdata) data.expandKeys(localdata) event.fire(TaskStarted(task, localdata)) exec_func(task, localdata) event.fire(TaskSucceeded(task, localdata)) except FuncFailed, message: # Try to extract the optional logfile try: (msg, logfile) = message except: logfile = None msg = message bb.msg.note(1, bb.msg.domain.Build, "Task failed: %s" % message ) failedevent = TaskFailed(msg, logfile, task, d) event.fire(failedevent) raise EventException("Function failed in task: %s" % message, failedevent)
def execute(graph, item):
    """Execute *item* if it is a task, honouring the shared task cache.

    Closure: relies on 'task', 'd' and 'task_cache' from the enclosing
    scope.  Non-toplevel items are delegated to exec_task (which handles
    their dependencies); the toplevel item is executed inline here.
    Returns 1 for handled items.
    """
    # Only var-flagged tasks are executable; anything else is skipped.
    if data.getVarFlag(item, 'task', d):
        if item in task_cache:
            # Already executed in this session.
            return 1

        if task != item:
            # deeper than toplevel, exec w/ deps
            exec_task(item, d)
            return 1

        try:
            bb.msg.debug(1, bb.msg.domain.Build, "Executing task %s" % item)
            old_overrides = data.getVar('OVERRIDES', d, 0)
            # Run against a task-local copy so OVERRIDES changes don't leak.
            localdata = data.createCopy(d)
            # NOTE(review): uses 'task_<item>' (underscore, full item name)
            # here, while sibling exec_task variants use 'task-<name>' —
            # presumably historical; confirm before unifying.
            data.setVar('OVERRIDES', 'task_%s:%s' % (item, old_overrides), localdata)
            data.update_data(localdata)
            event.fire(TaskStarted(item, localdata))
            exec_func(item, localdata)
            event.fire(TaskSucceeded(item, localdata))
            # Record completion on the shared datastore for later calls.
            task_cache.append(item)
            data.setVar('_task_cache', task_cache, d)
        except FuncFailed, reason:
            bb.msg.note(1, bb.msg.domain.Build, "Task failed: %s" % reason )
            failedevent = TaskFailed(item, d)
            event.fire(failedevent)
            raise EventException("Function failed in task: %s" % reason, failedevent)
def exec_task(task, d): """Execute an BB 'task' The primary difference between executing a task versus executing a function is that a task exists in the task digraph, and therefore has dependencies amongst other tasks.""" # Check whther this is a valid task if not data.getVarFlag(task, 'task', d): raise EventException("No such task", InvalidTask(task, d)) try: bb.msg.debug(1, bb.msg.domain.Build, "Executing task %s" % task) old_overrides = data.getVar('OVERRIDES', d, 0) localdata = data.createCopy(d) data.setVar('OVERRIDES', 'task-%s:%s' % (task[3:], old_overrides), localdata) data.update_data(localdata) data.expandKeys(localdata) event.fire(TaskStarted(task, localdata), localdata) exec_func(task, localdata) event.fire(TaskSucceeded(task, localdata), localdata) except FuncFailed, message: # Try to extract the optional logfile try: (msg, logfile) = message except: logfile = None msg = message bb.msg.note(1, bb.msg.domain.Build, "Task failed: %s" % message) failedevent = TaskFailed(msg, logfile, task, d) event.fire(failedevent, d) raise EventException("Function failed in task: %s" % message, failedevent)
def exec_task(fn, task, d): """Execute a BB 'task' Execution of a task involves a bit more setup than executing a function, running it with its own local metadata, and with some useful variables set. """ # Check whther this is a valid task if not data.getVarFlag(task, 'task', d): raise InvalidTask(task, d) try: logger.debug(1, "Executing task %s", task) old_overrides = data.getVar('OVERRIDES', d, 0) localdata = data.createCopy(d) data.setVar('OVERRIDES', 'task-%s:%s' % (task[3:], old_overrides), localdata) data.update_data(localdata) data.expandKeys(localdata) data.setVar('BB_FILENAME', fn, d) data.setVar('BB_CURRENTTASK', task[3:], d) event.fire(TaskStarted(task, localdata), localdata) exec_func(task, localdata) event.fire(TaskSucceeded(task, localdata), localdata) except FuncFailed as exc: event.fire(TaskFailed(exc.name, exc.logfile, localdata), localdata) raise # make stamp, or cause event and raise exception if not data.getVarFlag(task, 'nostamp', d) and not data.getVarFlag(task, 'selfstamp', d): make_stamp(task, d)
def buildTargets(self, targets):
    """
    Attempt to build the targets specified

    Resolves providers for each target, builds a run queue for the
    configured command, executes it, and exits the process — this method
    never returns normally on the failure path.
    """
    buildname = bb.data.getVar("BUILDNAME", self.configuration.data)
    bb.event.fire(bb.event.BuildStarted(buildname, targets, self.configuration.event_data))

    # Work on a private, expanded copy of the configuration metadata.
    localdata = data.createCopy(self.configuration.data)
    bb.data.update_data(localdata)
    bb.data.expandKeys(localdata)

    taskdata = bb.taskdata.TaskData(self.configuration.abort, self.configuration.tryaltconfigs)

    runlist = []
    try:
        for k in targets:
            taskdata.add_provider(localdata, self.status, k)
            # Each target runs the configured command as its task.
            runlist.append([k, "do_%s" % self.configuration.cmd])
        taskdata.add_unresolved(localdata, self.status)
    except bb.providers.NoProvider:
        # Provider resolution failure aborts the whole build.
        sys.exit(1)

    rq = bb.runqueue.RunQueue(self, self.configuration.data, self.status, taskdata, runlist)
    rq.prepare_runqueue()
    try:
        failures = rq.execute_runqueue()
    except runqueue.TaskFailure, fnids:
        # Count and report every failed recipe, then fire BuildCompleted
        # with the failure count and exit non-zero.
        # NOTE(review): on the success path BuildCompleted is never fired
        # and the method falls through — confirm whether that is intended.
        failures = 0
        for fnid in fnids:
            bb.msg.error(bb.msg.domain.Build, "'%s' failed" % taskdata.fn_index[fnid])
            failures = failures + 1
        bb.event.fire(bb.event.BuildCompleted(buildname, targets, self.configuration.event_data, failures))
        sys.exit(1)
def findPreferredProvider(pn, cfgData, dataCache, pkg_pn=None, item=None):
    """
    Find the first provider in pkg_pn with a PREFERRED_VERSION set.

    Returns a ((pe, pv, pr), filename) pair; both elements are None when
    no file in pkg_pn matches the preferred version (or none is set).
    *item* is only used to decorate log messages.
    """
    preferred_file = None
    preferred_ver = None

    # Evaluate PREFERRED_VERSION with pn-specific overrides active.
    localdata = data.createCopy(cfgData)
    bb.data.setVar('OVERRIDES', "%s:pn-%s:%s" % (data.getVar('OVERRIDES', localdata), pn, pn), localdata)
    bb.data.update_data(localdata)

    preferred_v = bb.data.getVar('PREFERRED_VERSION', localdata, True)
    if preferred_v:
        # BUG FIX: the pattern contained backslash escapes in a non-raw
        # string ('\d'); use a raw string so the regex is explicit and no
        # invalid-escape warning is emitted.  Groups: optional epoch
        # ("E:"), version, optional "_revision" suffix.
        m = re.match(r'(\d+:)*(.*)(_.*)*', preferred_v)
        if m:
            if m.group(1):
                preferred_e = int(m.group(1)[:-1])
            else:
                preferred_e = None
            preferred_v = m.group(2)
            if m.group(3):
                preferred_r = m.group(3)[1:]
            else:
                preferred_r = None
        else:
            preferred_e = None
            preferred_r = None

        for file_set in pkg_pn:
            for f in file_set:
                pe, pv, pr = dataCache.pkg_pepvpr[f]
                if preferredVersionMatch(pe, pv, pr, preferred_e, preferred_v, preferred_r):
                    preferred_file = f
                    preferred_ver = (pe, pv, pr)
                    break
            if preferred_file:
                break
        # Build a human-readable version string for logging.
        if preferred_r:
            pv_str = '%s-%s' % (preferred_v, preferred_r)
        else:
            pv_str = preferred_v
        if not (preferred_e is None):
            pv_str = '%s:%s' % (preferred_e, pv_str)
        itemstr = ""
        if item:
            itemstr = " (for item %s)" % item
        if preferred_file is None:
            logger.info("preferred version %s of %s not available%s", pv_str, pn, itemstr)
        else:
            logger.debug(1, "selecting %s as PREFERRED_VERSION %s of package %s%s", preferred_file, pv_str, pn, itemstr)

    return (preferred_ver, preferred_file)
def _task_data(fn, task, d):
    """Return a finalized copy of *d* specialised for running *task* of *fn*.

    Sets BB_FILENAME/BB_CURRENTTASK and prefixes OVERRIDES with the
    task-specific override ("task-<name>", where <name> drops "do_").
    """
    taskname = task[3:]
    task_d = data.createCopy(d)
    task_d.setVar("BB_FILENAME", fn)
    task_d.setVar("BB_CURRENTTASK", taskname)
    base_overrides = d.getVar("OVERRIDES", False)
    task_d.setVar("OVERRIDES", "task-%s:%s" % (taskname, base_overrides))
    task_d.finalize()
    data.expandKeys(task_d)
    return task_d
def _task_data(fn, task, d):
    """Build the task-local datastore used to execute *task* from file *fn*.

    Copies *d*, records the file and task names, activates the
    'task-<name>' override, then finalizes and key-expands the copy.
    """
    name = task[3:]
    ldata = data.createCopy(d)
    ldata.setVar('BB_FILENAME', fn)
    ldata.setVar('BB_CURRENTTASK', name)
    ldata.setVar('OVERRIDES', 'task-%s:%s' % (name, d.getVar('OVERRIDES', False)))
    ldata.finalize()
    data.expandKeys(ldata)
    return ldata
def feeder(lineno, s, fn, data):
    """Parse one configuration-file line *s* (line *lineno* of file *fn*)
    into the datastore *data*.

    Tries, in order: variable assignment (with export/?=/:=/+=/=+/.=/=.
    and flag forms), 'include', 'require', and bare 'export'.  Raises
    ParseError for anything that matches none of them.

    Note: the parameter named 'data' shadows the bb 'data' module; module
    functions are reached via bb.data throughout.
    """
    def getFunc(groupd, key, data):
        # Read either the flag value or the plain variable, depending on
        # whether the assignment targeted a flag.
        if 'flag' in groupd and groupd['flag'] != None:
            return bb.data.getVarFlag(key, groupd['flag'], data)
        else:
            return bb.data.getVar(key, data)

    m = __config_regexp__.match(s)
    if m:
        groupd = m.groupdict()
        key = groupd["var"]
        if "exp" in groupd and groupd["exp"] != None:
            # "export VAR = ..." — mark for environment export.
            bb.data.setVarFlag(key, "export", 1, data)
        if "ques" in groupd and groupd["ques"] != None:
            # "?=" — only assign if currently unset.
            val = getFunc(groupd, key, data)
            if val == None:
                val = groupd["value"]
        elif "colon" in groupd and groupd["colon"] != None:
            # ":=" — expand immediately against an updated copy.
            e = data.createCopy()
            bb.data.update_data(e)
            val = bb.data.expand(groupd["value"], e)
        elif "append" in groupd and groupd["append"] != None:
            # "+=" — append with a separating space.
            val = "%s %s" % ((getFunc(groupd, key, data) or ""), groupd["value"])
        elif "prepend" in groupd and groupd["prepend"] != None:
            # "=+" — prepend with a separating space.
            val = "%s %s" % (groupd["value"], (getFunc(groupd, key, data) or ""))
        elif "postdot" in groupd and groupd["postdot"] != None:
            # ".=" — append with no separator.
            val = "%s%s" % ((getFunc(groupd, key, data) or ""), groupd["value"])
        elif "predot" in groupd and groupd["predot"] != None:
            # "=." — prepend with no separator.
            val = "%s%s" % (groupd["value"], (getFunc(groupd, key, data) or ""))
        else:
            # Plain "=".
            val = groupd["value"]

        if 'flag' in groupd and groupd['flag'] != None:
            bb.msg.debug(3, bb.msg.domain.Parsing, "setVarFlag(%s, %s, %s, data)" % (key, groupd['flag'], val))
            bb.data.setVarFlag(key, groupd['flag'], val, data)
        else:
            bb.data.setVar(key, val, data)
        return

    m = __include_regexp__.match(s)
    if m:
        # Optional include: missing files are tolerated (False).
        s = bb.data.expand(m.group(1), data)
        bb.msg.debug(3, bb.msg.domain.Parsing, "CONF %s:%d: including %s" % (fn, lineno, s))
        include(fn, s, data, False)
        return

    m = __require_regexp__.match(s)
    if m:
        # Mandatory include: a missing file is an error.
        s = bb.data.expand(m.group(1), data)
        include(fn, s, data, "include required")
        return

    m = __export_regexp__.match(s)
    if m:
        # Bare "export VAR" with no assignment.
        bb.data.setVarFlag(m.group(1), "export", 1, data)
        return

    raise ParseError("%s:%d: unparsed line: '%s'" % (fn, lineno, s));
def go(self, loc, ud, d):
    """Fetch the svk module described by *ud* (URL *loc*) into ud.localpath.

    Skips the fetch entirely when a forced fetch is not required and a
    mirror copy is usable.  Otherwise checks the module out into a fresh
    temporary directory, tars it up to ud.localpath, and removes the
    temporary tree.  Raises FetchError on any failure.
    """
    if not self.forcefetch(loc, ud, d) and Fetch.try_mirror(d, ud.localfile):
        return

    svkroot = ud.host + ud.path
    # pyflakes claims date is not known... it looks right
    svkcmd = "svk co -r {%s} %s/%s" % (date, svkroot, ud.module)
    if ud.revision:
        # BUG FIX: the format string had two placeholders ("-r %s/%s") for
        # three arguments, which raises TypeError; the revision needs its
        # own placeholder, matching the date-based command above.
        svkcmd = "svk co -r %s %s/%s" % (ud.revision, svkroot, ud.module)

    # create temp directory
    localdata = data.createCopy(d)
    data.update_data(localdata)
    bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: creating temporary directory")
    bb.mkdirhier(data.expand('${WORKDIR}', localdata))
    data.setVar('TMPBASE', data.expand('${WORKDIR}/oesvk.XXXXXX', localdata), localdata)
    # "false" fallback makes the command fail cleanly when MKTEMPDIRCMD is unset.
    tmppipe = os.popen(data.getVar('MKTEMPDIRCMD', localdata, 1) or "false")
    tmpfile = tmppipe.readline().strip()
    # BUG FIX: close the pipe instead of leaking the file descriptor.
    tmppipe.close()
    if not tmpfile:
        bb.msg.error(bb.msg.domain.Fetcher, "Fetch: unable to create temporary directory.. make sure 'mktemp' is in the PATH.")
        raise FetchError(ud.module)

    # check out sources there
    os.chdir(tmpfile)
    bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc)
    bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % svkcmd)
    myret = os.system(svkcmd)
    if myret != 0:
        try:
            os.rmdir(tmpfile)
        except OSError:
            pass
        raise FetchError(ud.module)

    os.chdir(os.path.join(tmpfile, os.path.dirname(ud.module)))
    # tar them up to a defined filename
    myret = os.system("tar -czf %s %s" % (ud.localpath, os.path.basename(ud.module)))
    if myret != 0:
        # Remove a partial tarball so a later retry starts clean.
        try:
            os.unlink(ud.localpath)
        except OSError:
            pass
        raise FetchError(ud.module)
    # cleanup
    os.system('rm -rf %s' % tmpfile)
def showEnvironment(self, buildfile=None, pkgs_to_build=[]): """ Show the outer or per-package environment """ fn = None envdata = None if 'world' in pkgs_to_build: print "'world' is not a valid target for --environment." sys.exit(1) if len(pkgs_to_build) > 1: print "Only one target can be used with the --environment option." sys.exit(1) if buildfile: if len(pkgs_to_build) > 0: print "No target should be used with the --environment and --buildfile options." sys.exit(1) self.cb = None self.bb_cache = bb.cache.init(self) fn = self.matchFile(buildfile) if not fn: sys.exit(1) elif len(pkgs_to_build) == 1: self.updateCache() localdata = data.createCopy(self.configuration.data) bb.data.update_data(localdata) bb.data.expandKeys(localdata) taskdata = bb.taskdata.TaskData(self.configuration.abort, self.configuration.tryaltconfigs) try: taskdata.add_provider(localdata, self.status, pkgs_to_build[0]) taskdata.add_unresolved(localdata, self.status) except bb.providers.NoProvider: sys.exit(1) targetid = taskdata.getbuild_id(pkgs_to_build[0]) fnid = taskdata.build_targets[targetid][0] fn = taskdata.fn_index[fnid] else: envdata = self.configuration.data if fn: try: envdata = self.bb_cache.loadDataFull(fn, self.configuration.data) except IOError, e: bb.msg.fatal(bb.msg.domain.Parsing, "Unable to read %s: %s" % (fn, e)) except Exception, e: bb.msg.fatal(bb.msg.domain.Parsing, "%s" % e)
def clean(self, ud, d):
    """ Clean CVS Files and tarballs

    Removes both the package's checkout directory under ${CVSDIR} and the
    downloaded tarball at ud.localpath.
    """
    package = data.expand('${PN}', d)

    # Evaluate ${CVSDIR} with the cvs override active.
    cvsdata = data.createCopy(d)
    overrides = "cvs:%s" % data.getVar('OVERRIDES', cvsdata)
    data.setVar('OVERRIDES', overrides, cvsdata)
    data.update_data(cvsdata)

    checkout_dir = os.path.join(data.expand('${CVSDIR}', cvsdata), package)
    bb.utils.remove(checkout_dir, True)
    bb.utils.remove(ud.localpath)
def findPreferredProvider(pn, cfgData, dataCache, pkg_pn=None, item=None):
    """
    Find the first provider in pkg_pn with a PREFERRED_VERSION set.

    Returns a ((pe, pv, pr), filename) pair; both elements are None when
    no file in pkg_pn matches the preferred version (or none is set).
    *item* is only used to decorate log messages.
    """
    preferred_file = None
    preferred_ver = None

    # Evaluate PREFERRED_VERSION with pn-specific overrides active.
    localdata = data.createCopy(cfgData)
    bb.data.setVar("OVERRIDES", "%s:pn-%s:%s" % (data.getVar("OVERRIDES", localdata), pn, pn), localdata)
    bb.data.update_data(localdata)

    preferred_v = bb.data.getVar("PREFERRED_VERSION", localdata, True)
    if preferred_v:
        # BUG FIX: the pattern contained backslash escapes in a non-raw
        # string ("\d"); use a raw string so the regex is explicit and no
        # invalid-escape warning is emitted.  Groups: optional epoch
        # ("E:"), version, optional "_revision" suffix.
        m = re.match(r"(\d+:)*(.*)(_.*)*", preferred_v)
        if m:
            if m.group(1):
                preferred_e = int(m.group(1)[:-1])
            else:
                preferred_e = None
            preferred_v = m.group(2)
            if m.group(3):
                preferred_r = m.group(3)[1:]
            else:
                preferred_r = None
        else:
            preferred_e = None
            preferred_r = None

        for file_set in pkg_pn:
            for f in file_set:
                pe, pv, pr = dataCache.pkg_pepvpr[f]
                if preferredVersionMatch(pe, pv, pr, preferred_e, preferred_v, preferred_r):
                    preferred_file = f
                    preferred_ver = (pe, pv, pr)
                    break
            if preferred_file:
                break
        # Build a human-readable version string for logging.
        if preferred_r:
            pv_str = "%s-%s" % (preferred_v, preferred_r)
        else:
            pv_str = preferred_v
        if not (preferred_e is None):
            pv_str = "%s:%s" % (preferred_e, pv_str)
        itemstr = ""
        if item:
            itemstr = " (for item %s)" % item
        if preferred_file is None:
            logger.info("preferred version %s of %s not available%s", pv_str, pn, itemstr)
        else:
            logger.debug(1, "selecting %s as PREFERRED_VERSION %s of package %s%s", preferred_file, pv_str, pn, itemstr)

    return (preferred_ver, preferred_file)
def go(self, uri, ud, d):
    """Fetch urls

    Tries PREMIRRORS first, then the original *uri*, then MIRRORS,
    stopping at the first successful download into ud.localpath.
    Raises FetchError when every attempt fails.
    """
    def fetch_uri(uri, ud, d):
        # A pre-existing local file means a previous attempt was
        # interrupted: resume rather than restart.
        if os.path.exists(ud.localpath):
            # file exists, but we didnt complete it.. trying again..
            fetchcmd = data.getVar("RESUMECOMMAND", d, 1)
        else:
            fetchcmd = data.getVar("FETCHCOMMAND", d, 1)

        bb.msg.note(1, bb.msg.domain.Fetcher, "fetch " + uri)
        # The command templates carry literal ${URI}/${FILE} placeholders.
        fetchcmd = fetchcmd.replace("${URI}", uri)
        fetchcmd = fetchcmd.replace("${FILE}", ud.basename)
        bb.msg.debug(2, bb.msg.domain.Fetcher, "executing " + fetchcmd)
        ret = os.system(fetchcmd)
        if ret != 0:
            return False

        # Sanity check since wget can pretend it succeed when it didn't
        # Also, this used to happen if sourceforge sent us to the mirror page
        if not os.path.exists(ud.localpath):
            bb.msg.debug(2, bb.msg.domain.Fetcher, "The fetch command for %s returned success but %s doesn't exist?..." % (uri, ud.localpath))
            return False

        return True

    # Fetch with the wget override active on a private metadata copy.
    localdata = data.createCopy(d)
    data.setVar("OVERRIDES", "wget:" + data.getVar("OVERRIDES", localdata), localdata)
    data.update_data(localdata)

    # PREMIRRORS entries are whitespace-separated (find, replace) pairs,
    # one per line.
    premirrors = [i.split() for i in (data.getVar("PREMIRRORS", localdata, 1) or "").split("\n") if i]
    for (find, replace) in premirrors:
        newuri = uri_replace(uri, find, replace, d)
        if newuri != uri:
            if fetch_uri(newuri, ud, localdata):
                return

    if fetch_uri(uri, ud, localdata):
        return

    # try mirrors
    mirrors = [i.split() for i in (data.getVar("MIRRORS", localdata, 1) or "").split("\n") if i]
    for (find, replace) in mirrors:
        newuri = uri_replace(uri, find, replace, d)
        if newuri != uri:
            if fetch_uri(newuri, ud, localdata):
                return

    raise FetchError(uri)
def findPreferredProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
    """
    Find the first provider in pkg_pn with a PREFERRED_VERSION set.

    Returns a ((pe, pv, pr), filename) pair; both elements are None when
    no file in pkg_pn matches the preferred version (or none is set).
    *item* is only used to decorate log messages.
    """
    preferred_file = None
    preferred_ver = None

    # Evaluate PREFERRED_VERSION_<pn> with pn-specific overrides active.
    localdata = data.createCopy(cfgData)
    bb.data.setVar('OVERRIDES', "pn-%s:%s:%s" % (pn, pn, data.getVar('OVERRIDES', localdata)), localdata)
    bb.data.update_data(localdata)

    preferred_v = bb.data.getVar('PREFERRED_VERSION_%s' % pn, localdata, True)
    if preferred_v:
        # BUG FIX: the pattern contained backslash escapes in a non-raw
        # string ('\d'); use a raw string so the regex is explicit and no
        # invalid-escape warning is emitted.  Groups: optional epoch
        # ("E:"), version, optional "_revision" suffix.
        m = re.match(r'(\d+:)*(.*)(_.*)*', preferred_v)
        if m:
            if m.group(1):
                preferred_e = int(m.group(1)[:-1])
            else:
                preferred_e = None
            preferred_v = m.group(2)
            if m.group(3):
                preferred_r = m.group(3)[1:]
            else:
                preferred_r = None
        else:
            preferred_e = None
            preferred_r = None

        for file_set in pkg_pn:
            for f in file_set:
                pe, pv, pr = dataCache.pkg_pepvpr[f]
                # Epoch and revision are only compared when set.
                if preferred_v == pv and (preferred_r == pr or preferred_r == None) and (preferred_e == pe or preferred_e == None):
                    preferred_file = f
                    preferred_ver = (pe, pv, pr)
                    break
            if preferred_file:
                break
        # Build a human-readable version string for logging.
        if preferred_r:
            pv_str = '%s-%s' % (preferred_v, preferred_r)
        else:
            pv_str = preferred_v
        if not (preferred_e is None):
            pv_str = '%s:%s' % (preferred_e, pv_str)
        itemstr = ""
        if item:
            itemstr = " (for item %s)" % item
        if preferred_file is None:
            bb.msg.note(1, bb.msg.domain.Provider, "preferred version %s of %s not available%s" % (pv_str, pn, itemstr))
        else:
            bb.msg.debug(1, bb.msg.domain.Provider, "selecting %s as PREFERRED_VERSION %s of package %s%s" % (preferred_file, pv_str, pn, itemstr))

    return (preferred_ver, preferred_file)
def showEnvironment(self , buildfile = None, pkgs_to_build = []): """ Show the outer or per-package environment """ fn = None envdata = None if 'world' in pkgs_to_build: print "'world' is not a valid target for --environment." sys.exit(1) if len(pkgs_to_build) > 1: print "Only one target can be used with the --environment option." sys.exit(1) if buildfile: if len(pkgs_to_build) > 0: print "No target should be used with the --environment and --buildfile options." sys.exit(1) self.cb = None self.bb_cache = bb.cache.init(self) fn = self.matchFile(buildfile) if not fn: sys.exit(1) elif len(pkgs_to_build) == 1: self.updateCache() localdata = data.createCopy(self.configuration.data) bb.data.update_data(localdata) bb.data.expandKeys(localdata) taskdata = bb.taskdata.TaskData(self.configuration.abort, self.configuration.tryaltconfigs) try: taskdata.add_provider(localdata, self.status, pkgs_to_build[0]) taskdata.add_unresolved(localdata, self.status) except bb.providers.NoProvider: sys.exit(1) targetid = taskdata.getbuild_id(pkgs_to_build[0]) fnid = taskdata.build_targets[targetid][0] fn = taskdata.fn_index[fnid] else: envdata = self.configuration.data if fn: try: envdata = self.bb_cache.loadDataFull(fn, self.configuration.data) except IOError, e: bb.msg.fatal(bb.msg.domain.Parsing, "Unable to read %s: %s" % (fn, e)) except Exception, e: bb.msg.fatal(bb.msg.domain.Parsing, "%s" % e)
def go(self, loc, ud, d):
    """Fetch the svk module described by *ud* (URL *loc*) into ud.localpath.

    Checks the module out into a freshly created temporary directory, tars
    the checkout up to ud.localpath, then removes the temporary tree.
    Raises FetchError on any failure.
    """
    svkroot = ud.host + ud.path

    # Default to a date-based checkout; an explicit revision wins.
    svkcmd = "svk co -r {%s} %s/%s" % (ud.date, svkroot, ud.module)
    if ud.revision:
        svkcmd = "svk co -r %s %s/%s" % (ud.revision, svkroot, ud.module)

    # create temp directory
    localdata = data.createCopy(d)
    data.update_data(localdata)
    logger.debug(2, "Fetch: creating temporary directory")
    bb.mkdirhier(data.expand('${WORKDIR}', localdata))
    data.setVar('TMPBASE', data.expand('${WORKDIR}/oesvk.XXXXXX', localdata), localdata)
    # "false" fallback makes the command fail cleanly when MKTEMPDIRCMD is unset.
    tmppipe = os.popen(data.getVar('MKTEMPDIRCMD', localdata, 1) or "false")
    tmpfile = tmppipe.readline().strip()
    # BUG FIX: close the pipe instead of leaking the file descriptor.
    tmppipe.close()
    if not tmpfile:
        logger.error("Fetch: unable to create temporary directory.. make sure 'mktemp' is in the PATH.")
        raise FetchError(ud.module)

    # check out sources there
    os.chdir(tmpfile)
    logger.info("Fetch " + loc)
    logger.debug(1, "Running %s", svkcmd)
    myret = os.system(svkcmd)
    if myret != 0:
        try:
            os.rmdir(tmpfile)
        except OSError:
            pass
        raise FetchError(ud.module)

    os.chdir(os.path.join(tmpfile, os.path.dirname(ud.module)))
    # tar them up to a defined filename
    myret = os.system("tar -czf %s %s" % (ud.localpath, os.path.basename(ud.module)))
    if myret != 0:
        # Remove a partial tarball so a later retry starts clean.
        try:
            os.unlink(ud.localpath)
        except OSError:
            pass
        raise FetchError(ud.module)
    # cleanup
    bb.utils.prunedir(tmpfile)
def go(self, loc, ud, d):
    """Fetch the svk module described by *ud* (URL *loc*) into ud.localpath.

    Skips the fetch entirely when a forced fetch is not required and a
    mirror copy is usable.  Otherwise checks the module out into a fresh
    temporary directory, tars it up to ud.localpath, and removes the
    temporary tree.  Raises FetchError on any failure.
    """
    if not self.forcefetch(loc, ud, d) and Fetch.try_mirror(d, ud.localfile):
        return

    svkroot = ud.host + ud.path
    # pyflakes claims date is not known... it looks right
    svkcmd = "svk co -r {%s} %s/%s" % (date, svkroot, ud.module)
    if ud.revision:
        # BUG FIX: the format string had two placeholders ("-r %s/%s") for
        # three arguments, which raises TypeError; the revision needs its
        # own placeholder, matching the date-based command above.
        svkcmd = "svk co -r %s %s/%s" % (ud.revision, svkroot, ud.module)

    # create temp directory
    localdata = data.createCopy(d)
    data.update_data(localdata)
    bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: creating temporary directory")
    bb.mkdirhier(data.expand('${WORKDIR}', localdata))
    data.setVar('TMPBASE', data.expand('${WORKDIR}/oesvk.XXXXXX', localdata), localdata)
    # "false" fallback makes the command fail cleanly when MKTEMPDIRCMD is unset.
    tmppipe = os.popen(data.getVar('MKTEMPDIRCMD', localdata, 1) or "false")
    tmpfile = tmppipe.readline().strip()
    # BUG FIX: close the pipe instead of leaking the file descriptor.
    tmppipe.close()
    if not tmpfile:
        bb.msg.error(bb.msg.domain.Fetcher, "Fetch: unable to create temporary directory.. make sure 'mktemp' is in the PATH.")
        raise FetchError(ud.module)

    # check out sources there
    os.chdir(tmpfile)
    bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc)
    bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % svkcmd)
    myret = os.system(svkcmd)
    if myret != 0:
        try:
            os.rmdir(tmpfile)
        except OSError:
            pass
        raise FetchError(ud.module)

    os.chdir(os.path.join(tmpfile, os.path.dirname(ud.module)))
    # tar them up to a defined filename
    myret = os.system("tar -czf %s %s" % (ud.localpath, os.path.basename(ud.module)))
    if myret != 0:
        # Remove a partial tarball so a later retry starts clean.
        try:
            os.unlink(ud.localpath)
        except OSError:
            pass
        raise FetchError(ud.module)
    # cleanup
    os.system('rm -rf %s' % tmpfile)
def go(self, uri, ud, d):
    """Fetch urls

    Tries PREMIRRORS first, then the original *uri*, then MIRRORS,
    stopping at the first successful download into ud.localpath.
    Raises FetchError when every attempt fails.
    """
    def fetch_uri(uri, ud, d):
        # A pre-existing local file means a previous attempt was
        # interrupted: resume rather than restart.
        if os.path.exists(ud.localpath):
            # file exists, but we didnt complete it.. trying again..
            fetchcmd = data.getVar("RESUMECOMMAND", d, 1)
        else:
            fetchcmd = data.getVar("FETCHCOMMAND", d, 1)

        bb.msg.note(1, bb.msg.domain.Fetcher, "fetch " + uri)
        # The command templates carry literal ${URI}/${FILE} placeholders.
        fetchcmd = fetchcmd.replace("${URI}", uri)
        fetchcmd = fetchcmd.replace("${FILE}", ud.basename)
        bb.msg.debug(2, bb.msg.domain.Fetcher, "executing " + fetchcmd)
        ret = os.system(fetchcmd)
        if ret != 0:
            return False

        # check if sourceforge did send us to the mirror page
        if not os.path.exists(ud.localpath):
            # Wipe whatever the redirect left behind before reporting failure.
            os.system("rm %s*" % ud.localpath) # FIXME shell quote it
            bb.msg.debug(2, bb.msg.domain.Fetcher, "sourceforge.net send us to the mirror on %s" % ud.basename)
            return False

        return True

    # Fetch with the wget override active on a private metadata copy.
    localdata = data.createCopy(d)
    data.setVar('OVERRIDES', "wget:" + data.getVar('OVERRIDES', localdata), localdata)
    data.update_data(localdata)

    # PREMIRRORS entries are whitespace-separated (find, replace) pairs,
    # one per line.
    premirrors = [ i.split() for i in (data.getVar('PREMIRRORS', localdata, 1) or "").split('\n') if i ]
    for (find, replace) in premirrors:
        newuri = uri_replace(uri, find, replace, d)
        if newuri != uri:
            if fetch_uri(newuri, ud, localdata):
                return

    if fetch_uri(uri, ud, localdata):
        return

    # try mirrors
    mirrors = [ i.split() for i in (data.getVar('MIRRORS', localdata, 1) or "").split('\n') if i ]
    for (find, replace) in mirrors:
        newuri = uri_replace(uri, find, replace, d)
        if newuri != uri:
            if fetch_uri(newuri, ud, localdata):
                return

    raise FetchError(uri)
def go(self, loc, ud, d):
    """Fetch the svk module described by *ud* (URL *loc*) into ud.localpath.

    Checks the module out into a freshly created temporary directory, tars
    the checkout up to ud.localpath, then removes the temporary tree.
    Raises FetchError on any failure.
    """
    svkroot = ud.host + ud.path

    # Default to a date-based checkout; an explicit revision wins.
    svkcmd = "svk co -r {%s} %s/%s" % (ud.date, svkroot, ud.module)
    if ud.revision:
        svkcmd = "svk co -r %s %s/%s" % (ud.revision, svkroot, ud.module)

    # create temp directory
    localdata = data.createCopy(d)
    data.update_data(localdata)
    logger.debug(2, "Fetch: creating temporary directory")
    bb.utils.mkdirhier(data.expand('${WORKDIR}', localdata))
    data.setVar('TMPBASE', data.expand('${WORKDIR}/oesvk.XXXXXX', localdata), localdata)
    # "false" fallback makes the command fail cleanly when MKTEMPDIRCMD is unset.
    tmppipe = os.popen(data.getVar('MKTEMPDIRCMD', localdata, 1) or "false")
    tmpfile = tmppipe.readline().strip()
    # BUG FIX: close the pipe instead of leaking the file descriptor.
    tmppipe.close()
    if not tmpfile:
        logger.error("Fetch: unable to create temporary directory.. make sure 'mktemp' is in the PATH.")
        raise FetchError(ud.module)

    # check out sources there
    os.chdir(tmpfile)
    logger.info("Fetch " + loc)
    logger.debug(1, "Running %s", svkcmd)
    myret = os.system(svkcmd)
    if myret != 0:
        try:
            os.rmdir(tmpfile)
        except OSError:
            pass
        raise FetchError(ud.module)

    os.chdir(os.path.join(tmpfile, os.path.dirname(ud.module)))
    # tar them up to a defined filename
    myret = os.system("tar -czf %s %s" % (ud.localpath, os.path.basename(ud.module)))
    if myret != 0:
        # Remove a partial tarball so a later retry starts clean.
        try:
            os.unlink(ud.localpath)
        except OSError:
            pass
        raise FetchError(ud.module)
    # cleanup
    bb.utils.prunedir(tmpfile)
def load_bbfile( bbfile ):
    """Load and parse one .bb build file

    When the module-level cache directory is configured, a pickled copy is
    consulted first (and refreshed on parse).  Returns (parsed_data,
    from_cache_flag).  Parsing happens with the CWD set to the .bb file's
    directory; the previous CWD is always restored.
    """

    if not cache in [None, '']:
        cache_bbfile = bbfile.replace( '/', '_' )
        try:
            # index 8 of os.stat() is st_mtime
            cache_mtime = os.stat( "%s/%s" % ( cache, cache_bbfile ) )[8]
        except OSError:
            cache_mtime = 0
        file_mtime = parse.cached_mtime(bbfile)

        if file_mtime > cache_mtime:
            #print " : '%s' dirty. reparsing..." % bbfile
            pass
        else:
            #print " : '%s' clean. loading from cache..." % bbfile
            cache_data = unpickle_bb( cache_bbfile )
            if deps_clean(cache_data):
                return cache_data, True

    topdir = data.getVar('TOPDIR', cfg)
    if not topdir:
        topdir = os.path.abspath(os.getcwd())
        # set topdir to here
        data.setVar('TOPDIR', topdir, cfg)
    bbfile = os.path.abspath(bbfile)
    bbfile_loc = os.path.abspath(os.path.dirname(bbfile))
    # expand tmpdir to include this topdir
    data.setVar('TMPDIR', data.getVar('TMPDIR', cfg, 1) or "", cfg)
    # set topdir to location of .bb file
    topdir = bbfile_loc
    #data.setVar('TOPDIR', topdir, cfg)
    # go there
    oldpath = os.path.abspath(os.getcwd())
    os.chdir(topdir)
    # BUG FIX: the local variable was named 'bb', shadowing the bb package;
    # renamed so module access keeps working if ever needed below.
    bb_data = data.createCopy(cfg)
    try:
        parse.handle(bbfile, bb_data) # read .bb data
        if not cache in [None, '']:
            pickle_bb( cache_bbfile, bb_data) # write cache
        # CLEANUP: the explicit os.chdir(oldpath) here was redundant —
        # the finally clause below always restores the CWD.
        return bb_data, False
    finally:
        os.chdir(oldpath)
def _get_layer_collections(layer_path, lconf=None, data=None):
    """Parse a layer's conf/layer.conf and return its collections.

    layer_path: filesystem path of the layer.
    lconf: path of the layer.conf to parse; defaults to
        <layer_path>/conf/layer.conf.
    data: optional datastore to copy as the parse base; a fresh one is
        created when omitted.  (The name shadows the bb.data module by
        design of the original signature, so it cannot be renamed.)

    Returns {collection_name: {'priority', 'pattern', 'depends', 'compat'}}.
    Raises RuntimeError if layer.conf fails to parse; calls bb.fatal on a
    malformed LAYERDEPENDS value.
    """
    import bb.parse
    import bb.data

    if lconf is None:
        lconf = os.path.join(layer_path, 'conf', 'layer.conf')
    if data is None:
        ldata = bb.data.init()
        bb.parse.init_parser(ldata)
    else:
        ldata = data.createCopy()
    ldata.setVar('LAYERDIR', layer_path)
    try:
        ldata = bb.parse.handle(lconf, ldata, include=True)
    except Exception as exc:
        # Fix: was a bare "except:", which also swallowed
        # KeyboardInterrupt/SystemExit; narrowed and chained so the
        # underlying parse error stays visible in the traceback.
        raise RuntimeError("Parsing of layer.conf from layer: %s failed" % layer_path) from exc
    ldata.expandVarref('LAYERDIR')

    collections = (ldata.getVar('BBFILE_COLLECTIONS') or '').split()
    if not collections:
        # Layers without BBFILE_COLLECTIONS fall back to the dir name.
        name = os.path.basename(layer_path)
        collections = [name]

    collections = {c: {} for c in collections}
    for name in collections:
        priority = ldata.getVar('BBFILE_PRIORITY_%s' % name)
        pattern = ldata.getVar('BBFILE_PATTERN_%s' % name)
        depends = ldata.getVar('LAYERDEPENDS_%s' % name)
        compat = ldata.getVar('LAYERSERIES_COMPAT_%s' % name)
        try:
            depDict = bb.utils.explode_dep_versions2(depends or "")
        except bb.utils.VersionStringException as vse:
            bb.fatal('Error parsing LAYERDEPENDS_%s: %s' % (name, str(vse)))
        collections[name]['priority'] = priority
        collections[name]['pattern'] = pattern
        # Normalised dependency names only (version constraints dropped).
        collections[name]['depends'] = ' '.join(depDict.keys())
        collections[name]['compat'] = compat

    return collections
def go(self, uri, ud, d, checkonly=False):
    """Fetch urls"""
    def attempt(uri, ud, d):
        # Choose the wget invocation: a check-only probe, a resume of a
        # partial download, or a fresh fetch.
        if checkonly:
            cmd = data.getVar("CHECKCOMMAND", d, 1)
        elif os.path.exists(ud.localpath):
            # file exists, but we didnt complete it.. trying again..
            cmd = data.getVar("RESUMECOMMAND", d, 1)
        else:
            cmd = data.getVar("FETCHCOMMAND", d, 1)

        stripped = uri.split(";")[0]
        decoded = list(decodeurl(stripped))
        scheme = decoded[0]
        host = decoded[1]

        cmd = cmd.replace("${URI}", stripped.split(";")[0])
        cmd = cmd.replace("${FILE}", ud.basename)

        logger.info("fetch " + stripped)
        logger.debug(2, "executing " + cmd)
        runfetchcmd(cmd, d)

        # Sanity check since wget can pretend it succeed when it didn't
        # Also, this used to happen if sourceforge sent us to the mirror page
        if not os.path.exists(ud.localpath) and not checkonly:
            logger.debug(2, "The fetch command for %s returned success but %s doesn't exist?...", stripped, ud.localpath)
            return False

        return True

    localdata = data.createCopy(d)
    data.setVar('OVERRIDES', "wget:" + data.getVar('OVERRIDES', localdata), localdata)
    data.update_data(localdata)

    if not attempt(uri, ud, localdata):
        raise FetchError(uri)
    return True
def build( self, params, cmd = "build" ): """Build a providee""" global last_exception globexpr = params[0] self._checkParsed() names = globfilter( cooker.status.pkg_pn.keys(), globexpr ) if len( names ) == 0: names = [ globexpr ] print "SHELL: Building %s" % ' '.join( names ) oldcmd = cooker.configuration.cmd cooker.configuration.cmd = cmd td = taskdata.TaskData(cooker.configuration.abort) localdata = data.createCopy(cooker.configuration.data) data.update_data(localdata) data.expandKeys(localdata) try: tasks = [] for name in names: td.add_provider(localdata, cooker.status, name) providers = td.get_provider(name) if len(providers) == 0: raise Providers.NoProvider tasks.append([name, "do_%s" % cooker.configuration.cmd]) td.add_unresolved(localdata, cooker.status) rq = runqueue.RunQueue(cooker, localdata, cooker.status, td, tasks) rq.prepare_runqueue() rq.execute_runqueue() except Providers.NoProvider: print "ERROR: No Provider" last_exception = Providers.NoProvider except runqueue.TaskFailure, fnids: for fnid in fnids: print "ERROR: '%s' failed" % td.fn_index[fnid] last_exception = runqueue.TaskFailure
def build(self, params, cmd="build"): """Build a providee""" global last_exception globexpr = params[0] self._checkParsed() names = globfilter(cooker.status.pkg_pn.keys(), globexpr) if len(names) == 0: names = [globexpr] print "SHELL: Building %s" % ' '.join(names) oldcmd = cooker.configuration.cmd cooker.configuration.cmd = cmd td = taskdata.TaskData(cooker.configuration.abort) localdata = data.createCopy(cooker.configuration.data) data.update_data(localdata) data.expandKeys(localdata) try: tasks = [] for name in names: td.add_provider(localdata, cooker.status, name) providers = td.get_provider(name) if len(providers) == 0: raise Providers.NoProvider tasks.append([name, "do_%s" % cooker.configuration.cmd]) td.add_unresolved(localdata, cooker.status) rq = runqueue.RunQueue(cooker, localdata, cooker.status, td, tasks) rq.prepare_runqueue() rq.execute_runqueue() except Providers.NoProvider: print "ERROR: No Provider" last_exception = Providers.NoProvider except runqueue.TaskFailure, fnids: for fnid in fnids: print "ERROR: '%s' failed" % td.fn_index[fnid] last_exception = runqueue.TaskFailure
def build(self, params, cmd="build"):
    """Build a providee"""
    global last_exception
    pattern = params[0]
    self._checkParsed()
    matches = globfilter(cooker.status.pkg_pn, pattern)
    if not matches:
        # No known package matched; try the expression as a literal name.
        matches = [pattern]
    print("SHELL: Building %s" % " ".join(matches))

    td = taskdata.TaskData(cooker.configuration.abort)
    localdata = data.createCopy(cooker.configuration.data)
    data.update_data(localdata)
    data.expandKeys(localdata)

    try:
        runlist = []
        for target in matches:
            td.add_provider(localdata, cooker.status, target)
            providers = td.get_provider(target)
            if not providers:
                raise Providers.NoProvider
            runlist.append([target, "do_%s" % cmd])
        td.add_unresolved(localdata, cooker.status)

        queue = runqueue.RunQueue(cooker, localdata, cooker.status, td, runlist)
        queue.prepare_runqueue()
        queue.execute_runqueue()
    except Providers.NoProvider:
        print("ERROR: No Provider")
        last_exception = Providers.NoProvider
    except runqueue.TaskFailure as fnids:
        last_exception = runqueue.TaskFailure
    except build.FuncFailed as e:
        print("ERROR: Couldn't build '%s'" % matches)
        last_exception = e
def findProviders(cfgData, dataCache, pkg_pn = None):
    """
    Convenience function to get latest and preferred providers in pkg_pn
    """
    if not pkg_pn:
        pkg_pn = dataCache.pkg_pn

    # Need to ensure data store is expanded
    expanded = data.createCopy(cfgData)
    bb.data.expandKeys(expanded)

    latest = {}
    preferred = {}
    for pn in pkg_pn:
        last_ver, last_file, pref_ver, pref_file = findBestProvider(pn, expanded, dataCache, pkg_pn)
        latest[pn] = (last_ver, last_file)
        preferred[pn] = (pref_ver, pref_file)

    return (latest, preferred)
def go(self, uri, ud, d, checkonly = False):
    """Fetch urls"""
    def run_wget(uri, ud, d):
        # Select the command variable for this situation.
        if checkonly:
            command = data.getVar("CHECKCOMMAND", d, 1)
        elif os.path.exists(ud.localpath):
            # file exists, but we didnt complete it.. trying again..
            command = data.getVar("RESUMECOMMAND", d, 1)
        else:
            command = data.getVar("FETCHCOMMAND", d, 1)

        target = uri.split(";")[0]
        parts = list(decodeurl(target))
        scheme = parts[0]
        host = parts[1]

        command = command.replace("${URI}", target.split(";")[0]).replace("${FILE}", ud.basename)

        logger.info("fetch " + target)
        logger.debug(2, "executing " + command)
        runfetchcmd(command, d)

        # Sanity check since wget can pretend it succeed when it didn't
        # Also, this used to happen if sourceforge sent us to the mirror page
        if not os.path.exists(ud.localpath) and not checkonly:
            logger.debug(2, "The fetch command for %s returned success but %s doesn't exist?...", target, ud.localpath)
            return False

        return True

    localdata = data.createCopy(d)
    data.setVar('OVERRIDES', "wget:" + data.getVar('OVERRIDES', localdata), localdata)
    data.update_data(localdata)

    if run_wget(uri, ud, localdata):
        return True
    raise FetchError(uri)
def showEnvironment(self, buildfile=None, pkgs_to_build=[]): """ Show the outer or per-package environment """ fn = None envdata = None if buildfile: self.cb = None self.bb_cache = bb.cache.init(self) fn = self.matchFile(buildfile) elif len(pkgs_to_build) == 1: self.updateCache() localdata = data.createCopy(self.configuration.data) bb.data.update_data(localdata) bb.data.expandKeys(localdata) taskdata = bb.taskdata.TaskData(self.configuration.abort) taskdata.add_provider(localdata, self.status, pkgs_to_build[0]) taskdata.add_unresolved(localdata, self.status) targetid = taskdata.getbuild_id(pkgs_to_build[0]) fnid = taskdata.build_targets[targetid][0] fn = taskdata.fn_index[fnid] else: envdata = self.configuration.data if fn: try: envdata = self.bb_cache.loadDataFull(fn, self.configuration.data) except IOError, e: bb.msg.error(bb.msg.domain.Parsing, "Unable to read %s: %s" % (fn, e)) raise except Exception, e: bb.msg.error(bb.msg.domain.Parsing, "%s" % e) raise
def showEnvironment(self, buildfile = None, pkgs_to_build = []): """ Show the outer or per-package environment """ fn = None envdata = None if buildfile: self.cb = None self.bb_cache = bb.cache.init(self) fn = self.matchFile(buildfile) elif len(pkgs_to_build) == 1: self.updateCache() localdata = data.createCopy(self.configuration.data) bb.data.update_data(localdata) bb.data.expandKeys(localdata) taskdata = bb.taskdata.TaskData(self.configuration.abort) taskdata.add_provider(localdata, self.status, pkgs_to_build[0]) taskdata.add_unresolved(localdata, self.status) targetid = taskdata.getbuild_id(pkgs_to_build[0]) fnid = taskdata.build_targets[targetid][0] fn = taskdata.fn_index[fnid] else: envdata = self.configuration.data if fn: try: envdata = self.bb_cache.loadDataFull(fn, self.configuration.data) except IOError, e: bb.msg.error(bb.msg.domain.Parsing, "Unable to read %s: %s" % (fn, e)) raise except Exception, e: bb.msg.error(bb.msg.domain.Parsing, "%s" % e) raise
def _get_layer_collections(layer_path, lconf=None, data=None):
    """Parse a layer's conf/layer.conf and return its collections.

    layer_path: filesystem path of the layer.
    lconf: path of the layer.conf to parse; defaults to
        <layer_path>/conf/layer.conf.
    data: optional datastore to copy as the parse base; a fresh one is
        created when omitted.  (The name shadows the bb.data module by
        design of the original signature, so it cannot be renamed.)

    Returns {collection_name: {'priority', 'pattern', 'depends', 'compat'}}
    with raw variable values (LAYERDEPENDS is not parsed here).
    Raises RuntimeError if layer.conf fails to parse.
    """
    import bb.parse
    import bb.data

    if lconf is None:
        lconf = os.path.join(layer_path, 'conf', 'layer.conf')
    if data is None:
        ldata = bb.data.init()
        bb.parse.init_parser(ldata)
    else:
        ldata = data.createCopy()
    ldata.setVar('LAYERDIR', layer_path)
    try:
        ldata = bb.parse.handle(lconf, ldata, include=True)
    except Exception as exc:
        # Fix: was a bare "except:", which also swallowed
        # KeyboardInterrupt/SystemExit; narrowed and chained so the
        # underlying parse error stays visible in the traceback.
        raise RuntimeError("Parsing of layer.conf from layer: %s failed" % layer_path) from exc
    ldata.expandVarref('LAYERDIR')

    collections = (ldata.getVar('BBFILE_COLLECTIONS') or '').split()
    if not collections:
        # Layers without BBFILE_COLLECTIONS fall back to the dir name.
        name = os.path.basename(layer_path)
        collections = [name]

    collections = {c: {} for c in collections}
    for name in collections:
        priority = ldata.getVar('BBFILE_PRIORITY_%s' % name)
        pattern = ldata.getVar('BBFILE_PATTERN_%s' % name)
        depends = ldata.getVar('LAYERDEPENDS_%s' % name)
        compat = ldata.getVar('LAYERSERIES_COMPAT_%s' % name)
        collections[name]['priority'] = priority
        collections[name]['pattern'] = pattern
        collections[name]['depends'] = depends
        collections[name]['compat'] = compat

    return collections
def download(self, loc, ud, d):
    """Check out (or update) a CVS module and tar it into ud.localpath."""
    method = ud.parm.get('method', 'pserver')
    localdir = ud.parm.get('localdir', ud.module)
    cvs_port = ud.parm.get('port', '')

    cvs_rsh = None
    if method == "ext":
        if "rsh" in ud.parm:
            cvs_rsh = ud.parm["rsh"]

    # Assemble CVSROOT: a plain path for method "dir", otherwise
    # :method[;proxy=...][;proxyport=...]:user[:pswd]@host:port/path
    if method == "dir":
        cvsroot = ud.path
    else:
        cvsroot = ":" + method
        cvsproxyhost = data.getVar('CVS_PROXY_HOST', d, True)
        if cvsproxyhost:
            cvsroot += ";proxy=" + cvsproxyhost
        cvsproxyport = data.getVar('CVS_PROXY_PORT', d, True)
        if cvsproxyport:
            cvsroot += ";proxyport=" + cvsproxyport
        cvsroot += ":" + ud.user
        if ud.pswd:
            cvsroot += ":" + ud.pswd
        cvsroot += "@" + ud.host + ":" + cvs_port + ud.path

    options = []
    if 'norecurse' in ud.parm:
        options.append("-l")
    if ud.date:
        # treat YYYYMMDDHHMM specially for CVS
        if len(ud.date) == 12:
            options.append("-D \"%s %s:%s UTC\"" % (ud.date[0:8], ud.date[8:10], ud.date[10:12]))
        else:
            options.append("-D \"%s UTC\"" % ud.date)
    if ud.tag:
        options.append("-r %s" % ud.tag)

    # FETCHCOMMAND/UPDATECOMMAND are looked up with the "cvs" override
    # active and CVSROOT/CVSCOOPTS/CVSMODULE set for expansion.
    localdata = data.createCopy(d)
    data.setVar('OVERRIDES', "cvs:%s" % data.getVar('OVERRIDES', localdata), localdata)
    data.update_data(localdata)

    data.setVar('CVSROOT', cvsroot, localdata)
    data.setVar('CVSCOOPTS', " ".join(options), localdata)
    data.setVar('CVSMODULE', ud.module, localdata)
    cvscmd = data.getVar('FETCHCOMMAND', localdata, True)
    cvsupdatecmd = data.getVar('UPDATECOMMAND', localdata, True)

    if cvs_rsh:
        cvscmd = "CVS_RSH=\"%s\" %s" % (cvs_rsh, cvscmd)
        cvsupdatecmd = "CVS_RSH=\"%s\" %s" % (cvs_rsh, cvsupdatecmd)

    # create module directory
    logger.debug(2, "Fetch: checking for module directory")
    pkg = data.expand('${PN}', d)
    pkgdir = os.path.join(data.expand('${CVSDIR}', localdata), pkg)
    moddir = os.path.join(pkgdir, localdir)
    # An existing CVS/ admin dir means a prior checkout: update in place.
    if os.access(os.path.join(moddir, 'CVS'), os.R_OK):
        logger.info("Update " + loc)
        bb.fetch2.check_network_access(d, cvsupdatecmd, ud.url)
        # update sources there
        os.chdir(moddir)
        cmd = cvsupdatecmd
    else:
        logger.info("Fetch " + loc)
        # check out sources there
        bb.utils.mkdirhier(pkgdir)
        os.chdir(pkgdir)
        logger.debug(1, "Running %s", cvscmd)
        bb.fetch2.check_network_access(d, cvscmd, ud.url)
        cmd = cvscmd

    # cleanup=[moddir]: a failed command removes the partial checkout.
    runfetchcmd(cmd, d, cleanup = [moddir])

    if not os.access(moddir, os.R_OK):
        raise FetchError("Directory %s was not readable despite sucessful fetch?!" % moddir, ud.url)

    scmdata = ud.parm.get("scmdata", "")
    if scmdata == "keep":
        tar_flags = ""
    else:
        # Strip CVS administrative directories from the tarball by default.
        tar_flags = "--exclude 'CVS'"

    # tar them up to a defined filename
    if 'fullpath' in ud.parm:
        os.chdir(pkgdir)
        cmd = "tar %s -czf %s %s" % (tar_flags, ud.localpath, localdir)
    else:
        os.chdir(moddir)
        os.chdir('..')
        cmd = "tar %s -czf %s %s" % (tar_flags, ud.localpath, os.path.basename(moddir))

    runfetchcmd(cmd, d, cleanup = [ud.localpath])
self.cookerIdle = True self.command.finishAsyncCommand() bb.event.fire( bb.event.BuildCompleted(buildname, targets, self.configuration.event_data, failures)) return retval self.buildSetVars() buildname = bb.data.getVar("BUILDNAME", self.configuration.data) bb.event.fire( bb.event.BuildStarted(buildname, targets, self.configuration.event_data)) localdata = data.createCopy(self.configuration.data) bb.data.update_data(localdata) bb.data.expandKeys(localdata) taskdata = bb.taskdata.TaskData(self.configuration.abort) runlist = [] for k in targets: taskdata.add_provider(localdata, self.status, k) runlist.append([k, "do_%s" % task]) taskdata.add_unresolved(localdata, self.status) rq = bb.runqueue.RunQueue(self, self.configuration.data, self.status, taskdata, runlist) self.cookerIdle = False
def go(self, loc, ud, d):
    """Check out (or update) a CVS module and tar it into ud.localpath."""
    # try to use the tarball stash
    if not self.forcefetch(loc, ud, d) and Fetch.try_mirror(d, ud.localfile):
        bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists or was mirrored, skipping cvs checkout." % ud.localpath)
        return

    method = "pserver"
    if "method" in ud.parm:
        method = ud.parm["method"]

    localdir = ud.module
    if "localdir" in ud.parm:
        localdir = ud.parm["localdir"]

    cvs_port = ""
    if "port" in ud.parm:
        cvs_port = ud.parm["port"]

    cvs_rsh = None
    if method == "ext":
        if "rsh" in ud.parm:
            cvs_rsh = ud.parm["rsh"]

    # Assemble CVSROOT: plain path for "dir", otherwise
    # :method[;proxy=...][;proxyport=...]:user[:pswd]@host:port/path
    if method == "dir":
        cvsroot = ud.path
    else:
        cvsroot = ":" + method
        cvsproxyhost = data.getVar('CVS_PROXY_HOST', d, True)
        if cvsproxyhost:
            cvsroot += ";proxy=" + cvsproxyhost
        cvsproxyport = data.getVar('CVS_PROXY_PORT', d, True)
        if cvsproxyport:
            cvsroot += ";proxyport=" + cvsproxyport
        cvsroot += ":" + ud.user
        if ud.pswd:
            cvsroot += ":" + ud.pswd
        cvsroot += "@" + ud.host + ":" + cvs_port + ud.path

    options = []
    if 'norecurse' in ud.parm:
        options.append("-l")
    if ud.date:
        # treat YYYYMMDDHHMM specially for CVS
        if len(ud.date) == 12:
            options.append("-D \"%s %s:%s UTC\"" % (ud.date[0:8], ud.date[8:10], ud.date[10:12]))
        else:
            options.append("-D \"%s UTC\"" % ud.date)
    if ud.tag:
        options.append("-r %s" % ud.tag)

    # Fetch/update commands are expanded with the "cvs" override active
    # and CVSROOT/CVSCOOPTS/CVSMODULE in scope.
    localdata = data.createCopy(d)
    data.setVar('OVERRIDES', "cvs:%s" % data.getVar('OVERRIDES', localdata), localdata)
    data.update_data(localdata)

    data.setVar('CVSROOT', cvsroot, localdata)
    data.setVar('CVSCOOPTS', " ".join(options), localdata)
    data.setVar('CVSMODULE', ud.module, localdata)
    cvscmd = data.getVar('FETCHCOMMAND', localdata, 1)
    cvsupdatecmd = data.getVar('UPDATECOMMAND', localdata, 1)

    if cvs_rsh:
        cvscmd = "CVS_RSH=\"%s\" %s" % (cvs_rsh, cvscmd)
        cvsupdatecmd = "CVS_RSH=\"%s\" %s" % (cvs_rsh, cvsupdatecmd)

    # create module directory
    bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: checking for module directory")
    pkg = data.expand('${PN}', d)
    pkgdir = os.path.join(data.expand('${CVSDIR}', localdata), pkg)
    moddir = os.path.join(pkgdir,localdir)
    # An existing CVS/ admin dir means a prior checkout: update in place.
    if os.access(os.path.join(moddir,'CVS'), os.R_OK):
        bb.msg.note(1, bb.msg.domain.Fetcher, "Update " + loc)
        # update sources there
        os.chdir(moddir)
        myret = os.system(cvsupdatecmd)
    else:
        bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc)
        # check out sources there
        bb.mkdirhier(pkgdir)
        os.chdir(pkgdir)
        bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % cvscmd)
        myret = os.system(cvscmd)

    if myret != 0 or not os.access(moddir, os.R_OK):
        # NOTE(review): rmdir only removes an *empty* directory; a partial
        # checkout remains on disk — confirm intent.
        try:
            os.rmdir(moddir)
        except OSError:
            pass
        raise FetchError(ud.module)

    # tar them up to a defined filename
    if 'fullpath' in ud.parm:
        os.chdir(pkgdir)
        myret = os.system("tar -czf %s %s" % (ud.localpath, localdir))
    else:
        os.chdir(moddir)
        os.chdir('..')
        myret = os.system("tar -czf %s %s" % (ud.localpath, os.path.basename(moddir)))

    if myret != 0:
        # Remove any truncated tarball so the next run retries.
        try:
            os.unlink(ud.localpath)
        except OSError:
            pass
        raise FetchError(ud.module)
def go(self, loc, ud, d):
    """
    Fetch urls
    """
    # try to use the tarball stash
    if Fetch.try_mirror(d, ud.localfile):
        bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists or was mirrored, skipping perforce checkout." % ud.localpath)
        return

    (host,depot,user,pswd,parm) = Perforce.doparse(loc, d)

    # "path" is the depot prefix without the trailing /... wildcard.
    if depot.find('/...') != -1:
        path = depot[:depot.find('/...')]
    else:
        path = depot

    if "module" in parm:
        module = parm["module"]
    else:
        module = os.path.basename(path)

    localdata = data.createCopy(d)
    data.setVar('OVERRIDES', "p4:%s" % data.getVar('OVERRIDES', localdata), localdata)
    data.update_data(localdata)

    # Get the p4 command; credentials/port are passed via P4* variables
    # consumed by FETCHCOMMAND expansion.
    if user:
        data.setVar('P4USER', user, localdata)
    if pswd:
        data.setVar('P4PASSWD', pswd, localdata)
    if host:
        data.setVar('P4PORT', host, localdata)
    p4cmd = data.getVar('FETCHCOMMAND', localdata, 1)

    # create temp directory
    bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: creating temporary directory")
    bb.mkdirhier(data.expand('${WORKDIR}', localdata))
    data.setVar('TMPBASE', data.expand('${WORKDIR}/oep4.XXXXXX', localdata), localdata)
    tmppipe = os.popen(data.getVar('MKTEMPDIRCMD', localdata, 1) or "false")
    tmpfile = tmppipe.readline().strip()
    if not tmpfile:
        bb.error("Fetch: unable to create temporary directory.. make sure 'mktemp' is in the PATH.")
        raise FetchError(module)

    # Pin the depot spec to a label or, failing that, a changeset.
    if "label" in parm:
        depot = "%s@%s" % (depot,parm["label"])
    else:
        cset = Perforce.getcset(d, depot, host, user, pswd, parm)
        depot = "%s@%s" % (depot,cset)

    os.chdir(tmpfile)
    bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc)
    bb.msg.note(1, bb.msg.domain.Fetcher, "%s files %s" % (p4cmd, depot))
    p4file = os.popen("%s files %s" % (p4cmd, depot))

    # NOTE(review): os.popen always returns a pipe object, so this branch
    # appears unreachable — confirm the intended failure check.
    if not p4file:
        bb.error("Fetch: unable to get the P4 files from %s" % (depot))
        raise FetchError(module)

    count = 0

    # One "p4 print" per listed file; deleted revisions are skipped.
    # (Local names "file" and "list" shadow builtins.)
    for file in p4file:
        list = file.split()

        if list[2] == "delete":
            continue

        dest = list[0][len(path)+1:]
        where = dest.find("#")

        os.system("%s print -o %s/%s %s" % (p4cmd, module,dest[:where],list[0]))
        count = count + 1

    if count == 0:
        bb.error("Fetch: No files gathered from the P4 fetch")
        raise FetchError(module)

    myret = os.system("tar -czf %s %s" % (ud.localpath, module))
    if myret != 0:
        # Remove any truncated tarball so the next run retries.
        try:
            os.unlink(ud.localpath)
        except OSError:
            pass
        raise FetchError(module)
    # cleanup
    os.system('rm -rf %s' % tmpfile)
def findPreferredProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
    """
    Find the first provider in pkg_pn with a PREFERRED_VERSION set.

    Returns (preferred_ver, preferred_file) where preferred_ver is a
    (pe, pv, pr) tuple, or (None, None) when no PREFERRED_VERSION is set
    or no provider matches it.
    """
    preferred_file = None
    preferred_ver = None

    # Evaluate PREFERRED_VERSION with the pn-specific overrides active.
    localdata = data.createCopy(cfgData)
    localdata.setVar('OVERRIDES', "%s:pn-%s:%s" % (data.getVar('OVERRIDES', localdata), pn, pn))
    bb.data.update_data(localdata)

    preferred_v = localdata.getVar('PREFERRED_VERSION', True)
    if preferred_v:
        # Split "[epoch:]version[_revision]".
        # NOTE(review): the greedy (.*) consumes the whole remainder, so
        # group(3) of (_.*)* can never match and preferred_r is always None
        # via this path — confirm whether _r suffix parsing was intended.
        m = re.match('(\d+:)*(.*)(_.*)*', preferred_v)
        if m:
            if m.group(1):
                preferred_e = m.group(1)[:-1]
            else:
                preferred_e = None
            preferred_v = m.group(2)
            if m.group(3):
                preferred_r = m.group(3)[1:]
            else:
                preferred_r = None
        else:
            preferred_e = None
            preferred_r = None

        # First file whose (pe, pv, pr) matches the preference wins;
        # pkg_pn is an iterable of file sets in priority order.
        for file_set in pkg_pn:
            for f in file_set:
                pe, pv, pr = dataCache.pkg_pepvpr[f]
                if preferredVersionMatch(pe, pv, pr, preferred_e, preferred_v, preferred_r):
                    preferred_file = f
                    preferred_ver = (pe, pv, pr)
                    break
            if preferred_file:
                break;

        # Build a human-readable "epoch:version-revision" string for logs.
        if preferred_r:
            pv_str = '%s-%s' % (preferred_v, preferred_r)
        else:
            pv_str = preferred_v
        if not (preferred_e is None):
            pv_str = '%s:%s' % (preferred_e, pv_str)
        itemstr = ""
        if item:
            itemstr = " (for item %s)" % item
        if preferred_file is None:
            logger.info("preferred version %s of %s not available%s", pv_str, pn, itemstr)
            # List the versions that *are* available to aid debugging.
            available_vers = []
            for file_set in pkg_pn:
                for f in file_set:
                    pe, pv, pr = dataCache.pkg_pepvpr[f]
                    ver_str = pv
                    if pe:
                        ver_str = "%s:%s" % (pe, ver_str)
                    if not ver_str in available_vers:
                        available_vers.append(ver_str)
            if available_vers:
                available_vers.sort()
                logger.info("versions of %s available: %s", pn, ' '.join(available_vers))
        else:
            logger.debug(1, "selecting %s as PREFERRED_VERSION %s of package %s%s", preferred_file, pv_str, pn, itemstr)

    return (preferred_ver, preferred_file)
def findBestProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
    """
    If there is a PREFERRED_VERSION, find the highest-priority bbfile
    providing that version.  If not, find the latest version provided by
    an bbfile in the highest-priority set.

    Returns (latest, latest_f, preferred_ver, preferred_file) where the
    version entries are (pv, pr) tuples and the *_f/_file entries are
    recipe filenames.
    """
    if not pkg_pn:
        pkg_pn = dataCache.pkg_pn

    # Group provider files by their bbfile priority.
    files = pkg_pn[pn]
    priorities = {}
    for f in files:
        priority = dataCache.bbfile_priority[f]
        if priority not in priorities:
            priorities[priority] = []
        priorities[priority].append(f)
    # Python-2 cmp-style sort; tmp_pn ends up as file sets ordered from
    # highest priority (index 0) to lowest.
    p_list = priorities.keys()
    p_list.sort(lambda a, b: a - b)
    tmp_pn = []
    for p in p_list:
        tmp_pn = [priorities[p]] + tmp_pn

    preferred_file = None

    # Evaluate PREFERRED_VERSION_<pn> with the pn override active.
    localdata = data.createCopy(cfgData)
    bb.data.setVar('OVERRIDES', "%s:%s" % (pn, data.getVar('OVERRIDES', localdata)), localdata)
    bb.data.update_data(localdata)

    preferred_v = bb.data.getVar('PREFERRED_VERSION_%s' % pn, localdata, True)
    if preferred_v:
        # Split "version_revision"; no underscore means no revision pin.
        m = re.match('(.*)_(.*)', preferred_v)
        if m:
            preferred_v = m.group(1)
            preferred_r = m.group(2)
        else:
            preferred_r = None

        # First matching file in priority order wins.
        for file_set in tmp_pn:
            for f in file_set:
                pv,pr = dataCache.pkg_pvpr[f]
                if preferred_v == pv and (preferred_r == pr or preferred_r == None):
                    preferred_file = f
                    preferred_ver = (pv, pr)
                    break
            if preferred_file:
                break;

        if preferred_r:
            pv_str = '%s-%s' % (preferred_v, preferred_r)
        else:
            pv_str = preferred_v
        itemstr = ""
        if item:
            itemstr = " (for item %s)" % item
        if preferred_file is None:
            bb.msg.note(1, bb.msg.domain.Provider, "preferred version %s of %s not available%s" % (pv_str, pn, itemstr))
        else:
            bb.msg.debug(1, bb.msg.domain.Provider, "selecting %s as PREFERRED_VERSION %s of package %s%s" % (preferred_file, pv_str, pn, itemstr))

    del localdata

    # get highest priority file set
    files = tmp_pn[0]
    latest = None
    latest_p = 0
    latest_f = None
    # Pick the newest version within the set; `dp` (presumably
    # DEFAULT_PREFERENCE — TODO confirm) dominates, version compare
    # breaks ties at equal dp.
    for file_name in files:
        pv,pr = dataCache.pkg_pvpr[file_name]
        dp = dataCache.pkg_dp[file_name]

        if (latest is None) or ((latest_p == dp) and (utils.vercmp(latest, (pv, pr)) < 0)) or (dp > latest_p):
            latest = (pv, pr)
            latest_f = file_name
            latest_p = dp
    if preferred_file is None:
        # No PREFERRED_VERSION matched: fall back to the latest.
        preferred_file = latest_f
        preferred_ver = latest

    return (latest,latest_f,preferred_ver, preferred_file)
def go(self, loc, ud, d):
    """
    Fetch urls
    """
    # try to use the tarball stash
    if not self.forcefetch(loc, ud, d) and Fetch.try_mirror(d, ud.localfile):
        bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists or was mirrored, skipping perforce checkout." % ud.localpath)
        return

    (host,depot,user,pswd,parm) = Perforce.doparse(loc, d)

    # "path" is the depot prefix without the trailing /... wildcard.
    if depot.find('/...') != -1:
        path = depot[:depot.find('/...')]
    else:
        path = depot

    if "module" in parm:
        module = parm["module"]
    else:
        module = os.path.basename(path)

    localdata = data.createCopy(d)
    data.setVar('OVERRIDES', "p4:%s" % data.getVar('OVERRIDES', localdata), localdata)
    data.update_data(localdata)

    # Get the p4 command; credentials/port are passed via P4* variables
    # consumed by FETCHCOMMAND expansion.
    if user:
        data.setVar('P4USER', user, localdata)
    if pswd:
        data.setVar('P4PASSWD', pswd, localdata)
    if host:
        data.setVar('P4PORT', host, localdata)
    p4cmd = data.getVar('FETCHCOMMAND', localdata, 1)

    # create temp directory
    bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: creating temporary directory")
    bb.mkdirhier(data.expand('${WORKDIR}', localdata))
    data.setVar('TMPBASE', data.expand('${WORKDIR}/oep4.XXXXXX', localdata), localdata)
    tmppipe = os.popen(data.getVar('MKTEMPDIRCMD', localdata, 1) or "false")
    tmpfile = tmppipe.readline().strip()
    if not tmpfile:
        bb.error("Fetch: unable to create temporary directory.. make sure 'mktemp' is in the PATH.")
        raise FetchError(module)

    # Pin the depot spec to a label or, failing that, a changeset.
    if "label" in parm:
        depot = "%s@%s" % (depot,parm["label"])
    else:
        cset = Perforce.getcset(d, depot, host, user, pswd, parm)
        depot = "%s@%s" % (depot,cset)

    os.chdir(tmpfile)
    bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc)
    bb.msg.note(1, bb.msg.domain.Fetcher, "%s files %s" % (p4cmd, depot))
    p4file = os.popen("%s files %s" % (p4cmd, depot))

    # NOTE(review): os.popen always returns a pipe object, so this branch
    # appears unreachable — confirm the intended failure check.
    if not p4file:
        bb.error("Fetch: unable to get the P4 files from %s" % (depot))
        raise FetchError(module)

    count = 0

    # One "p4 print" per listed file; deleted revisions are skipped.
    # (Local names "file" and "list" shadow builtins.)
    for file in p4file:
        list = file.split()

        if list[2] == "delete":
            continue

        dest = list[0][len(path)+1:]
        where = dest.find("#")

        os.system("%s print -o %s/%s %s" % (p4cmd, module,dest[:where],list[0]))
        count = count + 1

    if count == 0:
        bb.error("Fetch: No files gathered from the P4 fetch")
        raise FetchError(module)

    myret = os.system("tar -czf %s %s" % (ud.localpath, module))
    if myret != 0:
        # Remove any truncated tarball so the next run retries.
        try:
            os.unlink(ud.localpath)
        except OSError:
            pass
        raise FetchError(module)
    # cleanup
    os.system('rm -rf %s' % tmpfile)
def go(self, uri, ud, d, checkonly = False):
    """Fetch urls

    Tries PREMIRRORS first, then the original URI, then MIRRORS;
    raises FetchError if every attempt fails.
    """
    def fetch_uri(uri, ud, d):
        # Pick the command: check-only probe, resume of a partial
        # download, or a fresh fetch.
        if checkonly:
            fetchcmd = data.getVar("CHECKCOMMAND", d, 1)
        elif os.path.exists(ud.localpath):
            # file exists, but we didnt complete it.. trying again..
            fetchcmd = data.getVar("RESUMECOMMAND", d, 1)
        else:
            fetchcmd = data.getVar("FETCHCOMMAND", d, 1)

        uri = uri.split(";")[0]
        uri_decoded = list(bb.decodeurl(uri))
        uri_type = uri_decoded[0]
        uri_host = uri_decoded[1]

        bb.msg.note(1, bb.msg.domain.Fetcher, "fetch " + uri)
        fetchcmd = fetchcmd.replace("${URI}", uri.split(";")[0])
        fetchcmd = fetchcmd.replace("${FILE}", ud.basename)

        # Prefix the command with proxy environment variables unless the
        # host matches an ignore list entry.
        httpproxy = None
        ftpproxy = None
        if uri_type == 'http':
            httpproxy = data.getVar("HTTP_PROXY", d, True)
            httpproxy_ignore = (data.getVar("HTTP_PROXY_IGNORE", d, True) or "").split()
            for p in httpproxy_ignore:
                if uri_host.endswith(p):
                    httpproxy = None
                    break
        if uri_type == 'ftp':
            ftpproxy = data.getVar("FTP_PROXY", d, True)
            # NOTE(review): the ftp branch consults HTTP_PROXY_IGNORE, not
            # an FTP-specific ignore list — confirm whether intentional.
            ftpproxy_ignore = (data.getVar("HTTP_PROXY_IGNORE", d, True) or "").split()
            for p in ftpproxy_ignore:
                if uri_host.endswith(p):
                    ftpproxy = None
                    break
        if httpproxy:
            fetchcmd = "http_proxy=" + httpproxy + " " + fetchcmd
        if ftpproxy:
            fetchcmd = "ftp_proxy=" + ftpproxy + " " + fetchcmd
        bb.msg.debug(2, bb.msg.domain.Fetcher, "executing " + fetchcmd)
        ret = os.system(fetchcmd)
        if ret != 0:
            return False

        # Sanity check since wget can pretend it succeed when it didn't
        # Also, this used to happen if sourceforge sent us to the mirror page
        if not os.path.exists(ud.localpath):
            bb.msg.debug(2, bb.msg.domain.Fetcher, "The fetch command for %s returned success but %s doesn't exist?..." % (uri, ud.localpath))
            return False

        return True

    localdata = data.createCopy(d)
    data.setVar('OVERRIDES', "wget:" + data.getVar('OVERRIDES', localdata), localdata)
    data.update_data(localdata)

    # PREMIRRORS/MIRRORS are newline-separated "find replace" pairs.
    premirrors = [ i.split() for i in (data.getVar('PREMIRRORS', localdata, 1) or "").split('\n') if i ]
    for (find, replace) in premirrors:
        newuri = uri_replace(uri, find, replace, d)
        if newuri != uri:
            if fetch_uri(newuri, ud, localdata):
                return True

    if fetch_uri(uri, ud, localdata):
        return True

    # try mirrors
    mirrors = [ i.split() for i in (data.getVar('MIRRORS', localdata, 1) or "").split('\n') if i ]
    for (find, replace) in mirrors:
        newuri = uri_replace(uri, find, replace, d)
        if newuri != uri:
            if fetch_uri(newuri, ud, localdata):
                return True

    raise FetchError(uri)
def go(self, loc, ud, d):
    """
    Fetch urls
    """
    (host, depot, user, pswd, parm) = Perforce.doparse(loc, d)

    # "path" is the depot prefix without the trailing /... wildcard.
    if depot.find('/...') != -1:
        path = depot[:depot.find('/...')]
    else:
        path = depot

    module = parm.get('module', os.path.basename(path))

    localdata = data.createCopy(d)
    data.setVar('OVERRIDES', "p4:%s" % data.getVar('OVERRIDES', localdata), localdata)
    data.update_data(localdata)

    # Get the p4 command; credentials/port are passed as p4 options.
    p4opt = ""
    if user:
        p4opt += " -u %s" % (user)
    if pswd:
        p4opt += " -P %s" % (pswd)
    if host:
        p4opt += " -p %s" % (host)
    p4cmd = data.getVar('FETCHCOMMAND', localdata, 1)

    # create temp directory
    logger.debug(2, "Fetch: creating temporary directory")
    bb.utils.mkdirhier(data.expand('${WORKDIR}', localdata))
    data.setVar('TMPBASE', data.expand('${WORKDIR}/oep4.XXXXXX', localdata), localdata)
    tmppipe = os.popen(data.getVar('MKTEMPDIRCMD', localdata, 1) or "false")
    tmpfile = tmppipe.readline().strip()
    if not tmpfile:
        logger.error("Fetch: unable to create temporary directory.. make sure 'mktemp' is in the PATH.")
        raise FetchError(module)

    # Pin the depot spec to a label or, failing that, a changeset.
    if "label" in parm:
        depot = "%s@%s" % (depot, parm["label"])
    else:
        cset = Perforce.getcset(d, depot, host, user, pswd, parm)
        depot = "%s@%s" % (depot, cset)

    os.chdir(tmpfile)
    logger.info("Fetch " + loc)
    logger.info("%s%s files %s", p4cmd, p4opt, depot)
    p4file = os.popen("%s%s files %s" % (p4cmd, p4opt, depot))

    # NOTE(review): os.popen always returns a pipe object, so this branch
    # appears unreachable — confirm the intended failure check.
    if not p4file:
        logger.error("Fetch: unable to get the P4 files from %s", depot)
        raise FetchError(module)

    count = 0

    # One "p4 print" per listed file; deleted revisions are skipped.
    # (Local names "file" and "list" shadow builtins.)
    for file in p4file:
        list = file.split()

        if list[2] == "delete":
            continue

        dest = list[0][len(path)+1:]
        where = dest.find("#")

        os.system("%s%s print -o %s/%s %s" % (p4cmd, p4opt, module, dest[:where], list[0]))
        count = count + 1

    if count == 0:
        logger.error("Fetch: No files gathered from the P4 fetch")
        raise FetchError(module)

    myret = os.system("tar -czf %s %s" % (ud.localpath, module))
    if myret != 0:
        # Remove any truncated tarball so the next run retries.
        try:
            os.unlink(ud.localpath)
        except OSError:
            pass
        raise FetchError(module)
    # cleanup
    bb.utils.prunedir(tmpfile)
def go(self, d, urls = []):
    """Fetch urls

    Iterates over the given urls (defaulting to self.urls), checking out
    or updating each CVS module and producing a tarball per url in
    DL_DIR.  NOTE(review): mutable default argument; harmless here since
    urls is never mutated, but worth cleaning up.
    """
    if not urls:
        urls = self.urls

    localdata = data.createCopy(d)
    data.setVar('OVERRIDES', "cvs:%s" % data.getVar('OVERRIDES', localdata), localdata)
    data.update_data(localdata)

    for loc in urls:
        # (Local name "type" shadows the builtin.)
        (type, host, path, user, pswd, parm) = bb.decodeurl(data.expand(loc, localdata))
        if not "module" in parm:
            raise MissingParameterError("cvs method needs a 'module' parameter")
        else:
            module = parm["module"]

        dlfile = self.localpath(loc, localdata)
        dldir = data.getVar('DL_DIR', localdata, 1)
        # if local path contains the cvs
        # module, consider the dir above it to be the
        # download directory
        # pos = dlfile.find(module)
        # if pos:
        #     dldir = dlfile[:pos]
        # else:
        #     dldir = os.path.dirname(dlfile)

        # setup cvs options
        options = []
        if 'tag' in parm:
            tag = parm['tag']
        else:
            tag = ""

        # A date pin is only defaulted (to SRCDATE) when no tag is given.
        if 'date' in parm:
            date = parm['date']
        else:
            if not tag:
                date = Fetch.getSRCDate(d)
            else:
                date = ""

        if "method" in parm:
            method = parm["method"]
        else:
            method = "pserver"

        if "localdir" in parm:
            localdir = parm["localdir"]
        else:
            localdir = module

        cvs_rsh = None
        if method == "ext":
            if "rsh" in parm:
                cvs_rsh = parm["rsh"]

        # Tarball name encodes module, host, tag and date.
        tarfn = data.expand('%s_%s_%s_%s.tar.gz' % (module.replace('/', '.'), host, tag, date), localdata)
        data.setVar('TARFILES', dlfile, localdata)
        data.setVar('TARFN', tarfn, localdata)

        dl = os.path.join(dldir, tarfn)
        if os.access(dl, os.R_OK):
            bb.debug(1, "%s already exists, skipping cvs checkout." % tarfn)
            continue

        # try to use the tarball stash
        if Fetch.try_mirror(d, tarfn):
            continue

        if date:
            options.append("-D %s" % date)
        if tag:
            options.append("-r %s" % tag)

        olddir = os.path.abspath(os.getcwd())
        os.chdir(data.expand(dldir, localdata))

        # setup cvsroot
        if method == "dir":
            cvsroot = path
        else:
            cvsroot = ":" + method + ":" + user
            if pswd:
                cvsroot += ":" + pswd
            cvsroot += "@" + host + ":" + path

        data.setVar('CVSROOT', cvsroot, localdata)
        data.setVar('CVSCOOPTS', " ".join(options), localdata)
        data.setVar('CVSMODULE', module, localdata)
        cvscmd = data.getVar('FETCHCOMMAND', localdata, 1)
        cvsupdatecmd = data.getVar('UPDATECOMMAND', localdata, 1)

        if cvs_rsh:
            cvscmd = "CVS_RSH=\"%s\" %s" % (cvs_rsh, cvscmd)
            cvsupdatecmd = "CVS_RSH=\"%s\" %s" % (cvs_rsh, cvsupdatecmd)

        # create module directory
        bb.debug(2, "Fetch: checking for module directory")
        pkg=data.expand('${PN}', d)
        pkgdir=os.path.join(data.expand('${CVSDIR}', localdata), pkg)
        moddir=os.path.join(pkgdir,localdir)
        # An existing CVS/ admin dir means a prior checkout: update it.
        if os.access(os.path.join(moddir,'CVS'), os.R_OK):
            bb.note("Update " + loc)
            # update sources there
            os.chdir(moddir)
            myret = os.system(cvsupdatecmd)
        else:
            bb.note("Fetch " + loc)
            # check out sources there
            bb.mkdirhier(pkgdir)
            os.chdir(pkgdir)
            bb.debug(1, "Running %s" % cvscmd)
            myret = os.system(cvscmd)

        if myret != 0 or not os.access(moddir, os.R_OK):
            # NOTE(review): rmdir only removes an empty directory; a
            # partial checkout remains — confirm intent.
            try:
                os.rmdir(moddir)
            except OSError:
                pass
            raise FetchError(module)

        os.chdir(moddir)
        os.chdir('..')
        # tar them up to a defined filename
        myret = os.system("tar -czf %s %s" % (os.path.join(dldir,tarfn), os.path.basename(moddir)))
        if myret != 0:
            # NOTE(review): unlink uses the bare tarfn (relative to the
            # current directory) while tar wrote to dldir, and no
            # FetchError is raised on tar failure — confirm both.
            try:
                os.unlink(tarfn)
            except OSError:
                pass
        os.chdir(olddir)
    del localdata
def generateDepTreeData(self, pkgs_to_build, task):
    """
    Create a dependency tree of pkgs_to_build, returning the data.

    pkgs_to_build -- list of target names to resolve
    task          -- task name (without the "do_" prefix); None selects
                     the configured default command

    Returns a dict with keys "pn", "depends", "tdepends", "rdepends-pn",
    "packages", "rdepends-pkg" and "rrecs-pkg" describing build-time,
    task-level and runtime dependencies.
    """
    # Need files parsed
    self.updateCache()

    # If we are told to do the None task then query the default task
    # BUGFIX: identity comparison ('is None') instead of '== None'.
    if task is None:
        task = self.configuration.cmd

    pkgs_to_build = self.checkPackages(pkgs_to_build)

    localdata = data.createCopy(self.configuration.data)
    bb.data.update_data(localdata)
    bb.data.expandKeys(localdata)
    taskdata = bb.taskdata.TaskData(self.configuration.abort)

    # Resolve a provider for every target and queue its do_<task>.
    runlist = []
    for k in pkgs_to_build:
        taskdata.add_provider(localdata, self.status, k)
        runlist.append([k, "do_%s" % task])
    taskdata.add_unresolved(localdata, self.status)

    rq = bb.runqueue.RunQueue(self, self.configuration.data, self.status, taskdata, runlist)
    rq.prepare_runqueue()

    seen_fnids = []
    depend_tree = {}
    depend_tree["depends"] = {}
    depend_tree["tdepends"] = {}
    depend_tree["pn"] = {}
    depend_tree["rdepends-pn"] = {}
    depend_tree["packages"] = {}
    depend_tree["rdepends-pkg"] = {}
    depend_tree["rrecs-pkg"] = {}

    # Walk every task in the prepared run queue.  The loop variable is
    # named 'tid' (it previously shadowed the 'task' parameter).
    for tid in range(len(rq.runq_fnid)):
        fnid = rq.runq_fnid[tid]
        fn = taskdata.fn_index[fnid]
        pn = self.status.pkg_fn[fn]
        version = "%s:%s-%s" % self.status.pkg_pepvpr[fn]
        if pn not in depend_tree["pn"]:
            depend_tree["pn"][pn] = {}
            depend_tree["pn"][pn]["filename"] = fn
            depend_tree["pn"][pn]["version"] = version

        # Task-level edges: "<pn>.<taskname>" -> dependency tasks.
        for dep in rq.runq_depends[tid]:
            depfn = taskdata.fn_index[rq.runq_fnid[dep]]
            deppn = self.status.pkg_fn[depfn]
            dotname = "%s.%s" % (pn, rq.runq_task[tid])
            if not dotname in depend_tree["tdepends"]:
                depend_tree["tdepends"][dotname] = []
            depend_tree["tdepends"][dotname].append("%s.%s" % (deppn, rq.runq_task[dep]))

        # Recipe-level data is gathered only once per recipe file.
        if fnid not in seen_fnids:
            seen_fnids.append(fnid)
            packages = []

            depend_tree["depends"][pn] = []
            for dep in taskdata.depids[fnid]:
                depend_tree["depends"][pn].append(taskdata.build_names_index[dep])

            depend_tree["rdepends-pn"][pn] = []
            for rdep in taskdata.rdepids[fnid]:
                depend_tree["rdepends-pn"][pn].append(taskdata.run_names_index[rdep])

            # Runtime dependencies, recorded per emitted package.
            rdepends = self.status.rundeps[fn]
            for package in rdepends:
                depend_tree["rdepends-pkg"][package] = []
                for rdepend in rdepends[package]:
                    depend_tree["rdepends-pkg"][package].append(rdepend)
                packages.append(package)

            # Runtime recommendations, likewise per package.
            rrecs = self.status.runrecs[fn]
            for package in rrecs:
                depend_tree["rrecs-pkg"][package] = []
                for rdepend in rrecs[package]:
                    depend_tree["rrecs-pkg"][package].append(rdepend)
                if not package in packages:
                    packages.append(package)

            for package in packages:
                if package not in depend_tree["packages"]:
                    depend_tree["packages"][package] = {}
                    depend_tree["packages"][package]["pn"] = pn
                    depend_tree["packages"][package]["filename"] = fn
                    depend_tree["packages"][package]["version"] = version

    return depend_tree
def go(self, uri, ud, d, checkonly=False):
    """Fetch urls"""

    def attempt(target, ud, d):
        # Pick the command to run: a probe when only checking, a resume
        # when a partial file is already on disk, otherwise a fresh fetch.
        if checkonly:
            cmd = data.getVar("CHECKCOMMAND", d, 1)
        elif os.path.exists(ud.localpath):
            # A file exists but was not completed; try resuming it.
            cmd = data.getVar("RESUMECOMMAND", d, 1)
        else:
            cmd = data.getVar("FETCHCOMMAND", d, 1)

        bb.msg.note(1, bb.msg.domain.Fetcher, "fetch " + target)
        cmd = cmd.replace("${URI}", target)
        cmd = cmd.replace("${FILE}", ud.basename)
        bb.msg.debug(2, bb.msg.domain.Fetcher, "executing " + cmd)
        if os.system(cmd) != 0:
            return False

        # Sanity check: wget can report success without producing the
        # file (this used to happen when sourceforge served its mirror
        # page instead of the download).
        if not os.path.exists(ud.localpath):
            bb.msg.debug(2, bb.msg.domain.Fetcher, "The fetch command for %s returned success but %s doesn't exist?..." % (target, ud.localpath))
            return False

        return True

    localdata = data.createCopy(d)
    data.setVar('OVERRIDES', "wget:" + data.getVar('OVERRIDES', localdata), localdata)
    data.update_data(localdata)

    def mirror_pairs(varname):
        # Parse a newline-separated "<match> <replacement>" variable.
        raw = data.getVar(varname, localdata, 1) or ""
        return [entry.split() for entry in raw.split('\n') if entry]

    # Premirrors are consulted before the canonical location...
    for (find, replace) in mirror_pairs('PREMIRRORS'):
        candidate = uri_replace(uri, find, replace, d)
        if candidate != uri and attempt(candidate, ud, localdata):
            return True

    # ...then the original uri itself...
    if attempt(uri, ud, localdata):
        return True

    # ...and finally the fallback mirrors.
    for (find, replace) in mirror_pairs('MIRRORS'):
        candidate = uri_replace(uri, find, replace, d)
        if candidate != uri and attempt(candidate, ud, localdata):
            return True

    raise FetchError(uri)
def generateDotGraph( self, pkgs_to_build, ignore_deps ):
    """
    Generate a task dependency graph.

    pkgs_to_build A list of packages that needs to be built
    ignore_deps   A list of names where processing of dependencies
                  should be stopped. e.g. dependencies that get
                  in the way of the graph.

    Writes 'depends.dot' (recipe/runtime dependencies) and
    'task-depends.dot' (per-task dependencies) to the current
    directory.  Exits the process if a provider cannot be found.
    """
    # Register every ignored name so dependency processing stops there.
    for dep in ignore_deps:
        self.status.ignored_dependencies.add(dep)

    localdata = data.createCopy(self.configuration.data)
    bb.data.update_data(localdata)
    bb.data.expandKeys(localdata)
    taskdata = bb.taskdata.TaskData(self.configuration.abort)

    # Resolve a provider for each target and queue do_<cmd> for it.
    runlist = []
    try:
        for k in pkgs_to_build:
            taskdata.add_provider(localdata, self.status, k)
            runlist.append([k, "do_%s" % self.configuration.cmd])
        taskdata.add_unresolved(localdata, self.status)
    except bb.providers.NoProvider:
        # Provider errors have already been reported; just bail out.
        sys.exit(1)
    rq = bb.runqueue.RunQueue(self, self.configuration.data, self.status, taskdata, runlist)
    rq.prepare_runqueue()

    seen_fnids = []
    depends_file = file('depends.dot', 'w' )
    tdepends_file = file('task-depends.dot', 'w' )
    print >> depends_file, "digraph depends {"
    print >> tdepends_file, "digraph depends {"

    # Walk every task in the prepared run queue.
    for task in range(len(rq.runq_fnid)):
        taskname = rq.runq_task[task]
        fnid = rq.runq_fnid[task]
        fn = taskdata.fn_index[fnid]
        pn = self.status.pkg_fn[fn]
        version = "%s:%s-%s" % self.status.pkg_pepvpr[fn]
        # One node per (recipe, task) pair in the task graph.
        print >> tdepends_file, '"%s.%s" [label="%s %s\\n%s\\n%s"]' % (pn, taskname, pn, taskname, version, fn)
        for dep in rq.runq_depends[task]:
            depfn = taskdata.fn_index[rq.runq_fnid[dep]]
            deppn = self.status.pkg_fn[depfn]
            print >> tdepends_file, '"%s.%s" -> "%s.%s"' % (pn, rq.runq_task[task], deppn, rq.runq_task[dep])
        # Recipe-level nodes and edges are emitted only once per recipe file.
        if fnid not in seen_fnids:
            seen_fnids.append(fnid)
            packages = []
            print >> depends_file, '"%s" [label="%s %s\\n%s"]' % (pn, pn, version, fn)
            for depend in self.status.deps[fn]:
                print >> depends_file, '"%s" -> "%s"' % (pn, depend)
            # Runtime dependencies are drawn dashed, per emitted package.
            rdepends = self.status.rundeps[fn]
            for package in rdepends:
                for rdepend in rdepends[package]:
                    print >> depends_file, '"%s" -> "%s" [style=dashed]' % (package, rdepend)
                packages.append(package)
            # Runtime recommendations, also dashed.
            rrecs = self.status.runrecs[fn]
            for package in rrecs:
                for rdepend in rrecs[package]:
                    print >> depends_file, '"%s" -> "%s" [style=dashed]' % (package, rdepend)
                if not package in packages:
                    packages.append(package)
            # Sub-packages other than the main pn get their own nodes,
            # inheriting the recipe's build-time dependency edges.
            for package in packages:
                if package != pn:
                    print >> depends_file, '"%s" [label="%s(%s) %s\\n%s"]' % (package, package, pn, version, fn)
                    for depend in self.status.deps[fn]:
                        print >> depends_file, '"%s" -> "%s"' % (package, depend)
    # Prints a flattened form of the above where subpackages of a package are merged into the main pn
    #print >> depends_file, '"%s" [label="%s %s\\n%s\\n%s"]' % (pn, pn, taskname, version, fn)
    #for rdep in taskdata.rdepids[fnid]:
    #    print >> depends_file, '"%s" -> "%s" [style=dashed]' % (pn, taskdata.run_names_index[rdep])
    #for dep in taskdata.depids[fnid]:
    #    print >> depends_file, '"%s" -> "%s"' % (pn, taskdata.build_names_index[dep])
    print >> depends_file, "}"
    print >> tdepends_file, "}"
    bb.msg.note(1, bb.msg.domain.Collection, "Dependencies saved to 'depends.dot'")
    bb.msg.note(1, bb.msg.domain.Collection, "Task dependencies saved to 'task-depends.dot'")
def download(self, ud, d):
    """
    Fetch urls

    Lists the files under the Perforce depot spec (pinned to a label or
    changeset), prints each file into a temporary directory, and tars the
    result up as ud.localpath.  Raises FetchError when the temp dir
    cannot be created or no files are fetched.
    """
    (host, depot, user, pswd, parm) = Perforce.doparse(ud.url, d)

    # Strip a trailing "/..." wildcard to get the depot base path.
    if depot.find('/...') != -1:
        path = depot[:depot.find('/...')]
    else:
        path = depot

    module = parm.get('module', os.path.basename(path))

    localdata = data.createCopy(d)
    data.setVar('OVERRIDES', "p4:%s" % data.getVar('OVERRIDES', localdata), localdata)
    data.update_data(localdata)

    # Get the p4 command
    p4opt = ""
    if user:
        p4opt += " -u %s" % (user)
    if pswd:
        p4opt += " -P %s" % (pswd)
    if host:
        p4opt += " -p %s" % (host)

    p4cmd = data.getVar('FETCHCOMMAND', localdata, True)

    # create temp directory
    logger.debug(2, "Fetch: creating temporary directory")
    bb.utils.mkdirhier(data.expand('${WORKDIR}', localdata))
    data.setVar('TMPBASE', data.expand('${WORKDIR}/oep4.XXXXXX', localdata), localdata)
    tmpfile, errors = bb.process.run(data.getVar('MKTEMPDIRCMD', localdata, True) or "false")
    tmpfile = tmpfile.strip()
    if not tmpfile:
        raise FetchError("Fetch: unable to create temporary directory.. make sure 'mktemp' is in the PATH.", ud.url)

    # Pin the depot spec to a label, or to a resolved changeset otherwise.
    if "label" in parm:
        depot = "%s@%s" % (depot, parm["label"])
    else:
        cset = Perforce.getcset(d, depot, host, user, pswd, parm)
        depot = "%s@%s" % (depot, cset)

    os.chdir(tmpfile)
    logger.info("Fetch " + ud.url)
    logger.info("%s%s files %s", p4cmd, p4opt, depot)
    p4file, errors = bb.process.run("%s%s files %s" % (p4cmd, p4opt, depot))
    p4file = [f.rstrip() for f in p4file.splitlines()]

    if not p4file:
        raise FetchError("Fetch: unable to get the P4 files from %s" % depot, ud.url)

    count = 0

    # Renamed locals: the originals shadowed the 'file' and 'list' builtins.
    for p4entry in p4file:
        fields = p4entry.split()
        # Skip files whose head action is a deletion.
        if fields[2] == "delete":
            continue

        dest = fields[0][len(path) + 1:]
        # Strip the "#<rev>" suffix from the destination path.
        # NOTE(review): assumes every 'p4 files' line contains '#'; if it
        # didn't, dest[:where] would drop the last character — confirm.
        where = dest.find("#")

        # NOTE(review): shell=True with a command built from configuration
        # (FETCHCOMMAND) and depot paths — trusted input here, but keep in
        # mind if the source of these values ever changes.
        subprocess.call("%s%s print -o %s/%s %s" % (p4cmd, p4opt, module, dest[:where], fields[0]), shell=True)
        count = count + 1

    if count == 0:
        # BUGFIX: logger.error() was called with no message, which raises
        # TypeError; log the actual failure instead.
        logger.error("Fetch: No files gathered from the P4 fetch")
        raise FetchError("Fetch: No files gathered from the P4 fetch", ud.url)

    runfetchcmd("tar -czf %s %s" % (ud.localpath, module), d, cleanup = [ud.localpath])

    # cleanup
    bb.utils.prunedir(tmpfile)