def cache(dn):
    if get_cache(dn):
        app.log(dn, "Bah! I could have cached", cache_key(dn))
        return
    tempfile.tempdir = app.config['tmp']
    tmpdir = tempfile.mkdtemp()
    cachefile = os.path.join(tmpdir, cache_key(dn))
    if dn.get('kind') == "system":
        utils.hardlink_all_files(dn['install'], dn['sandbox'])
        shutil.rmtree(dn['checkout'])
        utils.set_mtime_recursively(dn['install'])
        utils.make_deterministic_tar_archive(cachefile, dn['install'])
        shutil.move('%s.tar' % cachefile, cachefile)
    else:
        utils.set_mtime_recursively(dn['install'])
        utils.make_deterministic_gztar_archive(cachefile, dn['install'])
        shutil.move('%s.tar.gz' % cachefile, cachefile)

    app.config['counter'].increment()
    unpack(dn, cachefile)

    if app.config.get('kbas-password', 'insecure') != 'insecure' and \
            app.config.get('kbas-url') is not None:
        if dn.get('kind', 'chunk') in app.config.get('kbas-upload', 'chunk'):
            with app.timer(dn, 'upload'):
                upload(dn)

def _tidy(self, this):
    ''' Load a single definition file '''
    self._fix_path_name(this)

    # handle morph syntax oddities...
    for system in this.get('systems', []):
        self._fix_path_name(system)
        for subsystem in system.get('subsystems', []):
            self._fix_path_name(subsystem)

    for index, component in enumerate(this.get('build-depends', [])):
        self._fix_path_name(component)
        this['build-depends'][index] = self._insert(component)

    for subset in ['chunks', 'strata']:
        if this.get(subset):
            this['contents'] = this.pop(subset)

    lookup = {}
    for index, component in enumerate(this.get('contents', [])):
        self._fix_path_name(component)
        lookup[component['name']] = component['path']
        if component['name'] == this['name']:
            app.log(this, 'WARNING: %s contains' % this['name'],
                    component['name'])
        for x, it in enumerate(component.get('build-depends', [])):
            component['build-depends'][x] = lookup.get(it, it)

        component['build-depends'] = (this.get('build-depends', []) +
                                      component.get('build-depends', []))
        this['contents'][index] = self._insert(component)

    return self._insert(this)

def setup(this):
    currentdir = os.getcwd()
    tempfile.tempdir = app.config['tmp']
    this['sandbox'] = tempfile.mkdtemp()
    os.environ['TMPDIR'] = app.config['tmp']
    app.config['sandboxes'] += [this['sandbox']]
    this['build'] = os.path.join(this['sandbox'], this['name'] + '.build')
    this['install'] = os.path.join(this['sandbox'], this['name'] + '.inst')
    this['baserockdir'] = os.path.join(this['install'], 'baserock')
    this['tmp'] = os.path.join(this['sandbox'], 'tmp')
    for directory in ['build', 'install', 'tmp', 'baserockdir']:
        os.makedirs(this[directory])
    this['log'] = os.path.join(app.config['artifacts'],
                               this['cache'] + '.build-log')
    if app.config.get('instances'):
        this['log'] += '.' + str(app.config.get('fork', 0))
    assembly_dir = this['sandbox']
    for directory in ['dev', 'tmp']:
        call(['mkdir', '-p', os.path.join(assembly_dir, directory)])

    try:
        yield
    except app.RetryException as e:
        raise e
    except:
        import traceback
        app.log(this, 'ERROR: a surprise exception happened', '')
        traceback.print_exc()
        app.exit(this, 'ERROR: sandbox debris is at', this['sandbox'])
    finally:
        pass

    if app.config.get('log-verbose'):
        app.log(this, "Removing sandbox dir", this['sandbox'])
    app.remove_dir(this['sandbox'])

def do_deployment_manifest(system, configuration):
    app.log(system, "Creating deployment manifest in", system['sandbox'])
    data = {'configuration': configuration}
    metafile = os.path.join(system['sandbox'], 'baserock', 'deployment.meta')
    with app.chdir(system['sandbox']), open(metafile, "w") as f:
        json.dump(data, f, indent=4, sort_keys=True,
                  encoding='unicode-escape')
        f.flush()

def do_release_note(release_note):
    tempfile.tempdir = config['tmp']
    tmpdir = tempfile.mkdtemp()
    if call(['git', 'config', 'user.name']):
        call(['git', 'config', 'user.name', 'ybd'])
    if call(['git', 'config', 'user.email']):
        call(['git', 'config', 'user.email', '*****@*****.**'])

    if 'release-since' in config:
        ref = config['release-since']
    else:
        ref = get_last_tag('.')

    with explore(ref):
        old_defs = Morphs()._data

    for path in app.defs._data:
        dn = app.defs.get(path)
        if dn.get('cache'):
            log_changes(dn, tmpdir, old_defs, ref)

    count = 0
    with open(release_note, 'w') as f:
        for log_file in os.listdir(tmpdir):
            count += 1
            f.write('====================================================\n\n')
            with open(os.path.join(tmpdir, log_file)) as infile:
                for line in infile:
                    f.write(line)
            f.write('\n\n')
    log('RELEASE NOTE', 'Changes for %s components logged at' % count,
        release_note)

def run_logged(this, cmd_list):
    app.log_env(this["log"], os.environ, argv_to_string(cmd_list))
    with open(this["log"], "a") as logfile:
        if call(cmd_list, stdin=PIPE, stdout=logfile, stderr=logfile):
            app.log(this, "ERROR: command failed in directory %s:\n\n" %
                    os.getcwd(), argv_to_string(cmd_list))
            call(["tail", "-n", "200", this["log"]])
            app.exit(this, "ERROR: log file is at", this["log"])

def deploy(target):
    '''Deploy systems and subsystems recursively'''
    defs = Definitions()
    deployment = target if type(target) is dict else defs.get(target)

    with app.timer(deployment, 'Starting deployment'):
        for system in deployment.get('systems', []):
            deploy(system)
            for subsystem in system.get('subsystems', []):
                deploy(subsystem)

        system = defs.get(deployment['path'])
        if system.get('arch') and system['arch'] != app.settings['arch']:
            app.log(target, 'Skipping deployment for', system['arch'])
            return None

        sandbox.setup(system)
        for name, deployment in deployment.get('deploy', {}).iteritems():
            method = os.path.basename(deployment['type'])
            sandbox.run_extension(system, deployment, 'check', method)
            app.log(system, "Extracting system artifact")
            with open(cache.get_cache(system), "r") as artifact:
                call(['tar', 'x', '--directory', system['sandbox']],
                     stdin=artifact)

            for ext in system.get('configuration-extensions', []):
                sandbox.run_extension(system, deployment, 'configure',
                                      os.path.basename(ext))

            os.chmod(system['sandbox'], 0o755)
            sandbox.run_extension(system, deployment, 'write', method)
        sandbox.remove(system)

def upload(defs, this):
    cachefile = get_cache(defs, this)
    url = app.config['kbas-url'] + 'upload'
    params = {"filename": this['cache'],
              "password": app.config['kbas-password'],
              "checksum": md5(cachefile)}
    with open(cachefile, 'rb') as f:
        try:
            response = requests.post(url=url, data=params, files={"file": f})
            if response.status_code == 201:
                app.log(this, 'Uploaded %s to' % this['cache'], url)
                return
            if response.status_code == 777:
                app.log(this, 'Reproduced %s at' % md5(cachefile),
                        this['cache'])
                app.config['reproduced'].append([md5(cachefile),
                                                 this['cache']])
                return
            if response.status_code == 405:
                # server has different md5 for this artifact
                if this['kind'] == 'stratum':
                    app.exit('BIT-FOR-BIT',
                             'ERROR: stratum reproduction failed for',
                             this['cache'])
                app.log(this, 'Artifact server already has', this['cache'])
                return
            app.log(this, 'Artifact server problem:', response.status_code)
        except:
            pass
        app.log(this, 'Failed to upload', this['cache'])

def get_tree(this):
    ref = this["ref"]
    gitdir = os.path.join(app.config["gits"], get_repo_name(this["repo"]))
    if this["repo"].startswith("file://") or this["repo"].startswith("/"):
        gitdir = this["repo"].replace("file://", "")
        if not os.path.isdir(gitdir):
            app.exit(this, "ERROR: git repo not found:", this["repo"])

    if not os.path.exists(gitdir):
        try:
            url = (app.config["tree-server"] + "repo=" +
                   get_repo_url(this["repo"]) + "&ref=" + ref)
            response = requests.get(url=url)
            tree = response.json()["tree"]
            return tree
        except:
            if app.config.get("tree-server"):
                app.log(this, "WARNING: no tree from tree-server for", ref)
        mirror(this["name"], this["repo"])

    with app.chdir(gitdir), open(os.devnull, "w") as fnull:
        if call(["git", "rev-parse", ref + "^{object}"], stdout=fnull,
                stderr=fnull):
            # can't resolve this ref. is it upstream?
            app.log(this, "Fetching from upstream to resolve %s" % ref)
            call(["git", "fetch", "origin"], stdout=fnull, stderr=fnull)
        try:
            tree = check_output(["git", "rev-parse", ref + "^{tree}"],
                                universal_newlines=True)[0:-1]
            return tree
        except:
            # either we don't have a git dir, or ref is not unique
            # or ref does not exist
            app.exit(this, "ERROR: could not find tree for ref",
                     (ref, gitdir))

def write_chunk_metafile(defs, chunk):
    '''Writes a chunk .meta file to the baserock dir of the chunk

    The split rules are used to divide up the installed files for the
    chunk into artifacts in the 'products' list

    '''
    app.log(chunk['name'], 'splitting chunk')
    rules, splits = compile_rules(defs, chunk)

    install_dir = chunk['install']
    fs = OSFS(install_dir)
    files = fs.walkfiles('.', search='depth')
    dirs = fs.walkdirs('.', search='depth')

    for path in files:
        for artifact, rule in rules:
            if rule.match(path):
                splits[artifact].append(path)
                break

    all_files = [a for x in splits.values() for a in x]
    for path in dirs:
        if not any(map(lambda y: y.startswith(path), all_files)) \
                and path != '':
            for artifact, rule in rules:
                if rule.match(path) or rule.match(path + '/'):
                    splits[artifact].append(path)
                    break

    write_metafile(rules, splits, chunk)

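# The rules produced by compile_rules() behave like compiled regexes with a
# .match() method, and every path lands in the first artifact whose rule
# matches. A hedged, self-contained sketch of how such a ruleset could be
# built and applied (the patterns and artifact names here are illustrative,
# not baserock's actual default splits):
import re

rules = [('foo-devel', re.compile(r'.*(/include/|\.h$|\.a$)')),
         ('foo-doc', re.compile(r'.*/share/(doc|man)/')),
         ('foo-runtime', re.compile(r'.*'))]  # catch-all goes last

splits = {artifact: [] for artifact, _ in rules}
for path in ['./usr/include/foo.h', './usr/lib/libfoo.so.1',
             './usr/share/man/man1/foo.1']:
    for artifact, rule in rules:
        if rule.match(path):
            splits[artifact].append(path)
            break
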
def cache(defs, this):
    if get_cache(defs, this):
        app.log(this, "Bah! I could have cached", cache_key(defs, this))
        return
    tempfile.tempdir = app.config['tmp']
    tmpdir = tempfile.mkdtemp()
    cachefile = os.path.join(tmpdir, cache_key(defs, this))
    if this.get('kind') == "system":
        utils.hardlink_all_files(this['install'], this['sandbox'])
        shutil.rmtree(this['install'])
        shutil.rmtree(this['build'])
        utils.set_mtime_recursively(this['sandbox'])
        utils.make_deterministic_tar_archive(cachefile, this['sandbox'])
        os.rename('%s.tar' % cachefile, cachefile)
    else:
        utils.set_mtime_recursively(this['install'])
        utils.make_deterministic_gztar_archive(cachefile, this['install'])
        os.rename('%s.tar.gz' % cachefile, cachefile)

    unpack(defs, this, cachefile)

    if app.config.get('kbas-password', 'insecure') != 'insecure' and \
            app.config.get('kbas-url', 'http://foo.bar/') != 'http://foo.bar/':
        if this.get('kind', 'chunk') == 'chunk':
            with app.timer(this, 'upload'):
                upload(defs, this)

def checkout(name, repo, ref, checkout):
    gitdir = os.path.join(app.config['gits'], get_repo_name(repo))
    if not os.path.exists(gitdir):
        mirror(name, repo)
    elif not mirror_has_ref(gitdir, ref):
        update_mirror(name, repo, gitdir)

    # checkout the required version of this from git
    with open(os.devnull, "w") as fnull:
        # We need to pass '--no-hardlinks' because right now there's nothing
        # to stop the build from overwriting the files in the .git directory
        # inside the sandbox. If they were hardlinks, it'd be possible for a
        # build to corrupt the repo cache. I think it would be faster if we
        # removed --no-hardlinks, though.
        if call(['git', 'clone', '--no-hardlinks', gitdir, checkout],
                stdout=fnull, stderr=fnull):
            app.exit(name, 'ERROR: git clone failed for', ref)

        with app.chdir(checkout):
            if call(['git', 'checkout', '--force', ref], stdout=fnull,
                    stderr=fnull):
                app.exit(name, 'ERROR: git checkout failed for', ref)

            app.log(name, 'Git checkout %s in %s' % (repo, checkout))
            app.log(name, 'Upstream version %s' % get_version(checkout, ref))

            if os.path.exists('.gitmodules'):
                checkout_submodules(name, ref)

    utils.set_mtime_recursively(checkout)

def __init__(self, directory='.'):
    '''Load all definitions from a directory tree.'''
    self._definitions = {}
    self._trees = {}

    schemas = self.load_schemas()
    with app.chdir(directory):
        for dirname, dirnames, filenames in os.walk('.'):
            filenames.sort()
            dirnames.sort()
            if '.git' in dirnames:
                dirnames.remove('.git')
            for filename in filenames:
                if filename.endswith(('.def', '.morph')):
                    path = os.path.join(dirname, filename)
                    data = self._load(path)
                    if data is not None:
                        self.validate_schema(schemas, data)
                        data['path'] = path[2:]
                        self._fix_keys(data)
                        self._tidy_and_insert_recursively(data)

    self.defaults = defaults.Defaults()
    caches_are_valid = self._check_trees()
    for path in self._definitions:
        try:
            this = self._definitions[path]
            if this.get('ref') and self._trees.get(path):
                if this['ref'] == self._trees.get(path)[0]:
                    this['tree'] = self._trees.get(path)[1]
        except:
            app.log('DEFINITIONS', 'WARNING: problem with .trees file')
            pass

def install(defs, this, component):
    if os.path.exists(os.path.join(this['sandbox'], 'baserock',
                                   component['name'] + '.meta')):
        return
    app.log(this, 'Installing %s' % component['cache'])
    _install(defs, this, component)

def mirror(name, repo):
    tempfile.tempdir = app.config['tmp']
    tmpdir = tempfile.mkdtemp()
    repo_url = get_repo_url(repo)
    try:
        tar_file = get_repo_name(repo_url) + '.tar'
        app.log(name, 'Try fetching tarball %s' % tar_file)
        # try tarball first
        with app.chdir(tmpdir), open(os.devnull, "w") as fnull:
            call(['wget', os.path.join(app.config['tar-url'], tar_file)])
            call(['tar', 'xf', tar_file], stderr=fnull)
            os.remove(tar_file)
            update_mirror(name, repo, tmpdir)
    except:
        app.log(name, 'Try git clone from', repo_url)
        with open(os.devnull, "w") as fnull:
            if call(['git', 'clone', '--mirror', '-n', repo_url, tmpdir]):
                app.log(name, 'Failed to clone', repo, exit=True)

        with app.chdir(tmpdir):
            if call(['git', 'rev-parse']):
                app.log(name, 'Problem mirroring git repo at', tmpdir,
                        exit=True)

    gitdir = os.path.join(app.config['gits'], get_repo_name(repo))
    try:
        shutil.move(tmpdir, gitdir)
        app.log(name, 'Git repo is mirrored at', gitdir)
    except:
        pass

def compose(defs, target):
    '''Work through defs tree, building and assembling until target exists'''
    component = defs.get(target)
    if app.config.get('log-verbose'):
        app.log(target, "Composing", component['name'])

    # if we can't calculate cache key, we can't create this component
    if cache_key(defs, component) is False:
        return False

    # if this component is already cached, we're done
    if get_cache(defs, component):
        return cache_key(defs, component)

    # if we have a kbas, look there to see if this component exists
    if app.config.get('kbas-url'):
        with claim(defs, component):
            if get_remote(defs, component):
                app.config['counter'].increment()
                return cache_key(defs, component)

    if component.get('arch') and component['arch'] != app.config['arch']:
        return None

    with sandbox.setup(component):
        assemble(defs, component)
        if 'systems' not in component and not get_cache(defs, component):
            install_dependencies(defs, component)
            build(defs, component)

    return cache_key(defs, component)

def install_dependencies(defs, component):
    '''Install recursed dependencies of component into component's sandbox.'''
    def install(defs, component, dependencies):
        shuffle(dependencies)
        for it in dependencies:
            dependency = defs.get(it)
            if os.path.exists(os.path.join(component['sandbox'], 'baserock',
                                           dependency['name'] + '.meta')):
                # dependency has already been installed
                if app.config.get('log-verbose'):
                    app.log(component, 'Already installed',
                            dependency['name'])
                continue

            install(defs, component, dependency.get('build-depends', []))
            if (it in component['build-depends']) or \
                    (dependency.get('build-mode', 'staging') ==
                     component.get('build-mode', 'staging')):
                compose(defs, dependency)
                if dependency.get('contents'):
                    install(defs, component, dependency.get('contents'))
                sandbox.install(defs, component, dependency)

    component = defs.get(component)
    dependencies = component.get('build-depends', [])
    if app.config.get('log-verbose'):
        app.log(component, 'Installing dependencies\n', dependencies)
    install(defs, component, dependencies)
    if app.config.get('log-verbose'):
        sandbox.list_files(component)

def install_contents(defs, component):
    '''Install recursed contents of component into component's sandbox.'''
    def install(defs, component, contents):
        shuffle(contents)
        for it in contents:
            content = defs.get(it)
            if os.path.exists(os.path.join(component['sandbox'], 'baserock',
                                           content['name'] + '.meta')):
                # content has already been installed
                if app.config.get('log-verbose'):
                    app.log(component, 'Already installed', content['name'])
                continue

            install(defs, component, content.get('contents', []))
            compose(defs, content)
            if content.get('build-mode', 'staging') != 'bootstrap':
                sandbox.install(defs, component, content)

    component = defs.get(component)
    contents = component.get('contents', [])
    if app.config.get('log-verbose'):
        app.log(component, 'Installing contents\n', contents)
    install(defs, component, contents)
    if app.config.get('log-verbose'):
        sandbox.list_files(component)

def get_tree(this):
    ref = this['ref']
    gitdir = os.path.join(app.config['gits'], get_repo_name(this['repo']))

    if not os.path.exists(gitdir):
        try:
            url = (app.config['tree-server'] + 'repo=' +
                   get_repo_url(this['repo']) + '&ref=' + ref)
            response = requests.get(url=url)
            tree = response.json()['tree']
            return tree
        except:
            app.log(this, 'WARNING: no tree from tree-server', ref)
        mirror(this['name'], this['repo'])

    with app.chdir(gitdir), open(os.devnull, "w") as fnull:
        if call(['git', 'rev-parse', ref + '^{object}'], stdout=fnull,
                stderr=fnull):
            # can't resolve this ref. is it upstream?
            app.log(this, 'Fetching from upstream to resolve %s' % ref)
            call(['git', 'fetch', 'origin'], stdout=fnull, stderr=fnull)
        try:
            tree = check_output(['git', 'rev-parse', ref + '^{tree}'],
                                universal_newlines=True)[0:-1]
            return tree
        except:
            # either we don't have a git dir, or ref is not unique
            # or ref does not exist
            app.exit(this, 'ERROR: could not find tree for ref',
                     (ref, gitdir))

def _insert(self, new_def):
    '''Insert a new definition into the dictionary, return the key.

    Takes a dict representing a single definition.

    If a definition with the same 'path' already exists, extend the
    existing definition with the contents of `new_def` unless it and the
    new definition contain a 'ref'. If any keys are duplicated in the
    existing definition, output a warning.

    If a definition with the same 'path' doesn't exist, just add `new_def`
    to the dictionary.

    '''
    definition = self._definitions.get(new_def['path'])
    if definition:
        if (definition.get('ref') is None or
                new_def.get('ref') is None):
            for key in new_def:
                definition[key] = new_def[key]

        for key in new_def:
            if definition.get(key) != new_def[key]:
                app.log(new_def, 'WARNING: multiple definitions of', key)
                app.log(new_def, '%s | %s' % (definition.get(key),
                                              new_def[key]))
    else:
        self._definitions[new_def['path']] = new_def

    return new_def['path']

def __init__(self):
    '''Load all definitions from `cwd` tree.'''
    self._definitions = {}
    self._trees = {}

    json_schema = self._load(app.config.get('json-schema'))
    definitions_schema = self._load(app.config.get('defs-schema'))
    if json_schema and definitions_schema:
        import jsonschema as js
        js.validate(json_schema, json_schema)
        js.validate(definitions_schema, json_schema)

    things_have_changed = not self._check_trees()
    for dirname, dirnames, filenames in os.walk('.'):
        filenames.sort()
        dirnames.sort()
        if '.git' in dirnames:
            dirnames.remove('.git')
        for filename in filenames:
            if filename.endswith(('.def', '.morph')):
                definition_data = self._load(
                    os.path.join(dirname, filename))
                if definition_data is not None:
                    if things_have_changed and definitions_schema:
                        app.log(filename, 'Validating schema')
                        js.validate(definition_data, definitions_schema)
                    self._tidy_and_insert_recursively(definition_data)

    self.defaults = defaults.Defaults()

    if self._check_trees():
        for name in self._definitions:
            self._definitions[name]['tree'] = self._trees.get(name)

def get_cache(defs, this):
    ''' Check if a cached artifact exists for the hashed version of this. '''
    if cache_key(defs, this) is False:
        return False

    cachedir = os.path.join(app.config['artifacts'], cache_key(defs, this))
    if os.path.isdir(cachedir):
        call(['touch', cachedir])
        artifact = os.path.join(cachedir, cache_key(defs, this))
        unpackdir = artifact + '.unpacked'
        if not os.path.isdir(unpackdir):
            tempfile.tempdir = app.config['tmp']
            tmpdir = tempfile.mkdtemp()
            if call(['tar', 'xf', artifact, '--directory', tmpdir]):
                app.log(this, 'Problem unpacking', artifact)
                return False
            try:
                os.rename(tmpdir, unpackdir)
            except:
                # corner case... if we are here ybd is multi-instance, this
                # artifact was uploaded from somewhere, and more than one
                # instance is attempting to unpack. another got there first
                pass
        return os.path.join(cachedir, cache_key(defs, this))

    return False

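# A minimal standalone sketch of the unpack-then-rename pattern used in
# get_cache() above: extract into a private temp dir, then publish with a
# single os.rename(), so the unpacked dir is either absent or complete and
# concurrent instances never see a half-extracted tree. The names here are
# illustrative, not part of ybd.
import os
import shutil
import tempfile


def publish_atomically(build_tree, destination):
    # stage in the destination's parent so the final rename stays on one
    # filesystem (a cross-device rename would raise and not be atomic)
    tmpdir = tempfile.mkdtemp(dir=os.path.dirname(destination))
    staging = os.path.join(tmpdir, 'staging')
    shutil.copytree(build_tree, staging)
    try:
        os.rename(staging, destination)  # atomic publish
    except OSError:
        pass  # another process published first; our copy is redundant
    shutil.rmtree(tmpdir, ignore_errors=True)
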
def update_mirror(name, repo, gitdir):
    with app.chdir(gitdir), open(os.devnull, "w") as fnull:
        app.log(name, 'Refreshing mirror for %s' % repo)
        repo_url = get_repo_url(repo)
        if call(['git', 'fetch', repo_url, '+refs/*:refs/*', '--prune'],
                stdout=fnull, stderr=fnull):
            app.log(name, 'Git update mirror failed', repo, exit=True)

def extract_commit(name, repo, ref, target_dir):
    '''Check out a single commit (or tree) from a Git repo.

    The checkout() function actually clones the entire repo, so this
    function is much quicker when you don't need to copy the whole repo
    into target_dir.

    '''
    gitdir = os.path.join(app.config['gits'], get_repo_name(repo))
    if not os.path.exists(gitdir):
        mirror(name, repo)
    elif not mirror_has_ref(gitdir, ref):
        update_mirror(name, repo, gitdir)

    with tempfile.NamedTemporaryFile() as git_index_file:
        git_env = os.environ.copy()
        git_env['GIT_INDEX_FILE'] = git_index_file.name
        git_env['GIT_WORK_TREE'] = target_dir

        app.log(name, 'Extracting commit', ref)
        if call(['git', 'read-tree', ref], env=git_env, cwd=gitdir):
            app.log(name, 'git read-tree failed for', ref, exit=True)
        app.log(name, 'Then checkout index', ref)
        if call(['git', 'checkout-index', '--all'], env=git_env, cwd=gitdir):
            app.log(name, 'Git checkout-index failed for', ref, exit=True)
        app.log(name, 'Done', ref)

    utils.set_mtime_recursively(target_dir)

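# The GIT_INDEX_FILE / GIT_WORK_TREE plumbing trick in extract_commit()
# above also works outside ybd. A hedged standalone sketch (the function
# name is illustrative; assumes `git` is on PATH and `gitdir` is a local
# clone or mirror):
import os
import subprocess
import tempfile


def extract_tree(gitdir, ref, target_dir):
    if not os.path.isdir(target_dir):
        os.makedirs(target_dir)
    with tempfile.NamedTemporaryFile() as index:
        env = os.environ.copy()
        env['GIT_INDEX_FILE'] = index.name
        env['GIT_WORK_TREE'] = target_dir
        # populate a throwaway index from the ref, then write its files
        # out, without touching the repo's own index or work tree
        subprocess.check_call(['git', 'read-tree', ref], env=env, cwd=gitdir)
        subprocess.check_call(['git', 'checkout-index', '--all'],
                              env=env, cwd=gitdir)
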
def load_manifest(defs, target):
    cachepath, cachedir = os.path.split(cache.get_cache(defs, target))
    metafile = cachepath + ".meta"
    metadata = None
    definition = defs.get(target)
    name = definition['name']
    path = None

    if type(target) is str:
        path = target
    else:
        path = target['name']

    try:
        with open(metafile, "r") as f:
            metadata = yaml.safe_load(f)
    except:
        app.log(name, 'WARNING: problem loading metadata', metafile)
        return None

    if metadata:
        app.log(name, 'loaded metadata for', path)
        defs.set_member(path, '_loaded', True)
        if metadata.get('products'):
            defs.set_member(path, '_artifacts', metadata['products'])

def cache_key(defs, this):
    definition = defs.get(this)
    if definition is None:
        app.exit(this, 'ERROR: No definition found for', this)
    if definition.get('cache') == 'calculating':
        app.exit(this, 'ERROR: recursion loop for', this)
    if definition.get('cache'):
        return definition['cache']
    if definition.get('arch', app.config['arch']) != app.config['arch']:
        return False

    definition['cache'] = 'calculating'

    if definition.get('repo') and not definition.get('tree'):
        definition['tree'] = repos.get_tree(definition)

    hash_factors = {'arch': app.config['arch']}

    for factor in definition.get('build-depends', []):
        hash_factors[factor] = cache_key(defs, factor)

    for factor in definition.get('contents', []):
        hash_factors[factor] = cache_key(defs, factor)

    for factor in ['tree'] + defs.defaults.build_steps:
        if definition.get(factor):
            hash_factors[factor] = definition[factor]

    def hash_system_recursively(system):
        factor = system.get('path', 'BROKEN')
        hash_factors[factor] = cache_key(defs, factor)
        for subsystem in system.get('subsystems', []):
            hash_system_recursively(subsystem)

    if definition.get('kind') == 'cluster':
        for system in definition.get('systems', []):
            hash_system_recursively(system)

    result = json.dumps(hash_factors, sort_keys=True).encode('utf-8')

    safename = definition['name'].replace('/', '-')
    definition['cache'] = safename + "." + hashlib.sha256(result).hexdigest()
    app.config['total'] += 1
    if not get_cache(defs, this) and definition.get('kind') != 'cluster':
        app.config['tasks'] += 1
    app.log(definition, 'Cache_key is', definition['cache'])

    # If you want to catalog the artifacts for a system, do so
    if app.config.get('cache-log'):
        cache_list[definition.get('name')] = definition.get('cache')
        if definition.get('kind') == 'system':
            with open(app.config.get('cache-log'), 'w') as f:
                f.write(json.dumps(cache_list, indent=4))
            app.log('cache-log', 'cache logged to',
                    app.config.get('cache-log'))

    return definition['cache']

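# cache_key() above derives a stable artifact identity by hashing a
# canonical JSON dump of everything that can affect the build. A minimal
# self-contained sketch of the same idea (the helper name and fields are
# illustrative):
import hashlib
import json


def content_key(name, factors):
    # sort_keys makes the serialisation canonical, so logically equal
    # inputs always hash to the same key
    blob = json.dumps(factors, sort_keys=True).encode('utf-8')
    return name + '.' + hashlib.sha256(blob).hexdigest()

# content_key('gcc', {'arch': 'x86_64', 'tree': 'abc123'}) is reproducible
# across runs and machines, which is what makes remote cache hits possible.
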
def get_build_commands(defs, this):
    '''Get commands specified in 'this', plus commands implied by
       build-system

    The containing definition may point to another definition file (using
    the 'path' field in YBD's internal data model) that contains build
    instructions, or it may only specify a predefined build system, using
    the 'build-system' field.

    The definition containing build instructions can specify a predefined
    build-system and then override some or all of the command sequences it
    defines.

    If the definition file doesn't exist and no build-system is specified,
    this function will scan the contents of the checked-out source repo and
    try to autodetect what build system is used.

    '''
    if this.get('kind') == "system":
        # Systems must run their integration scripts as install commands
        this['install-commands'] = gather_integration_commands(defs, this)
        return

    if this.get('build-system') or os.path.exists(this['path']):
        build_system = this.get('build-system', 'manual')
        app.log(this, 'Defined build system is', build_system)
    else:
        files = os.listdir(this['build'])
        build_system = defs.defaults.detect_build_system(files)
        app.log(this, 'Autodetected build system is', build_system)

    for build_step in defs.defaults.build_steps:
        if this.get(build_step) is None:
            this[build_step] = \
                defs.defaults.build_systems[build_system].get(build_step, [])

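# A hedged sketch of what a detect_build_system() helper could look like:
# check the checked-out file list against marker files, first match wins.
# The marker files and build-system names below are assumptions for
# illustration, not ybd's actual defaults table:
def detect_build_system_sketch(files):
    markers = [
        ('autotools', ['configure', 'configure.ac', 'configure.in']),
        ('cmake', ['CMakeLists.txt']),
        ('python-distutils', ['setup.py']),
        ('make', ['Makefile', 'makefile', 'GNUmakefile']),
    ]
    for name, candidates in markers:
        if any(c in files for c in candidates):
            return name
    return 'manual'  # fall back to explicit commands in the definition
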
def build(defs, this):
    '''Actually create an artifact and add it to the cache

    This is what actually runs ./configure, make, make install (for example)
    By the time we get here, all dependencies for 'this' have been assembled.

    '''
    if this.get('build-mode') != 'bootstrap':
        sandbox.ldconfig(this)

    if this.get('repo'):
        repos.checkout(this['name'], this['repo'], this['ref'], this['build'])

    get_build_commands(defs, this)
    env_vars = sandbox.env_vars_for_build(defs, this)

    app.log(this, 'Logging build commands to %s' % this['log'])
    for build_step in defs.defaults.build_steps:
        if this.get(build_step):
            app.log(this, 'Running', build_step)
        for command in this.get(build_step, []):
            if command is False:
                command = "false"
            elif command is True:
                command = "true"
            sandbox.run_sandboxed(
                this, command, env=env_vars,
                allow_parallel=('build' in build_step))

    if this.get('devices'):
        sandbox.create_devices(this)

    with open(this['log'], "a") as logfile:
        logfile.write('Elapsed_time: %s\n' % app.elapsed(this['start-time']))

def __init__(self):
    ''' Load all definitions from `cwd` tree. '''
    if self.__definitions != {}:
        return

    json_schema = self._load(app.settings.get('json-schema'))
    definitions_schema = self._load(app.settings.get('defs-schema'))
    if json_schema and definitions_schema:
        import jsonschema as js
        js.validate(json_schema, json_schema)
        js.validate(definitions_schema, json_schema)

    things_have_changed = not self._check_trees()
    for dirname, dirnames, filenames in os.walk('.'):
        if '.git' in dirnames:
            dirnames.remove('.git')
        for filename in filenames:
            if filename.endswith(('.def', '.morph')):
                contents = self._load(os.path.join(dirname, filename))
                if contents is not None:
                    if things_have_changed and definitions_schema:
                        app.log(filename, 'Validating schema')
                        js.validate(contents, definitions_schema)
                    self._tidy(contents)

    if self._check_trees():
        for name in self.__definitions:
            self.__definitions[name]['tree'] = self.__trees.get(name)

def cache(defs, this):
    if get_cache(defs, this):
        app.log(this, "Bah! I could have cached", cache_key(defs, this))
        return
    tempfile.tempdir = app.config["tmp"]
    tmpdir = tempfile.mkdtemp()
    cachefile = os.path.join(tmpdir, cache_key(defs, this))
    if this.get("kind") == "system":
        utils.hardlink_all_files(this["install"], this["sandbox"])
        shutil.rmtree(this["install"])
        shutil.rmtree(this["build"])
        utils.set_mtime_recursively(this["sandbox"])
        utils.make_deterministic_tar_archive(cachefile, this["sandbox"])
        os.rename("%s.tar" % cachefile, cachefile)
    else:
        utils.set_mtime_recursively(this["install"])
        utils.make_deterministic_gztar_archive(cachefile, this["install"])
        os.rename("%s.tar.gz" % cachefile, cachefile)

    unpack(defs, this, cachefile)

    if (app.config.get("kbas-password", "insecure") != "insecure" and
            app.config.get("kbas-url", "http://foo.bar/") != "http://foo.bar/"):
        if this.get("kind") != "cluster":
            with app.timer(this, "upload"):
                upload(defs, this)

def get_metadata(dn):
    '''Load an individual .meta file

    The .meta file is expected to be in the .unpacked/baserock directory
    of the built artifact

    '''
    try:
        with open(path_to_metafile(dn), "r") as f:
            metadata = yaml.safe_load(f)
        log(dn, 'Loaded metadata', dn['path'], verbose=True)
        return metadata
    except:
        log(dn, 'WARNING: problem loading metadata', dn)
        return None

def on_toggle_bookmark(json):
    # We need to custom-load the user based on the jwt token passed up
    user = None
    try:
        user_id = jwt.decode(json['user_token'],
                             app.config['SECRET_KEY'],
                             algorithms=['HS256'])['id']
        user = load_user(user_id)
    except:
        pass  # leave user set to None
    log('c_toggle_bookmark', current_user, json['tower_id'])
    tower = towers[json['tower_id']]
    if user is None:
        # token was missing or invalid, so there is no user to update
        return
    user.toggle_bookmark(tower)

def on_user_left(json):
    log('c_user_left', json)
    tower_id = json['tower_id']
    tower = towers[tower_id]

    # We need to custom-load the user based on the jwt token passed up
    user = None
    if not json['anonymous_user']:
        try:
            user_id = jwt.decode(json['user_token'],
                                 app.config['SECRET_KEY'],
                                 algorithms=['HS256'])['id']
            user = load_user(user_id)
        except:
            user_id = session.get('user_id')  # leave user set to None
    else:
        user_id = session.get('user_id')  # leave user set to None

    if user_id is None:
        return

    if not user:
        tower.remove_observer(user_id)
        emit('s_set_observers', {'observers': tower.observers},
             broadcast=True, include_self=False, room=tower_id)
        return

    tower.remove_user(user_id)
    emit('s_user_left', {'user_name': user.username},
         broadcast=True, include_self=True, room=tower_id)

    # Now that the user is gone, check if there are any hosts left.
    # If not, make sure the tower is not in host mode.
    if not tower.host_present():
        tower.host_mode = False
        emit('s_host_mode', {'tower_id': tower_id, 'new_mode': False},
             broadcast=True, include_self=True, room=tower_id)

def main():
    config = json.loads(open(real_path('/config/config.json')).read())
    tunnel_type = str(config['tunnel_type_external'])
    inject_host = str(config['inject_host_external'])
    inject_port = int(config['inject_port_external'])
    socks5_port = config['socks5_port_external']

    app.log('Inject set to {inject_host} port {inject_port}'.format(
        inject_host=inject_host, inject_port=inject_port), status='INFO')

    ssh_clients = app.ssh_clients(tunnel_type, inject_host, inject_port,
                                  socks5_port)
    ssh_clients.accounts = app.generate_accounts(
        app.convert_hostnames(real_path('/database/accounts.json')))
    ssh_clients.start()

def remove_label(self, label):
    new_imgs = []
    new_labels = []
    for i, l in zip(self.images, self.labels):
        if l != label:
            new_imgs.append(i)
            new_labels.append(l)
    self.images = new_imgs
    self.labels = new_labels

    if not self.images and not self.labels:
        # Reset the recogniser
        self.recognizer = self._new_recognizer()
    else:
        # Train the recogniser again
        np_images = image.image_list_to_numpy_list(self.images)
        np_labels = np.array(self.labels)
        self.recognizer.train(np_images, np_labels)
    app.log(self.TAG, 'Removed label -', label)

def create_devices(dn):
    perms_mask = stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO
    for device in dn['devices']:
        destfile = os.path.join(dn['install'], './' + device['filename'])
        mode = int(device['permissions'], 8) & perms_mask
        if device['type'] == 'c':
            mode = mode | stat.S_IFCHR
        elif device['type'] == 'b':
            mode = mode | stat.S_IFBLK
        else:
            raise IOError('Cannot create device node %s, '
                          'unrecognized device type "%s"'
                          % (destfile, device['type']))
        app.log(dn, "Creating device node", destfile)
        os.mknod(destfile, mode,
                 os.makedev(device['major'], device['minor']))
        os.chown(destfile, device['uid'], device['gid'])

def on_bell_rung(event_dict):
    log('c_bell_rung', event_dict)
    disagreement = False
    cur_bell = event_dict["bell"]
    tower_id = event_dict["tower_id"]
    cur_tower = towers[tower_id]
    bell_state = cur_tower.bell_state
    if bell_state[cur_bell - 1] is event_dict["stroke"]:
        bell_state[cur_bell - 1] = not bell_state[cur_bell - 1]
    else:
        log('Current stroke disagrees between server and client')
        disagreement = True
    emit('s_bell_rung',
         {"global_bell_state": bell_state,
          "who_rang": cur_bell,
          "disagree": disagreement},
         broadcast=True, include_self=True, room=tower_id)

def _insert(self, this):
    for i, definition in enumerate(self.__definitions):
        if definition['name'] == this['name']:
            if definition.get('ref') is None or this.get('ref') is None:
                for key in this:
                    definition[key] = this[key]
                return

            for key in this:
                if key == 'morph' or this[key] is None:
                    continue
                if definition[key] != this[key]:
                    app.log(this, 'WARNING: multiple definitions of', key)
                    app.log(this, '%s | %s' % (definition[key], this[key]))

    self.__definitions.append(this)

def scan_webcam_for_face():
    TAG = 'FaceScanner'
    face_img_list = []
    while True:
        success, img = camera.get_raw_image()
        if success:
            # Try to detect a face
            gray_img = image.to_grayscale(img)
            has_face, face_area = detect_face(gray_img)
            if has_face:
                # Crop the image around face area
                face_img = image.crop_image(gray_img, face_area)
                face_img_list.append(face_img)
                app.log(TAG, 'Face added', len(face_img_list))
                if len(face_img_list) == 10:
                    # Scan complete; exit the loop and return
                    break
    return face_img_list

def _set_trees(self):
    '''Use the tree values from .trees file, to save time'''
    try:
        with open(os.path.join(config['artifacts'], '.trees')) as f:
            text = f.read()
        self._trees = yaml.safe_load(text)
        count = 0
        for path in self._data:
            dn = self._data[path]
            if dn.get('ref') and self._trees.get(path):
                if dn['ref'] == self._trees.get(path)[0]:
                    dn['tree'] = self._trees.get(path)[1]
                    count += 1
        log('DEFINITIONS', 'Re-used %s entries from .trees file' % count)
    except:
        log('DEFINITIONS', 'WARNING: problem with .trees file')
        pass

def add_person(self, name, imgs):
    self.label_counter += 1
    label = self.label_counter
    p = Person(label, name)  # Create the person

    # Add the person to the database
    self.person_list.append(p)
    with open(self.csv_file, 'a') as c:
        w = csv.DictWriter(c, fieldnames=self.FIELD)
        w.writerow({'label': p.get_label(), 'name': p.get_name()})
    app.log(self.TAG, 'Added -', p.get_name())

    # Lastly, train the recognizer with the new person
    labels = [p.get_label()] * len(imgs)
    self.recognizer.train(imgs, labels)
    return p

def run_logged(this, cmd_list, config=''):
    app.log_env(this['log'], '\n'.join(cmd_list))
    with open(this['log'], "a") as logfile:
        if call(cmd_list, stdout=logfile, stderr=logfile):
            app.log(this, 'ERROR: command failed in directory %s:\n\n' %
                    os.getcwd(), ' '.join(cmd_list))
            app.log(this, 'ERROR: Containerisation settings:\n\n', config)
            app.log(this, 'ERROR: Path:\n\n', os.environ['PATH'])
            app.log(this, 'ERROR: log file is at', this['log'])
            raise SystemExit

def deploy_system(defs, system_spec, parent_location=''):
    '''Deploy a system and subsystems recursively.

    Takes a system spec (i.e. an entry in the "systems" list in a cluster
    definition), and optionally a path to a parent system tree. If
    `parent_location` is given then the `location` given in the cluster
    definition for the subsystem is appended to `parent_location`, with
    the result being used as the location for the deployment extensions.

    '''
    system = defs.get(system_spec['path'])
    deploy_defaults = system_spec.get('deploy-defaults')

    sandbox.setup(system)
    app.log(system, 'Extracting system artifact into', system['sandbox'])
    with open(cache.get_cache(defs, system), 'r') as artifact:
        call(['tar', 'x', '--directory', system['sandbox']],
             stdin=artifact)

    for subsystem in system_spec.get('subsystems', []):
        if deploy_defaults:
            subsystem = dict(deploy_defaults.items() + subsystem.items())
        deploy_system(defs, subsystem, parent_location=system['sandbox'])

    for name, deployment in system_spec.get('deploy', {}).iteritems():
        method = deployment.get('type') or deployment.get('upgrade-type')
        method = os.path.basename(method)
        if deploy_defaults:
            deployment = dict(deploy_defaults.items() + deployment.items())
        do_deployment_manifest(system, deployment)
        if parent_location:
            location = deployment.get('location') or \
                deployment.get('upgrade-location')
            deployment['location'] = os.path.join(parent_location,
                                                  location.lstrip('/'))
        try:
            sandbox.run_extension(system, deployment, 'check', method)
        except KeyError:
            app.log(system, "Couldn't find a check extension for", method)

        for ext in system.get('configuration-extensions', []):
            sandbox.run_extension(system, deployment, 'configure',
                                  os.path.basename(ext))
        os.chmod(system['sandbox'], 0o755)
        sandbox.run_extension(system, deployment, 'write', method)

    app.remove_dir(system['sandbox'])

def build(this):
    '''Actually create an artifact and add it to the cache

    This is what actually runs ./configure, make, make install (for example)
    By the time we get here, all dependencies for 'this' have been assembled.

    '''
    app.log(this, 'Start build')
    defs = Definitions()
    if this.get('repo'):
        repos.checkout(this['name'], this['repo'], this['ref'],
                       this['build'])

    get_build_system_commands(defs, this)
    for build_step in buildsystem.build_steps:
        if this.get(build_step):
            app.log(this, 'Running', build_step)
        for command in this.get(build_step, []):
            sandbox.run_sandboxed(this, command)

def increment_day(day, last_day_of_classes, result, agenda_type):
    """ Given a day (T/W/TH), increment it to the next class (W/TH/T) """
    if agenda_type == 'la':
        day += timedelta(days=7)
        return day
    if day.weekday() in [1, 2]:  # Tuesday or Wednesday
        day += timedelta(days=1)  # -> Wednesday or Thursday
    else:  # Thursday
        day += timedelta(days=5)  # -> next Tuesday
    if day > last_day_of_classes:
        for r in result:
            log(r)
    assert day <= last_day_of_classes, \
        f'Day {day} falls after the last day of classes {last_day_of_classes}'
    return day

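# A quick usage sketch for increment_day() above, assuming it is in scope.
# 2024-09-03 is a Tuesday, the dates stay well before the end of term so
# the assert/log path is never hit, and 'lecture' stands in for any
# agenda_type other than 'la' (the value itself is hypothetical):
from datetime import date

d = date(2024, 9, 3)                  # Tuesday
end = date(2024, 12, 5)
for expected in [date(2024, 9, 4),    # -> Wednesday
                 date(2024, 9, 5),    # -> Thursday
                 date(2024, 9, 10)]:  # -> next Tuesday
    d = increment_day(d, end, [], 'lecture')
    assert d == expected
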
def __init__(self, directory='.'):
    '''Load all definitions from a directory tree.'''
    self._data = {}
    self._trees = {}
    self.defaults = Defaults()
    config['cpu'] = self.defaults.cpus.get(config['arch'], config['arch'])
    self.parse_files(directory)
    self._check_trees()

    for path in self._data:
        try:
            this = self._data[path]
            if this.get('ref') and self._trees.get(path):
                if this['ref'] == self._trees.get(path)[0]:
                    this['tree'] = self._trees.get(path)[1]
        except:
            log('DEFINITIONS', 'WARNING: problem with .trees file')
            pass

def cookie2user():
    cookies = request.cookies.get(Config.COOKIE_NAME)
    log('cookie', cookies)
    if not cookies:
        return None
    L = cookies.split('-')
    if len(L) != 3:
        return None
    uid, expires, sha1 = L
    if int(expires) < time.time():
        return None
    user = User.query.get(uid)
    if not user:
        return None
    s = '%s-%s-%s-%s' % (user.id, user.passwd, expires, Config.COOKIE_KEY)
    if sha1 == hashlib.sha1(s.encode('utf-8')).hexdigest():
        log('verification succeeded', user.__str__())
        return user

def claim(dn):
    with open(lockfile(dn), 'a') as l:
        try:
            fcntl.flock(l, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except Exception as e:
            if e.errno in (errno.EACCES, errno.EAGAIN):
                # flock() will report EACCES or EAGAIN when the lock fails.
                raise RetryException(dn)
            else:
                log(dn, 'ERROR: surprise exception in assembly', '')
                import traceback
                traceback.print_exc()
                log(dn, 'Sandbox debris at', dn['sandbox'], exit=True)
        try:
            yield
        finally:
            if os.path.isfile(lockfile(dn)):
                os.remove(lockfile(dn))

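# A minimal standalone sketch of the non-blocking flock pattern claim()
# uses above: the first process to take LOCK_EX wins, and everyone else
# gets EAGAIN immediately instead of queueing (the helper name is
# illustrative):
import errno
import fcntl


def try_claim(path):
    handle = open(path, 'a')
    try:
        fcntl.flock(handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
        return handle  # keep it open: closing the file releases the lock
    except IOError as e:
        handle.close()
        if e.errno in (errno.EACCES, errno.EAGAIN):
            return None  # someone else holds the lock
        raise
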
def setup(this):
    currentdir = os.getcwd()
    currentenv = dict(os.environ)
    tempfile.tempdir = app.settings['staging']
    this['assembly'] = tempfile.mkdtemp()
    this['build'] = os.path.join(this['assembly'], this['name'] + '.build')
    this['install'] = os.path.join(this['assembly'], this['name'] + '.inst')
    this['tmp'] = os.path.join(this['assembly'], 'tmp')
    for directory in ['build', 'install', 'tmp']:
        os.makedirs(this[directory])
    this['log'] = os.path.join(app.settings['artifacts'],
                               this['cache'] + '.build-log')

    try:
        build_env = clean_env(this)
        assembly_dir = this['assembly']
        for directory in ['dev', 'tmp']:
            call(['mkdir', '-p', os.path.join(assembly_dir, directory)])
        devnull = os.path.join(assembly_dir, 'dev/null')
        if not os.path.exists(devnull):
            call(['sudo', 'mknod', devnull, 'c', '1', '3'])
            call(['sudo', 'chmod', '666', devnull])

        for key, value in (currentenv.items() + build_env.items()):
            if key in build_env:
                os.environ[key] = build_env[key]
            else:
                os.environ.pop(key)

        os.chdir(this['assembly'])
        app.log(this, 'Assembly sandbox is at', this['assembly'])
        yield
    finally:
        for key, value in currentenv.items():
            if value:
                os.environ[key] = value
            else:
                if os.environ.get(key):
                    os.environ.pop(key)
        os.chdir(currentdir)
        app.log(this, 'Assembly directory is still at', this['assembly'])

def write_stratum_metafiles(stratum):
    '''Write the .meta files for a stratum to the baserock dir

    The split rules are used to divide up the installed components into
    artifacts in the 'products' list in the stratum .meta file. Each
    artifact contains a list of chunk artifacts which match the stratum
    splitting rules

    '''
    log(stratum['name'], 'Splitting', stratum.get('kind'))
    rules, splits = compile_rules(stratum)

    for item in stratum['contents']:
        chunk = app.defs.get(item)
        if chunk.get('build-mode', 'staging') == 'bootstrap':
            continue

        metadata = get_metadata(chunk)
        split_metadata = {'ref': metadata.get('ref'),
                          'repo': metadata.get('repo'),
                          'products': []}

        if config.get('artifact-version', 0) not in range(0, 1):
            split_metadata['cache'] = metadata.get('cache')

        chunk_artifacts = app.defs.get(chunk).get('artifacts', {})
        for artifact, target in chunk_artifacts.items():
            splits[target].append(artifact)

        for product in metadata['products']:
            for artifact, rule in rules:
                if rule.match(product['artifact']):
                    split_metadata['products'].append(product)
                    splits[artifact].append(product['artifact'])
                    break

        meta = os.path.join(stratum['baserockdir'], chunk['name'] + '.meta')
        with open(meta, "w") as f:
            yaml.safe_dump(split_metadata, f, default_flow_style=False)

    write_metafile(rules, splits, stratum)

def compose(dn):
    '''Work through defs tree, building and assembling until target exists'''
    if type(dn) is not dict:
        dn = app.defs.get(dn)

    # if we can't calculate cache key, we can't create this component
    if cache_key(dn) is False:
        if 'tried' not in dn:
            log(dn, 'No cache_key, so skipping compose')
            dn['tried'] = True
        return False

    # if dn is already cached, we're done
    if get_cache(dn):
        return cache_key(dn)

    log(dn, "Composing", dn['name'], verbose=True)

    # if we have a kbas, look there to see if this component exists
    if config.get('kbas-url') and not config.get('reproduce'):
        with claim(dn):
            if get_remote(dn):
                config['counter'].increment()
                return cache_key(dn)

    # we only work with user-specified arch
    if 'arch' in dn and dn['arch'] != config['arch']:
        return None

    # Create composite components (strata, systems, clusters)
    systems = dn.get('systems', [])
    shuffle(systems)
    for system in systems:
        for s in system.get('subsystems', []):
            subsystem = app.defs.get(s['path'])
            compose(subsystem)
        compose(system['path'])

    with sandbox.setup(dn):
        install_contents(dn)
        build(dn)  # bring in 'build-depends', and run make

    return cache_key(dn)

def log_changes(dn, tmpdir, old_defs, ref):
    do_git_log = False
    old_def = old_defs.get(dn['path'])
    log_file = os.path.join(tmpdir, dn['name'])
    with open(log_file, 'w') as f:
        keys = set(dn) - set(['tree', 'cache'])
        for key in keys:
            try:
                old_value = old_def.get(key)
            except:
                old_value = None
            if dn[key] != old_value:
                f.write('[%s] Value changed: %s\n' % (dn['path'], key))
                if type(dn[key]) is str:
                    f.write('%s | %s\n' % (old_value, dn[key]))
                if type(dn[key]) is not str and type(dn[key]) is not float:
                    if old_value:
                        for x in old_value:
                            f.write(repr(x))
                    f.write('\n vvv\n')
                    if dn[key]:
                        for x in dn[key]:
                            f.write(repr(x))
                f.write('\n\n')

        if dn.get('kind', 'chunk') == 'chunk' and config['release-command']:
            log(dn, 'Logging git change history', tmpdir)
            try:
                gitdir = os.path.join(config['gits'],
                                      get_repo_name(dn['repo']))
                if not os.path.exists(gitdir):
                    mirror(dn['name'], dn['repo'])
                elif not mirror_has_ref(gitdir, ref):
                    update_mirror(dn['name'], dn['repo'], gitdir)
                with chdir(gitdir):
                    text = dn['ref'] + '..'
                    if old_def and old_def.get('ref'):
                        text += old_def['ref']
                    f.write(check_output(config['release-command'] + [text]))
            except:
                log(dn, 'WARNING: Failed to log git changes')

    if os.stat(log_file).st_size == 0:
        os.remove(log_file)

def run_program():
    print welcome
    print instructions
    while True:
        i = raw_input("-> ")
        if i == 'a':
            add_person()
        elif i == 'l':
            list_all_persons()
        elif i == 'r':
            remove_person()
        elif i == 'p':
            predict_face_from_webcam()
        elif i == 'h':
            print instructions
        elif i == 'e':
            exit_program()
        else:
            app.log(TAG, 'Invalid option, try again or type h for help.')

def checkout_submodules(dn):
    app.log(dn, 'Checking git submodules')
    with open('.gitmodules', "r") as gitfile:
        # drop indentation in sections, as RawConfigParser cannot handle it
        content = '\n'.join([l.strip() for l in gitfile.read().splitlines()])
    io = StringIO(content)
    parser = RawConfigParser()
    parser.readfp(io)

    for section in parser.sections():
        # validate section name against the 'submodule "foo"' pattern
        submodule = re.sub(r'submodule "(.*)"', r'\1', section)
        path = parser.get(section, 'path')
        try:
            url = dn['submodules'][path]['url']
            app.log(dn, 'Processing submodule %s from' % path, url)
        except:
            url = parser.get(section, 'url')
            app.log(dn, 'WARNING: fallback to submodule %s from' % path, url)

        try:
            # list objects in the parent repo tree to find the commit
            # object that corresponds to the submodule
            commit = check_output(['git', 'ls-tree', dn['ref'], path])

            # read the commit hash from the output
            fields = commit.split()
            if len(fields) >= 2 and fields[1] == 'commit':
                submodule_commit = commit.split()[2]

                # fail if the commit hash is invalid
                if len(submodule_commit) != 40:
                    raise Exception

                fulldir = os.path.join(os.getcwd(), path)
                _checkout(dn['name'], url, submodule_commit, fulldir)
            else:
                app.log(dn, 'Skipping submodule %s, not a commit:' % path,
                        fields)
        except:
            app.log(dn, "Git submodules problem", exit=True)

def __init__(self, defs, component):
    if app.config['log-verbose'] and \
            app.config.get('last-retry-component') != component:
        app.log(component, 'Already downloading/building, so wait/retry')
    if app.config.get('last-retry'):
        wait = datetime.datetime.now() - app.config.get('last-retry')
        if wait.seconds < 1:
            with open(lockfile(defs, component), 'r') as l:
                call(['flock', '--shared', '--timeout',
                      app.config.get('timeout', '60'), str(l.fileno())])
    app.config['last-retry'] = datetime.datetime.now()
    app.config['last-retry-component'] = component
    for dirname in app.config['sandboxes']:
        app.remove_dir(dirname)
    app.config['sandboxes'] = []

def on_user_left(json):
    log('c_user_left', json)
    tower_id = json['tower_id']
    tower = towers[tower_id]
    user_id = current_user.id if not current_user.is_anonymous \
        else session['user_id']

    if current_user.is_anonymous:
        tower.remove_observer(user_id)
        emit('s_set_observers', {'observers': tower.observers},
             broadcast=True, include_self=False, room=tower_id)
        return

    tower.remove_user(user_id)
    emit('s_user_left', {'user_name': current_user.username},
         broadcast=True, include_self=True, room=tower_id)
    send_assignments(tower_id)

def cache_key(dn):
    if dn is None:
        app.log(dn, 'No definition found for', dn, exit=True)

    if type(dn) is not dict:
        dn = app.defs.get(dn)

    if dn.get('cache') == 'calculating':
        app.log(dn, 'Recursion loop for', dn, exit=True)

    if dn.get('cache'):
        return dn['cache']

    if dn.get('arch', app.config['arch']) != app.config['arch']:
        if 'tried' not in dn:
            dn['tried'] = True
            app.log(dn, 'No cache_key for arch %s mismatch' % dn['arch'],
                    app.config['arch'])
        return False

    dn['cache'] = 'calculating'

    key = 'no-build'
    if app.config.get('mode', 'normal') in ['keys-only', 'normal']:
        if dn.get('repo') and not dn.get('tree'):
            dn['tree'] = get_tree(dn)
        factors = hash_factors(dn)
        factors = json.dumps(factors, sort_keys=True).encode('utf-8')
        key = hashlib.sha256(factors).hexdigest()

    dn['cache'] = dn['name'] + "." + key

    app.config['total'] += 1
    x = 'x'
    if not get_cache(dn):
        x = ' '
        app.config['tasks'] += 1
        if dn.get('kind', 'chunk') == 'chunk':
            app.config['chunks'] += 1
        if dn.get('kind', 'chunk') == 'stratum':
            app.config['strata'] += 1
        if dn.get('kind', 'chunk') == 'system':
            app.config['systems'] += 1
    app.log('CACHE-KEYS', '[%s]' % x, dn['cache'])
    if app.config.get('manifest', False):
        update_manifest(dn, app.config['manifest'])

    if 'keys' in app.config:
        app.config['keys'] += [dn['cache']]
    return dn['cache']

def train(self, imgs, labels):
    np_images = image.image_list_to_numpy_list(imgs)
    np_labels = np.array(labels)
    try:
        # Try to update the recognizer first
        self.recognizer.update(np_images, np_labels)
        app.log(self.TAG, 'Update completed')
    except cv2.error as e:
        app.log(self.TAG, 'Error updating -', e)
        app.log(self.TAG, 'Attempting to train instead')
        self.recognizer.train(np_images, np_labels)
        app.log(self.TAG, 'Training completed')
    self.images.extend(imgs)
    self.labels.extend(labels)

def _load(self, path):
    '''Load a single definition file as a dict.

    The file is assumed to be yaml, and we insert the provided path into
    the dict keyed as 'path'.

    '''
    try:
        with open(path) as f:
            text = f.read()
        contents = yaml.safe_load(text)
    except:
        app.log('DEFINITIONS', 'WARNING: problem loading', path)
        return None
    if type(contents) is not dict:
        app.log('DEFINITIONS', 'WARNING: %s contents is not dict:' % path,
                str(contents)[0:50])
        return None
    contents['path'] = path[2:]
    return contents

def clear(deleted, artifact_dir):
    artifacts = utils.sorted_ls(artifact_dir)
    for artifact in artifacts:
        stat = os.statvfs(artifact_dir)
        free = stat.f_frsize * stat.f_bavail / 1000000000
        if free >= app.config.get('min-gigabytes', 10):
            app.log('SETUP', '%sGB is enough free space' % free)
            if deleted > 0:
                app.log('SETUP', 'Culled %s items in' % deleted,
                        artifact_dir)
            return True
        path = os.path.join(artifact_dir, artifact)
        if os.path.exists(os.path.join(path, artifact + '.unpacked')):
            path = os.path.join(path, artifact + '.unpacked')
        if os.path.exists(path) and artifact not in app.config['keys']:
            tmpdir = tempfile.mkdtemp()
            shutil.move(path, os.path.join(tmpdir, 'to-delete'))
            app.remove_dir(tmpdir)
            deleted += 1
    return False