def _prep_or_apply(self, builder, instr, role, path, is_prepare):
    """
    Prepare for (is_prepare=True) or apply (is_prepare=False) a
    chown/chgrp instruction over every file matching the filespec
    under 'path'.

    Preparation removes the matched files so a later rsync can
    recreate them; application runs the ownership change itself.
    """
    # NB: take care to apply a chown command to the file named,
    # even if it is a symbolic link (the default is --reference,
    # which would only apply the chown to the file the symbolic
    # link references)
    dp = filespec.FSFileSpecDataProvider(path)
    files = dp.abs_match(instr.filespec)
    if instr.new_user is None:
        cmd = ["chgrp", instr.new_group]
    elif instr.new_group is None:
        cmd = ["chown", "--no-dereference", instr.new_user]
    else:
        cmd = ["chown", "--no-dereference",
               "%s:%s"%(instr.new_user, instr.new_group)]
    for f in files:
        if is_prepare:
            # @TODO: This doesn't handle directories that have been
            # chowned and will collapse in a soggy heap. If support for
            # those is required, need to either:
            #   sudo rm -rf dir
            #   sudo chown -R <nonprivuser> dir
            # or just run the whole rsync under sudo.
            utils.run0(["rm", "-f", f])
        else:
            # BUG FIX: the original passed [cmd, f], nesting the command
            # list inside another list and handing run0 a malformed argv.
            utils.run0(cmd + [f])
def build_label(self, builder, label):
    """
    Build the relevant label.

    Dispatches on label.tag: Installed extracts the .deb into the
    package install directory and registers any left-behind
    instructions; PostInstalled optionally runs a post-install
    makefile; Clean/DistClean remove the object directory.
    """
    self.ensure_dirs(builder, label)
    tag = label.tag
    if (tag == utils.LabelTag.PreConfig):
        # Nothing to do.
        pass
    elif (tag == utils.LabelTag.Configured):
        pass
    elif (tag == utils.LabelTag.Built):
        pass
    elif (tag == utils.LabelTag.Installed):
        # Concoct a suitable dpkg command.
        inv = builder
        # Extract into the object directory .. so I can depend on them later.
        #  - actually, Debian packaging doesn't work like that. Rats.
        #  - rrw 2009-11-24
        #extract_into_obj(inv, self.co_name, label, self.pkg_file)
        inst_dir = inv.package_install_path(label)
        tmp = Label(utils.LabelType.Checkout, self.co_name, domain=label.domain)
        co_dir = inv.checkout_path(tmp)
        # Using dpkg doesn't work here for many reasons.
        # dpkg-deb -X unpacks the archive's filesystem tree into inst_dir.
        dpkg_cmd = ["dpkg-deb", "-X", os.path.join(co_dir, self.pkg_file),
                    inst_dir]
        utils.run0(dpkg_cmd)
        # Pick up any instructions that got left behind
        instr_file = self.instr_name
        if (instr_file is None):
            instr_file = "%s.instructions.xml"%(label.name)
        instr_path = os.path.join(co_dir, instr_file)
        if (os.path.exists(instr_path)):
            # We have instructions ..
            ifile = db.InstructionFile(instr_path)
            ifile.get()
            builder.instruct(label.name, label.role, ifile)
    elif (tag == utils.LabelTag.PostInstalled):
        if self.post_install_makefile is not None:
            inv = builder
            tmp = Label(utils.LabelType.Checkout, self.co_name, domain=label.domain)
            co_path = inv.checkout_path(tmp)
            with Directory(co_path):
                utils.run0(["make", "-f", self.post_install_makefile,
                            "%s-postinstall"%label.name])
    elif (tag == utils.LabelTag.Clean or tag == utils.LabelTag.DistClean):
        inv = builder
        # NOTE(review): os.path.join with a single argument is a no-op -
        # presumably a second path component was once intended here.
        admin_dir = os.path.join(inv.package_obj_path(label))
        utils.recursively_remove(admin_dir)
    else:
        raise utils.MuddleBug("Invalid tag specified for deb pkg %s"%(label))
def extract_into_obj(inv, co_name, label, pkg_file):
    """
    Extract the Debian package 'pkg_file' (found in checkout 'co_name')
    into the label's object directory, then mirror its include/lib/share
    trees up to the top of the object directory.
    """
    tmp = Label(utils.LabelType.Checkout, co_name, domain=label.domain)
    co_dir = inv.checkout_path(tmp)
    obj_dir = inv.package_obj_path(label)
    dpkg_cmd = ["dpkg-deb", "-X", os.path.join(co_dir, pkg_file),
                os.path.join(obj_dir, "obj")]
    utils.run0(dpkg_cmd)
    # Now install any include or lib files ..
    installed_into = os.path.join(obj_dir, "obj")
    inc_dir = os.path.join(obj_dir, "include")
    lib_dir = os.path.join(obj_dir, "lib")
    # BUG FIX: the original rebound lib_dir to the "share" path, so the
    # lib directory itself was never ensured; use a distinct variable.
    share_dir = os.path.join(obj_dir, "share")
    utils.ensure_dir(inc_dir)
    utils.ensure_dir(lib_dir)
    utils.ensure_dir(share_dir)
    # Copy everything in usr/include ..
    for i in (("include", "include"), ("lib", "lib"),
              ("usr/include", "include"), ("usr/lib", "lib"),
              ("usr/share", "share")):
        (src, dst) = i
        src_path = os.path.join(installed_into, src)
        dst_path = os.path.join(obj_dir, dst)
        if (os.path.exists(src_path) and os.path.isdir(src_path)):
            utils.copy_without(src_path, dst_path, without=None,
                              object_exactly=True)
def ssh_remote_cmd(self, remote_cmd, dirs=None, dry_run=False): """SSH to our location, and run the command over the directories. * 'remote_cmd' is the words that make up the command (as a list). * 'dirs' is the list of directories we want to pass to the command. If this is None, or an empty list, then we won't do that... """ parts = ['ssh'] if self.port: parts.append('-p %s'%self.port) parts.append(self.user_at_host) parts += remote_cmd cmd = ' '.join(parts) if dry_run: print "Would run: %s "%cmd if dirs: print "and pass it the following directories:" print "\n".join(dirs) elif dirs: print "> %s"%cmd p = subprocess.Popen(parts, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) stdoutdata, stderrdata = p.communicate("\n".join(dirs) +'\n') if p.returncode: print >> sys.stderr, "Error invoking the remote script (rc=%d):"%p.returncode print >> sys.stderr, stdoutdata raise GiveUp("Error invoking the remote script, rc=%d"%p.returncode) print "Script exited successfully, output was:\n%s\n<<<END OUTPUT>>>\n"%stdoutdata else: run0(cmd)
def extract_into_obj(inv, co_name, label, pkg_file):
    """
    Extract the Debian package 'pkg_file' (found in checkout 'co_name')
    into the label's object directory, then mirror its include/lib/share
    trees up to the top of the object directory.
    """
    tmp = Label(utils.LabelType.Checkout, co_name, domain=label.domain)
    co_dir = inv.checkout_path(tmp)
    obj_dir = inv.package_obj_path(label)
    dpkg_cmd = [
        "dpkg-deb", "-X",
        os.path.join(co_dir, pkg_file),
        os.path.join(obj_dir, "obj")
    ]
    utils.run0(dpkg_cmd)
    # Now install any include or lib files ..
    installed_into = os.path.join(obj_dir, "obj")
    inc_dir = os.path.join(obj_dir, "include")
    lib_dir = os.path.join(obj_dir, "lib")
    # BUG FIX: the original rebound lib_dir to the "share" path, so the
    # lib directory itself was never ensured; use a distinct variable.
    share_dir = os.path.join(obj_dir, "share")
    utils.ensure_dir(inc_dir)
    utils.ensure_dir(lib_dir)
    utils.ensure_dir(share_dir)
    # Copy everything in usr/include ..
    for i in (("include", "include"), ("lib", "lib"),
              ("usr/include", "include"), ("usr/lib", "lib"),
              ("usr/share", "share")):
        (src, dst) = i
        src_path = os.path.join(installed_into, src)
        dst_path = os.path.join(obj_dir, dst)
        if (os.path.exists(src_path) and os.path.isdir(src_path)):
            utils.copy_without(src_path, dst_path, without=None,
                              object_exactly=True)
def deploy(self, builder, label, target_base):
    """
    Deploy each assembly into 'target_base'.

    A missing source is fatal only when the assembly asks for that;
    otherwise files and directories are transferred via rsync, a
    recursive copy, or a plain file copy as the assembly requests.
    """
    for asm in self.assemblies:
        src = os.path.join(asm.get_source_dir(builder), asm.from_rel)
        dst = os.path.join(target_base, asm.to_name)
        if not os.path.exists(src):
            if asm.fail_on_absent_source:
                raise GiveUp("Deployment %s: source object %s does not"
                             " exist."%(label.name, src))
            # Absent, and nobody minds - skip this assembly.
            continue
        if not os.path.isdir(src):
            # A single file: hand it to rsync, or copy it directly.
            if asm.using_rsync:
                utils.run_cmd("rsync -avz \"%s\" \"%s\""%(src,dst))
            else:
                utils.copy_file(src,dst,object_exactly=asm.copy_exactly)
        elif asm.using_rsync:
            # A directory, synced with rsync for speed.
            try:
                os.makedirs(dst)
            except OSError:
                pass
            target = dst
            if target[-1] != "/":
                target = target + "/"
            utils.run0("rsync -avz \"%s/.\" \"%s\""%(src,target))
        elif asm.recursive:
            utils.recursively_copy(src, dst, object_exactly=asm.copy_exactly)
        else:
            utils.copy_file(src, dst, object_exactly=asm.copy_exactly)
def ssh_remote_cmd(self, remote_cmd, dirs=None, dry_run=False): """SSH to our location, and run the command over the directories. * 'remote_cmd' is the words that make up the command (as a list). * 'dirs' is the list of directories we want to pass to the command. If this is None, or an empty list, then we won't do that... """ parts = ['ssh'] if self.port: parts.append('-p %s' % self.port) parts.append(self.user_at_host) parts += remote_cmd cmd = ' '.join(parts) if dry_run: print "Would run: %s " % cmd if dirs: print "and pass it the following directories:" print "\n".join(dirs) elif dirs: print "> %s" % cmd p = subprocess.Popen(parts, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) stdoutdata, stderrdata = p.communicate("\n".join(dirs) + '\n') if p.returncode: print >> sys.stderr, "Error invoking the remote script (rc=%d):" % p.returncode print >> sys.stderr, stdoutdata raise GiveUp("Error invoking the remote script, rc=%d" % p.returncode) print "Script exited successfully, output was:\n%s\n<<<END OUTPUT>>>\n" % stdoutdata else: run0(cmd)
def _prep_or_apply(self, builder, instr, role, path, is_prepare):
    """
    Prepare for (is_prepare=True) or apply (is_prepare=False) a
    chown/chgrp instruction over every file matching the filespec
    under 'path'.

    Preparation removes the matched files so a later rsync can
    recreate them; application runs the ownership change itself.
    """
    # NB: take care to apply a chown command to the file named,
    # even if it is a symbolic link (the default is --reference,
    # which would only apply the chown to the file the symbolic
    # link references)
    dp = filespec.FSFileSpecDataProvider(path)
    files = dp.abs_match(instr.filespec)
    if instr.new_user is None:
        cmd = ["chgrp", instr.new_group]
    elif instr.new_group is None:
        cmd = ["chown", "--no-dereference", instr.new_user]
    else:
        cmd = [
            "chown", "--no-dereference",
            "%s:%s" % (instr.new_user, instr.new_group)
        ]
    for f in files:
        if is_prepare:
            # @TODO: This doesn't handle directories that have been
            # chowned and will collapse in a soggy heap. If support for
            # those is required, need to either:
            #   sudo rm -rf dir
            #   sudo chown -R <nonprivuser> dir
            # or just run the whole rsync under sudo.
            utils.run0(["rm", "-f", f])
        else:
            # BUG FIX: the original passed [cmd, f], nesting the command
            # list inside another list and handing run0 a malformed argv.
            utils.run0(cmd + [f])
def deploy(self, builder, label):
    """
    Deploy every configured role into the deployment directory, then
    re-invoke muddle (under sudo if any instruction needs privilege)
    to apply recorded instructions.
    """
    deploy_dir = builder.deploy_path(label)
    # First off, delete the target directory
    utils.recursively_remove(deploy_dir)
    utils.ensure_dir(deploy_dir)
    for role, domain in self.roles:
        if domain:
            print "> %s: Deploying role %s in domain %s .. "%(label.name, role, domain)
        else:
            print "> %s: Deploying role %s .. "%(label.name, role)
        install_dir = builder.role_install_path(role, domain = domain)
        # object_exactly=True preserves the file objects (links etc.) as-is.
        utils.recursively_copy(install_dir, deploy_dir, object_exactly=True)

    # This is somewhat tricky as it potentially requires privilege elevation.
    # Privilege elevation is done by hooking back into ourselves via a
    # build command to a label we registered earlier.
    #
    # Note that you cannot split instruction application - once the first
    # privilege-requiring instruction is executed, all further instructions
    # may require privilege even if they didn't before (e.g. a chmod after
    # chown)

    # First off, do we need to at all?
    need_root_for = set()
    for role, domain in self.roles:
        lbl = depend.Label(utils.LabelType.Package, "*", role, "*", domain=domain)
        # NOTE(review): this passes label.domain where the deploy loop
        # above used the role's own 'domain' - confirm that is intended.
        install_dir = builder.role_install_path(role, domain = label.domain)
        instr_list = builder.load_instructions(lbl)
        for (lbl, fn, instr_file) in instr_list:
            # Obey this instruction?
            for instr in instr_file:
                iname = instr.outer_elem_name()
                if iname in self.app_dict:
                    if self.app_dict[iname].needs_privilege(builder, instr, role, install_dir):
                        need_root_for.add(iname)
                    # Deliberately do not break - we want to check everything for
                    # validity before acquiring privilege.
                else:
                    raise utils.GiveUp("File deployments don't know about " +
                                       "instruction %s"%iname +
                                       " found in label %s (filename %s)"%(lbl, fn))
    print "Rerunning muddle to apply instructions .. "
    permissions_label = depend.Label(utils.LabelType.Deployment,
                                     label.name,
                                     None, # XXX label.role,
                                     utils.LabelTag.InstructionsApplied,
                                     domain = label.domain)
    if need_root_for:
        print "I need root to do %s - sorry! - running sudo .."%(', '.join(sorted(need_root_for)))
        utils.run0("sudo %s buildlabel '%s'"%(builder.muddle_binary, permissions_label))
    else:
        utils.run0("%s buildlabel '%s'"%(builder.muddle_binary, permissions_label))
def apply(self, builder, instr, role, path):
    """Chmod every file matching the instruction's filespec under 'path'."""
    provider = filespec.FSFileSpecDataProvider(path)
    # @todo We _really_ need to use xargs here ..
    for target in provider.abs_match(instr.filespec):
        utils.run0(["chmod", instr.new_mode, target])
    return True
def build_label(self, builder, label):
    """
    Build the relevant label. We'll assume that the checkout actually exists.

    Runs the appropriate make target (config/build/install/clean/
    distclean) inside the checkout directory, and optionally rewrites
    autoconf-generated .pc/.la files after install.
    """
    tag = label.tag
    self.ensure_dirs(builder, label)
    # XXX We have no way of remembering a checkout in a different domain
    # XXX (from the label we're building) so for the moment we won't even
    # XXX try...
    tmp = Label(utils.LabelType.Checkout, self.co, domain=label.domain)
    co_path = builder.db.get_checkout_path(tmp)
    with Directory(co_path):
        # The environment amendment and makefile deduction must happen
        # inside the checkout directory, before any make is run.
        self._amend_env(co_path)
        makefile_name = deduce_makefile_name(self.makefile_name,
                                             self.per_role_makefiles,
                                             label.role)
        make_cmd = self._make_command(builder, makefile_name)
        if (tag == utils.LabelTag.PreConfig):
            # Preconfigure - nothing need be done
            pass
        elif (tag == utils.LabelTag.Configured):
            # We should probably do the configure thing ..
            if (self.has_make_config):
                utils.run0(make_cmd + ["config"])
        elif (tag == utils.LabelTag.Built):
            utils.run0(make_cmd)
        elif (tag == utils.LabelTag.Installed):
            utils.run0(make_cmd + ["install"])
        elif (tag == utils.LabelTag.PostInstalled):
            if (self.rewriteAutoconf):
                #print "> Rewrite autoconf for label %s"%(label)
                obj_path = builder.package_obj_path(label)
                #print ">obj_path = %s"%(obj_path)
                if (self.execRelPath is None):
                    sendExecPrefix = None
                else:
                    sendExecPrefix = os.path.join(obj_path, self.execRelPath)
                rewrite.fix_up_pkgconfig_and_la(builder, obj_path,
                                                execPrefix=sendExecPrefix)
        elif (tag == utils.LabelTag.Clean):
            utils.run0(make_cmd + ["clean"])
        elif (tag == utils.LabelTag.DistClean):
            utils.run0(make_cmd + ["distclean"])
        else:
            raise utils.MuddleBug("Invalid tag specified for "
                                  "MakePackage building %s" % (label))
def build_label(self, builder, label):
    """
    Actually install the dev package.

    Installed extracts the package(s) into the object directory and
    rewrites absolute links; PostInstalled optionally runs a
    post-install makefile and fixes up .pc/.la files; Clean removes
    the object directory.
    """
    self.ensure_dirs(builder, label)
    tag = label.tag
    if (tag == utils.LabelTag.PreConfig):
        # Nothing to do
        pass
    elif (tag == utils.LabelTag.Configured):
        pass
    elif (tag == utils.LabelTag.Built):
        pass
    elif (tag == utils.LabelTag.Installed):
        # Extract into /obj
        inv = builder
        extract_into_obj(inv, self.co_name, label, self.pkg_file)
        if (self.nonDevPkgFile is not None):
            # A dev package may ride along with its non-dev counterpart.
            extract_into_obj(inv, self.nonDevCoName, label, self.nonDevPkgFile)
        # Now we rewrite all the absolute links to be relative to the install
        # directory.
        rewrite_links(inv, label)
    elif (tag == utils.LabelTag.PostInstalled):
        if self.post_install_makefile is not None:
            inv = builder
            tmp = Label(utils.LabelType.Checkout, self.co_name, domain=label.domain)
            co_path = inv.checkout_path(tmp)
            with Directory(co_path):
                utils.run0([
                    "make", "-f", self.post_install_makefile,
                    "%s-postinstall" % (label.name)
                ])
        # .. and now we rewrite any pkgconfig etc. files left lying
        # about.
        obj_path = builder.package_obj_path(label)
        print "> Rewrite .pc and .la files in %s" % (obj_path)
        rewrite.fix_up_pkgconfig_and_la(builder, obj_path)
    elif (tag == utils.LabelTag.Clean or tag == utils.LabelTag.DistClean):
        # Just remove the object directory.
        inv = builder
        utils.recursively_remove(inv.package_obj_path(label))
    else:
        raise utils.MuddleBug("Invalid tag specified for deb pkg %s" % (label))
def build_label(self, builder, label):
    """
    Build the relevant label. We'll assume that the checkout actually exists.

    Runs the appropriate make target (config/build/install/clean/
    distclean) inside the checkout directory, and optionally rewrites
    autoconf-generated .pc/.la files after install.
    """
    tag = label.tag
    self.ensure_dirs(builder, label)
    # XXX We have no way of remembering a checkout in a different domain
    # XXX (from the label we're building) so for the moment we won't even
    # XXX try...
    tmp = Label(utils.LabelType.Checkout, self.co, domain=label.domain)
    co_path = builder.db.get_checkout_path(tmp)
    with Directory(co_path):
        # The environment amendment and makefile deduction must happen
        # inside the checkout directory, before any make is run.
        self._amend_env(co_path)
        makefile_name = deduce_makefile_name(self.makefile_name,
                                             self.per_role_makefiles,
                                             label.role)
        make_cmd = self._make_command(builder, makefile_name)
        if (tag == utils.LabelTag.PreConfig):
            # Preconfigure - nothing need be done
            pass
        elif (tag == utils.LabelTag.Configured):
            # We should probably do the configure thing ..
            if (self.has_make_config):
                utils.run0(make_cmd + ["config"])
        elif (tag == utils.LabelTag.Built):
            utils.run0(make_cmd)
        elif (tag == utils.LabelTag.Installed):
            utils.run0(make_cmd + ["install"])
        elif (tag == utils.LabelTag.PostInstalled):
            if (self.rewriteAutoconf):
                #print "> Rewrite autoconf for label %s"%(label)
                obj_path = builder.package_obj_path(label)
                #print ">obj_path = %s"%(obj_path)
                if (self.execRelPath is None):
                    sendExecPrefix = None
                else:
                    sendExecPrefix = os.path.join(obj_path, self.execRelPath)
                rewrite.fix_up_pkgconfig_and_la(builder, obj_path,
                                                execPrefix = sendExecPrefix)
        elif (tag == utils.LabelTag.Clean):
            utils.run0(make_cmd + ["clean"])
        elif (tag == utils.LabelTag.DistClean):
            utils.run0(make_cmd + ["distclean"])
        else:
            raise utils.MuddleBug("Invalid tag specified for "
                                  "MakePackage building %s"%(label))
def scp_remote_cmd(self, local_script, remote_script, dry_run=False): """SCP the given script to our location. """ parts = ['scp'] if self.port: parts.append('-P %s'%self.port) parts.append(local_script) parts.append('%s:%s'%(self.user_at_host, remote_script)) cmd = ' '.join(parts) if dry_run: print "Would run: %s"%cmd else: run0(cmd)
def scp_remote_cmd(self, local_script, remote_script, dry_run=False): """SCP the given script to our location. """ parts = ['scp'] if self.port: parts.append('-P %s' % self.port) parts.append(local_script) parts.append('%s:%s' % (self.user_at_host, remote_script)) cmd = ' '.join(parts) if dry_run: print "Would run: %s" % cmd else: run0(cmd)
def sort_out_and_run_instructions(self, builder, label):
    """
    Validate every instruction for our assemblies, then re-invoke muddle
    (under sudo if any instruction needs privilege) to apply them.
    """
    # Sort out and run the instructions. This may need root.
    need_root_for = set()
    for asm in self.assemblies:
        # there's a from label - does it have instructions?
        # If we're not supposed to obey them anyway, give up.
        if not asm.obeyInstructions:
            continue
        lbl = Label(utils.LabelType.Package, '*', asm.from_label.role, '*',
                    domain=asm.from_label.domain)
        # NOTE(review): passes label.domain (the deployment's domain)
        # rather than asm.from_label.domain - confirm this is intended.
        install_dir = builder.role_install_path(lbl.role, label.domain)
        instr_list = builder.load_instructions(lbl)
        for (lbl, fn, instr_file) in instr_list:
            # Obey this instruction?
            for instr in instr_file:
                iname = instr.outer_elem_name()
                if iname in self.app_dict:
                    if self.app_dict[iname].needs_privilege(
                            builder, instr, lbl.role, install_dir):
                        need_root_for.add(iname)
                    # Deliberately do not break - we want to check everything for
                    # validity before acquiring privilege.
                else:
                    raise GiveUp("Collect deployments don't know about " +
                                 "instruction %s" % iname +
                                 " found in label %s (filename %s)" % (lbl, fn))
    print "Rerunning muddle to apply instructions .. "
    permissions_label = Label(
        utils.LabelType.Deployment,
        label.name,
        None,  # XXX label.role,
        utils.LabelTag.InstructionsApplied,
        domain=label.domain)
    cmd = [builder.muddle_binary, "buildlabel", str(permissions_label)]
    if need_root_for:
        print "I need root to do %s - sorry! - running sudo .." % (
            ', '.join(sorted(need_root_for)))
        utils.run0(["sudo"] + cmd)
    else:
        utils.run0(cmd)
def build_label(self, builder, label):
    """
    Actually install the dev package.

    Installed extracts the package(s) into the object directory and
    rewrites absolute links; PostInstalled optionally runs a
    post-install makefile and fixes up .pc/.la files; Clean removes
    the object directory.
    """
    self.ensure_dirs(builder, label)
    tag = label.tag
    if (tag == utils.LabelTag.PreConfig):
        # Nothing to do
        pass
    elif (tag == utils.LabelTag.Configured):
        pass
    elif (tag == utils.LabelTag.Built):
        pass
    elif (tag == utils.LabelTag.Installed):
        # Extract into /obj
        inv = builder
        extract_into_obj(inv, self.co_name, label, self.pkg_file)
        if (self.nonDevPkgFile is not None):
            # A dev package may ride along with its non-dev counterpart.
            extract_into_obj(inv, self.nonDevCoName, label, self.nonDevPkgFile)
        # Now we rewrite all the absolute links to be relative to the install
        # directory.
        rewrite_links(inv, label)
    elif (tag == utils.LabelTag.PostInstalled):
        if self.post_install_makefile is not None:
            inv = builder
            tmp = Label(utils.LabelType.Checkout, self.co_name, domain=label.domain)
            co_path = inv.checkout_path(tmp)
            with Directory(co_path):
                utils.run0(["make", "-f", self.post_install_makefile,
                            "%s-postinstall"%(label.name)])
        # .. and now we rewrite any pkgconfig etc. files left lying
        # about.
        obj_path = builder.package_obj_path(label)
        print "> Rewrite .pc and .la files in %s"%(obj_path)
        rewrite.fix_up_pkgconfig_and_la(builder, obj_path)
    elif (tag == utils.LabelTag.Clean or tag == utils.LabelTag.DistClean):
        # Just remove the object directory.
        inv = builder
        utils.recursively_remove(inv.package_obj_path(label))
    else:
        raise utils.MuddleBug("Invalid tag specified for deb pkg %s"%(label))
def sort_out_and_run_instructions(self, builder, label):
    """
    Validate every instruction for our assemblies, then re-invoke muddle
    (under sudo if any instruction needs privilege) to apply them.
    """
    # Sort out and run the instructions. This may need root.
    need_root_for = set()
    for asm in self.assemblies:
        # there's a from label - does it have instructions?
        # If we're not supposed to obey them anyway, give up.
        if not asm.obeyInstructions:
            continue
        lbl = Label(utils.LabelType.Package, '*', asm.from_label.role, '*',
                    domain=asm.from_label.domain)
        # NOTE(review): passes label.domain (the deployment's domain)
        # rather than asm.from_label.domain - confirm this is intended.
        install_dir = builder.role_install_path(lbl.role, label.domain)
        instr_list = builder.load_instructions(lbl)
        for (lbl, fn, instr_file) in instr_list:
            # Obey this instruction?
            for instr in instr_file:
                iname = instr.outer_elem_name()
                if iname in self.app_dict:
                    if self.app_dict[iname].needs_privilege(builder, instr,
                                                            lbl.role, install_dir):
                        need_root_for.add(iname)
                    # Deliberately do not break - we want to check everything for
                    # validity before acquiring privilege.
                else:
                    raise GiveUp("Collect deployments don't know about " +
                                 "instruction %s"%iname +
                                 " found in label %s (filename %s)"%(lbl, fn))
    print "Rerunning muddle to apply instructions .. "
    permissions_label = Label(utils.LabelType.Deployment,
                              label.name,
                              None, # XXX label.role,
                              utils.LabelTag.InstructionsApplied,
                              domain = label.domain)
    cmd = [builder.muddle_binary, "buildlabel", str(permissions_label)]
    if need_root_for:
        print "I need root to do %s - sorry! - running sudo .."%(', '.join(sorted(need_root_for)))
        utils.run0(["sudo"] + cmd)
    else:
        utils.run0(cmd)
def do_genromfs(self, builder, label, my_tmp):
    """
    genromfs everything up into a RomFS image.

    Assembles the genromfs command line piecewise (target, optional
    volume label, optional alignment, source directory) and runs it.
    """
    tgt = "rom.romfs" if self.target_name is None else self.target_name
    utils.ensure_dir(builder.deploy_path(label))
    final_tgt = os.path.join(builder.deploy_path(label), tgt)
    pieces = ["%s -f \"%s\"" % (self.genromfs, final_tgt)]
    if self.volume_label is not None:
        pieces.append(" -V \"%s\"" % self.volume_label)
    if self.alignment is not None:
        pieces.append(" -a %d" % (int(self.alignment)))
    pieces.append(" -d \"%s\"" % (my_tmp))
    utils.run0("".join(pieces))
def do_genromfs(self, builder, label, my_tmp):
    """
    genromfs everything up into a RomFS image.

    Assembles the genromfs command line piecewise (target, optional
    volume label, optional alignment, source directory) and runs it.
    """
    tgt = "rom.romfs" if self.target_name is None else self.target_name
    utils.ensure_dir(builder.deploy_path(label))
    final_tgt = os.path.join(builder.deploy_path(label), tgt)
    pieces = ["%s -f \"%s\""%(self.genromfs, final_tgt)]
    if self.volume_label is not None:
        pieces.append(" -V \"%s\""%self.volume_label)
    if self.alignment is not None:
        pieces.append(" -a %d"%(int(self.alignment)))
    pieces.append(" -d \"%s\""%(my_tmp))
    utils.run0("".join(pieces))
def do_mksquashfs(self, builder, label, my_tmp):
    """
    mksquashfs everything up into a SquashFS image.

    Removes any previous image first, since mksquashfs appends to an
    existing file by default.
    """
    tgt = "rom.squashfs" if self.target_name is None else self.target_name
    utils.ensure_dir(builder.deploy_path(label))
    final_tgt = os.path.join(builder.deploy_path(label), tgt)
    # mksquashfs will, by default, append rather than replacing, so..
    try:
        os.remove(final_tgt)
    except OSError as e:
        # Only re-raise if it wasn't file missing
        if e.errno != errno.ENOENT:
            raise
    utils.run0("%s \"%s\" \"%s\" -noappend -all-root -info -comp xz"%(self.mksquashfs, my_tmp, final_tgt))
def do_mksquashfs(self, builder, label, my_tmp):
    """
    mksquashfs everything up into a SquashFS image.

    Removes any previous image first, since mksquashfs appends to an
    existing file by default.
    """
    tgt = "rom.squashfs" if self.target_name is None else self.target_name
    utils.ensure_dir(builder.deploy_path(label))
    final_tgt = os.path.join(builder.deploy_path(label), tgt)
    # mksquashfs will, by default, append rather than replacing, so..
    try:
        os.remove(final_tgt)
    except OSError as e:
        # Only re-raise if it wasn't file missing
        if e.errno != errno.ENOENT:
            raise
    utils.run0("%s \"%s\" \"%s\" -noappend -all-root -info -comp xz" % (
        self.mksquashfs, my_tmp, final_tgt))
def unpack_archive(self, builder, label):
    """
    Unpack our archive file into the object directory and symlink the
    unpacked tree as obj/ so makefiles can use $(MUDDLE_OBJ_OBJ).
    """
    # Since we're going to unpack into the obj/ directory, make sure we
    # have one
    self.ensure_dirs(builder, label)
    try:
        # muddle 2
        # The muddle-2 API takes names/roles directly; if that raises
        # TypeError we are on muddle 3 and must go via Label objects.
        checkout_dir = builder.db.get_checkout_path(self.co, domain=label.domain)
        obj_dir = builder.package_obj_path(self.name, self.role,
                                           domain=label.domain)
    except TypeError:
        # muddle 3
        checkout_label = Label(utils.LabelType.Checkout, self.co,
                               domain=label.domain)
        checkout_dir = builder.db.get_checkout_path(checkout_label)
        package_label = Label(utils.LabelType.Package, self.name, self.role,
                              domain=label.domain)
        obj_dir = builder.package_obj_path(package_label)
    archive_path = os.path.join(checkout_dir, self.archive_file)
    # Make sure to remove any previous unpacking of the archive
    dest_dir = os.path.join(obj_dir, self.archive_dir)
    if os.path.exists(dest_dir):
        utils.run0(['rm', '-rf', dest_dir])
    utils.run0(['tar', '-C', obj_dir, '-xf', archive_path])
    # Ideally, we'd have unpacked the directory as obj/, so that we can
    # refer to it as $(MUDDLE_OBJ_OBJ). However, with a little cunning...
    with Directory(obj_dir):
        utils.run0(['ln', '-sf', self.archive_dir, 'obj'],
                   show_command=True, show_output=True)
def ensure_version(self, builder, repo, co_leaf, options, verbose=True):
    """
    Ensures that the root git repo is the right version. If it exists, will
    error out if it isn't.

    Run in the root directory.
    """
    if (os.path.exists(".git")):
        # Get the checkout revision.
        rr = repo.revision
        if (rr is None):
            rr = 'HEAD'
        # NOTE(review): 'self' is passed explicitly as the first argument
        # (as well as implicitly) - confirm _calculate_revision's
        # signature really expects that.
        rev = self._calculate_revision(self, rr)
        # Now get the version we have ..
        rev2 = self._calculate_revision(self, 'HEAD')
        if (rev != rev2):
            raise GiveUp(
                "git repo required for %s is revision (%s) %s, but we have %s"
                % (co_leaf, repo.revision, rev, rev2))
    else:
        # Check out the relevant repo with the right bits in it.
        if repo.branch:
            br = repo.branch
        else:
            br = "master"
        # Because there are files here, we need to be a bit cunning.
        utils.run0("git init")
        utils.run0("git remote add origin %s" % repo.base_url)
        utils.run0("git fetch origin")
        if repo.revision:
            # An explicit revision trumps any branch.
            rev = repo.revision
            br = None
        else:
            rev = "HEAD"
        if (br is None):
            utils.run0("git checkout %s" % repo.revision)
        else:
            utils.run0("git checkout -b %s --track origin/%s" % (br, br))
def ensure_version(self, builder, repo, co_leaf, options, verbose = True):
    """
    Ensures that the root git repo is the right version. If it exists, will
    error out if it isn't.

    Run in the root directory.
    """
    if (os.path.exists(".git")):
        # Get the checkout revision.
        rr = repo.revision
        if (rr is None):
            rr = 'HEAD'
        # NOTE(review): 'self' is passed explicitly as the first argument
        # (as well as implicitly) - confirm _calculate_revision's
        # signature really expects that.
        rev = self._calculate_revision(self, rr)
        # Now get the version we have ..
        rev2 = self._calculate_revision(self, 'HEAD')
        if (rev != rev2):
            raise GiveUp("git repo required for %s is revision (%s) %s, but we have %s"%(co_leaf, repo.revision, rev,rev2))
    else:
        # Check out the relevant repo with the right bits in it.
        if repo.branch:
            br = repo.branch
        else:
            br = "master"
        # Because there are files here, we need to be a bit cunning.
        utils.run0("git init")
        utils.run0("git remote add origin %s"%repo.base_url)
        utils.run0("git fetch origin")
        if repo.revision:
            # An explicit revision trumps any branch.
            rev = repo.revision
            br = None
        else:
            rev = "HEAD"
        if (br is None):
            utils.run0("git checkout %s"%repo.revision)
        else:
            utils.run0("git checkout -b %s --track origin/%s"%(br,br))
def deploy(self, builder, label, target_base):
    """
    Deploy each assembly into 'target_base'.

    A missing source is fatal only when the assembly asks for that;
    otherwise files and directories are transferred via rsync, a
    recursive copy, or a plain file copy as the assembly requests.
    """
    for asm in self.assemblies:
        src = os.path.join(asm.get_source_dir(builder), asm.from_rel)
        dst = os.path.join(target_base, asm.to_name)
        if not os.path.exists(src):
            if asm.fail_on_absent_source:
                raise GiveUp("Deployment %s: source object %s does not"
                             " exist." % (label.name, src))
            # Absent, and nobody minds - skip this assembly.
            continue
        if not os.path.isdir(src):
            # A single file: hand it to rsync, or copy it directly.
            if asm.using_rsync:
                utils.shell("rsync -avz \"%s\" \"%s\"" % (src, dst))
            else:
                utils.copy_file(src, dst, object_exactly=asm.copy_exactly)
        elif asm.using_rsync:
            # A directory, synced with rsync for speed.
            try:
                os.makedirs(dst)
            except OSError:
                pass
            target = dst
            if target[-1] != "/":
                target = target + "/"
            utils.run0("rsync -avz \"%s/.\" \"%s\"" % (src, target))
        elif asm.recursive:
            utils.recursively_copy(src, dst, object_exactly=asm.copy_exactly)
        else:
            utils.copy_file(src, dst, object_exactly=asm.copy_exactly)
def apply(self, builder, instr, role, path):
    """
    Create the device node described by 'instr' under 'path', then set
    its ownership and mode.
    """
    node_kind = "c" if instr.type == "char" else "b"
    node_path = os.path.join(path, instr.file_name)
    utils.run0("mknod %s %s %s %s"%(node_path, node_kind, instr.major, instr.minor))
    utils.run0("chown %s:%s %s"%(instr.uid, instr.gid, node_path))
    utils.run0("chmod %s %s"%(instr.mode, node_path))
def apply(self, builder, instr, role, path):
    """
    Create the device node described by 'instr' under 'path', then set
    its ownership and mode.
    """
    node_kind = "c" if instr.type == "char" else "b"
    node_path = os.path.join(path, instr.file_name)
    utils.run0("mknod %s %s %s %s" % (node_path, node_kind,
                                      instr.major, instr.minor))
    utils.run0("chown %s:%s %s" % (instr.uid, instr.gid, node_path))
    utils.run0("chmod %s %s" % (instr.mode, node_path))
def deploy(self, builder, label):
    """
    Deploy every configured role into the deployment directory, then
    re-invoke muddle (under sudo if any instruction needs privilege)
    to apply recorded instructions.
    """
    deploy_dir = builder.deploy_path(label)
    # First off, delete the target directory
    utils.recursively_remove(deploy_dir)
    utils.ensure_dir(deploy_dir)
    for role, domain in self.roles:
        if domain:
            print "> %s: Deploying role %s in domain %s .. " % (
                label.name, role, domain)
        else:
            print "> %s: Deploying role %s .. " % (label.name, role)
        install_dir = builder.role_install_path(role, domain=domain)
        # object_exactly=True preserves the file objects (links etc.) as-is.
        utils.recursively_copy(install_dir, deploy_dir, object_exactly=True)

    # This is somewhat tricky as it potentially requires privilege elevation.
    # Privilege elevation is done by hooking back into ourselves via a
    # build command to a label we registered earlier.
    #
    # Note that you cannot split instruction application - once the first
    # privilege-requiring instruction is executed, all further instructions
    # may require privilege even if they didn't before (e.g. a chmod after
    # chown)

    # First off, do we need to at all?
    need_root_for = set()
    for role, domain in self.roles:
        lbl = depend.Label(utils.LabelType.Package, "*", role, "*",
                           domain=domain)
        # NOTE(review): this passes label.domain where the deploy loop
        # above used the role's own 'domain' - confirm that is intended.
        install_dir = builder.role_install_path(role, domain=label.domain)
        instr_list = builder.load_instructions(lbl)
        for (lbl, fn, instr_file) in instr_list:
            # Obey this instruction?
            for instr in instr_file:
                iname = instr.outer_elem_name()
                if iname in self.app_dict:
                    if self.app_dict[iname].needs_privilege(
                            builder, instr, role, install_dir):
                        need_root_for.add(iname)
                    # Deliberately do not break - we want to check everything for
                    # validity before acquiring privilege.
                else:
                    raise utils.GiveUp(
                        "File deployments don't know about " +
                        "instruction %s" % iname +
                        " found in label %s (filename %s)" % (lbl, fn))
    print "Rerunning muddle to apply instructions .. "
    permissions_label = depend.Label(
        utils.LabelType.Deployment,
        label.name,
        None,  # XXX label.role,
        utils.LabelTag.InstructionsApplied,
        domain=label.domain)
    if need_root_for:
        print "I need root to do %s - sorry! - running sudo .." % (
            ', '.join(sorted(need_root_for)))
        utils.run0("sudo %s buildlabel '%s'" % (builder.muddle_binary,
                                                permissions_label))
    else:
        utils.run0("%s buildlabel '%s'" % (builder.muddle_binary,
                                           permissions_label))
def build_label(self,builder, label): """ Actually cpio everything up, following instructions appropriately. """ if label.type not in (LabelType.Deployment, LabelType.Package): raise GiveUp("Attempt to build a CPIO deployment with a label" " of type %s"%(label.type)) if label.type == LabelType.Deployment and label.tag != LabelTag.Deployed: raise GiveUp("Attempt to build a CPIO deployment with a" " deployment label of type %s"%(label.tag)) elif label.type == LabelType.Package and label.tag != LabelTag.PostInstalled: raise GiveUp("Attempt to build a CPIO deployment with a" " package label of type %s"%(label.tag)) # Collect all the relevant files .. if label.type == LabelType.Deployment: deploy_dir = builder.deploy_path(label) else: # XXX Would it be better to use package_obj_path(label) ??? deploy_dir = builder.package_install_path(label) deploy_file = os.path.join(deploy_dir, self.target_file) utils.ensure_dir(os.path.dirname(deploy_file)) the_hierarchy = cpiofile.Hierarchy({ }, { }) for l ,bt in self.target_base: if type( bt ) == types.TupleType: real_source_path = os.path.join(builder.role_install_path(l.role, l.domain), bt[0]) # This is bt[1] - the actual destination. base is computed differently # (bt[2]) for applying instructions. base = bt[1] else: base = bt real_source_path = os.path.join(builder.role_install_path(l.role, l.domain)) print "Collecting %s for deployment to %s .. "%(l,base) if (len(base) > 0 and base[0] != '/'): base = "/%s"%(base) m = cpiofile.hierarchy_from_fs(real_source_path, base) the_hierarchy.merge(m) # Normalise the hierarchy .. the_hierarchy.normalise() print "Filesystem hierarchy is:\n%s"%the_hierarchy.as_str(builder.db.root_path) if self.prune_function: self.prune_function(the_hierarchy) app_dict = _get_instruction_dict() # Apply instructions. We actually need an intermediate list here, # because you might have the same role with several different # sources and possibly different bases. 
to_apply = {} for src, bt in self.target_base: if type(bt) == types.TupleType: base = bt[2] else: base = bt to_apply[ (src, base) ] = (src, bt) # Now they are unique .. for src, bt in to_apply.values(): if type(bt) == types.TupleType: base = bt[2] else: base = bt print "base = %s"%(base) lbl = depend.Label(LabelType.Package, "*", src.role, "*", domain = src.domain) print "Scanning instructions for role %s, domain %s .. "%(src.role, src.domain) instr_list = builder.load_instructions(lbl) for lbl, fn, instrs in instr_list: print "CPIO deployment: Applying instructions for role %s, label %s .. "%(src.role, lbl) for instr in instrs: iname = instr.outer_elem_name() #print 'Instruction:', iname if iname in app_dict: print 'Instruction:', str(instr) app_dict[iname].apply(builder, instr, lbl.role, base, the_hierarchy) else: print 'Instruction:', iname raise GiveUp("CPIO deployments don't know about " "the instruction %s (lbl %s, file %s)"%(iname, lbl, fn)) # .. and write the file. print "> Writing %s .. "%deploy_file the_hierarchy.render(deploy_file, True) if (self.compression_method is not None): if (self.compression_method == "gzip"): utils.run0(["gzip", "-f", deploy_file]) elif (self.compression_method == "bzip2"): utils.run0(["bzip2", "-f", deploy_file]) else: raise GiveUp("Invalid compression method %s"%self.compression_method + "specified for cpio deployment. Pick gzip or bzip2.")
def build_label(self, builder, label): """ Build the relevant label. """ self.ensure_dirs(builder, label) tag = label.tag if (tag == utils.LabelTag.PreConfig): # Nothing to do. pass elif (tag == utils.LabelTag.Configured): pass elif (tag == utils.LabelTag.Built): pass elif (tag == utils.LabelTag.Installed): # Concoct a suitable dpkg command. inv = builder # Extract into the object directory .. so I can depend on them later. # - actually, Debian packaging doesn't work like that. Rats. # - rrw 2009-11-24 #extract_into_obj(inv, self.co_name, label, self.pkg_file) inst_dir = inv.package_install_path(label) tmp = Label(utils.LabelType.Checkout, self.co_name, domain=label.domain) co_dir = inv.checkout_path(tmp) # Using dpkg doesn't work here for many reasons. dpkg_cmd = [ "dpkg-deb", "-X", os.path.join(co_dir, self.pkg_file), inst_dir ] utils.run0(dpkg_cmd) # Pick up any instructions that got left behind instr_file = self.instr_name if (instr_file is None): instr_file = "%s.instructions.xml" % (label.name) instr_path = os.path.join(co_dir, instr_file) if (os.path.exists(instr_path)): # We have instructions .. ifile = db.InstructionFile(instr_path) ifile.get() builder.instruct(label.name, label.role, ifile) elif (tag == utils.LabelTag.PostInstalled): if self.post_install_makefile is not None: inv = builder tmp = Label(utils.LabelType.Checkout, self.co_name, domain=label.domain) co_path = inv.checkout_path(tmp) with Directory(co_path): utils.run0([ "make", "-f", self.post_install_makefile, "%s-postinstall" % label.name ]) elif (tag == utils.LabelTag.Clean or tag == utils.LabelTag.DistClean): # inv = builder admin_dir = os.path.join(inv.package_obj_path(label)) utils.recursively_remove(admin_dir) else: raise utils.MuddleBug("Invalid tag specified for deb pkg %s" % (label))
def _do_cmdline(args): original_dir = os.getcwd() original_env = os.environ.copy() dry_run = False verbose = False # TODO: allow switches after args. while args: word = args[0] if word in ("-h", "--help", "-?"): print __doc__ return elif word in ("--dry-run", "-n"): dry_run = True elif word in ("-v", "--verbose"): verbose = True elif word[0] == "-": raise GiveUp, "Unexpected command line option %s" % word else: break args = args[1:] if len(args) != 0: raise GiveUp, "Unexpected non-option arguments given" builder = find_and_load(original_dir, muddle_binary=None) # Don't bother determining muddle_binary: our invocation of find_and_load # doesn't make use of it. (Tibs writes: it's only needed for when # running makefiles, for when they use $(MUDDLE).) if not builder: raise GiveUp("Cannot find a build tree.") rootrepo = builder.db.RootRepository_pathfile.get() rules = builder.all_checkout_rules() rr = [] for r in rules: co_dir = builder.db.get_checkout_path(r.target) if isinstance(r.action.vcs, muddled.vcs.git.Git): if verbose: print "In %s:" % co_dir os.chdir(co_dir) raw = get_cmd_data("git show-ref --heads", verbose=verbose) raw_heads = raw[1].rstrip("\n").split("\n") pat = re.compile("[0-9a-f]+ refs/heads/(.+)") heads = set() for h in raw_heads: m = pat.match(h) if m is None: raise GiveUp("Unparseable output from git: %s" % h) heads.add(m.group(1)) g = r.action.vcs # print "heads is %s"%heads.__str__() if g.branch is not None: if g.branch in heads: if verbose: print "%s: ok (has %s)" % (co_dir, g.branch) else: bfrom = "master" # desired branch not found; if we have a master then try to fixup: if bfrom in heads: # if verbose: print "===\nFixing %s: %s --> %s" % (co_dir, bfrom, g.branch) (rc, lines, igno) = get_cmd_data("git status --porcelain -uall", verbose=verbose) lines = lines.rstrip("\n") if lines != "": if not verbose: print "> git status --porcelain -uall" print ">>%s<<" % lines print ( "Uncommitted changes or untracked files found in %s, deal with these 
before continuing" % co_dir ) raise GiveUp maybe_run_cmd("git fetch origin %s" % (g.branch), dry_run, verbose) maybe_run_cmd("git fetch origin %s:%s" % (g.branch, g.branch), dry_run, verbose) maybe_run_cmd("git checkout %s" % g.branch, dry_run, verbose) maybe_run_cmd("git config branch.%s.remote origin" % g.branch, dry_run, verbose) maybe_run_cmd("git config branch.%s.merge %s" % (g.branch, g.branch), dry_run, verbose) try: maybe_run_cmd("git branch -d %s" % bfrom, dry_run, verbose) except GiveUp: print "\n* * * HEALTH WARNING * * *" print "Unmerged changes were found committed to the '%s' branch in %s" % (bfrom, co_dir) print "YOU MUST MERGE THESE INTO '%s' YOURSELF OR LOSE THEM!" % g.branch # print "This script will not revisit this checkout." print "The relevant changes are:" run0("git log --oneline --topo-order --graph --decorate=short %s..%s" % (g.branch, bfrom)) raise else: raise GiveUp( "Error: %s wants a branch named '%s', does not have one, and does not have a '%s' either - I don't know how to fix this" % (co_dir, g.branch, bfrom) ) else: # want master, don't care about others if verbose: print "%s heads are: %s" % (co_dir, heads) if not "master" in heads: raise GiveUp( "Error: %s wants a 'master' branch but does not have one, I don't know how to fix this" % co_dir ) else: if verbose: print "Ignoring %s (not powered by git)" % co_dir
def check_out(self): utils.ensure_dir(self.checkout_path) os.chdir(self.checkout_path) utils.run0("wget %s --output-document=%s" % (self.url, self.filename))
def check_out(self): utils.ensure_dir(self.checkout_path) os.chdir(self.checkout_path) utils.run0("wget %s --output-document=%s"%(self.url,self.filename))
def build_label(self, builder, label): our_dir = builder.package_obj_path(label) dirlist = [] tag = label.tag if (tag == utils.LabelTag.Built or tag == utils.LabelTag.Installed): for (l, s) in self.components: tmp = Label(utils.LabelType.Package, l.name, l.role, domain=label.domain) root_dir = builder.package_install_path(tmp) dirlist.append((root_dir, s)) print "dirlist:" for (x, y) in dirlist: print "%s=%s \n" % (x, y) if (tag == utils.LabelTag.PreConfig): pass elif (tag == utils.LabelTag.Configured): pass elif (tag == utils.LabelTag.Built): # OK. Building. This is ghastly .. utils.recursively_remove(our_dir) utils.ensure_dir(our_dir) # Now we need to copy all the subdirs in .. for (root, sub) in dirlist: utils.ensure_dir(utils.rel_join(our_dir, sub)) # Only bother to copy kernel modules. names = utils.find_by_predicate(utils.rel_join(root, sub), predicate_is_kernel_module) utils.copy_name_list_with_dirs(names, utils.rel_join(root, sub), utils.rel_join(our_dir, sub)) # .. and run depmod. depmod = "depmod" if (self.custom_depmod is not None): depmod = self.custom_depmod # Because depmod is brain-dead, we need to give it explicit versions. names = os.listdir(utils.rel_join(our_dir, "lib/modules")) our_re = re.compile(r'\d+\.\d+\..*') for n in names: if (our_re.match(n) is not None): print "Found kernel version %s in %s .. " % (n, our_dir) utils.run0("%s -b %s %s" % (depmod, our_dir, n)) elif (tag == utils.LabelTag.Installed): # Now we find all the modules.* files in our_dir and copy them over # to our install directory names = utils.find_by_predicate(our_dir, predicate_is_module_db) tgt_dir = builder.package_install_path(label) utils.copy_name_list_with_dirs(names, our_dir, tgt_dir) for n in names: new_n = utils.replace_root_name(our_dir, tgt_dir, n) print "Installed: %s" % (new_n) elif (tag == utils.LabelTag.Clean): utils.recursively_remove(our_dir) elif (tag == utils.LabelTag.DistClean): utils.recursively_remove(our_dir)
def maybe_run_cmd(cmd, dry_run, verbose): if dry_run: print "(DRY RUN) > %s" % cmd else: run0(cmd, show_output=verbose)
def build_label(self, builder, label): our_dir = builder.package_obj_path(label) dirlist = [ ] tag = label.tag if (tag == utils.LabelTag.Built or tag == utils.LabelTag.Installed): for (l,s) in self.components: tmp = Label(utils.LabelType.Package, l.name, l.role, domain=label.domain) root_dir = builder.package_install_path(tmp) dirlist.append( (root_dir, s) ) print "dirlist:" for (x,y) in dirlist: print "%s=%s \n"%(x,y) if (tag == utils.LabelTag.PreConfig): pass elif (tag == utils.LabelTag.Configured): pass elif (tag == utils.LabelTag.Built): # OK. Building. This is ghastly .. utils.recursively_remove(our_dir) utils.ensure_dir(our_dir) # Now we need to copy all the subdirs in .. for (root, sub) in dirlist: utils.ensure_dir(utils.rel_join(our_dir, sub)) # Only bother to copy kernel modules. names = utils.find_by_predicate(utils.rel_join(root, sub), predicate_is_kernel_module) utils.copy_name_list_with_dirs(names, utils.rel_join(root,sub), utils.rel_join(our_dir, sub)) # .. and run depmod. depmod = "depmod" if (self.custom_depmod is not None): depmod = self.custom_depmod # Because depmod is brain-dead, we need to give it explicit versions. names = os.listdir(utils.rel_join(our_dir, "lib/modules")) our_re = re.compile(r'\d+\.\d+\..*') for n in names: if (our_re.match(n) is not None): print "Found kernel version %s in %s .. "%(n, our_dir) utils.run0("%s -b %s %s"%(depmod, our_dir, n)) elif (tag == utils.LabelTag.Installed): # Now we find all the modules.* files in our_dir and copy them over # to our install directory names = utils.find_by_predicate(our_dir, predicate_is_module_db) tgt_dir = builder.package_install_path(label) utils.copy_name_list_with_dirs(names, our_dir, tgt_dir) for n in names: new_n = utils.replace_root_name(our_dir, tgt_dir, n) print "Installed: %s"%(new_n) elif (tag == utils.LabelTag.Clean): utils.recursively_remove(our_dir) elif (tag == utils.LabelTag.DistClean): utils.recursively_remove(our_dir)