def restore_stash(self):
    """Roll the plenary file back to the state captured by stash().

    This should only be called while holding an appropriate lock.  If no
    state was stashed, the call is logged and ignored.
    """
    if not self.stashed:
        self.logger.info("Attempt to restore plenary '%s' "
                         "without having saved state." % self.old_path)
        return

    # NOTE: an optimization exists but is not in use:
    # skipping the restore when neither self.changed nor self.removed is set.

    # The plenary may have been relocated since the stash was taken; if
    # so, whatever was written at the new location must be cleaned up.
    has_moved = bool(self.new_path) and self.new_path != self.old_path
    if has_moved:
        self.logger.debug("Removing %r [%s]" % (self, self.new_path))
        remove_file(self.new_path, cleanup_directory=True,
                    logger=self.logger)

    self.logger.debug("Restoring %r [%s]" % (self, self.old_path))
    if self.old_content is not None:
        write_file(self.old_path, self.old_content, create_directory=True,
                   logger=self.logger)
        # Rewriting the file bumped its mtime; put the stashed mtime back
        # while leaving the access time as-is.
        access_time = os.stat(self.old_path).st_atime
        os.utime(self.old_path, (access_time, self.old_mtime))
    else:
        # No stashed content means the plenary did not exist beforehand.
        remove_file(self.old_path, cleanup_directory=True,
                    logger=self.logger)
def render(self, session, logger, branch, sandbox, bundle, sync, rebase,
           **arguments):
    """Publish a client-uploaded git bundle into a sandbox branch.

    The base64-encoded bundle is written to a temporary file, verified,
    pulled into a scratch clone of template-king's sandbox branch, and
    then pushed back to template-king.

    :param branch: alias for sandbox (takes precedence when given)
    :param bundle: base64-encoded git bundle produced by the client
    :param sync: requested sync behavior for tracking domains (currently
        only influences the FIXME below; is_sync_valid is flipped anyway)
    :param rebase: allow history rewriting (forced pull/push); refused if
        the sandbox has trackers
    :raises ArgumentError: if rebasing a tracked sandbox, or if any git
        operation fails (git's output is passed through to the client)
    """
    import shutil

    # Most of the logic here is duplicated in deploy
    if branch:
        sandbox = branch
    dbsandbox = Sandbox.get_unique(session, sandbox, compel=True)

    (handle, filename) = mkstemp()
    # mkstemp() hands back an open file descriptor that nothing below
    # uses (write_file() reopens the path by name) -- close it right
    # away so the descriptor is not leaked.
    os.close(handle)
    try:
        contents = b64decode(bundle)
        write_file(filename, contents, logger=logger)

        if sync and not dbsandbox.is_sync_valid and dbsandbox.trackers:
            # FIXME: Maybe raise an ArgumentError and request that the
            # command run with --nosync? Maybe provide a --validate flag?
            # For now, we just auto-flip anyway (below) making the point moot.
            pass
        if not dbsandbox.is_sync_valid:
            dbsandbox.is_sync_valid = True
        if rebase and dbsandbox.trackers:
            raise ArgumentError("{0} has trackers, rebasing is not allowed."
                                .format(dbsandbox))

        kingdir = self.config.get("broker", "kingdir")
        rundir = self.config.get("broker", "rundir")

        tempdir = mkdtemp(prefix="publish_", suffix="_%s" % dbsandbox.name,
                          dir=rundir)
        try:
            run_git(["clone", "--shared", "--branch", dbsandbox.name,
                     kingdir, dbsandbox.name],
                    path=tempdir, logger=logger)
            temprepo = os.path.join(tempdir, dbsandbox.name)
            # Make sure the bundle is self-consistent before applying it.
            run_git(["bundle", "verify", filename],
                    path=temprepo, logger=logger)
            ref = "HEAD:%s" % (dbsandbox.name)
            command = ["pull", filename, ref]
            if rebase:
                # Rebasing rewrites history, so the pull must be forced.
                command.append("--force")
            run_git(command, path=temprepo, logger=logger,
                    loglevel=CLIENT_INFO)
            # FIXME: Run tests before pushing back to template-king
            if rebase:
                target_ref = "+" + dbsandbox.name
            else:
                target_ref = dbsandbox.name
            run_git(["push", "origin", target_ref],
                    path=temprepo, logger=logger)
        except ProcessException as e:
            # Pass git's own output through to the client.
            raise ArgumentError("\n%s%s" % (e.out, e.err))
        finally:
            # The scratch clone is not needed once the push has either
            # succeeded or failed.
            shutil.rmtree(tempdir, ignore_errors=True)
    finally:
        # The uploaded bundle file is likewise temporary.
        os.remove(filename)
def write(self, locked=False):
    """Write out the template.

    If the content is unchanged, then the file will not be modified
    (preserving the mtime).

    Returns the number of files that were written (0 or 1).

    If locked is True then the caller already holds the lock and error
    handling happens higher in the call stack.
    """
    # Object profiles of non-compileable archetypes are never written.
    if self.template_type == "object" and \
       hasattr(self.dbobj, "personality") and \
       self.dbobj.personality and \
       not self.dbobj.personality.archetype.is_compileable:
        return 0

    # This is a hack to handle the case when the DB object has been deleted,
    # but a plenary instance still references it (probably buried inside a
    # PlenaryCollection). Calling self.will_change() on such a plenary would
    # fail, because the primary key is None, which is otherwise impossible.
    # NOTE(review): inspect() here is presumably SQLAlchemy's -- confirm.
    if isinstance(self.dbobj, Base):
        state = inspect(self.dbobj)
        if state.deleted:
            return 0

    if not self.new_content:
        self.new_content = self._generate_content()
    content = self.new_content

    key = None
    try:
        if not locked:
            key = self.get_key()
            lock_queue.acquire(key)
        self.stash()
        if self.old_content == content and \
           not self.removed and not self.changed:
            # optimise out the write (leaving the mtime good for ant)
            # if nothing is actually changed
            return 0
        if not self.new_path:
            raise InternalError("New path not set - likely write() is "
                                "called on deleted object.")

        # If the plenary has moved, then clean up any potential leftover
        # files from the old location
        if self.new_path != self.old_path:
            self.remove(locked=True)

        self.logger.debug("Writing %r [%s]" % (self, self.new_path))
        write_file(self.new_path, content, create_directory=True,
                   logger=self.logger)
        self.removed = False
        if self.old_content != content:
            self.changed = True
    except Exception:
        if not locked:
            self.restore_stash()
        # Bare raise preserves the original traceback; "raise e" here
        # used to discard it.
        raise
    finally:
        # Release the lock on every exit path -- previously it leaked on
        # success and on the "nothing changed" early return above.
        if key is not None:
            lock_queue.release(key)

    return 1
# the index should just list (advertise) the profile as a .xml # file. if transparent_gzip: advertise_suffix = suffix.rstrip(".gz") else: advertise_suffix = suffix content.append("<profile mtime='%d'>%s%s</profile>" % (mtime, obj, advertise_suffix)) content.append("</profiles>") compress = None if gzip_index: compress = 'gzip' write_file(index_path, "\n".join(content), logger=logger, compress=compress) logger.debug("Updated %s, %d objects modified", index_path, len(modified_index)) sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) if config.has_option("broker", "bind_address"): bind_address = socket.gethostbyname(config.get("broker", "bind_address")) if config.has_option("broker", "cdp_send_port"): # pragma: no cover port = config.get_int("broker", "cdp_send_port") else: port = 0 sock.bind((bind_address, port)) if config.has_option("broker", "server_notifications"):