def render(self, session, dbuser, domain, comments, compiler_version,
           autosync, change_manager, allow_manage, **arguments):
    dbdomain = Domain.get_unique(session, domain, compel=True)

    # FIXME: proper authorization
    if dbdomain.owner != dbuser and dbuser.role.name != 'aqd_admin':
        raise AuthorizationException("Only the owner or an AQD admin can "
                                     "update a domain.")

    if comments:
        dbdomain.comments = comments
    if compiler_version:
        dbdomain.compiler = expand_compiler(self.config, compiler_version)
    if autosync is not None:
        dbdomain.autosync = autosync
    if change_manager is not None:
        if dbdomain.tracked_branch:
            raise ArgumentError("Cannot enforce a change manager for "
                                "tracking domains.")
        dbdomain.requires_change_manager = change_manager
    if allow_manage is not None:
        dbdomain.allow_manage = allow_manage

    session.flush()
    return
def render(self, session, domain, **arguments):
    return Domain.get_unique(session, domain, compel=True,
                             query_options=[undefer('comments'),
                                            joinedload('owner')])
def render(self, session, logger, domain, ref, lastsync, **arguments):
    dbdomain = Domain.get_unique(session, domain, compel=True)
    if not dbdomain.tracked_branch:
        # Could check dbdomain.trackers and rollback all of them...
        raise ArgumentError("rollback requires a tracking domain")

    if lastsync:
        if not dbdomain.rollback_commit:
            raise ArgumentError("Domain %s does not have a rollback "
                                "commit saved, please specify one "
                                "explicitly." % dbdomain.name)
        ref = dbdomain.rollback_commit

    if not ref:
        raise ArgumentError("Commit reference to rollback to required.")

    kingdir = self.config.get("broker", "kingdir")
    domaindir = os.path.join(self.config.get("broker", "domainsdir"),
                             dbdomain.name)

    out = run_git(["branch", "--contains", ref],
                  logger=logger, path=kingdir)
    if not re.search(r'\b%s\b' % dbdomain.tracked_branch.name, out):
        # There's no real technical reason why this needs to be
        # true.  It just seems like a good sanity check.
        raise ArgumentError("Cannot roll back to commit: "
                            "branch %s does not contain %s" %
                            (dbdomain.tracked_branch.name, ref))

    dbdomain.tracked_branch.is_sync_valid = False
    session.add(dbdomain.tracked_branch)
    dbdomain.rollback_commit = None
    session.add(dbdomain)

    key = CompileKey(domain=dbdomain.name, logger=logger)
    try:
        lock_queue.acquire(key)
        run_git(["push", ".", "+%s:%s" % (ref, dbdomain.name)],
                path=kingdir, logger=logger)
        # Duplicated this logic from aquilon.worker.processes.sync_domain()
        run_git(["fetch"], path=domaindir, logger=logger)
        run_git(["reset", "--hard", "origin/%s" % dbdomain.name],
                path=domaindir, logger=logger)
    except ProcessException, e:
        raise ArgumentError("Problem encountered updating templates for "
                            "domain %s: %s" % (dbdomain.name, e))
    finally:
        # Assumed cleanup (not shown in the original excerpt): release the
        # compile lock whether or not the git operations succeeded.
        lock_queue.release(key)
def render(self, session, logger, domain, **arguments):
    dbdomain = Domain.get_unique(session, domain, compel=True)
    if not dbdomain.tracked_branch:
        # Could check dbdomain.trackers and sync all of them...
        raise ArgumentError("sync requires a tracking domain")
    if not dbdomain.tracked_branch.is_sync_valid:
        raise ArgumentError("Tracked branch %s is set to not allow sync.  "
                            "Run aq validate to mark it as valid." %
                            dbdomain.tracked_branch.name)

    try:
        sync_domain(dbdomain, logger=logger)
    except ProcessException, e:
        raise ArgumentError("Problem encountered updating templates for "
                            "domain %s: %s" % (dbdomain.name, e))
def get_branch_and_author(session, logger, domain=None, sandbox=None,
                          branch=None, compel=False):
    dbbranch = None
    dbauthor = None
    if domain:
        dbbranch = Domain.get_unique(session, domain, compel=True)
    elif branch:
        dbbranch = Branch.get_unique(session, branch, compel=True)
    elif sandbox:
        (author, slash, name) = sandbox.partition('/')
        if not slash:
            raise ArgumentError("Expected sandbox as 'author/branch', author "
                                "name and branch name separated by a slash.")
        dbbranch = Sandbox.get_unique(session, name, compel=True)
        dbauthor = get_user_principal(session, author)
    elif compel:
        raise ArgumentError("Please specify either sandbox or domain.")
    return (dbbranch, dbauthor)
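# Illustrative usage sketch (not part of the original source): a broker
# command that accepts either --domain or --sandbox could resolve them with
# the helper above.  The wrapper name and its arguments are assumptions made
# only for this example.
def resolve_branch_example(session, logger, domain=None, sandbox=None):
    dbbranch, dbauthor = get_branch_and_author(session, logger,
                                               domain=domain, sandbox=sandbox,
                                               compel=True)
    # dbauthor is only populated for sandboxes given in 'author/branch' form;
    # for domains it stays None.
    return dbbranch, dbauthor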
def main(): print "Calculating sandbox base commits. This may take around 10 minutes." logging.basicConfig(level=logging.WARNING) kingdir = config.get("broker", "kingdir") domains = session.query(Domain).all() # Define preference order when multiple domains have the same commits. # This is just cosmetics, but makes it easier to verify the output. for idx, domain in enumerate(("prod", "qa", "secure-aquilon-prod", "secure-aquilon-qa")): dbdom = Domain.get_unique(session, domain, compel=True) domains.remove(dbdom) domains.insert(idx, dbdom) base_commits = {} q = session.query(Sandbox) q = q.order_by('name') # The base_commit column does not exist yet... q = q.options(defer("base_commit")) for sandbox in q: base_domain = None base_commit = None min_ahead = None commits = run_git(["rev-list", "refs/heads/" + sandbox.name], path=kingdir).split("\n") for domain in domains: merge_base = run_git(["merge-base", "refs/heads/" + sandbox.name, "refs/heads/" + domain.name], path=kingdir).strip() # Number of commits since branching from the given domain ahead = commits.index(merge_base) if base_domain is None or ahead < min_ahead: base_domain = domain base_commit = merge_base min_ahead = ahead if min_ahead == 0: break print "{0: <40}: {1.name} (ahead {2})".format(sandbox, base_domain, min_ahead) base_commits[sandbox.name] = base_commit session.expunge_all() try: if session.bind.dialect.name == 'oracle': query = text(""" ALTER TABLE sandbox ADD base_commit VARCHAR2(40 CHAR) """) elif session.bind.dialect.name == 'postgresql': query = text(""" ALTER TABLE sandbox ADD base_commit CHARACTER VARYING (40) """) print "\nExecuting: %s" % query session.execute(query) session.commit() except DatabaseError: # Allow the script to be re-run by not failing if the column already # exists. If the column does not exist, then trying to update it will # fail anyway. print """ WARNING: Adding the sandbox.base_commit column has failed. If you're running this script for the second time, then that's likely OK, otherwise you should verify and correct the schema manually. """ session.rollback() for sandbox in q: sandbox.base_commit = base_commits[sandbox.name] session.commit() try: if session.bind.dialect.name == 'oracle': query = text(""" ALTER TABLE sandbox MODIFY (base_commit VARCHAR2(40 CHAR) CONSTRAINT sandbox_base_commit_nn NOT NULL) """) elif session.bind.dialect.name == 'postgresql': query = text(""" ALTER TABLE sandbox ALTER COLUMN base_commit SET NOT NULL """) print "\nExecuting: %s" % query session.execute(query) session.commit() except DatabaseError: print """ WARNING: Enabling the NOT NULL constraint for sandbox.base_commit column has failed. If you're running this script for the second time, then that's likely OK, otherwise you should verify and correct the schema manually. """ session.rollback()
def render(self, session, logger, dbuser, domain, track, start,
           change_manager, comments, allow_manage, **arguments):
    if not dbuser:
        raise AuthorizationException("Cannot create a domain without "
                                     "an authenticated connection.")

    Branch.get_unique(session, domain, preclude=True)

    valid = re.compile('^[a-zA-Z0-9_.-]+$')
    if not valid.match(domain):
        raise ArgumentError("Domain name '%s' is not valid." % domain)

    # FIXME: Verify that track is a valid branch name?
    # Or just let the branch command fail?

    compiler = self.config.get("panc", "pan_compiler")
    dbtracked = None
    if track:
        dbtracked = Branch.get_unique(session, track, compel=True)
        if getattr(dbtracked, "tracked_branch", None):
            raise ArgumentError("Cannot nest tracking.  Try tracking "
                                "{0:l} directly."
                                .format(dbtracked.tracked_branch))
        start_point = dbtracked
        if change_manager:
            raise ArgumentError("Cannot enforce a change manager for "
                                "tracking domains.")
    else:
        if not start:
            start = self.config.get("broker", "default_domain_start")
        start_point = Branch.get_unique(session, start, compel=True)

    dbdomain = Domain(name=domain, owner=dbuser, compiler=compiler,
                      tracked_branch=dbtracked,
                      requires_change_manager=bool(change_manager),
                      comments=comments)
    session.add(dbdomain)
    if allow_manage is not None:
        dbdomain.allow_manage = allow_manage
    session.flush()

    domainsdir = self.config.get("broker", "domainsdir")
    clonedir = os.path.join(domainsdir, dbdomain.name)
    if os.path.exists(clonedir):
        raise InternalError("Domain directory already exists")

    kingdir = self.config.get("broker", "kingdir")
    cmd = ["branch"]
    if track:
        cmd.append("--track")
    else:
        cmd.append("--no-track")
    cmd.append(dbdomain.name)
    cmd.append(start_point.name)
    run_git(cmd, path=kingdir, logger=logger)

    # If the branch command above fails the DB will roll back as normal.
    # If the command below fails we need to clean up from itself and above.
    try:
        run_git(["clone", "--branch", dbdomain.name,
                 kingdir, dbdomain.name],
                path=domainsdir, logger=logger)
    except ProcessException, e:
        try:
            remove_dir(clonedir, logger=logger)
            run_git(["branch", "-D", dbdomain.name],
                    path=kingdir, logger=logger)
        except ProcessException, e2:
            logger.info("Exception while cleaning up: %s", e2)
        # Assumed (not shown in the original excerpt): re-raise the clone
        # failure so the cleanup does not mask it.
        raise e
def render(self, session, logger, domain, **arguments):
    dbdomain = Domain.get_unique(session, domain, compel=True)

    remove_branch(self.config, logger, dbdomain)
    return
def refresh_windows_hosts(self, session, logger, containers):
    conn = sqlite3.connect(self.config.get("broker", "windows_host_info"))
    # Enable dictionary-style access to the rows.
    conn.row_factory = sqlite3.Row

    windows_hosts = {}
    interfaces = {}
    cur = conn.cursor()
    # There are more fields in the dataset like machine and
    # aqhostname that might be useful for error messages but these
    # are sufficient.
    cur.execute("select ether, windowshostname from machines")
    for row in cur:
        host = row["windowshostname"]
        if host:
            host = host.strip().lower()
        else:
            continue
        mac = row["ether"]
        if mac:
            mac = mac.strip().lower()
        windows_hosts[host] = mac
        interfaces[mac] = host

    success = []
    failed = []

    q = session.query(Host)
    q = q.filter_by(comments='Created by refresh_windows_host')
    for dbhost in q.all():
        mac_addresses = [iface.mac for iface in dbhost.machine.interfaces]
        if dbhost.fqdn in windows_hosts and \
           windows_hosts[dbhost.fqdn] in mac_addresses:
            # All is well
            continue
        deps = get_host_dependencies(session, dbhost)
        if deps:
            msg = "Skipping removal of host %s with dependencies: %s" % \
                (dbhost.fqdn, ", ".join(deps))
            failed.append(msg)
            logger.info(msg)
            continue
        dbmachine = dbhost.machine
        success.append("Removed host entry for %s (%s)" %
                       (dbmachine.label, dbmachine.fqdn))
        if dbmachine.vm_container:
            containers.add(dbmachine.vm_container)
        session.delete(dbhost)
        dbdns_rec = dbmachine.primary_name
        dbmachine.primary_name = None
        delete_dns_record(dbdns_rec)
    session.flush()

    # The Host() creations below fail when autoflush is enabled.
    session.autoflush = False

    dbdomain = Domain.get_unique(session,
                                 self.config.get("archetype_windows",
                                                 "host_domain"),
                                 compel=InternalError)
    dbarchetype = Archetype.get_unique(session, "windows",
                                       compel=InternalError)
    dbpersonality = Personality.get_unique(session, archetype=dbarchetype,
                                           name="generic",
                                           compel=InternalError)
    dbstatus = HostLifecycle.get_unique(session, "ready",
                                        compel=InternalError)
    dbos = OperatingSystem.get_unique(session, name="windows",
                                      version="generic",
                                      archetype=dbarchetype,
                                      compel=InternalError)

    for (host, mac) in windows_hosts.items():
        try:
            (short, dbdns_domain) = parse_fqdn(session, host)
        except AquilonError, err:
            msg = "Skipping host %s: %s" % (host, err)
            failed.append(msg)
            logger.info(msg)
            continue

        existing = DnsRecord.get_unique(session, name=short,
                                        dns_domain=dbdns_domain)
        if existing:
            if not existing.hardware_entity:
                msg = "Skipping host %s: It is not a primary name." % host
                failed.append(msg)
                logger.info(msg)
                continue
            # If these are invalid there should have been a deletion
            # attempt above.
            if not existing.hardware_entity.interfaces:
                msg = "Skipping host %s: Host already exists but has " \
                    "no interface attached." % host
                failed.append(msg)
                logger.info(msg)
            elif existing.hardware_entity.interfaces[0].mac != mac:
                msg = "Skipping host %s: Host already exists but with " \
                    "MAC address %s and not %s." % \
                    (host, existing.hardware_entity.interfaces[0].mac, mac)
                failed.append(msg)
                logger.info(msg)
            continue

        dbinterface = session.query(Interface).filter_by(mac=mac).first()
        if not dbinterface:
            msg = "Skipping host %s: MAC address %s is not present in " \
                "AQDB." % (host, mac)
            failed.append(msg)
            logger.info(msg)
            continue
        q = session.query(Machine)
        q = q.filter_by(id=dbinterface.hardware_entity.id)
        dbmachine = q.first()
        if not dbmachine:
            msg = "Skipping host %s: The AQDB interface with MAC address " \
                "%s is tied to hardware %s instead of a virtual " \
                "machine." % \
                (host, mac, dbinterface.hardware_entity.label)
            failed.append(msg)
            logger.info(msg)
            continue
        if dbinterface.assignments:
            msg = "Skipping host %s: The AQDB interface with MAC address " \
                "%s is already tied to %s." % \
                (host, mac, dbinterface.assignments[0].fqdns[0])
            failed.append(msg)
            logger.info(msg)
            continue
        if dbmachine.host:
            msg = "Skipping host %s: The AQDB interface with MAC address " \
                "%s is already tied to %s." % \
                (host, mac, dbmachine.fqdn)
            failed.append(msg)
            logger.info(msg)
            continue

        dbhost = Host(machine=dbmachine, branch=dbdomain,
                      status=dbstatus, owner_grn=dbpersonality.owner_grn,
                      personality=dbpersonality, operating_system=dbos,
                      comments="Created by refresh_windows_host")
        session.add(dbhost)

        if self.config.has_option("archetype_windows", "default_grn_target"):
            # NOTE: dbgrn is not defined in this excerpt; the original module
            # presumably looks it up before this point.
            dbhost.grns.append((dbhost, dbgrn,
                                self.config.get("archetype_windows",
                                                "default_grn_target")))

        dbfqdn = Fqdn.get_or_create(session, name=short,
                                    dns_domain=dbdns_domain, preclude=True)
        dbdns_rec = ReservedName(fqdn=dbfqdn)
        session.add(dbdns_rec)
        dbmachine.primary_name = dbdns_rec

        success.append("Added host entry for %s (%s)." %
                       (dbmachine.label, dbdns_rec.fqdn))
        if dbmachine.vm_container:
            containers.add(dbmachine.vm_container)

    session.flush()
success.append("Removed host entry for %s (%s)" % (dbmachine.label, dbmachine.fqdn)) if dbmachine.vm_container: containers.add(dbmachine.vm_container) logger.info("Deleting {0:l} (machine {1.label})" .format(dbhost, dbmachine)) session.delete(dbhost) dbdns_rec = dbmachine.primary_name dbmachine.primary_name = None delete_dns_record(dbdns_rec) session.flush() # The Host() creations below fail when autoflush is enabled. session.autoflush = False dbdomain = Domain.get_unique(session, self.config.get("archetype_windows", "host_domain"), compel=InternalError) dbarchetype = Archetype.get_unique(session, "windows", compel=InternalError) dbpersonality = Personality.get_unique(session, archetype=dbarchetype, name="generic", compel=InternalError) dbstatus = Ready.get_instance(session) dbos = OperatingSystem.get_unique(session, name="windows", version="generic", archetype=dbarchetype, compel=InternalError) for (host, mac) in windows_hosts.items(): try: (short, dbdns_domain) = parse_fqdn(session, host) except AquilonError, err:
def main(): print "Calculating sandbox base commits. This may take around 10 minutes." logging.basicConfig(level=logging.WARNING) kingdir = config.get("broker", "kingdir") domains = session.query(Domain).all() # Define preference order when multiple domains have the same commits. # This is just cosmetics, but makes it easier to verify the output. for idx, domain in enumerate( ("prod", "qa", "secure-aquilon-prod", "secure-aquilon-qa")): dbdom = Domain.get_unique(session, domain, compel=True) domains.remove(dbdom) domains.insert(idx, dbdom) base_commits = {} q = session.query(Sandbox) q = q.order_by('name') # The base_commit column does not exist yet... q = q.options(defer("base_commit")) for sandbox in q: base_domain = None base_commit = None min_ahead = None commits = run_git(["rev-list", "refs/heads/" + sandbox.name], path=kingdir).split("\n") for domain in domains: merge_base = run_git([ "merge-base", "refs/heads/" + sandbox.name, "refs/heads/" + domain.name ], path=kingdir).strip() # Number of commits since branching from the given domain ahead = commits.index(merge_base) if base_domain is None or ahead < min_ahead: base_domain = domain base_commit = merge_base min_ahead = ahead if min_ahead == 0: break print "{0: <40}: {1.name} (ahead {2})".format(sandbox, base_domain, min_ahead) base_commits[sandbox.name] = base_commit session.expunge_all() try: if session.bind.dialect.name == 'oracle': query = text(""" ALTER TABLE sandbox ADD base_commit VARCHAR2(40 CHAR) """) elif session.bind.dialect.name == 'postgresql': query = text(""" ALTER TABLE sandbox ADD base_commit CHARACTER VARYING (40) """) print "\nExecuting: %s" % query session.execute(query) session.commit() except DatabaseError: # Allow the script to be re-run by not failing if the column already # exists. If the column does not exist, then trying to update it will # fail anyway. print """ WARNING: Adding the sandbox.base_commit column has failed. If you're running this script for the second time, then that's likely OK, otherwise you should verify and correct the schema manually. """ session.rollback() for sandbox in q: sandbox.base_commit = base_commits[sandbox.name] session.commit() try: if session.bind.dialect.name == 'oracle': query = text(""" ALTER TABLE sandbox MODIFY (base_commit VARCHAR2(40 CHAR) CONSTRAINT sandbox_base_commit_nn NOT NULL) """) elif session.bind.dialect.name == 'postgresql': query = text(""" ALTER TABLE sandbox ALTER COLUMN base_commit SET NOT NULL """) print "\nExecuting: %s" % query session.execute(query) session.commit() except DatabaseError: print """ WARNING: Enabling the NOT NULL constraint for sandbox.base_commit column has failed. If you're running this script for the second time, then that's likely OK, otherwise you should verify and correct the schema manually. """ session.rollback()
def render(self, session, logger, source, target, sync, dryrun, comments,
           justification, user, requestid, **arguments):
    # Most of the logic here is duplicated in publish
    dbsource = Branch.get_unique(session, source, compel=True)

    # The target has to be a non-tracking domain
    dbtarget = Domain.get_unique(session, target, compel=True)
    if sync and isinstance(dbtarget.tracked_branch, Domain) \
       and dbtarget.tracked_branch.autosync and dbtarget.autosync:
        # The user probably meant to deploy to the tracked branch,
        # but only do so if all the relevant autosync flags are
        # positive.
        logger.warning("Deploying to tracked branch %s and then will "
                       "auto-sync %s" % (dbtarget.tracked_branch.name,
                                         dbtarget.name))
        dbtarget = dbtarget.tracked_branch
    elif dbtarget.tracked_branch:
        raise ArgumentError("Cannot deploy to tracking domain %s.  "
                            "Did you mean domain %s?" %
                            (dbtarget.name, dbtarget.tracked_branch.name))

    if sync and not dbtarget.is_sync_valid and dbtarget.trackers:
        # FIXME: Maybe raise an ArgumentError and request that the
        # command run with --nosync?  Maybe provide a --validate flag?
        # For now, just auto-flip (below).
        pass
    if not dbtarget.is_sync_valid:
        dbtarget.is_sync_valid = True

    if dbtarget.requires_change_manager:
        if not justification:
            raise AuthorizationException(
                "{0} is under change management control.  Please specify "
                "--justification.".format(dbtarget))
        validate_justification(user, justification)

    if isinstance(dbsource, Sandbox):
        domainsdir = self.config.get('broker', 'domainsdir')
        targetdir = os.path.join(domainsdir, dbtarget.name)
        filterre = re.compile('^' + dbsource.base_commit + '$')
        found = run_git(['rev-list', 'HEAD'], path=targetdir,
                        logger=logger, filterre=filterre)
        if not found:
            raise ArgumentError("You're trying to deploy a sandbox to a "
                                "domain that does not contain the commit "
                                "where the sandbox was branched from.")

    kingdir = self.config.get("broker", "kingdir")
    rundir = self.config.get("broker", "rundir")

    tempdir = mkdtemp(prefix="deploy_", suffix="_%s" % dbsource.name,
                      dir=rundir)
    try:
        run_git(["clone", "--shared", "--branch", dbtarget.name,
                 kingdir, dbtarget.name],
                path=tempdir, logger=logger)
        temprepo = os.path.join(tempdir, dbtarget.name)

        # We could try to use fmt-merge-msg but its usage is so obscure
        # that faking it is easier
        merge_msg = []
        merge_msg.append("Merge remote branch 'origin/%s' into %s" %
                         (dbsource.name, dbtarget.name))
        merge_msg.append("")
        merge_msg.append("User: %s" % user)
        merge_msg.append("Request ID: %s" % requestid)
        if justification:
            merge_msg.append("Justification: %s" % justification)
        if comments:
            merge_msg.append("Comments: %s" % comments)

        try:
            run_git(["merge", "--no-ff", "origin/%s" % dbsource.name,
                     "-m", "\n".join(merge_msg)],
                    path=temprepo, logger=logger, loglevel=CLIENT_INFO)
        except ProcessException, e:
            # No need to re-print e, output should have gone to client
            # immediately via the logger.
            raise ArgumentError("Failed to merge changes from %s into %s" %
                                (dbsource.name, dbtarget.name))
        # FIXME: Run tests before pushing back to template-king.
        # Use a different try/except and a specific error message.

        if dryrun:
            session.rollback()
            return

        run_git(["push", "origin", dbtarget.name],
                path=temprepo, logger=logger)
    finally:
        # Assumed cleanup (not shown in the original excerpt): remove the
        # temporary clone whether or not the deploy succeeded.
        remove_dir(tempdir, logger=logger)
from nose.tools import eq_

NUM_MACHINES = 2

DNAME = 'ms.com'
DNSENV = 'internal'
SHORT_NAME_PREFIX = 'aqdb-test-host-'
MACHINE_NAME_PREFIX = 'test_machine_'

sess = DbFactory().Session()
assert sess, 'No session in %s' % func_name()

# TODO: factor out assert_type(obj, cls, func_name) for the isinstance calls
STATUS = Status.get_unique(sess, 'ready')
assert isinstance(STATUS, Status), 'No ready status @ %s' % func_name()

DOMAIN = Domain.get_unique(sess, 'ny-prod')
assert isinstance(DOMAIN, Domain), 'no ny-prod domain @ %s' % func_name()

ARCH = Archetype.get_unique(sess, 'aquilon')
assert isinstance(ARCH, Archetype), 'No archetype @ %s' % func_name()

OS = OperatingSystem.get_unique(sess, name='linux', version='5.0.1-x86_64',
                                archetype=ARCH)
assert isinstance(OS, OperatingSystem), 'No os @ %s' % func_name()

PRSNLTY = Personality.get_unique(sess, name='generic', archetype=ARCH)
assert isinstance(PRSNLTY, Personality), 'no personality @ %s' % func_name()

NETWORK = sess.query(Network).filter(Network.cidr < 31).first()
assert isinstance(NETWORK, Network), 'no network in %s' % func_name()