def update_crates_versions(args, temp_dir, repositories):
    """Bump crate versions on the master branches, then commit, optionally
    push, and open PRs.

    Returns False as soon as one crate update fails, True otherwise.
    """
    write_msg('=> Updating [master] crates version...')
    for crate in args.crates:
        update_type = crate['up-type']
        # Shadowing on purpose: from here `crate` is the inner crate dict.
        crate = crate['crate']
        # --crate lets the user restrict the run to a single crate.
        if args.specified_crate is not None and crate[
                'crate'] != args.specified_crate:
            continue
        if update_repo_version(crate["repository"], crate["crate"],
                               crate["path"], temp_dir, update_type,
                               args.badges_only or args.tags_only) is False:
            write_error('The update for the "{}" crate failed...'.format(
                crate["crate"]))
            return False
    write_msg('Done!')
    # Badges-only / tags-only runs do not commit version bumps.
    if args.badges_only is False and args.tags_only is False:
        write_msg('=> Committing{} to the "{}" branch...'.format(
            " and pushing" if args.no_push is False else "",
            consts.MASTER_TMP_BRANCH))
        for repo in repositories:
            commit(repo, temp_dir, "Update versions [ci skip]")
            if args.no_push is False:
                push(repo, temp_dir, consts.MASTER_TMP_BRANCH)
        write_msg('Done!')
        if args.no_push is False:
            write_msg('=> Creating PRs on master branch...')
            for repo in repositories:
                create_pull_request(repo, consts.MASTER_TMP_BRANCH, "master",
                                    args.token)
            write_msg('Done!')
    return True
def end(message):
    """Finish the game day: persist results to the yearly CSV, refresh the
    JSON data, commit, and report the rating diffs back to the chat.

    Resets Gameday state when there are no recorded games.
    """
    if not len(Gameday.games):
        bot.reply_to(
            message,
            'No games, add with /game or /tourngame command, or /start to reset'
        )
        Gameday.init()
        return
    # One CSV file per year, named after the game day's year.
    csv_path_to_save = os.path.join(DATA_DIR,
                                    f'{Gameday.date.strftime("%Y")}.csv')
    dataframe_to_save = pd.DataFrame(columns=[
        'Date', 'Index', 'Winner', 'Looser', 'Score', 'Tournament', 'Stage'
    ])
    # Append to the existing yearly file if it is already on disk.
    if os.path.isfile(csv_path_to_save):
        dataframe_to_save = pd.read_csv(csv_path_to_save)
    for result in Gameday.games:
        dataframe_to_save = add_result(dataframe_to_save, result.user1,
                                       result.user2, result.score1,
                                       result.score2, result.tournament,
                                       result.stage, Gameday.date)
    dataframe_to_save.to_csv(csv_path_to_save, index=False)
    diffs = update_json_data()
    commit(Gameday.getDay())
    Gameday.cleanup()
    message_text = f'Success! 🎉\n\n{get_diffs_table(diffs)}\nCheck out https://lzrby.github.io/squash'
    bot.send_message(message.chat.id, message_text, disable_notification=True)
def update_crate_repositories_branches(args, temp_dir, repositories):
    """Merge master into the crate branches, bump versions there, commit,
    optionally push and open PRs.

    Returns False as soon as one crate update fails, True otherwise.
    """
    write_msg('=> Merging "master" branches into "crate" branches...')
    for repo in repositories:
        merging_branches(repo, temp_dir, "master")
    write_msg('Done!')
    write_msg('=> Updating [crate] crates version...')
    for crate in args.crates:
        # Shadowing on purpose: from here `crate` is the inner crate dict.
        crate = crate['crate']
        # --crate lets the user restrict the run to a single crate.
        if args.specified_crate is not None and crate[
                'crate'] != args.specified_crate:
            continue
        if update_crate_version(crate["repository"], crate["crate"],
                                crate["path"], temp_dir,
                                args.specified_crate) is False:
            write_error('The update for the "{}" crate failed...'.format(
                crate["crate"]))
            return False
    write_msg('Done!')
    write_msg('=> Committing{} to the "{}" branch...'.format(
        " and pushing" if args.no_push is False else "",
        consts.CRATE_TMP_BRANCH))
    for repo in repositories:
        commit(repo, temp_dir, "Update versions [ci skip]")
        if args.no_push is False:
            push(repo, temp_dir, consts.CRATE_TMP_BRANCH)
    write_msg('Done!')
    if args.no_push is False:
        write_msg('=> Creating PRs on crate branch...')
        for repo in repositories:
            create_pull_request(repo, consts.CRATE_TMP_BRANCH, "crate",
                                args.token)
        write_msg('Done!')
    return True
def teardown():
    """Check that ARecords were delete-cascaded, clean up leftovers, and
    warn when DNS record / machine counts differ from the initial counts."""
    sess.close()
    (ms, intrnl) = get_reqs()
    #TODO: move this general approach to test_dns???
    # generalize the entire approach and take a count on the way in and out
    qry = sess.query(ARecord).filter(ARecord.name.like(AREC_PREFIX + '%'))
    qry = qry.filter_by(dns_environment=intrnl)
    if qry.count() == 0:
        log.info('successful cascaded deletion of ARecords')
    else:
        log.error('ARecords left intact when they should be delete cascaded')
        # Manual cleanup for records the cascade should have removed.
        for rec in qry.filter_by(dns_domain=ms).all():
            sess.delete(rec)
            commit(sess)
            log.debug('deleted ARecord %s' % rec.fqdn)
    # dns_count_b4 / mchn_count_b4 are module-level counts taken at setup.
    dns_count_after = sess.query(DnsRecord).count()
    if dns_count_after != dns_count_b4:
        log.warning('%s record(s) left after teardown in %s, should be %s' %
                    (dns_count_after, func_name(), dns_count_b4))
    del_machines(sess, MCHN_PREFIX)
    mchn_count_after = sess.query(Machine).count()
    if mchn_count_b4 != mchn_count_after:
        log.warning('%s machines left after %s, should be %s' %
                    (mchn_count_after, func_name(), mchn_count_b4))
def rev(source_dir, chromium_dir):
  """Copy the tracked files of each dir in dirs_to_clone from source_dir
  into chromium_dir, stamp mojo/public/VERSION with the source revision,
  and commit the result in chromium_dir."""
  src_commit = system(["git", "show-ref", "HEAD", "-s"],
                      cwd=source_dir).strip()
  for d in dirs_to_clone:
    if os.path.exists(os.path.join(chromium_dir, d)):
      print "removing directory %s" % d
      system(["git", "rm", "-r", d], cwd=chromium_dir)
    print "cloning directory %s" % d
    files = system(["git", "ls-files", d], cwd=source_dir)
    for f in files.splitlines():
      # Don't copy presubmit files over since the code is read-only on the
      # chromium side.
      if os.path.basename(f) == "PRESUBMIT.py":
        continue
      dest_path = os.path.join(chromium_dir, f)
      system(["mkdir", "-p", os.path.dirname(dest_path)])
      system(["cp", os.path.join(source_dir, f), dest_path])
    os.chdir(chromium_dir)
    system(["git", "add", d], cwd=chromium_dir)
  # Record which source revision this snapshot came from.
  with open("mojo/public/VERSION", "w") as version_file:
    version_file.write(src_commit)
  system(["git", "add", "mojo/public/VERSION"], cwd=chromium_dir)
  commit("Update mojo sdk to rev " + src_commit, cwd=chromium_dir)
def rev(source_dir, chromium_dir):
  """Clone each (input_dir -> dest_dir) mapping in dirs_to_clone from
  source_dir into chromium_dir, stamp the SDK VERSION file with the source
  revision, and commit in chromium_dir."""
  src_commit = system(["git", "show-ref", "HEAD", "-s"],
                      cwd=source_dir).strip()
  for input_dir, dest_dir in dirs_to_clone.iteritems():
    if os.path.exists(os.path.join(chromium_dir, dest_dir)):
      print "removing directory %s" % dest_dir
      system(["git", "rm", "-r", dest_dir], cwd=chromium_dir)
    print "cloning directory %s into %s" % (input_dir, dest_dir)
    files = system(["git", "ls-files", input_dir], cwd=source_dir)
    for f in files.splitlines():
      # Don't copy presubmit files over since the code is read-only on the
      # chromium side.
      if os.path.basename(f) == "PRESUBMIT.py":
        continue
      # Clone |f| into Chromium under |dest_dir| at its location relative to
      # |input_dir|.
      f_relpath = os.path.relpath(f, input_dir)
      dest_path = os.path.join(chromium_dir, dest_dir, f_relpath)
      system(["mkdir", "-p", os.path.dirname(dest_path)])
      system(["cp", os.path.join(source_dir, f), dest_path])
    os.chdir(chromium_dir)
    system(["git", "add", dest_dir], cwd=chromium_dir)
  mojo_public_dest_dir = os.path.join(sdk_prefix_in_chromium, "mojo/public")
  version_filename = os.path.join(mojo_public_dest_dir, "VERSION")
  # Record which source revision this snapshot came from.
  with open(version_filename, "w") as version_file:
    version_file.write(src_commit)
  system(["git", "add", version_filename], cwd=chromium_dir)
  commit("Update mojo sdk to rev " + src_commit, cwd=chromium_dir)
def rev(source_dir, dest_dir, dirs_to_rev, name):
  """Replace dirs_to_rev in dest_dir with the tracked copies from
  source_dir, keep files_not_to_roll untouched, and commit as an update
  of `name` at the source HEAD revision."""
  for dir_to_rev in dirs_to_rev:
    # An entry may be (dir, file_subset) to clone only selected files.
    if type(dir_to_rev) is tuple:
      d, file_subset = dir_to_rev
    else:
      d = dir_to_rev
      file_subset = None
    print "removing directory %s" % d
    try:
      system(["git", "rm", "-r", d], cwd=dest_dir)
    except subprocess.CalledProcessError:
      print "Could not remove %s" % d
    print "cloning directory %s" % d
    if file_subset is None:
      files = system(["git", "ls-files", d], cwd=source_dir).splitlines()
    else:
      files = [os.path.join(d, f) for f in file_subset]
    for f in files:
      source_path = os.path.join(source_dir, f)
      # Skip entries that are not regular files in the source checkout.
      if not os.path.isfile(source_path):
        continue
      dest_path = os.path.join(dest_dir, f)
      system(["mkdir", "-p", os.path.dirname(dest_path)], cwd=source_dir)
      system(["cp", source_path, dest_path], cwd=source_dir)
    system(["git", "add", d], cwd=dest_dir)
  # Restore the files that must not be rolled, then stage everything.
  for f in files_not_to_roll:
    system(["git", "checkout", "HEAD", f], cwd=dest_dir)
  system(["git", "add", "."], cwd=dest_dir)
  src_commit = system(["git", "rev-parse", "HEAD"], cwd=source_dir).strip()
  commit("Update to %s %s" % (name, src_commit), cwd=dest_dir)
def test_create_host_simple():
    """Create NUM_MACHINES hosts, each with an ARecord wired up as its
    primary name via PrimaryNameAssociation."""
    for i in xrange(NUM_MACHINES):
        machine = MACHINE_FACTORY.next()
        #create an a_record for the primary name
        name = '%s%s' % (SHORT_NAME_PREFIX, i)
        #convert ip to ipaddr to int, add 1 (router) + i,
        #convert back to ip, then back to string
        #convoluted indeed (FIXME?)
        ip = str(IPAddr(int(IPAddr(NETWORK.ip)) + i + 1))
        a_rec = FutureARecord.get_or_create(session=sess,
                                            fqdn='%s.ms.com' % name,
                                            ip=ip)
        BRANCH = sess.query(Branch).first()
        # make sure to delete them after (Or put in commit block?)
        sess.refresh(machine)
        host = Host(machine=machine,
                    #primary_name_id=a_rec.id,
                    personality=PRSNLTY, status=STATUS,
                    operating_system=OS, branch=BRANCH)
        add(sess, host)
        pna = PrimaryNameAssociation(a_record_id=a_rec.id,
                                     hardware_entity_id=machine.machine_id)
        add(sess, pna)
        commit(sess)
    hosts = sess.query(Host).all()
    eq_(len(hosts), NUM_MACHINES)
    print 'created %s hosts' % len(hosts)
def del_clusters(): clist = sess.query(Cluster).all() if len(clist) > 0: for c in clist: sess.delete(c) commit(sess) print 'deleted %s cluster(s)' % len(clist)
def regenerate_documentation(args, temp_dir, repositories):
    """Rebuild the docs of every crate and open a PR against the doc
    repository's gh-pages branch.  No-op for badges/tags-only runs."""
    if args.badges_only is True or args.tags_only is True:
        return
    input(
        "About to regenerate documentation. Are you sure you want to continue? "
        + "(Press ENTER to continue)")
    update_doc_content_repository(repositories, temp_dir, args.token,
                                  args.no_push, args)
    write_msg('=> Preparing doc repo (too much dark magic in here urg)...')
    cleanup_doc_repo(temp_dir)
    write_msg('Done!')
    write_msg('=> Building docs...')
    for crate in args.crates:
        # Shadowing on purpose: from here `crate` is the inner crate dict.
        crate = crate['crate']
        if crate['crate'] == 'gtk-test':
            continue
        write_msg('-> Building docs for {}...'.format(crate['crate']))
        build_docs(crate['repository'], temp_dir, crate['path'],
                   crate.get('doc_name', crate['crate']))
    end_docs_build(temp_dir)
    write_msg('Done!')
    write_msg('=> Committing{} docs to the "{}" branch...'.format(
        " and pushing" if args.no_push is False else "",
        consts.CRATE_TMP_BRANCH))
    commit(consts.DOC_REPO, temp_dir, "Regen docs")
    if args.no_push is False:
        push(consts.DOC_REPO, temp_dir, consts.CRATE_TMP_BRANCH)
        create_pull_request(consts.DOC_REPO, consts.CRATE_TMP_BRANCH,
                            "gh-pages", args.token)
        write_msg("New pull request(s):\n\n{}\n".format(
            '\n'.join(PULL_REQUESTS)))
    write_msg('Done!')
def test_host_in_two_clusters():
    """ create 2 new clusters and add a host to both. check Host.cluster. """
    per = sess.query(Personality).select_from(
        join(Archetype, Personality)).filter(
            and_(Archetype.name == 'windows',
                 Personality.name == 'generic')).one()
    for i in xrange(3):
        ec = EsxCluster(name='%s%s' % (CLUSTER_NAME, i), personality=per)
        add(sess, ec)
    commit(sess)
    c1 = sess.query(EsxCluster).filter_by(name='%s1' % (CLUSTER_NAME)).one()
    c2 = sess.query(EsxCluster).filter_by(name='%s2' % (CLUSTER_NAME)).one()
    assert c1
    assert c2
    print 'clusters in host in 2 cluster test are %s and %s' % (c1, c2)
    host = h_factory.next()
    # autoflush off so both membership rows flush together on commit
    sess.autoflush = False
    hcm1 = HostClusterMember(host=host, cluster=c1)
    create(sess, hcm1)
    assert host in c1.hosts
    print 'c1 hosts are %s' % (c1.hosts)
    c2.hosts.append(host)
    sess.autoflush = True
    commit(sess)
def test_create_hosts(): br = Branch.get_unique(sess, 'ny-prod', compel=True) dns_dmn = DnsDomain.get_unique(sess, 'one-nyp.ms.com', compel=True) stat = Status.get_unique(sess, 'build', compel=True) os = sess.query(OperatingSystem).filter(Archetype.name == 'vmhost').first() assert os, 'No OS in %s' % func_name() pers = sess.query(Personality).select_from(join( Personality, Archetype)).filter( and_(Archetype.name == 'vmhost', Personality.name == 'generic')).one() sess.autoflush = False for i in xrange(NUM_HOSTS): machine = m_factory.next() vm_host = Host(machine=machine, name='%s%s' % (HOST_NAME, i), dns_domain=dns_dmn, branch=br, personality=pers, status=stat, operating_system=os) add(sess, vm_host) sess.autoflush = True commit(sess) hosts = sess.query(Host).filter(Host.name.like(HOST_NAME + '%')).all() assert len(hosts) is NUM_HOSTS print 'created %s hosts' % len(hosts)
def del_hosts(): hosts = sess.query(Host).filter(Host.name.like(HOST_NAME+'%')).all() if hosts: for host in hosts: sess.delete(host) commit(sess) print 'deleted %s hosts' % len(hosts)
def test_host_in_two_clusters():
    """ create 2 new clusters and add a host to both. check Host.cluster. """
    per = sess.query(Personality).select_from(join(
        Archetype, Personality)).filter(
            and_(Archetype.name == 'windows',
                 Personality.name == 'generic')).one()
    for i in xrange(3):
        ec = EsxCluster(name='%s%s' % (CLUSTER_NAME, i), personality=per)
        add(sess, ec)
    commit(sess)
    c1 = sess.query(EsxCluster).filter_by(name='%s1' % (CLUSTER_NAME)).one()
    c2 = sess.query(EsxCluster).filter_by(name='%s2' % (CLUSTER_NAME)).one()
    assert c1
    assert c2
    print 'clusters in host in 2 cluster test are %s and %s' % (c1, c2)
    host = h_factory.next()
    # autoflush off so both membership rows flush together on commit
    sess.autoflush = False
    hcm1 = HostClusterMember(host=host, cluster=c1)
    create(sess, hcm1)
    assert host in c1.hosts
    print 'c1 hosts are %s' % (c1.hosts)
    c2.hosts.append(host)
    sess.autoflush = True
    commit(sess)
def patch_and_filter(dest_dir):
    """Strip gyp_environment lines from landmines.py, commit the filter,
    then apply the local patches to dest_dir."""
    os.chdir(dest_dir)
    keep_line = lambda line: "gyp_environment" not in line
    utils.filter_file("build/landmines.py", keep_line)
    utils.commit("filter gyp_environment out of build/landmines.py")
    patch(dest_dir)
def del_service_instance(name): si = sess.query(ServiceInstance).filter_by(name=name).first() if si: count = sess.query(ServiceInstance).filter_by(name=name).delete() commit(sess) print 'called del_service_instance(%s), deleted %s rows' % (name, count)
def rev(source_dir):
  """Copy the tracked files of each dir in `dirs` plus files_to_copy from
  source_dir into mojo_root_dir, then commit the snapshot."""
  os.chdir(source_dir)
  src_commit = system(["git", "show-ref", "HEAD"])
  for d in dirs:
    print "removing directory %s" % d
    os.chdir(mojo_root_dir)
    try:
      system(["git", "rm", "-r", d])
    except subprocess.CalledProcessError:
      print "Could not remove %s" % d
    print "cloning directory %s" % d
    os.chdir(source_dir)
    files = system(["git", "ls-files", d])
    for f in files.splitlines():
      dest_path = os.path.join(mojo_root_dir, f)
      system(["mkdir", "-p", os.path.dirname(dest_path)])
      system(["cp", os.path.join(source_dir, f), dest_path])
    os.chdir(mojo_root_dir)
    system(["git", "add", d])
  # Individual files outside the cloned directories.
  for f in files_to_copy:
    system(["cp", os.path.join(source_dir, f),
            os.path.join(mojo_root_dir, f)])
  os.chdir(mojo_root_dir)
  system(["git", "add", "."])
  commit("Update from chromium " + src_commit)
def del_service(name): ''' reusable service delete for other tests ''' svc = sess.query(Service).filter_by(name=name).first() if svc: count = sess.query(Service).filter_by(name=name).delete() commit(sess) print 'session.delete(%s) deleted %s rows' % (name, count)
def rev(source_dir):
  """Copy the tracked files of each dir in `dirs` plus files_to_copy from
  source_dir into mojo_root_dir and commit, referencing the Chromium
  revision number of the source HEAD."""
  for d in dirs:
    print "removing directory %s" % d
    try:
      system(["git", "rm", "-r", d], cwd=mojo_root_dir)
    except subprocess.CalledProcessError:
      print "Could not remove %s" % d
    print "cloning directory %s" % d
    files = system(["git", "ls-files", d], cwd=source_dir)
    for f in files.splitlines():
      dest_path = os.path.join(mojo_root_dir, f)
      system(["mkdir", "-p", os.path.dirname(dest_path)], cwd=source_dir)
      system(["cp", os.path.join(source_dir, f), dest_path], cwd=source_dir)
    system(["git", "add", d], cwd=mojo_root_dir)
  # Individual files outside the cloned directories.
  for f in files_to_copy:
    system([
        "cp", os.path.join(source_dir, f), os.path.join(mojo_root_dir, f)
    ])
  system(["git", "add", "."], cwd=mojo_root_dir)
  src_commit = system(["git", "rev-parse", "HEAD"], cwd=source_dir).strip()
  src_rev = chromium_rev_number(src_commit)
  commit("Update from https://crrev.com/" + src_rev, cwd=mojo_root_dir)
def teardown():
    """Check that ARecords were delete-cascaded, clean up leftovers, and
    warn when DNS record / machine counts differ from the initial counts."""
    sess.close()
    (ms, intrnl) = get_reqs()
    #TODO: move this general approach to test_dns???
    # generalize the entire approach and take a count on the way in and out
    qry = sess.query(ARecord).filter(
        ARecord.name.like(AREC_PREFIX + '%'))
    qry = qry.filter_by(dns_environment=intrnl)
    if qry.count() == 0:
        log.info('successful cascaded deletion of ARecords')
    else:
        log.error('ARecords left intact when they should be delete cascaded')
        # Manual cleanup for records the cascade should have removed.
        for rec in qry.filter_by(dns_domain=ms).all():
            sess.delete(rec)
            commit(sess)
            log.debug('deleted ARecord %s' % rec.fqdn)
    # dns_count_b4 / mchn_count_b4 are module-level counts taken at setup.
    dns_count_after = sess.query(DnsRecord).count()
    if dns_count_after != dns_count_b4:
        log.warning('%s record(s) left after teardown in %s, should be %s' % (
            dns_count_after, func_name(), dns_count_b4))
    del_machines(sess, MCHN_PREFIX)
    mchn_count_after = sess.query(Machine).count()
    if mchn_count_b4 != mchn_count_after:
        log.warning('%s machines left after %s, should be %s' % (
            mchn_count_after, func_name(), mchn_count_b4))
def install_package_cached(self, repo, pkgname, version, package_cache_path): local_packages_path = path.join(path.join(USER_CURRENT_DIR, self._package_folder)) # download url = self.get_download_url(name=pkgname, version=version) filename = '{0}@{1}.{2}'.format(pkgname, version, self._file_extension) dest = path.join(APP_ARCHIVE_DIR, pkgname, version + ".tgz") # Check package archived is downloaded # if no download # if yes, skip downloading if (path.isfile(dest)): print 'Locate {0}'.format(filename) else: print 'Download {0}'.format(filename) self.download(url=url, dest=dest) # Extract file path_to_package = path.join(local_packages_path, pkgname) tar = tarfile.open(dest) tar.extractall(path=path_to_package) tar.close() # tmp_package_path = path.join(path_to_package, self._tmp_package_folder) # for f in os.listdir(tmp_package_path): # shutil.move(path.join(tmp_package_path, f), path_to_package) tmp_package_path = path.join(path_to_package, self._tmp_package_folder) for f in os.listdir(tmp_package_path): shutil.move(path.join(tmp_package_path, f), path.join(package_cache_path, f)) try: utils.commit(repo, message=version) except Exception as e: print e
def del_hosts(): hosts = sess.query(Host).filter(Host.name.like(HOST_NAME + '%')).all() if hosts: for host in hosts: sess.delete(host) commit(sess) print 'deleted %s hosts' % len(hosts)
def del_service(name): """ reusable service delete for other tests """ svc = sess.query(Service).filter_by(name=name).first() if svc: count = sess.query(Service).filter_by(name=name).delete() commit(sess) print "session.delete(%s) deleted %s rows" % (name, count)
def test_create_hosts(): br = Branch.get_unique(sess, 'ny-prod', compel=True) dns_dmn = DnsDomain.get_unique(sess, 'one-nyp.ms.com', compel=True) stat = Status.get_unique(sess, 'build', compel=True) os = sess.query(OperatingSystem).filter(Archetype.name == 'vmhost').first() assert os, 'No OS in %s' % func_name() pers = sess.query(Personality).select_from( join(Personality, Archetype)).filter( and_(Archetype.name=='vmhost', Personality.name=='generic')).one() sess.autoflush=False for i in xrange(NUM_HOSTS): machine = m_factory.next() vm_host = Host(machine=machine, name='%s%s' % (HOST_NAME, i), dns_domain=dns_dmn, branch=br, personality=pers, status=stat, operating_system=os) add(sess, vm_host) sess.autoflush=True commit(sess) hosts = sess.query(Host).filter( Host.name.like(HOST_NAME+'%')).all() assert len(hosts) is NUM_HOSTS print 'created %s hosts'% len(hosts)
def update_doc_content_repository(repositories, temp_dir, token, no_push, args):
    """Regenerate the doc-content repository's vendored docs, commit them,
    bump its version and (unless no_push) push, open PRs and publish.

    Interactive: pauses with input() whenever a step fails so the operator
    can fix things by hand.
    """
    if clone_repo(consts.DOC_CONTENT_REPO, temp_dir) is False:
        input('Try to fix the problem then press ENTER to continue...')
    write_msg("Done!")
    repo_path = join(temp_dir, consts.DOC_CONTENT_REPO)
    write_msg("=> Generating documentation for crates...")
    for repo in repositories:
        current = None
        for crate in args.crates:
            # Shadowing on purpose: `crate` becomes the inner crate dict.
            crate = crate['crate']
            if crate['repository'] == repo:
                current = crate
                break
        if current is None:
            # BUG FIX: the "{}" placeholder was never filled in — the prompt
            # printed a literal "{}" instead of the repository name.
            input(
                'No repository matches "{}", something is weird. (Press ENTER TO CONTINUE)'
                .format(repo))
            continue
        # Crates can opt out of doc generation with "doc": false.
        if current.get("doc", True) is False:
            continue
        write_msg('==> Generating documentation for "{}"'.format(current))
        path = join(temp_dir, current['repository'])
        command = [
            'bash', '-c', 'cd {} && make doc && mv vendor.md {}'.format(
                path, join(repo_path, current['crate']))
        ]
        if not exec_command_and_print_error(command):
            input("Fix the error and then press ENTER")
    write_msg('Done!')
    write_msg('Committing "{}" changes...'.format(consts.DOC_CONTENT_REPO))
    commit(consts.DOC_CONTENT_REPO, temp_dir, "Update vendor files")
    if no_push is False:
        push(consts.DOC_CONTENT_REPO, temp_dir, consts.MASTER_TMP_BRANCH)
        # We always make minor releases in here, no need for a more important
        # one considering we don't change the API.
        if update_repo_version(consts.DOC_CONTENT_REPO,
                               consts.DOC_CONTENT_REPO, "", temp_dir,
                               UpdateType.MINOR, False) is False:
            write_error('The update for the "{}" crate failed...'.format(
                consts.DOC_CONTENT_REPO))
            input('Fix the error and then press ENTER')
        commit(consts.DOC_CONTENT_REPO, temp_dir, "Update version")
        if no_push is False:
            push(consts.DOC_CONTENT_REPO, temp_dir, consts.MASTER_TMP_BRANCH)
            create_pull_request(consts.DOC_CONTENT_REPO,
                                consts.MASTER_TMP_BRANCH, "master", token,
                                False)
            # BUG FIX: grammar in the operator prompts ("can performed" and
            # "All with") corrected.
            input((
                'All done with the "{}" update: please merge the PR then press ENTER so the '
                'publication can be performed...').format(
                    consts.DOC_CONTENT_REPO))
            publish_crate(consts.DOC_CONTENT_REPO, "", temp_dir,
                          consts.DOC_CONTENT_REPO, checkout_branch='master')
            write_msg('Ok all done! We can move forward now!')
        else:
            write_msg((
                'All done with "{}", you still need to publish a new version if you want the changes '
                'to be taken into account').format(consts.DOC_CONTENT_REPO))
def patch_and_filter(dest_dir, relative_patches_dir):
    """Strip gyp_environment lines from landmines.py, commit the filter,
    then apply the patches from relative_patches_dir to dest_dir."""
    os.chdir(dest_dir)
    keep_line = lambda line: "gyp_environment" not in line
    utils.filter_file("build/landmines.py", keep_line)
    utils.commit("filter gyp_environment out of build/landmines.py")
    patch(dest_dir, relative_patches_dir)
def test_whitespace_padding(): a = StringTbl(name=' some eXTRa space ') add(s, a) commit(s) p = StringTbl.__table__.select().execute().fetchall()[1] assert p['name'].startswith('s'), 'Whitespace not stripped in AqStr' print a
def test_whitespace_padding(): a=StringTbl(name=' some eXTRa space ') add(s, a) commit(s) p = StringTbl.__table__.select().execute().fetchall()[1] assert p['name'].startswith('s'), 'Whitespace not stripped in AqStr' print a
def test_del_machine():
    """Deleting the machine labelled NAME leaves no matching row behind."""
    target = sess.query(Machine).filter_by(label=NAME).first()
    if target:
        sess.delete(target)
        commit(sess)
        leftover = sess.query(Machine).filter_by(label=NAME).first()
        assert leftover is None
        log.info('deleted machine')
def test_valid_aqstr(): a = StringTbl(name='Hi there') add(s, a) commit(s) s.expunge_all() rec = s.query(StringTbl).first() print rec.name assert rec.name == 'hi there', 'String not lowercase in AqStr'
def del_metas(): mlist = sess.query(MetaCluster).all() if len(mlist) > 0: print '%s clusters before deleting metas' % (sess.query(Cluster).count()) for m in mlist: sess.delete(m) commit(sess) print 'deleted %s metaclusters' % (len(mlist)) print '%s clusters left after deleting metas' % (sess.query(Cluster).count())
def test_del_machine():
    """Deleting the machine labelled NAME leaves no matching row behind."""
    found = sess.query(Machine).filter_by(label=NAME).first()
    if found:
        sess.delete(found)
        commit(sess)
        remaining = sess.query(Machine).filter_by(label=NAME).first()
        assert remaining is None
        log.info("deleted machine")
def test_vm_append(): machines = sess.query(Machine).filter(Machine.label.like(VM_NAME+'%')).all() ec = EsxCluster.get_unique(sess, CLUSTER_NAME) assert ec ec.machines.append(machines[12]) commit(sess) assert len(ec.machines) is 11 print 'now, there are %s machines in the cluster: %s'% (len(ec.machines), ec.machines)
def del_machines(sess=sess, prefix=NAME):
    """ deletes all machines with names like prefix% """
    doomed = sess.query(Machine).filter(
        Machine.label.like(prefix + "%")).all()
    if not doomed:
        return
    log.debug("attempting to delete %s machines" % len(doomed))
    for machine in doomed:
        log.info("deleting %s" % machine)
        sess.delete(machine)
    commit(sess)
def clean_up(): del_machines(sess, MACHINE_NAME) disk_ids = sess.query(Disk.id).all() if disk_ids: for id in disk_ids: if id in disk_id_cache: print 'disk with id %s not deleted'% (id) s.query(Disk).filter_by(id=id).delete() commit(sess) del_service_instance(SHARE_NAME) del_service(NAS_SVC)
def del_machines(sess=sess, prefix=NAME):
    """ deletes all machines with names like prefix% """
    victims = sess.query(Machine).filter(
        Machine.label.like(prefix + '%')).all()
    if victims:
        log.debug('attempting to delete %s machines' % len(victims))
        for mchn in victims:
            log.info('deleting %s' % mchn)
            sess.delete(mchn)
        commit(sess)
def teardown():
    """Delete every machine with the test prefix, removing its interfaces
    first so no dangling rows remain."""
    doomed = sess.query(Machine).filter(
        Machine.label.like(MACHINE_NAME_PREFIX + '%')).all()
    for machine in doomed:
        if machine.interfaces:
            for iface in machine.interfaces:
                sess.delete(iface)
        sess.delete(machine)
    commit(sess)
    log.debug('deleted %s machines' % len(doomed))
def test_vm_append(): machines = sess.query(Machine).filter(Machine.label.like(VM_NAME + '%')).all() ec = EsxCluster.get_unique(sess, CLUSTER_NAME) assert ec ec.machines.append(machines[12]) commit(sess) assert len(ec.machines) is 11 print 'now, there are %s machines in the cluster: %s' % (len( ec.machines), ec.machines)
def test_fail_upd_bootable_iface_to_null_mac():
    """Setting a bootable interface's MAC to None must be rejected."""
    machine = sess.query(Machine).first()
    iface = Interface(hardware_entity=machine, name='eth1', mac=random_mac(),
                      bootable=True, interface_type='public')
    create(sess, iface)
    assert isinstance(iface, Interface), 'no iface created @ %s' % func_name()
    iface.mac = None
    # NOTE(review): this relies on commit() handling the expected failure
    # (rolling the session back so iface.mac is restored) — confirm
    # commit()'s error handling in this test harness.
    commit(sess)
    assert iface.mac is not None, 'able to set a bootable interface to null'
def test_fail_upd_mgmt_iface_to_null_mac():
    """Setting a management interface's MAC to None must be rejected."""
    machine = sess.query(Machine).first()
    iface = Interface(hardware_entity=machine, name='ipmi', mac=random_mac(),
                      bootable=True, interface_type='management')
    create(sess, iface)
    assert isinstance(iface, Interface), 'no iface created @ %s' % func_name()
    iface.mac = None
    # NOTE(review): this relies on commit() handling the expected failure
    # (rolling the session back so iface.mac is restored) — confirm
    # commit()'s error handling in this test harness.
    commit(sess)
    assert iface.mac is not None, 'set a management iface to null mac_addr'
def roll(target_version):
  """Roll the network service mojoms and version stamp to target_version.

  Downloads the mojoms.zip uploaded to Google Storage for that Chromium
  commit, replaces the checked-in interfaces, stamps the version file and
  commits.  Returns 0 on success, 1 on failure.
  """
  find_depot_tools_path = os.path.join(mojo_root_dir, "tools")
  sys.path.insert(0, find_depot_tools_path)
  # pylint: disable=F0401
  import find_depot_tools
  depot_tools_path = find_depot_tools.add_depot_tools_to_path()
  try:
    chromium_rev = chromium_rev_number(target_version)
  except urllib2.HTTPError:
    print ("Failed to identify a Chromium revision associated with %s. "
           "Ensure that target_version is a Chromium origin/master "
           "commit.") % (target_version)
    return 1
  mojoms_gs_path = "gs://mojo/network_service/%s/mojoms.zip" % (
      target_version,)
  network_service_path = os.path.join(
      mojo_root_dir, "mojo", "services", "network")
  mojoms_path = os.path.join(network_service_path, "public", "interfaces")
  mojo_public_tools_path = os.path.join(
      mojo_root_dir, "mojo", "public", "tools")
  version_path = os.path.join(mojo_public_tools_path,
                              "NETWORK_SERVICE_VERSION")
  try:
    with tempfile.NamedTemporaryFile() as temp_zip_file:
      gs.download_from_public_bucket(mojoms_gs_path, temp_zip_file.name,
                                     depot_tools_path)
      try:
        system(["git", "rm", "-r", mojoms_path], cwd=mojo_root_dir)
      except subprocess.CalledProcessError:
        print ("Could not remove %s. "
               "Ensure your local tree is in a clean state." % mojoms_path)
        return 1
      with zipfile.ZipFile(temp_zip_file.name) as z:
        z.extractall(mojoms_path)
  # pylint: disable=C0302,bare-except
  except:
    print ("Failed to download the mojom files associated with %s. Ensure that "
           "the corresponding network service artifacts were uploaded to "
           "Google Storage.") % (target_version)
    return 1
  # Stamp the version so consumers know which revision these mojoms match.
  with open(version_path, 'w') as stamp_file:
    stamp_file.write(target_version)
  system(["git", "add", "public"], cwd=network_service_path)
  system(["git", "add", "NETWORK_SERVICE_VERSION"],
         cwd=mojo_public_tools_path)
  commit("Roll the network service to https://crrev.com/" + chromium_rev,
         cwd=mojo_root_dir)
  return 0
def test_no_host_threshold():
    """ ensure down_hosts_threshold must exist """
    branch = Branch.get_unique(sess, 'ny-prod', compel=True)
    building = Building.get_unique(sess, name='np', compel=True)
    pers = sess.query(Personality).select_from(
        join(Archetype, Personality)).filter(
            and_(Archetype.name == 'windows',
                 Personality.name == 'generic')).one()
    cluster = EsxCluster(name=CLUSTER_NAME, location_constraint=building,
                         personality=pers, branch=branch)
    add(sess, cluster)
    commit(sess)
def test_append(): mc = MetaCluster.get_unique(sess, M3) assert mc, 'no metacluster in test_append' assert len(mc.members) is 0 print '%s before append test has members %s' % (mc, mc.members) cl5 = cl_factory.next() assert cl5 print cl5 mc.members.append(cl5) commit(sess) assert len(mc.members) is 1 print 'members now %s' % (mc.members)
def test_create_local_disk(): """ create a local disk """ print 'setup finished' machine = add_machine(sess, MACHINE_NAME, MODEL) assert machine, "Could not create machine in %s"% (inspect.stack()[1][3]) print machine disk = LocalDisk(machine=machine, capacity=180, controller_type='scsi') add(sess, disk) commit(sess) assert disk, 'no disk created by %s'% (inspect.stack()[1][3]) print disk disk_id_cache.append(disk.id)
def test_cascaded_delete2():
    """ deleting services deletes cluster aligned services """
    svc = sess.query(Service).filter_by(name=SVC_2).first()
    assert svc, "No throw away service in %s" % func_name()
    # add cas, delete the service, make sure the CAS disappears
    cas = ClusterAlignedService(cluster_type="esx", service=svc)
    create(sess, cas)
    assert cas, "No cluster aligned service in %s" % func_name()
    # FIX ME: put in tear_down()
    sess.delete(svc)
    commit(sess)
    # NOTE(review): refreshing deleted rows — presumably expected to fail
    # and thereby prove the cascade; confirm against the test harness.
    sess.refresh(cas)
    sess.refresh(svc)
def test_cascaded_delete2():
    """ deleting services deletes cluster aligned services """
    svc = sess.query(Service).filter_by(name=SVC_2).first()
    assert svc, "No throw away service in %s" % func_name()
    #add cas, delete the service, make sure the CAS disappears
    cas = ClusterAlignedService(cluster_type='esx', service=svc)
    create(sess, cas)
    assert cas, "No cluster aligned service in %s" % func_name()
    #FIX ME: put in tear_down()
    sess.delete(svc)
    commit(sess)
    # NOTE(review): refreshing deleted rows — presumably expected to fail
    # and thereby prove the cascade; confirm against the test harness.
    sess.refresh(cas)
    sess.refresh(svc)
def del_machines(): machines = sess.query(Machine).filter( Machine.label.like(MACHINE_NAME+'%')).all() if machines: for machine in machines: sess.delete(machine) commit(sess) print 'deleted %s esx machines' % (len(machines)) machines = sess.query(Machine).filter(Machine.label.like(VM_NAME+'%')).all() if machines: for vm in machines: sess.delete(vm) commit(sess) print 'deleted %s virtual machines' % (len(machines))
def rev(source_dir, chromium_dir, mojo_revision):
  """Clone the SDK dirs at mojo_revision from source_dir into chromium_dir,
  stamp the VERSION file, restore preserved Chromium-only files, and
  commit in chromium_dir."""
  src_commit = system(["git", "rev-parse", mojo_revision],
                      cwd=source_dir).strip()
  for input_dir, dest_dir in dirs_to_clone.iteritems():
    if os.path.exists(os.path.join(chromium_dir, dest_dir)):
      print "removing directory %s" % dest_dir
      system(["git", "rm", "-r", dest_dir], cwd=chromium_dir)
    print "cloning directory %s into %s" % (input_dir, dest_dir)
    files = system(["git", "ls-files", input_dir], cwd=source_dir)
    for f in files.splitlines():
      # Don't copy presubmit files over since the code is read-only on the
      # chromium side.
      if os.path.basename(f) == "PRESUBMIT.py":
        continue
      # Skip subtrees explicitly excluded from the clone.
      exclude = False
      for excluded in sdk_dirs_to_not_clone:
        if f.startswith(excluded):
          exclude = True
          break
      if exclude:
        continue
      # Clone |f| into Chromium under |dest_dir| at its location relative to
      # |input_dir|.
      f_relpath = os.path.relpath(f, input_dir)
      dest_path = os.path.join(chromium_dir, dest_dir, f_relpath)
      system(["mkdir", "-p", os.path.dirname(dest_path)])
      system(["cp", os.path.join(source_dir, f), dest_path])
    os.chdir(chromium_dir)
    system(["git", "add", dest_dir], cwd=chromium_dir)
  mojo_public_dest_dir = os.path.join(sdk_prefix_in_chromium, "mojo/public")
  version_filename = os.path.join(mojo_public_dest_dir, "VERSION")
  # Record which source revision this snapshot came from.
  with open(version_filename, "w") as version_file:
    version_file.write(src_commit)
  system(["git", "add", version_filename], cwd=chromium_dir)
  # Reset preserved files that were blown away.
  for rel_path in preserved_chromium_files:
    preserved_path = os.path.join(sdk_prefix_in_chromium, rel_path)
    system(["git", "reset", "--", preserved_path])
    system(["git", "checkout", preserved_path])
  commit("Update mojo sdk to rev " + src_commit, cwd=chromium_dir)
def rev(source_dir, chromium_dir, mojo_revision):
  """Clone the SDK dirs at mojo_revision from source_dir into chromium_dir,
  stamp the VERSION file, restore preserved Chromium-only files, and
  commit in chromium_dir."""
  src_commit = system(['git', 'rev-parse', mojo_revision],
                      cwd=source_dir).strip()
  for input_dir, dest_dir in dirs_to_clone.iteritems():
    if os.path.exists(os.path.join(chromium_dir, dest_dir)):
      print 'removing directory %s' % dest_dir
      system(['git', 'rm', '-r', dest_dir], cwd=chromium_dir)
    print 'cloning directory %s into %s' % (input_dir, dest_dir)
    files = system(['git', 'ls-files', input_dir], cwd=source_dir)
    for f in files.splitlines():
      # Don't copy presubmit files over since the code is read-only on the
      # chromium side.
      if os.path.basename(f) == 'PRESUBMIT.py':
        continue
      # Skip subtrees explicitly excluded from the clone.
      exclude = False
      for excluded in sdk_dirs_to_not_clone:
        if f.startswith(excluded):
          exclude = True
          break
      if exclude:
        continue
      # Clone |f| into Chromium under |dest_dir| at its location relative to
      # |input_dir|.
      f_relpath = os.path.relpath(f, input_dir)
      dest_path = os.path.join(chromium_dir, dest_dir, f_relpath)
      system(['mkdir', '-p', os.path.dirname(dest_path)])
      system(['cp', os.path.join(source_dir, f), dest_path])
    os.chdir(chromium_dir)
    system(['git', 'add', dest_dir], cwd=chromium_dir)
  mojo_public_dest_dir = os.path.join(sdk_prefix_in_chromium, 'mojo/public')
  version_filename = os.path.join(mojo_public_dest_dir, 'VERSION')
  # Record which source revision this snapshot came from.
  with open(version_filename, 'w') as version_file:
    version_file.write(src_commit)
  system(['git', 'add', version_filename], cwd=chromium_dir)
  # Reset preserved files that were blown away.
  for rel_path in preserved_chromium_files:
    preserved_path = os.path.join(sdk_prefix_in_chromium, rel_path)
    system(['git', 'reset', '--', preserved_path])
    system(['git', 'checkout', preserved_path])
  commit('Update mojo sdk to rev ' + src_commit, cwd=chromium_dir)