def mkrepo(i: Instance, rev: str, init: bool = True, force: bool = False) -> None:
    """Push `rev` from the local repository to a `scratch` branch on instance `i`.

    Optionally initializes a bare repository on the instance first (`init`),
    and optionally force-pushes (`force`). Afterwards the remote repository is
    un-bared and `rev` is checked out.
    """
    if init:
        mssh(i, "git init --bare materialize/.git")
    rev = git.rev_parse(rev)
    push_cmd: List[str] = ["git", "push", "--no-verify"]
    push_cmd.append(f"{instance_host(i)}:materialize/.git")
    # Explicit refspec is required if the host repository is in detached
    # HEAD mode.
    push_cmd.append(f"{rev}:refs/heads/scratch")
    if force:
        push_cmd.append("--force")
    git_env = dict(os.environ, GIT_SSH_COMMAND=" ".join(SSH_COMMAND))
    spawn.runv(push_cmd, cwd=ROOT, env=git_env)
    mssh(
        i,
        f"cd materialize && git config core.bare false && git checkout {rev}",
    )
def mkrepo(i: Instance, identity_file: str, rev: str) -> None:
    """Create a Materialize repository on the remote ec2 instance and push
    the present repository to it."""
    user = "ubuntu"
    host = i.public_ip_address
    ssh.runv(
        ["git", "init", "--bare", "/home/ubuntu/materialize/.git"],
        user,
        host,
        identity_file=identity_file,
    )
    # git.push runs from the repository root and needs the SSH identity.
    # NOTE(review): this mutates the process-wide cwd and environment —
    # presumably acceptable in this one-shot script; verify against callers.
    os.chdir(ROOT)
    os.environ["GIT_SSH_COMMAND"] = f"ssh -i {identity_file}"
    head_rev = git.rev_parse(rev)
    git.push(
        f"ubuntu@{host}:~/materialize/.git",
        f"refs/heads/scratch_{head_rev}",
    )
    # Un-bare the remote repository, then check out the pushed revision.
    for args in (
        ["git", "-C", "/home/ubuntu/materialize", "config", "core.bare", "false"],
        ["git", "-C", "/home/ubuntu/materialize", "checkout", head_rev],
    ):
        ssh.runv(args, user, host, identity_file=identity_file)
def stage_deb(repo: mzbuild.Repository, package: str, version: str) -> None:
    """Stage a Debian package on Bintray.

    Note that this function does not cause anything to become public; a
    step to publish the files will be run in the deploy job.
    """
    print(f"Staging deb {package} {version}")

    # Extract the materialized binary from the Docker image. This avoids
    # an expensive rebuild if we're using a cached image.
    ci_util.acquire_materialized(
        repo, repo.rd.xcargo_target_dir() / "release" / "materialized"
    )

    # Build the Debian package.
    deb_path = repo.rd.xcargo_target_dir() / "debian" / "materialized.deb"
    spawn.runv(
        [
            repo.rd.xcargo(),
            "deb",
            f"--variant={package}",
            "--no-build",
            "--no-strip",
            "--deb-version",
            version,
            "-p",
            "materialized",
            "-o",
            deb_path,
        ],
        cwd=repo.root,
    )
    deb_size = deb_path.stat().st_size

    bt = bintray.Client(
        "materialize", user="******", api_key=os.environ["BINTRAY_API_KEY"]
    )
    # Use a distinct name; the original shadowed the `package` parameter.
    bt_package = bt.repo("apt").package(package)
    try:
        print("Creating Bintray version...")
        commit_hash = git.rev_parse("HEAD")
        bt_package.create_version(version, desc="git main", vcs_tag=commit_hash)
    except bintray.VersionAlreadyExistsError:
        # Ignore for idempotency. Bintray won't allow us to overwite an existing
        # .deb below with a file whose checksum doesn't match, so this is safe.
        pass

    print(f"Uploading Debian package ({humanize.naturalsize(deb_size)})...")
    # Open the .deb inside a context manager so the file handle is always
    # closed; the original leaked it via a bare `open(...)` argument.
    with open(deb_path, "rb") as f:
        bt_package.debian_upload(
            version,
            path=f"/{version}/materialized-{commit_hash}.deb",
            data=f,
            distributions=["generic"],
            components=["main"],
            architectures=["amd64"],
        )
def deploy_tarball(platform: str, materialized: Path) -> None:
    """Pack the materialized binary into a gzipped tarball and upload it.

    Tagged builds upload under the Buildkite tag; untagged builds upload under
    the current commit SHA and update the "latest" redirect.
    """
    tar_path = Path("materialized.tar.gz")
    # "x:gz" creates the archive exclusively; it fails if the file exists.
    with tarfile.open(str(tar_path), "x:gz") as archive:
        archive.addfile(_tardir("materialized"))
        archive.addfile(_tardir("materialized/bin"))
        archive.add(
            str(materialized),
            arcname="materialized/bin/materialized",
            filter=_sanitize_tarinfo,
        )
        archive.addfile(_tardir("materialized/etc/materialized"))
    print(f"Tarball size: {humanize.naturalsize(os.lstat(tar_path).st_size)}")

    buildkite_tag = os.environ["BUILDKITE_TAG"]
    if buildkite_tag:
        upload_tarball(tar_path, platform, buildkite_tag)
    else:
        commit_sha = git.rev_parse("HEAD")
        upload_tarball(tar_path, platform, commit_sha)
        set_latest_redirect(platform, commit_sha)
def release(
    version: Version,
    checkout: Optional[str],
    create_branch: Optional[str],
    tag: bool,
    affect_remote: bool,
) -> None:
    """Update documents for a release and create tags

    If both `-b` and `-c` are specified, the checkout happens before the branch
    creation, meaning that the new branch is created on the target of `-c`.

    For example make release::

        mkrelease -b prepare-v0.1.2 -c v0.1.1-rc1 v0.1.2-dev

    Has the same git semantics as::

        git checkout -b prepare-v0.1.2 v0.1.1-rc1

    \b
    Arguments:
        version: The version to release. The `v` prefix is optional
    """
    if git.is_dirty():
        raise UIError("working directory is not clean, stash or commit your changes")

    the_tag = f"v{version}"
    confirm_version_is_next(version, affect_remote)
    if checkout is not None:
        git.checkout(checkout)
    if create_branch is not None:
        git.create_branch(create_branch)

    confirm_on_latest_rc(affect_remote)

    change_line(BIN_CARGO_TOML, "version", f'version = "{version}"')
    change_line(
        LICENSE,
        "Licensed Work:",
        f"Licensed Work: Materialize Version {version}",
    )
    # Don't update the change date unless some code has changed
    if version.prerelease:
        future = four_years_hence()
        change_line(LICENSE, "Change Date", f"Change Date: {future}")

    ui.say("Updating Cargo.lock")
    # Regenerate Cargo.lock for the new version, then verify it with
    # --locked. (The original invoked the first check twice back-to-back;
    # the redundant duplicate has been removed.)
    spawn.runv(["cargo", "check", "-p", "materialized"])
    spawn.runv(["cargo", "check", "-p", "materialized", "--locked"])

    if tag:
        git.commit_all_changed(f"release: {the_tag}")
        git.tag_annotated(the_tag)
    else:
        git.commit_all_changed(f"Prepare next phase of development: {the_tag}")
        latest_tag = get_latest_tag(fetch=False)
        # we have made an actual release
        if latest_tag.prerelease is None and click.confirm(
            f"Update doc/user/config.toml marking v{latest_tag} as released"
        ):
            update_versions_list(latest_tag)
            git.commit_all_changed(
                f"Update released versions to include v{latest_tag}"
            )

    matching = git.first_remote_matching("MaterializeInc/materialize")
    if tag:
        if matching is not None:
            # Show the release commit before asking to push it.
            spawn.runv(["git", "show", "HEAD"])
            if affect_remote and ui.confirm(
                f"\nWould you like to push the above changes as: git push {matching} {the_tag}"
            ):
                spawn.runv(["git", "push", matching, the_tag])
        else:
            ui.say("")
            ui.say(
                f"Next step is to push {the_tag} to the MaterializeInc/materialize repo"
            )
    else:
        branch = git.rev_parse("HEAD", abbrev=True)
        ui.say("")
        ui.say(f"Create a PR with your branch: '{branch}'")
def start(ns: argparse.Namespace) -> None:
    """Launch one benchmark cluster per (trial, revision) pair.

    Splits the comma-separated revisions in `ns.revs`, ships the local `misc`
    Python package to each remote machine via a base64-embedded launch script,
    records a manifest in S3, and launches the clusters.
    """
    check_required_vars()

    revs = ns.revs.split(",")

    # One cluster per (trial index, resolved git rev) combination.
    clusters = list(
        itertools.product(range(ns.trials), (git.rev_parse(rev) for rev in revs))
    )

    bench_script = ns.bench_script
    script_name = bench_script[0]
    script_args = " ".join((shlex.quote(arg) for arg in bench_script[1:]))

    # zip up the `misc` repository, for shipment to the remote machine
    os.chdir("misc/python")
    spawn.runv(["python3", "./setup.py", "sdist"])

    with open("./dist/materialize-0.0.0.tar.gz", "rb") as f:
        pkg_data = f.read()

    os.chdir(os.environ["MZ_ROOT"])

    if ns.append_metadata:
        # awk program that appends benchmark metadata columns (timestamp,
        # bench/cluster IDs, git rev, S3 root) to each CSV row; the header row
        # (NR == 1) gets the column names instead. The shell variables are
        # expanded on the remote machine, not here.
        munge_result = 'awk \'{ if (NR == 1) { print $0 ",Timestamp,BenchId,ClusterId,GitRef,S3Root" } else { print $0 ",\'$(date +%s)",$MZ_CB_BENCH_ID,$MZ_CB_CLUSTER_ID,$MZ_CB_GIT_REV,$MZ_CB_S3_ROOT"\'"}}\''
    else:
        # No metadata requested: pass results through unchanged.
        munge_result = "cat"

    # Remote launch script: unpack the misc package, install it into a venv,
    # run the benchmark, upload results (or failure logs) to S3, then shut
    # the instance down.
    mz_launch_script = f"""echo {shlex.quote(base64.b64encode(pkg_data).decode('utf-8'))} | base64 -d > mz.tar.gz
python3 -m venv /tmp/mzenv >&2
. /tmp/mzenv/bin/activate >&2
python3 -m pip install --upgrade pip >&2
pip3 install ./mz.tar.gz[dev] >&2
MZ_ROOT=/home/ubuntu/materialize python3 -u -m {script_name} {script_args}
result=$?
echo $result > ~/bench_exit_code
if [ $result -eq 0 ]; then
    {munge_result} < ~/mzscratch-startup.out | aws s3 cp - s3://{ns.s3_root}/$MZ_CB_BENCH_ID/$MZ_CB_CLUSTER_ID.csv >&2
else
    aws s3 cp - s3://{ns.s3_root}/$MZ_CB_BENCH_ID/$MZ_CB_CLUSTER_ID-FAILURE.out < ~/mzscratch-startup.out >&2
    aws s3 cp - s3://{ns.s3_root}/$MZ_CB_BENCH_ID/$MZ_CB_CLUSTER_ID-FAILURE.err < ~/mzscratch-startup.err
fi
sudo shutdown -h now # save some money
"""

    if ns.profile == "basic":
        # Single machine running materialized and the benchmark.
        descs = [
            scratch.MachineDesc(
                name="materialized",
                launch_script=mz_launch_script,
                instance_type="r5a.4xlarge",
                ami="ami-0b29b6e62f2343b46",
                tags={},
                size_gb=64,
            ),
        ]
    elif ns.profile == "confluent":
        # Second machine runs the Confluent stack via mzcompose.
        confluent_launch_script = f"""bin/mzcompose --mz-find load-tests up"""
        descs = [
            scratch.MachineDesc(
                name="materialized",
                launch_script=mz_launch_script,
                instance_type="r5a.4xlarge",
                ami="ami-0b29b6e62f2343b46",
                tags={},
                size_gb=64,
            ),
            scratch.MachineDesc(
                name="confluent",
                launch_script=confluent_launch_script,
                instance_type="r5a.4xlarge",
                ami="ami-0b29b6e62f2343b46",
                tags={},
                size_gb=1000,
                checkout=False,
            ),
        ]
    else:
        raise RuntimeError(f"Profile {ns.profile} is not implemented yet")

    bench_id = util.nonce(8)

    # Record which (trial, rev) clusters belong to this bench run.
    manifest_bytes = "".join(f"{i}-{rev}\n" for i, rev in clusters).encode("utf-8")
    boto3.client("s3").put_object(
        Body=manifest_bytes, Bucket="mz-cloudbench", Key=f"{bench_id}/MANIFEST"
    )

    # TODO - Do these in parallel
    launched = []
    for (i, rev) in clusters:
        launched += scratch.launch_cluster(
            descs=descs,
            nonce=f"{bench_id}-{i}-{rev}",
            subnet_id=DEFAULT_SUBNET_ID,
            security_group_id=DEFAULT_SG_ID,
            instance_profile=DEFAULT_INSTPROF_NAME,
            key_name=None,
            extra_tags={
                "bench_id": bench_id,
                "bench_rev": rev,
                "bench_i": str(i),
                "LaunchedBy": scratch.whoami(),
            },
            extra_env={
                "MZ_CB_BENCH_ID": bench_id,
                "MZ_CB_CLUSTER_ID": f"{i}-{rev}",
                "MZ_CB_GIT_REV": rev,
                "MZ_CB_S3_ROOT": ns.s3_root,
            },
            delete_after=scratch.now_plus(timedelta(days=1)),
            git_rev=rev,
        )

    print("Launched instances:")
    print_instances(launched, format="table")  # todo

    print(
        f"""Launched cloud bench with ID {bench_id}.
To wait for results, run: bin/cloudbench check {bench_id}"""
    )
def unstable_version(workspace: cargo.Workspace) -> str:
    """Computes the version to use for the materialized-unstable package.

    The version is the crate version suffixed with the commit count and
    commit hash of HEAD.
    """
    base_version = workspace.crates["materialized"].version
    count = git.rev_count("HEAD")
    sha = git.rev_parse("HEAD")
    return f"{base_version}-{count}-{sha}"
def main(version: str, checkout: Optional[str], create_branch: str, tag: bool) -> None:
    """Update documents for a release and create tags

    If both `-b` and `-c` are specified, the checkout happens before the branch
    creation, meaning that the new branch is created on the target of `-c`.

    For example make release::

        mkrelease -b prepare-v0.1.2 -c v0.1.1-rc1 v0.1.2-dev

    Has the same git semantics as::

        git checkout -b prepare-v0.1.2 v0.1.1-rc1

    \b
    Arguments:
        version: The version to release. The `v` prefix is optional
    """
    # NOTE: the original had an unreachable `sys.exit(1)` after this raise and
    # repeated the entire dirty-worktree check a second time below; both bits
    # of dead/duplicate code have been removed.
    if git.is_dirty():
        raise errors.MzConfigurationError(
            "working directory is not clean, stash or commit your changes"
        )

    version = version.lstrip("v")
    the_tag = f"v{version}"
    confirm_version_is_next(version)

    if checkout is not None:
        git.checkout(checkout)
    if create_branch is not None:
        git.create_branch(create_branch)

    change_line(BIN_CARGO_TOML, "version", f'version = "{version}"')
    change_line(
        LICENSE,
        "Licensed Work:",
        f"Licensed Work: Materialize Version {version}",
    )
    # Don't update the change date unless some code has changed
    if "-rc" in version or "-dev" in version:
        future = four_years_hence()
        change_line(LICENSE, "Change Date", f"Change Date: {future}")

    say("Updating Cargo.lock")
    spawn.runv(["cargo", "check", "-p", "materialized"])

    if tag:
        git.commit_all_changed(f"release: {the_tag}")
        git.tag_annotated(the_tag)
    else:
        git.commit_all_changed(f"Prepare next phase of development: {the_tag}")

    matching = git.first_remote_matching("MaterializeInc/materialize")
    if tag:
        if matching is not None:
            if ui.confirm(f"\nWould you like me to run: git push {matching} {the_tag}"):
                spawn.runv(["git", "push", matching, the_tag])
        else:
            say("")
            say(f"Next step is to push {the_tag} to the MaterializeInc/materialize repo")
    else:
        branch = git.rev_parse("HEAD", abbrev=True)
        say("")
        say(f"Create a PR with your branch: '{branch}'")
def stage_deb(repo: mzbuild.Repository, package: str, version: str) -> None:
    """Stage a Debian package on Bintray.

    Note that this function does not cause anything to become public; a
    step to publish the files will be run in the deploy job.
    """
    print(f"Staging deb {package} {version}")

    # Extract the materialized binary from the Docker image. This avoids
    # an expensive rebuild if we're using a cached image.
    ci_util.acquire_materialized(
        repo, repo.rd.xcargo_target_dir() / "release" / "materialized"
    )

    # Build the Debian package.
    deb_path = repo.rd.xcargo_target_dir() / "debian" / "materialized.deb"
    spawn.runv(
        [
            repo.rd.xcargo(),
            "deb",
            f"--variant={package}",
            "--no-build",
            "--no-strip",
            "--deb-version",
            version,
            "-p",
            "materialized",
            "-o",
            deb_path,
        ],
        cwd=repo.root,
    )
    deb_size = deb_path.stat().st_size

    bt = bintray.Client(
        "materialize", user="******", api_key=os.environ["BINTRAY_API_KEY"]
    )
    # Use a distinct name; the original shadowed the `package` parameter.
    bt_package = bt.repo("apt").package(package)
    try:
        print("Creating Bintray version...")
        commit_hash = git.rev_parse("HEAD")
        bt_package.create_version(version, desc="git main", vcs_tag=commit_hash)
    except bintray.VersionAlreadyExistsError:
        # Ignore for idempotency.
        pass

    try:
        print(f"Uploading Debian package ({humanize.naturalsize(deb_size)})...")
        # Open the .deb inside a context manager so the file handle is always
        # closed; the original leaked it via a bare `open(...)` argument.
        with open(deb_path, "rb") as f:
            bt_package.debian_upload(
                version,
                path=f"/{version}/materialized-{commit_hash}.deb",
                data=f,
                distributions=["generic"],
                components=["main"],
                architectures=["amd64"],
            )
    except bintray.DebAlreadyExistsError:
        # Ideally `cargo deb` would produce identical output for identical input
        # to give us idempotency for free, since Bintray won't produce a
        # DebAlreadyExistsError if you upload the identical .deb file twice. But
        # it doesn't, so instead we just assume the .deb that's already uploaded
        # is functionally equivalent to the one we just built.
        print(
            "Debian package already exists; assuming it is valid and skipping upload"
        )