Example #1
def setup_virtualenv(
    target_venv_path: Optional[str],
    requirements_file: str,
    patch_activate_script: bool = False,
) -> str:

    sha1sum = generate_hash(requirements_file)
    # Check if a cached version already exists
    if target_venv_path is None:
        cached_venv_path = os.path.join(VENV_CACHE_PATH, sha1sum, "venv")
    else:
        cached_venv_path = os.path.join(VENV_CACHE_PATH, sha1sum,
                                        os.path.basename(target_venv_path))
    success_stamp = os.path.join(cached_venv_path, "success-stamp")
    if not os.path.exists(success_stamp):
        do_setup_virtualenv(cached_venv_path, requirements_file)
        with open(success_stamp, "w"):
            pass  # just create the empty stamp file

    print("Using cached Python venv from {}".format(cached_venv_path))
    if target_venv_path is not None:
        run_as_root(["ln", "-nsf", cached_venv_path, target_venv_path])
        if patch_activate_script:
            do_patch_activate_script(target_venv_path)
    return cached_venv_path
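run_as_root is used throughout these examples but never shown. A minimal sketch of such a helper, assuming it simply re-runs the command under sudo (with any extra sudo flags) when the current user is not root; the real Zulip helper presumably does more (logging, error reporting):

import os
import subprocess
from typing import List, Optional

def run_as_root(args: List[str], sudo_args: Optional[List[str]] = None) -> None:
    # Sketch only: prepend sudo when not already running as root.
    if os.getuid() != 0:
        args = ["sudo", *(sudo_args or []), "--", *args]
    subprocess.check_call(args)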
Example #2
def setup_virtualenv(target_venv_path,
                     requirements_file,
                     virtualenv_args=None,
                     patch_activate_script=False):
    # type: (Optional[str], str, Optional[List[str]], bool) -> str

    # Check if a cached version already exists
    path = os.path.join(ZULIP_PATH, 'scripts', 'lib', 'hash_reqs.py')
    output = subprocess.check_output([path, requirements_file],
                                     universal_newlines=True)
    sha1sum = output.split()[0]
    if target_venv_path is None:
        cached_venv_path = os.path.join(VENV_CACHE_PATH, sha1sum, 'venv')
    else:
        cached_venv_path = os.path.join(VENV_CACHE_PATH, sha1sum,
                                        os.path.basename(target_venv_path))
    success_stamp = os.path.join(cached_venv_path, "success-stamp")
    if not os.path.exists(success_stamp):
        do_setup_virtualenv(cached_venv_path, requirements_file,
                            virtualenv_args or [])
        open(success_stamp, 'w').close()

    print("Using cached Python venv from %s" % (cached_venv_path, ))
    if target_venv_path is not None:
        run_as_root(["ln", "-nsf", cached_venv_path, target_venv_path])
        if patch_activate_script:
            do_patch_activate_script(target_venv_path)
    return cached_venv_path
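This variant shells out to scripts/lib/hash_reqs.py and keeps the first whitespace-separated token of its output as the SHA-1 cache key. A simplified stand-in that hashes only the raw bytes of the top-level file (the real script also expands nested `-r` includes before hashing):

import hashlib

def hash_requirements(requirements_file: str) -> str:
    # Simplified sketch: ignores `-r other-file.txt` includes.
    with open(requirements_file, "rb") as f:
        return hashlib.sha1(f.read()).hexdigest()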
Example #3
def do_setup_virtualenv(venv_path, requirements_file, virtualenv_args):
    # type: (str, str, List[str]) -> None

    # Set up Python virtualenv
    new_packages = set(get_package_names(requirements_file))

    run_as_root(["rm", "-rf", venv_path])
    if not try_to_copy_venv(venv_path, new_packages):
        # Create new virtualenv.
        run_as_root(["mkdir", "-p", venv_path])
        run_as_root(["virtualenv"] + virtualenv_args + [venv_path])
        run_as_root([
            "chown", "-R", "{}:{}".format(os.getuid(), os.getgid()), venv_path
        ])
        create_log_entry(get_logfile_name(venv_path), "", set(), new_packages)

    create_requirements_index_file(venv_path, requirements_file)

    pip = os.path.join(venv_path, "bin", "pip")

    # use custom certificate if needed
    if os.environ.get('CUSTOM_CA_CERTIFICATES'):
        print("Configuring pip to use custom CA certificates...")
        add_cert_to_pipconf()

    try:
        install_venv_deps(pip, requirements_file)
    except subprocess.CalledProcessError:
        # Might be a failure due to network connection issues. Retrying...
        print(WARNING + "`pip install` failed; retrying..." + ENDC)
        install_venv_deps(pip, requirements_file)

    run_as_root(["chmod", "-R", "a+rX", venv_path])
Example #4
def do_setup_virtualenv(venv_path: str, requirements_file: str) -> None:

    # Set up Python virtualenv
    new_packages = set(get_package_names(requirements_file))

    run_as_root(["rm", "-rf", venv_path])
    if not try_to_copy_venv(venv_path, new_packages):
        # Create new virtualenv.
        run_as_root(["mkdir", "-p", venv_path])
        run_as_root(
            ["virtualenv", "-p", "python3", "--no-download", venv_path])
        run_as_root(["chown", "-R", f"{os.getuid()}:{os.getgid()}", venv_path])
        create_log_entry(get_logfile_name(venv_path), "", set(), new_packages)

    create_requirements_index_file(venv_path, requirements_file)

    pip = os.path.join(venv_path, "bin", "pip")

    # use custom certificate if needed
    if os.environ.get("CUSTOM_CA_CERTIFICATES"):
        print("Configuring pip to use custom CA certificates...")
        add_cert_to_pipconf()

    try:
        install_venv_deps(pip, requirements_file)
    except subprocess.CalledProcessError:
        try:
            # Might be a failure due to network connection issues. Retrying...
            print(WARNING + "`pip install` failed; retrying..." + ENDC)
            install_venv_deps(pip, requirements_file)
        except BaseException as e:
            # Suppress exception chaining
            raise e from None

    run_as_root(["chmod", "-R", "a+rX", venv_path])
Example #5
def do_setup_virtualenv(venv_path, requirements_file, virtualenv_args):
    # type: (str, str, List[str]) -> None

    # Set up Python virtualenv
    new_packages = set(get_package_names(requirements_file))

    run_as_root(["rm", "-rf", venv_path])
    if not try_to_copy_venv(venv_path, new_packages):
        # Create new virtualenv.
        run_as_root(["mkdir", "-p", venv_path])
        run_as_root(["virtualenv"] + virtualenv_args + [venv_path])
        run_as_root(["chown", "-R",
                     "{}:{}".format(os.getuid(), os.getgid()), venv_path])
        create_log_entry(get_logfile_name(venv_path), "", set(), new_packages)

    create_requirements_index_file(venv_path, requirements_file)

    pip = os.path.join(venv_path, "bin", "pip")

    # use custom certificate if needed
    if os.environ.get('CUSTOM_CA_CERTIFICATES'):
        print("Configuring pip to use custom CA certificates...")
        add_cert_to_pipconf()

    # CentOS-specific hack/workaround
    # Install pycurl with custom flag due to this error when installing
    # via pip:
    # __main__.ConfigurationError: Curl is configured to use SSL, but
    # we have not been able to determine which SSL backend it is using.
    # Please see PycURL documentation for how to specify the SSL
    # backend manually.
    # See https://github.com/pycurl/pycurl/issues/526
    # The fix exists on pycurl master, but not yet in any release
    # We can likely remove this when pycurl > 7.43.0.2 comes out.
    if os.path.exists("/etc/redhat-release"):
        pycurl_env = os.environ.copy()
        pycurl_env["PYCURL_SSL_LIBRARY"] = "nss"
        run([pip, "install", "pycurl==7.43.0.2", "--compile", "--no-cache-dir"],
            env=pycurl_env)

    try:
        install_venv_deps(pip, requirements_file)
    except subprocess.CalledProcessError:
        # Might be a failure due to network connection issues. Retrying...
        print(WARNING + "`pip install` failed; retrying..." + ENDC)
        install_venv_deps(pip, requirements_file)

    # The typing module has been included in stdlib since 3.5.
    # Installing a pypi version of it has been harmless until a bug
    # "AttributeError: type object 'Callable' has no attribute
    # '_abc_registry'" happens in 3.7. And so just to be safe, it is
    # disabled from now on for all >= 3.5 versions.
    # Remove this once 3.4 is no longer supported.
    at_least_35 = (sys.version_info.major == 3) and (sys.version_info.minor >= 5)
    if at_least_35 and ('python2.7' not in virtualenv_args):
        run([pip, "uninstall", "-y", "typing"])

    run_as_root(["chmod", "-R", "a+rX", venv_path])
Example #7
def install_apt_deps(deps_to_install):
    # type: (List[str]) -> None
    # setup-apt-repo does an `apt-get update`
    run_as_root(["./scripts/lib/setup-apt-repo"])
    run_as_root(
        [
            "env", "DEBIAN_FRONTEND=noninteractive",
            "apt-get", "-y", "install", "--no-install-recommends",
        ]
        + deps_to_install
    )

def do_setup_virtualenv(venv_path, requirements_file, virtualenv_args):
    # type: (str, str, List[str]) -> None

    # Set up Python virtualenv
    new_packages = set(get_package_names(requirements_file))

    run_as_root(["rm", "-rf", venv_path])
    if not try_to_copy_venv(venv_path, new_packages):
        # Create new virtualenv.
        run_as_root(["mkdir", "-p", venv_path])
        run_as_root(["virtualenv"] + virtualenv_args + [venv_path])
        run_as_root([
            "chown", "-R", "{}:{}".format(os.getuid(), os.getgid()), venv_path
        ])
        create_log_entry(get_logfile_name(venv_path), "", set(), new_packages)

    create_requirements_index_file(venv_path, requirements_file)

    pip = os.path.join(venv_path, "bin", "pip")

    # use custom certificate if needed
    if os.environ.get('CUSTOM_CA_CERTIFICATES'):
        print("Configuring pip to use custom CA certificates...")
        add_cert_to_pipconf()

    # CentOS-specific hack/workaround
    # Install pycurl with custom flag due to this error when installing
    # via pip:
    # __main__.ConfigurationError: Curl is configured to use SSL, but
    # we have not been able to determine which SSL backend it is using.
    # Please see PycURL documentation for how to specify the SSL
    # backend manually.
    # See https://github.com/pycurl/pycurl/issues/526
    # The fix exists on pycurl master, but not yet in any release
    # We can likely remove this when pycurl > 7.43.0.2 comes out.
    if os.path.exists("/etc/redhat-release"):
        pycurl_env = os.environ.copy()
        pycurl_env["PYCURL_SSL_LIBRARY"] = "nss"
        run([
            pip, "install", "pycurl==7.43.0.2", "--compile", "--no-cache-dir"
        ],
            env=pycurl_env)

    try:
        install_venv_deps(pip, requirements_file)
    except subprocess.CalledProcessError:
        # Might be a failure due to network connection issues. Retrying...
        print(WARNING + "`pip install` failed; retrying..." + ENDC)
        install_venv_deps(pip, requirements_file)

    run_as_root(["chmod", "-R", "a+rX", venv_path])
Example #9
def install_system_deps() -> None:

    # By doing list -> set -> list conversion, we remove duplicates.
    deps_to_install = sorted(set(SYSTEM_DEPENDENCIES))

    if "fedora" in os_families():
        install_yum_deps(deps_to_install)
    elif "debian" in os_families():
        install_apt_deps(deps_to_install)
    else:
        raise AssertionError("Invalid vendor")

    # For some platforms, there aren't published PGroonga
    # packages available, so we build them from source.
    if BUILD_PGROONGA_FROM_SOURCE:
        run_as_root(["./scripts/lib/build-pgroonga"])
Example #10
def install_system_deps(retry=False):
    # type: (bool) -> None

    # By doing list -> set -> list conversion, we remove duplicates.
    deps_to_install = list(set(SYSTEM_DEPENDENCIES))

    if family == 'redhat':
        install_yum_deps(deps_to_install, retry=retry)
    elif vendor in ["Debian", "Ubuntu"]:
        install_apt_deps(deps_to_install, retry=retry)
    else:
        raise AssertionError("Invalid vendor")

    # For some platforms, there aren't published pgroonga or
    # tsearch-extra packages available, so we build them from source.
    if BUILD_PGROONGA_FROM_SOURCE:
        run_as_root(["./scripts/lib/build-pgroonga"])
    if BUILD_TSEARCH_FROM_SOURCE:
        run_as_root(["./scripts/lib/build-tsearch-extras"])
Example #11
def setup_virtualenv(target_venv_path, requirements_file, virtualenv_args=None, patch_activate_script=False):
    # type: (Optional[str], str, Optional[List[str]], bool) -> str

    # Check if a cached version already exists
    path = os.path.join(ZULIP_PATH, 'scripts', 'lib', 'hash_reqs.py')
    output = subprocess.check_output([path, requirements_file], universal_newlines=True)
    sha1sum = output.split()[0]
    if target_venv_path is None:
        cached_venv_path = os.path.join(VENV_CACHE_PATH, sha1sum, 'venv')
    else:
        cached_venv_path = os.path.join(VENV_CACHE_PATH, sha1sum, os.path.basename(target_venv_path))
    success_stamp = os.path.join(cached_venv_path, "success-stamp")
    if not os.path.exists(success_stamp):
        do_setup_virtualenv(cached_venv_path, requirements_file, virtualenv_args or [])
        open(success_stamp, 'w').close()

    print("Using cached Python venv from %s" % (cached_venv_path,))
    if target_venv_path is not None:
        run_as_root(["ln", "-nsf", cached_venv_path, target_venv_path])
        if patch_activate_script:
            do_patch_activate_script(target_venv_path)
    return cached_venv_path
Example #12
def install_apt_deps(deps_to_install, retry=False):
    # type: (List[str], bool) -> None
    if retry:
        print(WARNING + "`apt-get -y install` failed while installing dependencies; retrying..." + ENDC)
        # Since a common failure mode is for the caching in
        # `setup-apt-repo` to optimize the fast code path to skip
        # running `apt-get update` when the target apt repository
        # is out of date, we run it explicitly here so that we
        # recover automatically.
        run_as_root(['apt-get', 'update'])

    # setup-apt-repo does an `apt-get update`
    run_as_root(["./scripts/lib/setup-apt-repo"])
    run_as_root(["apt-get", "-y", "install", "--no-install-recommends"] + deps_to_install)
Example #13
def install_apt_deps(deps_to_install: List[str]) -> None:
    # setup-apt-repo does an `apt-get update` if the sources.list files changed.
    run_as_root(["./scripts/lib/setup-apt-repo"])

    # But we still need to do our own to make sure we have up-to-date
    # data before installing new packages, as the system might not have
    # done an apt update in weeks otherwise, which could result in 404s
    # trying to download old versions that were already removed from mirrors.
    run_as_root(["apt-get", "update"])
    run_as_root(
        [
            "env", "DEBIAN_FRONTEND=noninteractive",
            "apt-get", "-y", "install", "--no-install-recommends", *deps_to_install,
        ]
    )
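Why the `env` wrapper sits inside the command rather than before it: run_as_root is assumed to prepend sudo, and sudo scrubs most of the caller's environment, so exporting DEBIAN_FRONTEND in the parent process would not reliably reach apt-get. Running env(1) as the privileged command re-introduces the variable after sudo's reset:

# Shell equivalent of the run_as_root call above:
#   sudo env DEBIAN_FRONTEND=noninteractive \
#       apt-get -y install --no-install-recommends <packages...>
# By contrast, `DEBIAN_FRONTEND=noninteractive sudo apt-get ...` would
# usually lose the variable to sudo's env_reset policy.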
Example #14
def main(options: argparse.Namespace) -> int:
    setup_bash_profile()
    setup_shell_profile("~/.zprofile")

    # This needs to happen before anything that imports zproject.settings.
    run(["scripts/setup/generate_secrets.py", "--development"])

    create_var_directories()

    # The `build_emoji` script requires `emoji-datasource` package
    # which we install via npm; thus this step is after installing npm
    # packages.
    run(["tools/setup/emoji/build_emoji"])

    # copy over static files from the zulip_bots package
    generate_zulip_bots_static_files()

    if options.is_force or need_to_run_build_pygments_data():
        run(["tools/setup/build_pygments_data"])
        write_new_digest(
            "build_pygments_data_hash",
            build_pygments_data_paths(),
            [pygments_version],
        )
    else:
        print("No need to run `tools/setup/build_pygments_data`.")

    if options.is_force or need_to_run_build_timezone_data():
        run(["tools/setup/build_timezone_values"])
        write_new_digest(
            "build_timezones_data_hash",
            build_timezones_data_paths(),
            [timezones_version],
        )
    else:
        print("No need to run `tools/setup/build_timezone_values`.")

    if options.is_force or need_to_run_inline_email_css():
        run(["scripts/setup/inline_email_css.py"])
        write_new_digest(
            "last_email_source_files_hash",
            inline_email_css_paths(),
        )
    else:
        print("No need to run `scripts/setup/inline_email_css.py`.")

    if not options.is_build_release_tarball_only:
        # The following block is skipped when we just need the development
        # environment to build a release tarball.

        # Need to set up Django before using template_status
        os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zproject.settings")
        import django

        django.setup()

        from django.conf import settings

        from zerver.lib.test_fixtures import (
            DEV_DATABASE,
            TEST_DATABASE,
            destroy_leaked_test_databases,
        )

        if options.is_force or need_to_run_configure_rabbitmq(
            [settings.RABBITMQ_PASSWORD]):
            run_as_root(["scripts/setup/configure-rabbitmq"])
            write_new_digest(
                "last_configure_rabbitmq_hash",
                configure_rabbitmq_paths(),
                [settings.RABBITMQ_PASSWORD],
            )
        else:
            print("No need to run `scripts/setup/configure-rabbitmq.")

        dev_template_db_status = DEV_DATABASE.template_status()
        if options.is_force or dev_template_db_status == "needs_rebuild":
            run(["tools/setup/postgresql-init-dev-db"])
            if options.skip_dev_db_build:
                # We don't need to build the manual development
                # database on continuous integration for running tests, so we can
                # just leave it as a template db and save a minute.
                #
                # Important: We don't write a digest as that would
                # incorrectly claim that we ran migrations.
                pass
            else:
                run(["tools/rebuild-dev-database"])
                DEV_DATABASE.write_new_db_digest()
        elif dev_template_db_status == "run_migrations":
            DEV_DATABASE.run_db_migrations()
        elif dev_template_db_status == "current":
            print("No need to regenerate the dev DB.")

        test_template_db_status = TEST_DATABASE.template_status()
        if options.is_force or test_template_db_status == "needs_rebuild":
            run(["tools/setup/postgresql-init-test-db"])
            run(["tools/rebuild-test-database"])
            TEST_DATABASE.write_new_db_digest()
        elif test_template_db_status == "run_migrations":
            TEST_DATABASE.run_db_migrations()
        elif test_template_db_status == "current":
            print("No need to regenerate the test DB.")

        if options.is_force or need_to_run_compilemessages():
            run(["./manage.py", "compilemessages"])
            write_new_digest(
                "last_compilemessages_hash",
                compilemessages_paths(),
            )
        else:
            print("No need to run `manage.py compilemessages`.")

        destroyed = destroy_leaked_test_databases()
        if destroyed:
            print(f"Dropped {destroyed} stale test databases!")

    clean_unused_caches.main(
        argparse.Namespace(
            threshold_days=6,
            # The defaults here should match parse_cache_script_args in zulip_tools.py
            dry_run=False,
            verbose=False,
            no_headings=True,
        ))

    # Keeping this cache file around can cause eslint to throw
    # random TypeErrors when new/updated dependencies are added
    if os.path.isfile(".eslintcache"):
        # Remove this block when
        # https://github.com/eslint/eslint/issues/11639 is fixed
        # upstream.
        os.remove(".eslintcache")

    # Clean up the root of the `var/` directory for various
    # testing-related files that we have migrated to
    # `var/<uuid>/test-backend`.
    print("Cleaning var/ directory files...")
    var_paths = glob.glob("var/test*")
    var_paths.append("var/bot_avatar")
    for path in var_paths:
        try:
            if os.path.isdir(path):
                shutil.rmtree(path)
            else:
                os.remove(path)
        except FileNotFoundError:
            pass

    version_file = os.path.join(UUID_VAR_PATH, "provision_version")
    print(f"writing to {version_file}\n")
    with open(version_file, "w") as f:
        f.write(PROVISION_VERSION + "\n")

    print()
    print(OKBLUE + "Zulip development environment setup succeeded!" + ENDC)
    return 0
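The need_to_run_*/write_new_digest pairs above implement a simple content-hash cache: hash the relevant input files (plus version strings), compare against the digest recorded on the last successful run, and redo the step only on mismatch. A minimal sketch of the pattern, with hypothetical names:

import hashlib
from typing import List, Optional

def files_digest(paths: List[str], extra: Optional[List[str]] = None) -> str:
    h = hashlib.sha1()
    for path in sorted(paths):
        with open(path, "rb") as f:
            h.update(f.read())
    for item in extra or []:  # e.g. a package version string
        h.update(item.encode())
    return h.hexdigest()

def needs_rerun(stamp_file: str, paths: List[str]) -> bool:
    try:
        with open(stamp_file) as f:
            return f.read().strip() != files_digest(paths)
    except FileNotFoundError:
        return True  # no recorded digest yet: always run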
Example #15
def try_to_copy_venv(venv_path, new_packages):
    # type: (str, Set[str]) -> bool
    """
    Tries to copy packages from an old virtual environment in the cache
    to the new virtual environment. The algorithm works as follows:
        1. Find a virtual environment, v, from the cache that has the
        highest overlap with the new requirements such that:
            a. The new requirements only add to the packages of v.
            b. The new requirements only upgrade packages of v.
        2. Copy the contents of v to the new virtual environment using
        virtualenv-clone.
        3. Delete all .pyc files in the new virtual environment.
    """
    if not os.path.exists(VENV_CACHE_PATH):
        return False

    venv_name = os.path.basename(venv_path)

    overlaps = []  # type: List[Tuple[int, str, Set[str]]]
    old_packages = set()  # type: Set[str]
    for sha1sum in os.listdir(VENV_CACHE_PATH):
        curr_venv_path = os.path.join(VENV_CACHE_PATH, sha1sum, venv_name)
        if (curr_venv_path == venv_path
                or not os.path.exists(get_index_filename(curr_venv_path))):
            continue

        old_packages = get_venv_packages(curr_venv_path)
        # We only consider using old virtualenvs that contain only
        # packages that we want in our new virtualenv.
        if not (old_packages - new_packages):
            overlap = new_packages & old_packages
            overlaps.append((len(overlap), curr_venv_path, overlap))

    target_log = get_logfile_name(venv_path)
    source_venv_path = None
    if overlaps:
        # Here, we select the old virtualenv with the largest overlap
        overlaps = sorted(overlaps)
        _, source_venv_path, copied_packages = overlaps[-1]
        print('Copying packages from {}'.format(source_venv_path))
        clone_ve = "{}/bin/virtualenv-clone".format(source_venv_path)
        cmd = [clone_ve, source_venv_path, venv_path]

        try:
            # TODO: We can probably remove this in a few months, now
            # that we can expect that virtualenv-clone is present in
            # all of our recent virtualenvs.
            run_as_root(cmd)
        except subprocess.CalledProcessError:
            # Virtualenv-clone is either not installed or threw an
            # error.  Just return False: making a new venv is safe.
            logging.warning("Error cloning virtualenv %s" %
                            (source_venv_path, ))
            return False

        # virtualenv-clone, unfortunately, copies the success stamp,
        # which means if the upcoming `pip install` phase were to
        # fail, we'd end up with a broken half-provisioned virtualenv
        # that's incorrectly tagged as properly provisioned.  The
        # right fix is to use
        # https://github.com/edwardgeorge/virtualenv-clone/pull/38,
        # but this rm is almost as good.
        success_stamp_path = os.path.join(venv_path, 'success-stamp')
        run_as_root(["rm", "-f", success_stamp_path])

        run_as_root([
            "chown", "-R", "{}:{}".format(os.getuid(), os.getgid()), venv_path
        ])
        source_log = get_logfile_name(source_venv_path)
        copy_parent_log(source_log, target_log)
        create_log_entry(target_log, source_venv_path, copied_packages,
                         new_packages - copied_packages)
        return True

    return False
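The selection step above boils down to: among cached venvs whose package sets are subsets of the new requirements, pick the one with the largest intersection. On toy data:

new_packages = {"django", "requests", "pip"}
cached = {
    "/srv/venv-cache/a/venv": {"django", "pip"},    # subset, overlap 2
    "/srv/venv-cache/b/venv": {"requests"},         # subset, overlap 1
    "/srv/venv-cache/c/venv": {"django", "flask"},  # extra package: skipped
}
overlaps = [
    (len(new_packages & pkgs), path, new_packages & pkgs)
    for path, pkgs in cached.items()
    if not (pkgs - new_packages)
]
print(max(overlaps))  # (2, '/srv/venv-cache/a/venv', {'django', 'pip'})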
Example #16
def main(options):
    # type: (Any) -> int

    # yarn and management commands expect to be run from the root of the
    # project.
    os.chdir(ZULIP_PATH)

    # hash the apt dependencies
    sha_sum = hashlib.sha1()

    for apt_dependency in SYSTEM_DEPENDENCIES:
        sha_sum.update(apt_dependency.encode('utf8'))
    if vendor in ["Ubuntu", "Debian"]:
        sha_sum.update(open('scripts/lib/setup-apt-repo', 'rb').read())
    else:
        # hash the content of setup-yum-repo and build-*
        sha_sum.update(open('scripts/lib/setup-yum-repo', 'rb').read())
        build_paths = glob.glob("scripts/lib/build-*")
        for bp in build_paths:
            sha_sum.update(open(bp, 'rb').read())

    new_apt_dependencies_hash = sha_sum.hexdigest()
    last_apt_dependencies_hash = None
    apt_hash_file_path = os.path.join(UUID_VAR_PATH, "apt_dependencies_hash")
    with open(apt_hash_file_path, 'a+') as hash_file:
        hash_file.seek(0)
        last_apt_dependencies_hash = hash_file.read()

    if (new_apt_dependencies_hash != last_apt_dependencies_hash):
        try:
            install_system_deps()
        except subprocess.CalledProcessError:
            # Might be a failure due to network connection issues. Retrying...
            install_system_deps(retry=True)
        with open(apt_hash_file_path, 'w') as hash_file:
            hash_file.write(new_apt_dependencies_hash)
    else:
        print("No changes to apt dependencies, so skipping apt operations.")

    # Here we install node.
    proxy_env = [
        "env",
        "http_proxy=" + os.environ.get("http_proxy", ""),
        "https_proxy=" + os.environ.get("https_proxy", ""),
        "no_proxy=" + os.environ.get("no_proxy", ""),
    ]
    run_as_root(proxy_env + ["scripts/lib/install-node"], sudo_args=['-H'])

    # This is a wrapper around `yarn`, which we run last since
    # it can often fail due to network issues beyond our control.
    try:
        # Hack: We remove `node_modules` as root to work around an
        # issue with the symlinks being improperly owned by root.
        if os.path.islink("node_modules"):
            run_as_root(["rm", "-f", "node_modules"])
        run_as_root(["mkdir", "-p", NODE_MODULES_CACHE_PATH])
        run_as_root(["chown", "%s:%s" % (user_id, user_id), NODE_MODULES_CACHE_PATH])
        setup_node_modules(prefer_offline=True)
    except subprocess.CalledProcessError:
        print(WARNING + "`yarn install` failed; retrying..." + ENDC)
        setup_node_modules()

    # Install shellcheck.
    run_as_root(["scripts/lib/install-shellcheck"])

    from tools.setup import setup_venvs
    setup_venvs.main()

    activate_this = "/srv/zulip-py3-venv/bin/activate_this.py"
    exec(open(activate_this).read(), {}, dict(__file__=activate_this))

    setup_shell_profile('~/.bash_profile')
    setup_shell_profile('~/.zprofile')

    # This needs to happen before anything that imports zproject.settings.
    run(["scripts/setup/generate_secrets.py", "--development"])

    run_as_root(["cp", REPO_STOPWORDS_PATH, TSEARCH_STOPWORDS_PATH])

    # create log directory `zulip/var/log`
    os.makedirs(LOG_DIR_PATH, exist_ok=True)
    # create upload directory `var/uploads`
    os.makedirs(UPLOAD_DIR_PATH, exist_ok=True)
    # create test upload directory `var/test_upload`
    os.makedirs(TEST_UPLOAD_DIR_PATH, exist_ok=True)
    # create coverage directory `var/coverage`
    os.makedirs(COVERAGE_DIR_PATH, exist_ok=True)
    # create linecoverage directory `var/node-coverage`
    os.makedirs(NODE_TEST_COVERAGE_DIR_PATH, exist_ok=True)

    # The `build_emoji` script requires `emoji-datasource` package
    # which we install via npm; thus this step is after installing npm
    # packages.
    if not os.path.isdir(EMOJI_CACHE_PATH):
        run_as_root(["mkdir", EMOJI_CACHE_PATH])
    run_as_root(["chown", "%s:%s" % (user_id, user_id), EMOJI_CACHE_PATH])
    run(["tools/setup/emoji/build_emoji"])

    # copy over static files from the zulip_bots package
    generate_zulip_bots_static_files()

    webfont_paths = ["tools/setup/generate-custom-icon-webfont", "static/icons/fonts/template.hbs"]
    webfont_paths += glob.glob('static/assets/icons/*')
    if file_or_package_hash_updated(webfont_paths, "webfont_files_hash", options.is_force):
        run(["tools/setup/generate-custom-icon-webfont"])
    else:
        print("No need to run `tools/setup/generate-custom-icon-webfont`.")

    build_pygments_data_paths = ["tools/setup/build_pygments_data", "tools/setup/lang.json"]
    from pygments import __version__ as pygments_version
    if file_or_package_hash_updated(build_pygments_data_paths, "build_pygments_data_hash", options.is_force,
                                    [pygments_version]):
        run(["tools/setup/build_pygments_data"])
    else:
        print("No need to run `tools/setup/build_pygments_data`.")

    update_authors_json_paths = ["tools/update-authors-json", "zerver/tests/fixtures/authors.json"]
    if file_or_package_hash_updated(update_authors_json_paths, "update_authors_json_hash", options.is_force):
        run(["tools/update-authors-json", "--use-fixture"])
    else:
        print("No need to run `tools/update-authors-json`.")

    email_source_paths = ["tools/inline-email-css", "templates/zerver/emails/email.css"]
    email_source_paths += glob.glob('templates/zerver/emails/*.source.html')
    if file_or_package_hash_updated(email_source_paths, "last_email_source_files_hash", options.is_force):
        run(["tools/inline-email-css"])
    else:
        print("No need to run `tools/inline-email-css`.")

    if is_circleci or (is_travis and not options.is_production_travis):
        run_as_root(["service", "rabbitmq-server", "restart"])
        run_as_root(["service", "redis-server", "restart"])
        run_as_root(["service", "memcached", "restart"])
        run_as_root(["service", "postgresql", "restart"])
    elif family == 'redhat':
        for service in ["postgresql-%s" % (POSTGRES_VERSION,), "rabbitmq-server", "memcached", "redis"]:
            run_as_root(["systemctl", "enable", service], sudo_args = ['-H'])
            run_as_root(["systemctl", "start", service], sudo_args = ['-H'])
    elif options.is_docker:
        run_as_root(["service", "rabbitmq-server", "restart"])
        run_as_root(["pg_dropcluster", "--stop", POSTGRES_VERSION, "main"])
        run_as_root(["pg_createcluster", "-e", "utf8", "--start", POSTGRES_VERSION, "main"])
        run_as_root(["service", "redis-server", "restart"])
        run_as_root(["service", "memcached", "restart"])
    if not options.is_production_travis:
        # The following block is skipped for the production Travis
        # suite, because that suite doesn't make use of these elements
        # of the development environment (it just uses the development
        # environment to build a release tarball).

        # Need to set up Django before using template_database_status
        os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zproject.settings")
        import django
        django.setup()

        from zerver.lib.test_fixtures import template_database_status, run_db_migrations

        try:
            from zerver.lib.queue import SimpleQueueClient
            SimpleQueueClient()
            rabbitmq_is_configured = True
        except Exception:
            rabbitmq_is_configured = False

        if options.is_force or not rabbitmq_is_configured:
            run(["scripts/setup/configure-rabbitmq"])
        else:
            print("RabbitMQ is already configured.")

        migration_status_path = os.path.join(UUID_VAR_PATH, "migration_status_dev")
        dev_template_db_status = template_database_status(
            migration_status=migration_status_path,
            settings="zproject.settings",
            database_name="zulip",
        )
        if options.is_force or dev_template_db_status == 'needs_rebuild':
            run(["tools/setup/postgres-init-dev-db"])
            run(["tools/do-destroy-rebuild-database"])
        elif dev_template_db_status == 'run_migrations':
            run_db_migrations('dev')
        elif dev_template_db_status == 'current':
            print("No need to regenerate the dev DB.")

        test_template_db_status = template_database_status()
        if options.is_force or test_template_db_status == 'needs_rebuild':
            run(["tools/setup/postgres-init-test-db"])
            run(["tools/do-destroy-rebuild-test-database"])
        elif test_template_db_status == 'run_migrations':
            run_db_migrations('test')
        elif test_template_db_status == 'current':
            print("No need to regenerate the test DB.")

        # Consider updating generated translations data: both `.mo`
        # files and `language-options.json`.
        paths = ['zerver/management/commands/compilemessages.py']
        paths += glob.glob('static/locale/*/LC_MESSAGES/*.po')
        paths += glob.glob('static/locale/*/translations.json')

        if file_or_package_hash_updated(paths, "last_compilemessages_hash", options.is_force):
            run(["./manage.py", "compilemessages"])
        else:
            print("No need to run `manage.py compilemessages`.")

    run(["scripts/lib/clean-unused-caches"])

    version_file = os.path.join(UUID_VAR_PATH, 'provision_version')
    print('writing to %s\n' % (version_file,))
    open(version_file, 'w').write(PROVISION_VERSION + '\n')

    print()
    print(OKBLUE + "Zulip development environment setup succeeded!" + ENDC)
    return 0
Example #17
def install_yum_deps(deps_to_install, retry=False):
    # type: (List[str], bool) -> None
    print(WARNING + "RedHat support is still experimental.")
    run_as_root(["./scripts/lib/setup-yum-repo"])

    # Hack specific to unregistered RHEL system.  The moreutils
    # package requires a perl module package, which isn't available in
    # the unregistered RHEL repositories.
    #
    # Error: Package: moreutils-0.49-2.el7.x86_64 (epel)
    #        Requires: perl(IPC::Run)
    yum_extra_flags = []  # type: List[str]
    if vendor == 'RedHat':
        exitcode, subs_status = subprocess.getstatusoutput("sudo subscription-manager status")
        if exitcode == 1:
            # TODO: this might be overkill, since `subscription-manager` is
            # already called in setup-yum-repo
            if 'Status' in subs_status:
                # The output is well-formed
                yum_extra_flags = ["--skip-broken"]
            else:
                print("Unrecognized output. `subscription-manager` might not be available")

    run_as_root(["yum", "install", "-y"] + yum_extra_flags + deps_to_install)
    if vendor in ["CentOS", "RedHat"]:
        # This is how a pip3 is installed to /usr/bin in CentOS/RHEL
        # for python35 and later.
        run_as_root(["python36", "-m", "ensurepip"])
        # `python36` is not aliased to `python3` by default
        run_as_root(["ln", "-nsf", "/usr/bin/python36", "/usr/bin/python3"])
    postgres_dir = 'pgsql-%s' % (POSTGRES_VERSION,)
    for cmd in ['pg_config', 'pg_isready', 'psql']:
        # Our tooling expects these postgres scripts to be at
        # well-known paths.  There's an argument for eventually
        # making our tooling auto-detect, but this is simpler.
        run_as_root(["ln", "-nsf", "/usr/%s/bin/%s" % (postgres_dir, cmd),
                     "/usr/bin/%s" % (cmd,)])

    # From here, we do the first-time setup/initialization for the postgres database.
    pg_datadir = "/var/lib/pgsql/%s/data" % (POSTGRES_VERSION,)
    pg_hba_conf = os.path.join(pg_datadir, "pg_hba.conf")

    # We can't just check if the file exists with os.path, since the
    # current user likely doesn't have permission to read the
    # pg_datadir directory.
    if subprocess.call(["sudo", "test", "-e", pg_hba_conf]) == 0:
        # Skip setup if it has been applied previously
        return

    run_as_root(["/usr/%s/bin/postgresql-%s-setup" % (postgres_dir, POSTGRES_VERSION), "initdb"],
                sudo_args=['-H'])
    # Use vendored pg_hba.conf, which enables password authentication.
    run_as_root(["cp", "-a", "puppet/zulip/files/postgresql/centos_pg_hba.conf", pg_hba_conf])
    # Later steps will ensure postgres is started

    # Link in tsearch data files
    overwrite_symlink("/usr/share/myspell/en_US.dic", "/usr/pgsql-%s/share/tsearch_data/en_us.dict"
                      % (POSTGRES_VERSION,))
    overwrite_symlink("/usr/share/myspell/en_US.aff", "/usr/pgsql-%s/share/tsearch_data/en_us.affix"
                      % (POSTGRES_VERSION,))
Example #18
    sys.exit(1)

if platform.architecture()[0] == '64bit':
    arch = 'amd64'
elif platform.architecture()[0] == '32bit':
    arch = "i386"
else:
    logging.critical("Only x86 is supported;"
                     "ping [email protected] if you want another architecture.")
    sys.exit(1)

# Ideally we wouldn't need to install a dependency here, before we
# know the codename.
is_rhel_based = os.path.exists("/etc/redhat-release")
if (not is_rhel_based) and (not os.path.exists("/usr/bin/lsb_release")):
    run_as_root(["apt-get", "install", "-y", "lsb-release"])

distro_info = parse_lsb_release()
vendor = distro_info['DISTRIB_ID']
codename = distro_info['DISTRIB_CODENAME']
family = distro_info['DISTRIB_FAMILY']
if not (vendor in SUPPORTED_PLATFORMS and codename in SUPPORTED_PLATFORMS[vendor]):
    logging.critical("Unsupported platform: {} {}".format(vendor, codename))
    sys.exit(1)

POSTGRES_VERSION_MAP = {
    "stretch": "9.6",
    "trusty": "9.3",
    "xenial": "9.5",
    "bionic": "10",
    "cosmic": "10",
Example #19
def main(options):
    # type: (argparse.Namespace) -> NoReturn

    # yarn and management commands expect to be run from the root of the
    # project.
    os.chdir(ZULIP_PATH)

    # hash the apt dependencies
    sha_sum = hashlib.sha1()

    for apt_dependency in SYSTEM_DEPENDENCIES:
        sha_sum.update(apt_dependency.encode('utf8'))
    if vendor in ["ubuntu", "debian"]:
        sha_sum.update(open('scripts/lib/setup-apt-repo', 'rb').read())
    else:
        # hash the content of setup-yum-repo and build-*
        sha_sum.update(open('scripts/lib/setup-yum-repo', 'rb').read())
        build_paths = glob.glob("scripts/lib/build-*")
        for bp in build_paths:
            sha_sum.update(open(bp, 'rb').read())

    new_apt_dependencies_hash = sha_sum.hexdigest()
    last_apt_dependencies_hash = None
    apt_hash_file_path = os.path.join(UUID_VAR_PATH, "apt_dependencies_hash")
    with open(apt_hash_file_path, 'a+') as hash_file:
        hash_file.seek(0)
        last_apt_dependencies_hash = hash_file.read()

    if (new_apt_dependencies_hash != last_apt_dependencies_hash):
        try:
            install_system_deps()
        except subprocess.CalledProcessError:
            # Might be a failure due to network connection issues. Retrying...
            print(WARNING +
                  "Installing system dependencies failed; retrying..." + ENDC)
            install_system_deps()
        with open(apt_hash_file_path, 'w') as hash_file:
            hash_file.write(new_apt_dependencies_hash)
    else:
        print("No changes to apt dependencies, so skipping apt operations.")

    # Here we install node.
    proxy_env = [
        "env",
        "http_proxy=" + os.environ.get("http_proxy", ""),
        "https_proxy=" + os.environ.get("https_proxy", ""),
        "no_proxy=" + os.environ.get("no_proxy", ""),
    ]
    run_as_root(proxy_env + ["scripts/lib/install-node"], sudo_args=['-H'])

    if not os.access(NODE_MODULES_CACHE_PATH, os.W_OK):
        run_as_root(["mkdir", "-p", NODE_MODULES_CACHE_PATH])
        run_as_root([
            "chown",
            "%s:%s" % (os.getuid(), os.getgid()), NODE_MODULES_CACHE_PATH
        ])

    # This is a wrapper around `yarn`, which we run last since
    # it can often fail due to network issues beyond our control.
    try:
        setup_node_modules(prefer_offline=True)
    except subprocess.CalledProcessError:
        print(WARNING + "`yarn install` failed; retrying..." + ENDC)
        try:
            setup_node_modules()
        except subprocess.CalledProcessError:
            print(
                FAIL +
                "`yarn install` is failing; check your network connection (and proxy settings)."
                + ENDC)
            sys.exit(1)

    # Install shellcheck.
    run_as_root(["scripts/lib/install-shellcheck"])

    setup_venvs.main()

    run_as_root(["cp", REPO_STOPWORDS_PATH, TSEARCH_STOPWORDS_PATH])

    if is_circleci or (is_travis and not options.is_production_travis):
        run_as_root(["service", "rabbitmq-server", "restart"])
        run_as_root(["service", "redis-server", "restart"])
        run_as_root(["service", "memcached", "restart"])
        run_as_root(["service", "postgresql", "restart"])
    elif family == 'redhat':
        for service in [
                "postgresql-%s" % (POSTGRES_VERSION, ), "rabbitmq-server",
                "memcached", "redis"
        ]:
            run_as_root(["systemctl", "enable", service], sudo_args=['-H'])
            run_as_root(["systemctl", "start", service], sudo_args=['-H'])

    # If we imported modules after activating the virtualenv in this
    # Python process, they could end up mismatching with modules we’ve
    # already imported from outside the virtualenv.  That seems like a
    # bad idea, and empirically it can cause Python to segfault on
    # certain cffi-related imports.  Instead, start a new Python
    # process inside the virtualenv.
    activate_this = "/srv/zulip-py3-venv/bin/activate_this.py"
    provision_inner = os.path.join(ZULIP_PATH, "tools", "lib",
                                   "provision_inner.py")
    exec(open(activate_this).read(), dict(__file__=activate_this))
    os.execvp(provision_inner, [
        provision_inner,
        *(["--force"] if options.is_force else []),
        *(["--production-travis"] if options.is_production_travis else []),
    ])
Example #20
def main(options: argparse.Namespace) -> int:
    setup_shell_profile('~/.bash_profile')
    setup_shell_profile('~/.zprofile')

    # This needs to happen before anything that imports zproject.settings.
    run(["scripts/setup/generate_secrets.py", "--development"])

    # create log directory `zulip/var/log`
    os.makedirs(LOG_DIR_PATH, exist_ok=True)
    # create upload directory `var/uploads`
    os.makedirs(UPLOAD_DIR_PATH, exist_ok=True)
    # create test upload directory `var/test_upload`
    os.makedirs(TEST_UPLOAD_DIR_PATH, exist_ok=True)
    # create coverage directory `var/coverage`
    os.makedirs(COVERAGE_DIR_PATH, exist_ok=True)
    # create linecoverage directory `var/node-coverage`
    os.makedirs(NODE_TEST_COVERAGE_DIR_PATH, exist_ok=True)
    # create XUnit XML test results directory`var/xunit-test-results`
    os.makedirs(XUNIT_XML_TEST_RESULTS_DIR_PATH, exist_ok=True)

    # The `build_emoji` script requires `emoji-datasource` package
    # which we install via npm; thus this step is after installing npm
    # packages.
    if not os.path.isdir(EMOJI_CACHE_PATH):
        run_as_root(["mkdir", EMOJI_CACHE_PATH])
    run_as_root(["chown", "%s:%s" % (user_id, user_id), EMOJI_CACHE_PATH])
    run(["tools/setup/emoji/build_emoji"])

    # copy over static files from the zulip_bots package
    generate_zulip_bots_static_files()

    build_pygments_data_paths = [
        "tools/setup/build_pygments_data", "tools/setup/lang.json"
    ]
    from pygments import __version__ as pygments_version
    if file_or_package_hash_updated(build_pygments_data_paths,
                                    "build_pygments_data_hash",
                                    options.is_force, [pygments_version]):
        run(["tools/setup/build_pygments_data"])
    else:
        print("No need to run `tools/setup/build_pygments_data`.")

    update_authors_json_paths = [
        "tools/update-authors-json", "zerver/tests/fixtures/authors.json"
    ]
    if file_or_package_hash_updated(update_authors_json_paths,
                                    "update_authors_json_hash",
                                    options.is_force):
        run(["tools/update-authors-json", "--use-fixture"])
    else:
        print("No need to run `tools/update-authors-json`.")

    email_source_paths = [
        "tools/inline-email-css", "templates/zerver/emails/email.css"
    ]
    email_source_paths += glob.glob('templates/zerver/emails/*.source.html')
    if file_or_package_hash_updated(email_source_paths,
                                    "last_email_source_files_hash",
                                    options.is_force):
        run(["tools/inline-email-css"])
    else:
        print("No need to run `tools/inline-email-css`.")

    if not options.is_production_travis:
        # The following block is skipped for the production Travis
        # suite, because that suite doesn't make use of these elements
        # of the development environment (it just uses the development
        # environment to build a release tarball).

        # Need to set up Django before using template_database_status
        os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zproject.settings")
        import django
        django.setup()

        from zerver.lib.test_fixtures import template_database_status, run_db_migrations, \
            destroy_leaked_test_databases

        try:
            from zerver.lib.queue import SimpleQueueClient
            SimpleQueueClient()
            rabbitmq_is_configured = True
        except Exception:
            rabbitmq_is_configured = False

        if options.is_force or not rabbitmq_is_configured:
            run(["scripts/setup/configure-rabbitmq"])
        else:
            print("RabbitMQ is already configured.")

        migration_status_path = os.path.join(UUID_VAR_PATH,
                                             "migration_status_dev")
        dev_template_db_status = template_database_status(
            migration_status=migration_status_path,
            settings="zproject.settings",
            database_name="zulip",
        )
        if options.is_force or dev_template_db_status == 'needs_rebuild':
            run(["tools/setup/postgres-init-dev-db"])
            run(["tools/do-destroy-rebuild-database"])
        elif dev_template_db_status == 'run_migrations':
            run_db_migrations('dev')
        elif dev_template_db_status == 'current':
            print("No need to regenerate the dev DB.")

        test_template_db_status = template_database_status()
        if options.is_force or test_template_db_status == 'needs_rebuild':
            run(["tools/setup/postgres-init-test-db"])
            run(["tools/do-destroy-rebuild-test-database"])
        elif test_template_db_status == 'run_migrations':
            run_db_migrations('test')
        elif test_template_db_status == 'current':
            print("No need to regenerate the test DB.")

        # Consider updating generated translations data: both `.mo`
        # files and `language-options.json`.
        paths = ['zerver/management/commands/compilemessages.py']
        paths += glob.glob('locale/*/LC_MESSAGES/*.po')
        paths += glob.glob('locale/*/translations.json')

        if file_or_package_hash_updated(paths, "last_compilemessages_hash",
                                        options.is_force):
            run(["./manage.py", "compilemessages"])
        else:
            print("No need to run `manage.py compilemessages`.")

        destroyed = destroy_leaked_test_databases()
        if destroyed:
            print("Dropped %s stale test databases!" % (destroyed, ))

    run(["scripts/lib/clean-unused-caches"])

    # Keeping this cache file around can cause eslint to throw
    # random TypeErrors when new/updated dependencies are added
    if os.path.isfile('.eslintcache'):
        # Remove this block when
        # https://github.com/eslint/eslint/issues/11639 is fixed
        # upstream.
        os.remove('.eslintcache')

    # Clean up the root of the `var/` directory for various
    # testing-related files that we have migrated to
    # `var/<uuid>/test-backend`.
    print("Cleaning var/ directory files...")
    var_paths = glob.glob('var/test*')
    var_paths.append('var/bot_avatar')
    for path in var_paths:
        try:
            if os.path.isdir(path):
                shutil.rmtree(path)
            else:
                os.remove(path)
        except FileNotFoundError:
            pass

    version_file = os.path.join(UUID_VAR_PATH, 'provision_version')
    print('writing to %s\n' % (version_file, ))
    open(version_file, 'w').write(PROVISION_VERSION + '\n')

    print()
    print(OKBLUE + "Zulip development environment setup succeeded!" + ENDC)
    return 0
Example #21
def main(options: argparse.Namespace) -> "NoReturn":

    # yarn and management commands expect to be run from the root of the
    # project.
    os.chdir(ZULIP_PATH)

    # hash the apt dependencies
    sha_sum = hashlib.sha1()

    for apt_dependency in SYSTEM_DEPENDENCIES:
        sha_sum.update(apt_dependency.encode())
    if "debian" in os_families():
        with open("scripts/lib/setup-apt-repo", "rb") as fb:
            sha_sum.update(fb.read())
    else:
        # hash the content of setup-yum-repo*
        with open("scripts/lib/setup-yum-repo", "rb") as fb:
            sha_sum.update(fb.read())

    # hash the content of build-pgroonga if PGroonga is built from source
    if BUILD_PGROONGA_FROM_SOURCE:
        with open("scripts/lib/build-pgroonga", "rb") as fb:
            sha_sum.update(fb.read())

    new_apt_dependencies_hash = sha_sum.hexdigest()
    last_apt_dependencies_hash = None
    apt_hash_file_path = os.path.join(UUID_VAR_PATH, "apt_dependencies_hash")
    with open(apt_hash_file_path, "a+") as hash_file:
        hash_file.seek(0)
        last_apt_dependencies_hash = hash_file.read()

    if new_apt_dependencies_hash != last_apt_dependencies_hash:
        try:
            install_system_deps()
        except subprocess.CalledProcessError:
            try:
                # Might be a failure due to network connection issues. Retrying...
                print(WARNING + "Installing system dependencies failed; retrying..." + ENDC)
                install_system_deps()
            except BaseException as e:
                # Suppress exception chaining
                raise e from None
        with open(apt_hash_file_path, "w") as hash_file:
            hash_file.write(new_apt_dependencies_hash)
    else:
        print("No changes to apt dependencies, so skipping apt operations.")

    # Here we install node.
    proxy_env = [
        "env",
        "http_proxy=" + os.environ.get("http_proxy", ""),
        "https_proxy=" + os.environ.get("https_proxy", ""),
        "no_proxy=" + os.environ.get("no_proxy", ""),
    ]
    run_as_root([*proxy_env, "scripts/lib/install-node"], sudo_args=["-H"])
    run_as_root([*proxy_env, "scripts/lib/install-yarn"])

    if not os.access(NODE_MODULES_CACHE_PATH, os.W_OK):
        run_as_root(["mkdir", "-p", NODE_MODULES_CACHE_PATH])
        run_as_root(["chown", f"{os.getuid()}:{os.getgid()}", NODE_MODULES_CACHE_PATH])

    # This is a wrapper around `yarn`, which we run last since
    # it can often fail due to network issues beyond our control.
    try:
        setup_node_modules(prefer_offline=True)
    except subprocess.CalledProcessError:
        print(WARNING + "`yarn install` failed; retrying..." + ENDC)
        try:
            setup_node_modules()
        except subprocess.CalledProcessError:
            print(
                FAIL
                + "`yarn install` is failing; check your network connection (and proxy settings)."
                + ENDC
            )
            sys.exit(1)

    # Install shellcheck.
    run_as_root(["tools/setup/install-shellcheck"])
    # Install shfmt.
    run_as_root(["tools/setup/install-shfmt"])

    setup_venvs.main()

    run_as_root(["cp", REPO_STOPWORDS_PATH, TSEARCH_STOPWORDS_PATH])

    if CONTINUOUS_INTEGRATION and not options.is_build_release_tarball_only:
        run_as_root(["service", "redis-server", "start"])
        run_as_root(["service", "memcached", "start"])
        run_as_root(["service", "rabbitmq-server", "start"])
        run_as_root(["service", "postgresql", "start"])
    elif "fedora" in os_families():
        # These platforms don't enable and start services on
        # installing their package, so we do that here.
        for service in [
            f"postgresql-{POSTGRESQL_VERSION}",
            "rabbitmq-server",
            "memcached",
            "redis",
        ]:
            run_as_root(["systemctl", "enable", service], sudo_args=["-H"])
            run_as_root(["systemctl", "start", service], sudo_args=["-H"])

    # If we imported modules after activating the virtualenv in this
    # Python process, they could end up mismatching with modules we’ve
    # already imported from outside the virtualenv.  That seems like a
    # bad idea, and empirically it can cause Python to segfault on
    # certain cffi-related imports.  Instead, start a new Python
    # process inside the virtualenv.
    activate_this = "/srv/zulip-py3-venv/bin/activate_this.py"
    provision_inner = os.path.join(ZULIP_PATH, "tools", "lib", "provision_inner.py")
    with open(activate_this) as f:
        exec(f.read(), dict(__file__=activate_this))
    os.execvp(
        provision_inner,
        [
            provision_inner,
            *(["--force"] if options.is_force else []),
            *(["--build-release-tarball-only"] if options.is_build_release_tarball_only else []),
            *(["--skip-dev-db-build"] if options.skip_dev_db_build else []),
        ],
    )
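Opening apt_hash_file_path with mode "a+" and then seeking to 0, as above, is a compact read-or-create: on the very first run the file is created empty and read() returns "", so the hash comparison naturally takes the install path instead of raising FileNotFoundError. In isolation:

with open("apt_dependencies_hash", "a+") as f:
    f.seek(0)            # "a+" opens positioned at end-of-file
    previous = f.read()  # "" on the very first run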
Example #22
def try_to_copy_venv(venv_path, new_packages):
    # type: (str, Set[str]) -> bool
    """
    Tries to copy packages from an old virtual environment in the cache
    to the new virtual environment. The algorithm works as follows:
        1. Find a virtual environment, v, from the cache that has the
        highest overlap with the new requirements such that:
            a. The new requirements only add to the packages of v.
            b. The new requirements only upgrade packages of v.
        2. Copy the contents of v to the new virtual environment using
        virtualenv-clone.
        3. Delete all .pyc files in the new virtual environment.
    """
    if not os.path.exists(VENV_CACHE_PATH):
        return False

    venv_name = os.path.basename(venv_path)

    overlaps = []  # type: List[Tuple[int, str, Set[str]]]
    old_packages = set()  # type: Set[str]
    for sha1sum in os.listdir(VENV_CACHE_PATH):
        curr_venv_path = os.path.join(VENV_CACHE_PATH, sha1sum, venv_name)
        if (curr_venv_path == venv_path or
                not os.path.exists(get_index_filename(curr_venv_path))):
            continue

        old_packages = get_venv_packages(curr_venv_path)
        # We only consider using old virtualenvs that contain only
        # packages that we want in our new virtualenv.
        if not (old_packages - new_packages):
            overlap = new_packages & old_packages
            overlaps.append((len(overlap), curr_venv_path, overlap))

    target_log = get_logfile_name(venv_path)
    source_venv_path = None
    if overlaps:
        # Here, we select the old virtualenv with the largest overlap
        overlaps = sorted(overlaps)
        _, source_venv_path, copied_packages = overlaps[-1]
        print('Copying packages from {}'.format(source_venv_path))
        clone_ve = "{}/bin/virtualenv-clone".format(source_venv_path)
        cmd = [clone_ve, source_venv_path, venv_path]

        try:
            # TODO: We can probably remove this in a few months, now
            # that we can expect that virtualenv-clone is present in
            # all of our recent virtualenvs.
            run_as_root(cmd)
        except subprocess.CalledProcessError:
            # Virtualenv-clone is either not installed or threw an
            # error.  Just return False: making a new venv is safe.
            logging.warning("Error cloning virtualenv %s" % (source_venv_path,))
            return False

        # virtualenv-clone, unfortunately, copies the success stamp,
        # which means if the upcoming `pip install` phase were to
        # fail, we'd end up with a broken half-provisioned virtualenv
        # that's incorrectly tagged as properly provisioned.  The
        # right fix is to use
        # https://github.com/edwardgeorge/virtualenv-clone/pull/38,
        # but this rm is almost as good.
        success_stamp_path = os.path.join(venv_path, 'success-stamp')
        run_as_root(["rm", "-f", success_stamp_path])

        run_as_root(["chown", "-R",
                     "{}:{}".format(os.getuid(), os.getgid()), venv_path])
        source_log = get_logfile_name(source_venv_path)
        copy_parent_log(source_log, target_log)
        create_log_entry(target_log, source_venv_path, copied_packages,
                         new_packages - copied_packages)
        return True

    return False
Example #23
def install_yum_deps(deps_to_install: List[str]) -> None:
    print(WARNING + "RedHat support is still experimental." + ENDC)
    run_as_root(["./scripts/lib/setup-yum-repo"])

    # Hack specific to unregistered RHEL system.  The moreutils
    # package requires a perl module package, which isn't available in
    # the unregistered RHEL repositories.
    #
    # Error: Package: moreutils-0.49-2.el7.x86_64 (epel)
    #        Requires: perl(IPC::Run)
    yum_extra_flags: List[str] = []
    if vendor == "rhel":
        exitcode, subs_status = subprocess.getstatusoutput(
            "sudo subscription-manager status")
        if exitcode == 1:
            # TODO: this might be overkill, since `subscription-manager` is
            # already called in setup-yum-repo
            if "Status" in subs_status:
                # The output is well-formed
                yum_extra_flags = ["--skip-broken"]
            else:
                print(
                    "Unrecognized output. `subscription-manager` might not be available"
                )

    run_as_root(["yum", "install", "-y", *yum_extra_flags, *deps_to_install])
    if "rhel" in os_families():
        # This is how a pip3 is installed to /usr/bin in CentOS/RHEL
        # for python35 and later.
        run_as_root(["python36", "-m", "ensurepip"])
        # `python36` is not aliased to `python3` by default
        run_as_root(["ln", "-nsf", "/usr/bin/python36", "/usr/bin/python3"])
    postgresql_dir = f"pgsql-{POSTGRESQL_VERSION}"
    for cmd in ["pg_config", "pg_isready", "psql"]:
        # Our tooling expects these PostgreSQL scripts to be at
        # well-known paths.  There's an argument for eventually
        # making our tooling auto-detect, but this is simpler.
        run_as_root([
            "ln", "-nsf", f"/usr/{postgresql_dir}/bin/{cmd}", f"/usr/bin/{cmd}"
        ])

    # From here, we do the first-time setup/initialization for the PostgreSQL database.
    pg_datadir = f"/var/lib/pgsql/{POSTGRESQL_VERSION}/data"
    pg_hba_conf = os.path.join(pg_datadir, "pg_hba.conf")

    # We can't just check if the file exists with os.path, since the
    # current user likely doesn't have permission to read the
    # pg_datadir directory.
    if subprocess.call(["sudo", "test", "-e", pg_hba_conf]) == 0:
        # Skip setup if it has been applied previously
        return

    run_as_root(
        [
            f"/usr/{postgresql_dir}/bin/postgresql-{POSTGRESQL_VERSION}-setup",
            "initdb"
        ],
        sudo_args=["-H"],
    )
    # Use vendored pg_hba.conf, which enables password authentication.
    run_as_root([
        "cp", "-a", "puppet/zulip/files/postgresql/centos_pg_hba.conf",
        pg_hba_conf
    ])
    # Later steps will ensure PostgreSQL is started

    # Link in tsearch data files
    run_as_root([
        "ln",
        "-nsf",
        "/usr/share/myspell/en_US.dic",
        f"/usr/pgsql-{POSTGRESQL_VERSION}/share/tsearch_data/en_us.dict",
    ])
    run_as_root([
        "ln",
        "-nsf",
        "/usr/share/myspell/en_US.aff",
        f"/usr/pgsql-{POSTGRESQL_VERSION}/share/tsearch_data/en_us.affix",
    ])