Example #1
def get_venv_dependencies(vendor: str, os_version: str) -> List[str]:
    if "debian" in os_families():
        return VENV_DEPENDENCIES
    elif "rhel" in os_families():
        return REDHAT_VENV_DEPENDENCIES
    elif "fedora" in os_families():
        return FEDORA_VENV_DEPENDENCIES
    else:
        raise AssertionError("Invalid vendor")
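
Every example below branches on os_families(), a helper that is not part of this excerpt. Here is a minimal sketch of what such a helper could look like, assuming it derives the family set from the ID and ID_LIKE fields of /etc/os-release; the parsing details are an assumption for illustration, not the project's actual implementation.

from typing import Dict, Set

def os_families() -> Set[str]:
    # Assumed sketch: read /etc/os-release and return the distro ID plus
    # its ID_LIKE ancestors, e.g. Ubuntu yields {"ubuntu", "debian"}.
    info: Dict[str, str] = {}
    with open("/etc/os-release") as f:
        for line in f:
            line = line.strip()
            if "=" in line and not line.startswith("#"):
                key, value = line.split("=", 1)
                info[key] = value.strip('"')
    return {info.get("ID", ""), *info.get("ID_LIKE", "").split()} - {""}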
Example #2
def get_venv_dependencies(vendor: str, os_version: str) -> List[str]:
    if vendor == 'ubuntu' and os_version == '20.04':
        return VENV_DEPENDENCIES + [PYTHON_DEV_DEPENDENCY.format("2")]
    elif "debian" in os_families():
        return VENV_DEPENDENCIES + [PYTHON_DEV_DEPENDENCY.format("")]
    elif "rhel" in os_families():
        return REDHAT_VENV_DEPENDENCIES
    elif "fedora" in os_families():
        return FEDORA_VENV_DEPENDENCIES
    else:
        raise AssertionError("Invalid vendor")
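
The constants referenced above are defined elsewhere. For illustration only, PYTHON_DEV_DEPENDENCY could plausibly be a package-name template along these lines; the exact value is an assumption inferred from the .format() calls, not shown in this excerpt.

# Hypothetical value, shown only to make the .format() calls above concrete.
PYTHON_DEV_DEPENDENCY = "python{}-dev"

# PYTHON_DEV_DEPENDENCY.format("2") -> "python2-dev"  (Ubuntu 20.04 branch)
# PYTHON_DEV_DEPENDENCY.format("")  -> "python-dev"   (other Debian-family branch)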
Example #3
def install_system_deps() -> None:

    # By doing list -> set -> list conversion, we remove duplicates.
    deps_to_install = sorted(set(SYSTEM_DEPENDENCIES))

    if "fedora" in os_families():
        install_yum_deps(deps_to_install)
    elif "debian" in os_families():
        install_apt_deps(deps_to_install)
    else:
        raise AssertionError("Invalid vendor")

    # For some platforms, there are no published PGroonga
    # packages available, so we build them from source.
    if BUILD_PGROONGA_FROM_SOURCE:
        run_as_root(["./scripts/lib/build-pgroonga"])
Example #4
def install_yum_deps(deps_to_install):
    # type: (List[str]) -> None
    print(WARNING + "RedHat support is still experimental." + ENDC)
    run_as_root(["./scripts/lib/setup-yum-repo"])

    # Hack specific to unregistered RHEL systems.  The moreutils
    # package requires a perl module package, which isn't available in
    # the unregistered RHEL repositories.
    #
    # Error: Package: moreutils-0.49-2.el7.x86_64 (epel)
    #        Requires: perl(IPC::Run)
    yum_extra_flags = []  # type: List[str]
    if vendor == "rhel":
        exitcode, subs_status = subprocess.getstatusoutput("sudo subscription-manager status")
        if exitcode == 1:
            # TODO: this might be overkill, since `subscription-manager` is
            # already called in setup-yum-repo
            if 'Status' in subs_status:
                # The output is well-formed
                yum_extra_flags = ["--skip-broken"]
            else:
                print("Unrecognized output. `subscription-manager` might not be available")

    run_as_root(["yum", "install", "-y"] + yum_extra_flags + deps_to_install)
    if "rhel" in os_families():
        # This is how pip3 is installed to /usr/bin in CentOS/RHEL
        # for python35 and later.
        run_as_root(["python36", "-m", "ensurepip"])
        # `python36` is not aliased to `python3` by default
        run_as_root(["ln", "-nsf", "/usr/bin/python36", "/usr/bin/python3"])
    postgres_dir = 'pgsql-%s' % (POSTGRES_VERSION,)
    for cmd in ['pg_config', 'pg_isready', 'psql']:
        # Our tooling expects these postgres scripts to be at
        # well-known paths.  There's an argument for eventually
        # making our tooling auto-detect, but this is simpler.
        run_as_root(["ln", "-nsf", "/usr/%s/bin/%s" % (postgres_dir, cmd),
                     "/usr/bin/%s" % (cmd,)])

    # From here, we do the first-time setup/initialization for the postgres database.
    pg_datadir = "/var/lib/pgsql/%s/data" % (POSTGRES_VERSION,)
    pg_hba_conf = os.path.join(pg_datadir, "pg_hba.conf")

    # We can't just check if the file exists with os.path, since the
    # current user likely doesn't have permission to read the
    # pg_datadir directory.
    if subprocess.call(["sudo", "test", "-e", pg_hba_conf]) == 0:
        # Skip setup if it has been applied previously
        return

    run_as_root(["/usr/%s/bin/postgresql-%s-setup" % (postgres_dir, POSTGRES_VERSION), "initdb"],
                sudo_args=['-H'])
    # Use vendored pg_hba.conf, which enables password authentication.
    run_as_root(["cp", "-a", "puppet/zulip/files/postgresql/centos_pg_hba.conf", pg_hba_conf])
    # Later steps will ensure postgres is started

    # Link in tsearch data files
    overwrite_symlink("/usr/share/myspell/en_US.dic", "/usr/pgsql-%s/share/tsearch_data/en_us.dict"
                      % (POSTGRES_VERSION,))
    overwrite_symlink("/usr/share/myspell/en_US.aff", "/usr/pgsql-%s/share/tsearch_data/en_us.affix"
                      % (POSTGRES_VERSION,))
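
overwrite_symlink() is another helper assumed by this example. A minimal sketch, assuming it replaces whatever is at the destination by creating the new symlink under a temporary name and renaming it into place (rename over an existing path is atomic on POSIX); the temporary-name scheme here is an assumption.

import os

def overwrite_symlink(src: str, dst: str) -> None:
    # Assumed sketch: build the symlink at a temporary path next to dst,
    # then rename it over dst so an existing link is replaced atomically.
    tmp = dst + ".tmp-symlink"
    if os.path.lexists(tmp):
        os.remove(tmp)
    os.symlink(src, tmp)
    os.rename(tmp, dst)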
Example #5
def main(options):
    # type: (argparse.Namespace) -> NoReturn

    # yarn and management commands expect to be run from the root of the
    # project.
    os.chdir(ZULIP_PATH)

    # hash the apt dependencies
    sha_sum = hashlib.sha1()

    for apt_dependency in SYSTEM_DEPENDENCIES:
        sha_sum.update(apt_dependency.encode('utf8'))
    if "debian" in os_families():
        sha_sum.update(open('scripts/lib/setup-apt-repo', 'rb').read())
    else:
        # hash the content of setup-yum-repo*
        sha_sum.update(open('scripts/lib/setup-yum-repo', 'rb').read())

    # hash the content of build-pgroonga if pgroonga is built from source
    if BUILD_PGROONGA_FROM_SOURCE:
        sha_sum.update(open('scripts/lib/build-pgroonga', 'rb').read())

    new_apt_dependencies_hash = sha_sum.hexdigest()
    last_apt_dependencies_hash = None
    apt_hash_file_path = os.path.join(UUID_VAR_PATH, "apt_dependencies_hash")
    with open(apt_hash_file_path, 'a+') as hash_file:
        hash_file.seek(0)
        last_apt_dependencies_hash = hash_file.read()

    if new_apt_dependencies_hash != last_apt_dependencies_hash:
        try:
            install_system_deps()
        except subprocess.CalledProcessError:
            # Might be a failure due to network connection issues. Retrying...
            print(WARNING +
                  "Installing system dependencies failed; retrying..." + ENDC)
            install_system_deps()
        with open(apt_hash_file_path, 'w') as hash_file:
            hash_file.write(new_apt_dependencies_hash)
    else:
        print("No changes to apt dependencies, so skipping apt operations.")

    # Here we install node.
    proxy_env = [
        "env",
        "http_proxy=" + os.environ.get("http_proxy", ""),
        "https_proxy=" + os.environ.get("https_proxy", ""),
        "no_proxy=" + os.environ.get("no_proxy", ""),
    ]
    run_as_root(proxy_env + ["scripts/lib/install-node"], sudo_args=['-H'])

    if not os.access(NODE_MODULES_CACHE_PATH, os.W_OK):
        run_as_root(["mkdir", "-p", NODE_MODULES_CACHE_PATH])
        run_as_root([
            "chown",
            "%s:%s" % (os.getuid(), os.getgid()), NODE_MODULES_CACHE_PATH
        ])

    # This is a wrapper around `yarn`, which we run last since
    # it can often fail due to network issues beyond our control.
    try:
        setup_node_modules(prefer_offline=True)
    except subprocess.CalledProcessError:
        print(WARNING + "`yarn install` failed; retrying..." + ENDC)
        try:
            setup_node_modules()
        except subprocess.CalledProcessError:
            print(
                FAIL +
                "`yarn install` is failing; check your network connection (and proxy settings)."
                + ENDC)
            sys.exit(1)

    # Install shellcheck.
    run_as_root(["tools/setup/install-shellcheck"])

    setup_venvs.main()

    run_as_root(["cp", REPO_STOPWORDS_PATH, TSEARCH_STOPWORDS_PATH])

    if is_circleci or (is_travis and not options.is_production_travis):
        run_as_root(["service", "rabbitmq-server", "restart"])
        run_as_root(["service", "redis-server", "restart"])
        run_as_root(["service", "memcached", "restart"])
        run_as_root(["service", "postgresql", "restart"])
    elif "fedora" in os_families():
        # These platforms don't enable and start services on
        # installing their package, so we do that here.
        for service in [
                "postgresql-%s" % (POSTGRES_VERSION, ), "rabbitmq-server",
                "memcached", "redis"
        ]:
            run_as_root(["systemctl", "enable", service], sudo_args=['-H'])
            run_as_root(["systemctl", "start", service], sudo_args=['-H'])

    # If we imported modules after activating the virtualenv in this
    # Python process, they could end up mismatching with modules we’ve
    # already imported from outside the virtualenv.  That seems like a
    # bad idea, and empirically it can cause Python to segfault on
    # certain cffi-related imports.  Instead, start a new Python
    # process inside the virtualenv.
    activate_this = "/srv/zulip-py3-venv/bin/activate_this.py"
    provision_inner = os.path.join(ZULIP_PATH, "tools", "lib",
                                   "provision_inner.py")
    exec(open(activate_this).read(), dict(__file__=activate_this))
    os.execvp(provision_inner, [
        provision_inner,
        *(["--force"] if options.is_force else []),
        *(["--production-travis"] if options.is_production_travis else []),
    ])
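
The apt_dependencies_hash logic in main() is a general caching pattern: hash every input that could affect an expensive step, and skip the step when the hash matches the one recorded on the last successful run. A standalone sketch of the same idea follows; the function and file names here are placeholders, not part of the provisioning code.

import hashlib
import os
from typing import Callable, List

def install_if_changed(inputs: List[str], stamp_path: str,
                       install: Callable[[], None]) -> None:
    # Hash everything that could affect the expensive step.
    digest = hashlib.sha1()
    for item in inputs:
        digest.update(item.encode())
    new_hash = digest.hexdigest()

    old_hash = ""
    if os.path.exists(stamp_path):
        with open(stamp_path) as f:
            old_hash = f.read()

    if new_hash == old_hash:
        print("No changes to dependencies, so skipping install.")
        return

    install()
    # Record the hash only after the step succeeds, as main() does above.
    with open(stamp_path, "w") as f:
        f.write(new_hash)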
Example #6
if vendor == 'debian' and os_version in [] or vendor == 'ubuntu' and os_version in []:
    # For platforms without a pgroonga release, we need to build it
    # from source.
    BUILD_PGROONGA_FROM_SOURCE = True
    SYSTEM_DEPENDENCIES = UBUNTU_COMMON_APT_DEPENDENCIES + [
        pkg.format(POSTGRES_VERSION) for pkg in [
            "postgresql-{0}",
            # Dependency for building pgroonga from source
            "postgresql-server-dev-{0}",
            "libgroonga-dev",
            "libmsgpack-dev",
            "clang-9",
            "llvm-9-dev"
        ]
    ] + VENV_DEPENDENCIES
elif "debian" in os_families():
    SYSTEM_DEPENDENCIES = UBUNTU_COMMON_APT_DEPENDENCIES + [
        pkg.format(POSTGRES_VERSION) for pkg in [
            "postgresql-{0}",
            "postgresql-{0}-pgroonga",
        ]
    ] + VENV_DEPENDENCIES
elif "rhel" in os_families():
    SYSTEM_DEPENDENCIES = COMMON_YUM_DEPENDENCIES + [
        pkg.format(POSTGRES_VERSION) for pkg in [
            "postgresql{0}-server",
            "postgresql{0}",
            "postgresql{0}-devel",
            "postgresql{0}-pgroonga",
        ]
    ] + VENV_DEPENDENCIES
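
For reference, these list comprehensions just substitute the PostgreSQL version into each package-name template. Assuming, purely for illustration, POSTGRES_VERSION = "12", the plain Debian-family branch expands as follows.

# Hypothetical version, used only to show what the comprehension produces.
POSTGRES_VERSION = "12"

packages = [
    pkg.format(POSTGRES_VERSION) for pkg in [
        "postgresql-{0}",
        "postgresql-{0}-pgroonga",
    ]
]
# packages == ["postgresql-12", "postgresql-12-pgroonga"]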
Example #7
def main(options: argparse.Namespace) -> "NoReturn":

    # yarn and management commands expect to be run from the root of the
    # project.
    os.chdir(ZULIP_PATH)

    # hash the apt dependencies
    sha_sum = hashlib.sha1()

    for apt_dependency in SYSTEM_DEPENDENCIES:
        sha_sum.update(apt_dependency.encode())
    if "debian" in os_families():
        with open("scripts/lib/setup-apt-repo", "rb") as fb:
            sha_sum.update(fb.read())
    else:
        # hash the content of setup-yum-repo*
        with open("scripts/lib/setup-yum-repo", "rb") as fb:
            sha_sum.update(fb.read())

    # hash the content of build-pgroonga if PGroonga is built from source
    if BUILD_PGROONGA_FROM_SOURCE:
        with open("scripts/lib/build-pgroonga", "rb") as fb:
            sha_sum.update(fb.read())

    new_apt_dependencies_hash = sha_sum.hexdigest()
    last_apt_dependencies_hash = None
    apt_hash_file_path = os.path.join(UUID_VAR_PATH, "apt_dependencies_hash")
    with open(apt_hash_file_path, "a+") as hash_file:
        hash_file.seek(0)
        last_apt_dependencies_hash = hash_file.read()

    if new_apt_dependencies_hash != last_apt_dependencies_hash:
        try:
            install_system_deps()
        except subprocess.CalledProcessError:
            try:
                # Might be a failure due to network connection issues. Retrying...
                print(WARNING +
                      "Installing system dependencies failed; retrying..." +
                      ENDC)
                install_system_deps()
            except BaseException as e:
                # Suppress exception chaining
                raise e from None
        with open(apt_hash_file_path, "w") as hash_file:
            hash_file.write(new_apt_dependencies_hash)
    else:
        print("No changes to apt dependencies, so skipping apt operations.")

    # Binary-patch ARM64 assembly bug in OpenSSL 1.1.1b through 1.1.1h.
    # https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=989604
    # https://bugs.launchpad.net/ubuntu/+source/openssl/+bug/1951279
    try:
        with open("/usr/lib/aarch64-linux-gnu/libcrypto.so.1.1", "rb") as fb:
            if b"\xbf#\x03\xd5\xfd\x07E\xf8" in fb.read():
                run_as_root(
                    [
                        "sed",
                        "-i",
                        r"s/\(\xbf#\x03\xd5\)\(\xfd\x07E\xf8\)/\2\1/",
                        "/usr/lib/aarch64-linux-gnu/libcrypto.so.1.1",
                    ],
                    env={
                        **os.environ, "LC_ALL": "C"
                    },
                )
    except FileNotFoundError:
        pass

    # Here we install node.
    proxy_env = [
        "env",
        "http_proxy=" + os.environ.get("http_proxy", ""),
        "https_proxy=" + os.environ.get("https_proxy", ""),
        "no_proxy=" + os.environ.get("no_proxy", ""),
    ]
    run_as_root([*proxy_env, "scripts/lib/install-node"], sudo_args=["-H"])
    run_as_root([*proxy_env, "scripts/lib/install-yarn"])

    if not os.access(NODE_MODULES_CACHE_PATH, os.W_OK):
        run_as_root(["mkdir", "-p", NODE_MODULES_CACHE_PATH])
        run_as_root(
            ["chown", f"{os.getuid()}:{os.getgid()}", NODE_MODULES_CACHE_PATH])

    # This is a wrapper around `yarn`, which we run last since
    # it can often fail due to network issues beyond our control.
    try:
        setup_node_modules(prefer_offline=True)
    except subprocess.CalledProcessError:
        print(WARNING + "`yarn install` failed; retrying..." + ENDC)
        try:
            setup_node_modules()
        except subprocess.CalledProcessError:
            print(
                FAIL +
                "`yarn install` is failing; check your network connection (and proxy settings)."
                + ENDC)
            sys.exit(1)

    # Install shellcheck.
    run_as_root(["tools/setup/install-shellcheck"])
    # Install shfmt.
    run_as_root(["tools/setup/install-shfmt"])

    setup_venvs.main()

    run_as_root(["cp", REPO_STOPWORDS_PATH, TSEARCH_STOPWORDS_PATH])

    if CONTINUOUS_INTEGRATION and not options.is_build_release_tarball_only:
        run_as_root(["service", "redis-server", "start"])
        run_as_root(["service", "memcached", "start"])
        run_as_root(["service", "rabbitmq-server", "start"])
        run_as_root(["service", "postgresql", "start"])
    elif "fedora" in os_families():
        # These platforms don't enable and start services on
        # installing their package, so we do that here.
        for service in [
                f"postgresql-{POSTGRESQL_VERSION}",
                "rabbitmq-server",
                "memcached",
                "redis",
        ]:
            run_as_root(["systemctl", "enable", service], sudo_args=["-H"])
            run_as_root(["systemctl", "start", service], sudo_args=["-H"])

    # If we imported modules after activating the virtualenv in this
    # Python process, they could end up mismatching with modules we’ve
    # already imported from outside the virtualenv.  That seems like a
    # bad idea, and empirically it can cause Python to segfault on
    # certain cffi-related imports.  Instead, start a new Python
    # process inside the virtualenv.
    activate_this = "/srv/zulip-py3-venv/bin/activate_this.py"
    provision_inner = os.path.join(ZULIP_PATH, "tools", "lib",
                                   "provision_inner.py")
    with open(activate_this) as f:
        exec(f.read(), dict(__file__=activate_this))
    os.execvp(
        provision_inner,
        [
            provision_inner,
            *(["--force"] if options.is_force else []),
            *(["--build-release-tarball-only"]
              if options.is_build_release_tarball_only else []),
            *(["--skip-dev-db-build"] if options.skip_dev_db_build else []),
        ],
    )