Ejemplo n.º 1
0
def sync_bucket_script(exclude_buckets):
    """
    Watch all bucket directories under VEIL_BUCKETS_DIR (minus the excluded
    ones) with inotify and sync every changed path to the
    'latest-bucket-updates' backup mirror.  Runs forever.

    :param exclude_buckets: comma-separated bucket directory names to skip
    """
    watch_directories = []
    exclude_buckets = exclude_buckets.split(',')
    for bucket_path in VEIL_BUCKETS_DIR.listdir():
        bucket_dir_name = bucket_path.basename()
        if bucket_dir_name not in exclude_buckets:
            LOGGER.info('watching bucket directory: %(bucket_path)s', {'bucket_path': bucket_path})
            watch_directories.append(bucket_path)

    mask = inotify.constants.IN_CREATE | inotify.constants.IN_MODIFY | inotify.constants.IN_DELETE | inotify.constants.IN_DELETE_SELF | inotify.constants.IN_MOVED_TO
    monitor = inotify.adapters.InotifyTrees(watch_directories, mask=mask)

    # event_gen() yields None when no event is pending; skip those ticks
    for event in monitor.event_gen():
        if event is None:
            continue
        (header, type_names, path, filename) = event
        if header.mask & inotify.constants.IN_IGNORED:
            # the watch itself was removed; nothing to sync
            continue
        if header.mask & inotify.constants.IN_DELETE_SELF:
            continue
        if filename.startswith('tmp') and filename.endswith('---tmp'):
            # skip in-flight temporary files (tmp...---tmp naming convention)
            continue
        if header.mask & inotify.constants.IN_DELETE and header.mask & inotify.constants.IN_ISDIR:
            # a directory was deleted: sync the directory path itself
            rel_path = VEIL_BUCKETS_DIR.relpathto(path)
        else:
            if (as_path(path) / filename).exists():
                rel_path = VEIL_BUCKETS_DIR.relpathto(as_path(path) / filename)
            else:
                # the file is already gone: fall back to syncing its parent directory
                rel_path = VEIL_BUCKETS_DIR.relpathto(path)
        sync_to_backup_mirror(rel_path, 'latest-bucket-updates', VEIL_BUCKETS_DIR)
Ejemplo n.º 2
0
def patch_utrunner(pycharm_dir):
    """Overwrite PyCharm's helpers/pycharm/utrunner.py with the patched copy shipped next to this module."""
    pycharm_root = as_path(pycharm_dir)
    # build.txt marks a real PyCharm installation root
    if not (pycharm_root / 'build.txt').exists():
        raise Exception('please create link to pycharm under $VEIL_HOME/env')
    source = CURRENT_DIR / 'utrunner_py'
    target = pycharm_root / 'helpers' / 'pycharm' / 'utrunner.py'
    shell_execute('cp {} {}'.format(source, target))
Ejemplo n.º 3
0
def lock_migration_scripts(purpose):
    """Write a .locked file holding the md5 of each .sql migration script under db/<purpose>."""
    for script_path in (VEIL_HOME / 'db' / purpose).files('*.sql'):
        with open(script_path) as script_file:
            digest = calculate_file_md5_hash(script_file)
        as_path(script_path.replace('.sql', '.locked')).write_text(digest)
Ejemplo n.º 4
0
 def setUp(self):
     """Point the 'test' bucket at the system temp directory on the local filesystem."""
     super(FilesystemBucketTest, self).setUp()
     self.temp_dir = as_path(tempfile.gettempdir())
     override_bucket_config('test',
                            type='filesystem',
                            base_directory=self.temp_dir,
                            base_url=None)
Ejemplo n.º 5
0
def veil_host_user_resource(host, user_dir):
    username = user_dir.basename()
    initialized_file_path = '/home/{}/.veil_host_user_initialized'.format(username)
    with fabric.api.settings(host_string=host.deploys_via, user=host.ssh_user, port=host.ssh_port):
        installed = fabric.contrib.files.exists(initialized_file_path, use_sudo=True)
        dry_run_result = get_dry_run_result()
        if dry_run_result is not None:
            key = 'veil_host_user_{}?{}'.format(username, host.VEIL_ENV.name)
            dry_run_result[key] = '-' if installed else 'INSTALL'
            return

        if not installed:
            ret = fabric.api.run('getent passwd {}'.format(username), warn_only=True)
            if ret.failed:
                uid = (user_dir / 'id').text().strip()
                fabric.api.sudo('adduser --uid {uid} {username} --gecos {username} --disabled-login --shell /usr/sbin/nologin --quiet'.format(username=username, uid=uid))
        fabric.api.put(local_path=user_dir, remote_path='/home/', use_sudo=True, mode=0755)
        fabric.api.sudo('chown -R {username}:{username} /home/{username}/'.format(username=username))
        user_ssh_dir = user_dir / '.ssh'
        if user_ssh_dir.isdir():
            fabric.api.sudo('chmod 0700 /home/{}/.ssh'.format(username), user=username)
            if user_ssh_dir.listdir():
                fabric.api.sudo('chmod 0600 /home/{}/.ssh/*'.format(username), user=username)
        for f in as_path(user_dir):
            if f.endswith('.service'):
                fabric.api.put(local_path=f, remote_path='/lib/systemd/system/', use_sudo=True, mode=0644)
                fabric.api.sudo('systemctl daemon-reload')
                service_name = f.basename()
                fabric.api.sudo('systemctl enable {}'.format(service_name))
                fabric.api.sudo('systemctl start {}'.format(service_name))
        if not installed:
            fabric.api.sudo('touch {}'.format(initialized_file_path))
            fabric.api.sudo('chown {}:{} {}'.format(username, username, initialized_file_path))
Ejemplo n.º 6
0
def frontend_static_resource(frontend_root_path):
    """Install yarn and the frontend's node modules, then build its static assets, fixing ownership along the way."""
    root = as_path(frontend_root_path)
    if not root.exists():
        raise Exception('No such directory: {}'.format(root))
    if not (root / 'package.json').exists():
        raise Exception('No package.json file: {}'.format(root))
    dry_run_result = get_dry_run_result()
    if dry_run_result is not None:
        dry_run_result['frontend_static_resource'] = 'INSTALL'
        return
    shell_execute('sudo chown -R {}:{} {}'.format(CURRENT_USER, CURRENT_USER_GROUP, root))
    shell_execute('sudo npm install yarn -g', cwd=root)
    if not (root / 'node_modules').exists():
        shell_execute('sudo -u {} yarn'.format(CURRENT_USER), cwd=root)
    shell_execute('sudo chown -R {}:{} node_modules'.format(CURRENT_USER, CURRENT_USER_GROUP), cwd=root)
    dist = root / 'dist'
    if dist.exists():
        shell_execute('sudo chown -R {}:{} {}'.format(CURRENT_USER, CURRENT_USER_GROUP, dist))
    shell_execute('sudo -u {} yarn install'.format(CURRENT_USER), cwd=root)
    shell_execute('sudo -u {} yarn run build'.format(CURRENT_USER), cwd=root)
Ejemplo n.º 7
0
def get_static_file_hash(path):
    """Return (and memoize in static_file_hashes) the md5 hex digest of an external static file; None when unreadable."""
    assert external_static_files_directory
    if static_file_hashes.get(path) is None:
        static_file_path = as_path(external_static_files_directory) / path
        try:
            with open(static_file_path) as f:
                static_file_hashes[path] = calculate_file_md5_hash(f, hex=True)
        except Exception:
            # best-effort: log and fall through, leaving the cache entry unset
            LOGGER.error('Could not open static file: %(static_file_path)s', {'static_file_path': static_file_path})
    return static_file_hashes.get(path)
Ejemplo n.º 8
0
def write_inline_static_file(page_handler, suffix, content):
    assert inline_static_files_directory
    hash = hashlib.md5(to_str(content)).hexdigest()
    dir = as_path(inline_static_files_directory)
    if not dir.exists():
        dir.mkdir(0755)
    inline_static_file = dir / hash
    if not inline_static_file.exists():
        inline_static_file.write_text(to_str(content))
    page_name = page_handler.__name__.replace("_widget", "").replace("_page", "").replace("_", "-")
    pseudo_file_name = "{}.{}".format(page_name, suffix)
    return "v-{}/{}".format(hash, pseudo_file_name)
Ejemplo n.º 9
0
def get_static_file_hash(path):
    """
    Return the md5 hex digest of the external static file at *path*,
    memoized in the module-level static_file_hashes cache; None when the
    file cannot be opened.
    """
    assert external_static_files_directory
    if static_file_hashes.get(path) is None:
        static_file_path = as_path(external_static_files_directory) / path
        try:
            with open(static_file_path) as f:
                hash = calculate_file_md5_hash(f, hex=True)
                static_file_hashes[path] = hash
        except Exception:
            # best-effort: log and return None via the cache miss below
            LOGGER.error('Could not open static file: %(static_file_path)s',
                         {'static_file_path': static_file_path})
    return static_file_hashes.get(path)
Ejemplo n.º 10
0
 def __init__(self, endpoint=None, local=False, config_dir=None):
     """
     Create a pylxd client: against the local socket when local=True,
     otherwise against *endpoint* authenticated with the cert/key pair and
     trust password found in *config_dir*.
     """
     if local:
         self.client = pylxd.Client()
     else:
         assert endpoint is not None, 'non local should provide endpoint'
         assert config_dir is not None, 'non local should provide config_dir'
         config_file = as_path(config_dir) / '.config'
         config = load_config_from(config_file, 'lxd_trusted_password')
         cert = (os.path.expanduser('{}/lxd.crt'.format(config_dir)), os.path.expanduser('{}/lxd.key'.format(config_dir)))
         # verify=False: LXD servers use self-signed certs; timeout=(connect, read)
         self.client = pylxd.Client(endpoint=endpoint, cert=cert, verify=False, timeout=(3.05, 27))
         if not self.client.trusted:
             self.client.authenticate(config.lxd_trusted_password)
     assert self.client.trusted
Ejemplo n.º 11
0
def download_baseline(veil_env_name, remote_path, baseline_path):
    if isinstance(baseline_path, basestring):
        baseline_path = as_path(baseline_path)
    baseline_path.makedirs(0755)
    backup_mirror = get_veil_server(veil_env_name, '@guard').backup_mirror
    if not backup_mirror:
        raise Exception('backup mirror not found on server {}/{}'.format(veil_env_name, '@guard'))
    if not hasattr(backup_mirror, 'domain'):
        backup_mirror.domain = 'ljhost-01.dmright.com'
    backup_mirror_path = '~/backup_mirror/{}'.format(veil_env_name)
    shell_execute('''rsync -avhPz -e "ssh -p {} -T -x -o Compression=yes -o StrictHostKeyChecking=no" --delete --bwlimit={} {}@{}:{}/{}/ {}/'''.format(
        backup_mirror.ssh_port, backup_mirror.bandwidth_limit, backup_mirror.ssh_user, backup_mirror.domain, backup_mirror_path, remote_path,
        baseline_path), debug=True)
Ejemplo n.º 12
0
def download_baseline(veil_env_name, purpose, baseline_path):
    backup_mirror = get_veil_env(veil_env_name).backup_mirror
    if not backup_mirror:
        raise Exception('backup mirror not found in veil env. {}'.format(veil_env_name))

    if isinstance(baseline_path, basestring):
        baseline_path = as_path(baseline_path)
    baseline_path.makedirs(0755)

    backup_mirror_path = VEIL_BACKUP_MIRROR_ROOT / veil_env_name / 'latest-database-recover'
    shell_execute('''rsync -avzhPH -e "ssh -p {} -T -x -o Compression=no -o StrictHostKeyChecking=no" --delete --bwlimit={} {}@{}:{}/{}/ {}/'''.format(
        backup_mirror.ssh_port, backup_mirror.bandwidth_limit, backup_mirror.ssh_user, backup_mirror.domain, backup_mirror_path, purpose, baseline_path),
        debug=True)
Ejemplo n.º 13
0
def lxc_container_name_servers_resource(container_name, name_servers):
    """
    Ensure the container's resolvconf tail file lists the given DNS servers.

    :param container_name: LXC container name under /var/lib/lxc
    :param name_servers: comma-separated name server addresses
    """
    container_rootfs_path = as_path('/var/lib/lxc/') / container_name / 'rootfs'
    resolve_conf_path = container_rootfs_path / 'etc' / 'resolvconf' / 'resolv.conf.d' / 'tail'
    config_content = '\n'.join('nameserver {}'.format(name_server) for name_server in name_servers.split(','))
    # guard the read: the tail file may not exist yet on a freshly created container
    installed = resolve_conf_path.exists() and config_content == resolve_conf_path.text()
    dry_run_result = get_dry_run_result()
    if dry_run_result is not None:
        # BUG FIX: was str.find(...), which raises TypeError; str.format(...) was intended
        key = 'lxc_container_name servers_resource?container_name={}&name_servers={}'.format(container_name, name_servers)
        dry_run_result[key] = '-' if installed else 'INSTALL'
        return
    if installed:
        return
    LOGGER.info('set container name servers: in %(container_name)s to %(name_servers)s', {'container_name': container_name, 'name_servers': name_servers})
    resolve_conf_path.write_text(config_content)
Ejemplo n.º 14
0
def download_baseline(veil_env_name, remote_path, baseline_path):
    if isinstance(baseline_path, basestring):
        baseline_path = as_path(baseline_path)
    baseline_path.makedirs(0755)
    backup_mirror = get_veil_server(veil_env_name, 'guard').backup_mirror
    if not backup_mirror:
        raise Exception('backup mirror not found on server {}/{}'.format(
            veil_env_name, 'guard'))
    if not hasattr(backup_mirror, 'domain'):
        backup_mirror.domain = 'ljhost-01.dmright.com'
    backup_mirror_path = '~/backup_mirror/{}'.format(veil_env_name)
    shell_execute(
        '''rsync -avhPz -e "ssh -p {} -T -x -o Compression=yes -o StrictHostKeyChecking=no" --delete --bwlimit={} {}@{}:{}/{}/ {}/'''
        .format(backup_mirror.ssh_port, backup_mirror.bandwidth_limit,
                backup_mirror.ssh_user, backup_mirror.domain,
                backup_mirror_path, remote_path, baseline_path),
        debug=True)
Ejemplo n.º 15
0
def veil_hosts_resource(veil_env_name, config_dir):
    """
    Build the full list of installer resources for every host (and every
    server on each host) in the given veil environment.

    Relies on the module-level ``hosts_to_install`` list to install
    host-level resources only once per base host name.

    :param veil_env_name: environment whose hosts to provision
    :param config_dir: root directory holding per-env host configuration
    :return: list of installer resources
    """
    resources = []
    hosts = list_veil_hosts(veil_env_name)
    for host in hosts:
        fabric.api.env.host_string = host.deploys_via
        if is_initialized_for_another_same_base_instance(host):
            raise Exception(
                'Can not deploy {} on host {} as it is initialized for another same-base instance!!!'
                .format(host.VEIL_ENV.name, host.name))
        # host-level resources are installed once per base host name
        if host.base_name not in hosts_to_install:
            resources.extend([
                veil_host_onetime_config_resource(host=host),
                veil_host_config_resource(host=host, config_dir=config_dir),
                veil_host_application_config_resource(host=host,
                                                      config_dir=config_dir),
                veil_host_codebase_resource(host=host)
            ])
            host_users_dir = as_path(config_dir / host.VEIL_ENV.name /
                                     'hosts' / host.base_name / 'USERS')
            if host_users_dir.exists():
                print(cyan('Install Veil host users resource'))
                for user_dir in host_users_dir.dirs():
                    resources.append(
                        veil_host_user_resource(host=host, user_dir=user_dir))
            # editor resource applies if any host sharing this base name wants it
            if any(h.with_user_editor for h in hosts
                   if h.base_name == host.base_name):
                print(cyan('Install Veil host user editor resource'))
                resources.append(
                    veil_host_user_editor_additional_resource(host=host))
            resources.append(veil_host_iptables_rules_resource(host=host))
            hosts_to_install.append(host.base_name)
        # per-server resources are always installed
        for server in host.server_list:
            resources.extend([
                veil_host_directory_resource(host=host,
                                             remote_path=server.etc_dir),
                veil_host_directory_resource(host=host,
                                             remote_path=server.log_dir,
                                             owner=host.ssh_user,
                                             owner_group=host.ssh_user_group,
                                             mode=0755),
                veil_container_resource(host=host,
                                        server=server,
                                        config_dir=config_dir)
            ])
    return resources
Ejemplo n.º 16
0
def lxc_container_network_resource(container_name, ip_address, gateway):
    """
    Ensure the container's /etc/network/interfaces matches the rendered
    interfaces.j2 template for the given static ip_address and gateway.
    """
    container_rootfs_path = as_path('/var/lib/lxc/') / container_name / 'rootfs'
    network_interfaces_path = container_rootfs_path / 'etc' / 'network' / 'interfaces'
    config_content = render_config('interfaces.j2', ip_address=ip_address, gateway=gateway)
    # guard the read: the interfaces file may be absent on a freshly created container
    installed = network_interfaces_path.exists() and config_content == network_interfaces_path.text()
    dry_run_result = get_dry_run_result()
    if dry_run_result is not None:
        # BUG FIX: was str.find(...), which raises TypeError; str.format(...) was intended
        key = 'lxc_container_network?container_name={}&ip_address={}'.format(container_name, ip_address)
        dry_run_result[key] = '-' if installed else 'INSTALL'
        return
    if installed:
        return
    LOGGER.info('set container network: in %(container_name)s to %(ip_address)s via %(gateway)s', {
        'container_name': container_name,
        'ip_address': ip_address,
        'gateway': gateway
    })
    network_interfaces_path.write_text(config_content)
Ejemplo n.º 17
0
def lxc_container_timezone_resource(container_name, timezone):
    """
    Ensure the container's /etc/timezone holds the given timezone and
    reconfigure tzdata inside the container rootfs.
    """
    container_rootfs_path = as_path('/var/lib/lxc/') / container_name / 'rootfs'
    etc_timezone_path = container_rootfs_path / 'etc' / 'timezone'
    installed = etc_timezone_path.exists() and timezone == etc_timezone_path.text()
    dry_run_result = get_dry_run_result()
    if dry_run_result is not None:
        # BUG FIX: was str.find(...), which raises TypeError; str.format(...) was intended
        key = 'lxc_container_timezone?container_name={}&timezone={}'.format(container_name, timezone)
        dry_run_result[key] = '-' if installed else 'INSTALL'
        return
    if installed:
        return
    LOGGER.info('set container time zone: in %(container_name)s to %(timezone)s', {
        'container_name': container_name,
        'timezone': timezone
    })
    if not etc_timezone_path.exists():
        etc_timezone_path.touch()
    etc_timezone_path.write_text(timezone)
    shell_execute('chroot {} dpkg-reconfigure --frontend noninteractive tzdata'.format(container_rootfs_path), capture=True)
def lxc_container_timezone_resource(container_name, timezone):
    """
    Ensure the container's /etc/timezone holds the given timezone, point
    /etc/localtime at the matching zoneinfo file, and reconfigure tzdata
    inside the container rootfs.
    """
    container_rootfs_path = as_path('/var/lib/lxc/') / container_name / 'rootfs'
    etc_localtime_path = container_rootfs_path / 'etc' / 'localtime'
    etc_timezone_path = container_rootfs_path / 'etc' / 'timezone'
    installed = etc_timezone_path.exists() and timezone == etc_timezone_path.text()
    dry_run_result = get_dry_run_result()
    if dry_run_result is not None:
        # BUG FIX: was str.find(...), which raises TypeError; str.format(...) was intended
        key = 'lxc_container_timezone?container_name={}&timezone={}'.format(container_name, timezone)
        dry_run_result[key] = '-' if installed else 'INSTALL'
        return
    if installed:
        return
    LOGGER.info('set container time zone: in %(container_name)s to %(timezone)s', {
        'container_name': container_name,
        'timezone': timezone
    })
    if not etc_timezone_path.exists():
        etc_timezone_path.touch()
    shell_execute('ln -sf /usr/share/zoneinfo/{} {}'.format(timezone, etc_localtime_path))
    shell_execute('chroot {} dpkg-reconfigure --frontend noninteractive tzdata'.format(container_rootfs_path),
                  capture=True)
Ejemplo n.º 19
0
def frontend_static_resource(frontend_root_path):
    """Install yarn, fetch the frontend's node modules when missing, then build its static assets."""
    root = as_path(frontend_root_path)
    if not root.exists():
        raise Exception('No such directory: {}'.format(root))
    if not (root / 'package.json').exists():
        raise Exception('No package.json file in: {}'.format(root))

    dry_run_result = get_dry_run_result()
    if dry_run_result is not None:
        dry_run_result['frontend_static_resource'] = 'INSTALL'
        return

    shell_execute('sudo npm install yarn -g', cwd=root)

    if not (root / 'node_modules').exists():
        LOGGER.info('node modules path does not exist, install')
        shell_execute('yarn', cwd=root)

    shell_execute('yarn install', cwd=root)
    shell_execute('yarn run build', cwd=root)
from __future__ import unicode_literals, print_function, division
import logging
from veil_component import as_path
from veil_installer import *
from veil.utility.shell import *
from .os_package_installer import os_package_resource

LOGGER = logging.getLogger(__name__)
ETC_APT = as_path('/etc/apt')


@atomic_installer
def apt_repository_resource(name, key_url, definition, version=None):
    """Atomic-installer entry point; delegates to install_apt_repository_resource."""
    install_apt_repository_resource(name, key_url, definition, version)


def install_apt_repository_resource(name, key_url, definition, version=None):
    installed = is_os_package_repository_installed(name, version)
    dry_run_result = get_dry_run_result()
    if dry_run_result is not None:
        dry_run_result['apt_repository?{}{}'.format(
            name)] = '-' if installed else 'INSTALL'
        return
    if installed:
        return
    LOGGER.info('installing apt repository: %(name)s, %(version)s ...', {
        'name': name,
        'version': version
    })
    shell_execute(
        'wget --inet4-only -q -O - {} | apt-key add -'.format(key_url),
Ejemplo n.º 21
0
def init_lxd_daemon():
    """Initialise the remote LXD daemon non-interactively with the configured trust password."""
    config = load_config_from(as_path(get_env_config_dir()) / '.config', 'lxd_trusted_password')
    command = 'lxd init --auto --network-address=[::] --trust-password={}'.format(config.lxd_trusted_password)
    fabric.api.run(command)
Ejemplo n.º 22
0
from __future__ import unicode_literals, print_function, division
import os
from veil_component import as_path
from veil.utility.shell import *
from veil.frontend.cli import *

CURRENT_DIR = as_path(os.path.dirname(__file__))

@script('rebuild-index')
def rebuild_index():
    """Drop PyCharm's caches and index directories so they get rebuilt on next start."""
    for stale_dir in ('~/.PyCharm20/system/caches', '~/.PyCharm20/system/index'):
        shell_execute('rm -rf {}'.format(stale_dir))


@script('patch-utrunner')
def patch_utrunner(pycharm_dir):
    """
    Replace PyCharm's helpers/pycharm/utrunner.py with the patched copy
    ('utrunner_py') shipped next to this module.

    :param pycharm_dir: PyCharm installation root (must contain build.txt)
    """
    build_txt = (as_path(pycharm_dir) / 'build.txt')
    if not build_txt.exists():
        # build.txt marks a real PyCharm installation root
        raise Exception('please create link to pycharm under $VEIL_HOME/env')
    patched_utrunner_py = CURRENT_DIR / 'utrunner_py'
    utrunner_py = as_path(pycharm_dir) / 'helpers' / 'pycharm' / 'utrunner.py'
    shell_execute('cp {} {}'.format(patched_utrunner_py, utrunner_py))
Ejemplo n.º 23
0
 def __init__(self, base_directory, base_url):
     """Filesystem-backed bucket rooted at base_directory."""
     super(FilesystemBucket, self).__init__()
     self.base_directory = as_path(base_directory)  # root directory holding this bucket's objects
     self.base_url = base_url  # URL prefix paired with this bucket (usage not visible here)
Ejemplo n.º 24
0
def restore_from_baseline(veil_env_name, force_download='FALSE', relative_path=None, host_name=None):
    """
    Restore local VEIL_VAR_DIR data from a (possibly freshly downloaded)
    environment baseline, then reset postgresql passwords and re-run
    migrations.

    Examples:
        sudo -E veil :xxx-staging/db restore-from-baseline xxx-public TRUE data/xxx-postgresql-9.4
        sudo -E veil restore-from-baseline xxx-public TRUE data/xxx-postgresql-9.4
        sudo -E veil restore-from-baseline xxx-public FALSE data/xxx-postgresql-9.4

    :param veil_env_name: environment whose baseline to restore from
    :param force_download: 'TRUE' to re-download even when a local baseline exists
    :param relative_path: path under the host var dir to restore, or '*' for everything
    :param host_name: host to restore for; defaults to the first server mounting a data dir
    """
    if not host_name:
        for server in list_veil_servers(veil_env_name):
            if server.mount_data_dir:
                host_name = server.host_name
                break
    host = get_veil_host(veil_env_name, host_name)
    if not relative_path:
        assert any(server.mount_data_dir for server in host.server_list), 'Please specify relative_path'
        relative_path = host.var_dir.relpathto(host.data_dir)
    if relative_path == '*':
        # restore the whole var dir
        remote_path = as_path('latest') / host.base_name
        baseline_path = BASELINE_DIR / veil_env_name / host.base_name
        restored_to_path = VEIL_VAR_DIR
    else:
        remote_path = as_path('latest') / host.base_name / relative_path
        baseline_path = BASELINE_DIR / veil_env_name / host.base_name / relative_path
        restored_to_path = VEIL_VAR_DIR / relative_path
    if force_download.upper() == 'TRUE' or not baseline_path.exists():
        download_baseline(veil_env_name, remote_path, baseline_path)

    # stop services before overwriting data, then reinstall the server layout
    shell_execute('veil down', debug=True)
    shell_execute('rsync -avh --delete {}/ {}/'.format(baseline_path, restored_to_path), debug=True)
    shell_execute('veil install-server', debug=True)

    # collect the postgresql purposes affected by the restored path
    purposes = []
    if VEIL_DATA_DIR.startswith(restored_to_path):
        for pg_data_dir in VEIL_DATA_DIR.dirs('*-postgresql-*'):
            purposes.append(pg_data_dir.name.split('-postgresql-', 1)[0])
    elif restored_to_path.startswith(VEIL_DATA_DIR) and '-postgresql-' in restored_to_path.name:
        purposes.append(restored_to_path.name.split('-postgresql-', 1)[0])
    print('found postgresql purposes: {}'.format(purposes))
    for purpose in purposes:
        # set db conf permission
        config = postgresql_maintenance_config(purpose)
        shell_execute('chown -f {}:{} *'.format(config.owner, config.owner), cwd=VEIL_ETC_DIR / '{}-postgresql-{}'.format(purpose, config.version), debug=True)
    shell_execute('veil up --daemonize', debug=True)
    for purpose in purposes:
        config = postgresql_maintenance_config(purpose)
        config.update(database_client_config(purpose))
        while True:
            # set db owner password and run in a loop to wait PG to start
            try:
                shell_execute(
                    '''sudo -u dejavu psql -p {} -d template1 -c "ALTER ROLE {} WITH PASSWORD '{}'"'''.format(config.port, config.owner, config.owner_password),
                    debug=True)
                # set db user password
                shell_execute(
                    '''sudo -u dejavu psql -p {} -d template1 -c "ALTER ROLE {} WITH PASSWORD '{}'"'''.format(config.port, config.user, config.password),
                    debug=True, capture=True)
            except Exception:
                print('retrying')
                sleep(2)
            else:
                break
    shell_execute('veil migrate', debug=True)

    shell_execute('veil down', debug=True)
Ejemplo n.º 25
0
from __future__ import unicode_literals, print_function, division
from cStringIO import StringIO
import contextlib
import os
import fabric.api
import fabric.contrib.files
from veil_component import as_path
from veil_installer import *
from veil.server.config import *
from .server_installer import is_container_running

CURRENT_DIR = as_path(os.path.dirname(__file__))


@composite_installer
def veil_container_resource(host, server, config_dir):
    """Compose the installer resources needed to provision one veil container."""
    return [
        veil_container_lxc_resource(host=host, server=server),
        veil_container_config_resource(server=server, config_dir=config_dir),
        veil_container_onetime_config_resource(server=server),
    ]


def get_remote_file_content(remote_path):
    """Fetch a remote file's content via fabric; return None when the file does not exist."""
    if not fabric.contrib.files.exists(remote_path, use_sudo=True):
        return None
    with contextlib.closing(StringIO()) as buf:
        fabric.api.get(remote_path, local_path=buf, use_sudo=True)
        return buf.getvalue()
Ejemplo n.º 26
0
def restore_from_baseline(veil_env_name,
                          force_download='FALSE',
                          relative_path=None,
                          host_name=None):
    """
    Restore local VEIL_VAR_DIR data from a (possibly freshly downloaded)
    environment baseline, then reset postgresql passwords and re-run
    migrations.

    Examples:
        sudo -E veil :xxx-staging/db restore-from-baseline xxx-public TRUE data/xxx-postgresql-9.4
        sudo -E veil restore-from-baseline xxx-public TRUE data/xxx-postgresql-9.4
        sudo -E veil restore-from-baseline xxx-public FALSE data/xxx-postgresql-9.4

    :param veil_env_name: environment whose baseline to restore from
    :param force_download: 'TRUE' to re-download even when a local baseline exists
    :param relative_path: path under the host var dir to restore, or '*' for everything
    :param host_name: host to restore for; defaults to the first server mounting a data dir
    """
    if not host_name:
        for server in list_veil_servers(veil_env_name):
            if server.mount_data_dir:
                host_name = server.host_name
                break
    host = get_veil_host(veil_env_name, host_name)
    if not relative_path:
        assert any(
            server.mount_data_dir
            for server in host.server_list), 'Please specify relative_path'
        relative_path = host.var_dir.relpathto(host.data_dir)
    if relative_path == '*':
        # restore the whole var dir
        remote_path = as_path('latest') / host.base_name
        baseline_path = BASELINE_DIR / veil_env_name / host.base_name
        restored_to_path = VEIL_VAR_DIR
    else:
        remote_path = as_path('latest') / host.base_name / relative_path
        baseline_path = BASELINE_DIR / veil_env_name / host.base_name / relative_path
        restored_to_path = VEIL_VAR_DIR / relative_path
    if force_download.upper() == 'TRUE' or not baseline_path.exists():
        download_baseline(veil_env_name, remote_path, baseline_path)

    # stop services before overwriting data, then reinstall the server layout
    shell_execute('veil down', debug=True)
    shell_execute('rsync -avh --delete {}/ {}/'.format(baseline_path,
                                                       restored_to_path),
                  debug=True)
    shell_execute('veil install-server', debug=True)

    # collect the postgresql purposes affected by the restored path
    purposes = []
    if VEIL_DATA_DIR.startswith(restored_to_path):
        for pg_data_dir in VEIL_DATA_DIR.dirs('*-postgresql-*'):
            purposes.append(pg_data_dir.name.split('-postgresql-', 1)[0])
    elif restored_to_path.startswith(
            VEIL_DATA_DIR) and '-postgresql-' in restored_to_path.name:
        purposes.append(restored_to_path.name.split('-postgresql-', 1)[0])
    print('found postgresql purposes: {}'.format(purposes))
    for purpose in purposes:
        # set db conf permission
        config = postgresql_maintenance_config(purpose)
        shell_execute('chown -f {}:{} *'.format(config.owner, config.owner),
                      cwd=VEIL_ETC_DIR /
                      '{}-postgresql-{}'.format(purpose, config.version),
                      debug=True)
    shell_execute('veil up --daemonize', debug=True)
    for purpose in purposes:
        config = postgresql_maintenance_config(purpose)
        config.update(database_client_config(purpose))
        while True:
            # set db owner password and run in a loop to wait PG to start
            try:
                shell_execute(
                    '''sudo -u dejavu psql -p {} -d template1 -c "ALTER ROLE {} WITH PASSWORD '{}'"'''
                    .format(config.port, config.owner, config.owner_password),
                    debug=True)
                # set db user password
                shell_execute(
                    '''sudo -u dejavu psql -p {} -d template1 -c "ALTER ROLE {} WITH PASSWORD '{}'"'''
                    .format(config.port, config.user, config.password),
                    debug=True,
                    capture=True)
            except Exception:
                print('retrying')
                sleep(2)
            else:
                break
    shell_execute('veil migrate', debug=True)

    shell_execute('veil down', debug=True)
Ejemplo n.º 27
0
 def __init__(self, base_directory, base_url):
     """Filesystem-backed bucket rooted at base_directory."""
     super(FilesystemBucket, self).__init__()
     self.base_directory = as_path(base_directory)  # root directory holding this bucket's objects
     self.base_url = base_url  # URL prefix paired with this bucket (usage not visible here)
from __future__ import unicode_literals, print_function, division
import logging
from veil_component import as_path
from veil_installer import *
from veil.utility.shell import *
from .os_package_installer import os_package_resource

LOGGER = logging.getLogger(__name__)
ETC_APT = as_path('/etc/apt')


@atomic_installer
def apt_repository_resource(name, key_url, definition, version=None):
    """Atomic-installer entry point; delegates to install_apt_repository_resource."""
    install_apt_repository_resource(name, key_url, definition, version)


def install_apt_repository_resource(name, key_url, definition, version=None):
    """
    Install an apt repository: add its signing key, write its
    sources.list.d entry and apt-update just that list.

    :param name: repository name, used for the sources.list.d file name
    :param key_url: URL of the repository's GPG signing key
    :param definition: the deb line written to sources.list.d/<name>.list
    :param version: optional version passed to the installed check
    """
    installed = is_os_package_repository_installed(name, version)
    dry_run_result = get_dry_run_result()
    if dry_run_result is not None:
        # BUG FIX: the key template has two placeholders but only `name` was passed,
        # which raises IndexError on every dry run; include the version (empty when unset)
        key = 'apt_repository?{}{}'.format(name, '&version={}'.format(version) if version else '')
        dry_run_result[key] = '-' if installed else 'INSTALL'
        return
    if installed:
        return
    LOGGER.info('installing apt repository: %(name)s, %(version)s ...', {'name': name, 'version': version})
    shell_execute('wget --inet4-only -q -O - {} | sudo apt-key add -'.format(key_url), capture=True)
    shell_execute('echo "{}" | sudo tee /etc/apt/sources.list.d/{}.list'.format(definition, name), capture=True)
    # apt update the added repository
    shell_execute('sudo apt update -o Dir::Etc::sourcelist="sources.list.d/{}.list" -o Dir::Etc::sourceparts="-" -o APT::Get::List-Cleanup="0"'.format(name),
                  capture=True, debug=True)
Ejemplo n.º 29
0
 def setUp(self):
     """Point the 'test' bucket at the system temp directory on the local filesystem."""
     super(FilesystemBucketTest, self).setUp()
     self.temp_dir = as_path(tempfile.gettempdir())
     override_bucket_config('test', type='filesystem', base_directory=self.temp_dir, base_url=None)