def bootstrap(force=0):
    """
    Installs the global packages needed to manage virtual environments with pip.

    Downloads ez_setup.py, installs/updates setuptools and pip system-wide,
    then installs each package listed in ``env.pip_bootstrap_packages``.

    Parameters:
        force: truthy/int; when 0 (default), skips bootstrapping if both pip
            and virtualenv are already present on the host.
    """
    force = int(force)
    # Nothing to do if the host already has the tooling and we're not forcing.
    if has_pip() and has_virtualenv() and not force:
        return
    # Work on a shallow copy of env so the template interpolation below
    # doesn't pollute the shared environment.
    _env = type(env)(env)
    _env.pip_path_versioned = _env.pip_path % _env
    run_or_dryrun(
        'wget http://peak.telecommunity.com/dist/ez_setup.py -O /tmp/ez_setup.py'
    )
    # warn_only because ez_setup may fail harmlessly if setuptools is current.
    with settings(warn_only=True):
        sudo_or_dryrun(
            'python{pip_python_version} /tmp/ez_setup.py -U setuptools'.format(
            **_env))
    sudo_or_dryrun('easy_install -U pip')
    # NOTE(review): the guard checks env.pip_bootstrap_packages but iterates
    # _env.pip_bootstrap_packages — same value unless mutated above; confirm.
    if env.pip_bootstrap_packages:
        for package in _env.pip_bootstrap_packages:
            _env.package = package
            sudo_or_dryrun(
                '{pip_path_versioned} install --upgrade {package}'.format(
                **_env))
def set_collation_mysql(name=None, site=None):
    """
    Sets the character set and collation on the current MySQL database,
    using the values in env.db_mysql_character_set/env.db_mysql_collate.

    Parameters:
        name: optional database connection name passed to set_db().
        site: optional site name passed to set_db().
    """
    from burlap.dj import set_db
    # Populate env.db_* from the site's Django settings, then ensure
    # root credentials are loaded for the ALTER DATABASE statement.
    set_db(name=name, site=site)
    set_root_login()
    cmd = ("mysql -v -h %(db_host)s -u %(db_root_user)s -p'%(db_root_password)s' "
        "--execute='ALTER DATABASE %(db_name)s CHARACTER SET %(db_mysql_character_set)s COLLATE %(db_mysql_collate)s;'") % env
    run_or_dryrun(cmd)
def migrate(app='', migration='', site=None, fake=0, ignore_errors=0, skip_databases=None, database=None, migrate_apps='', delete_ghosts=1):
    """
    Runs the standard South migrate command for one or more sites.

    Parameters:
        app: single app name to migrate (appended to migrate_apps).
        migration: specific migration name/number to migrate to.
        site: site to operate on; None iterates all unique databases.
        fake: when truthy, passes --fake to South.
        ignore_errors: when truthy, migration failures do not abort.
        skip_databases: CSV string or list of database names.
            NOTE(review): parsed below but never consulted afterwards —
            looks like dead code; confirm before relying on it.
        database: passes --database=<name> to the migrate command.
        migrate_apps: CSV of app labels (dotted paths reduced to last part).
        delete_ghosts: when truthy, passes --delete-ghost-migrations.
    """
    ignore_errors = int(ignore_errors)
    delete_ghosts = int(delete_ghosts)
    skip_databases = (skip_databases or '')
    if isinstance(skip_databases, basestring):
        skip_databases = [_.strip() for _ in skip_databases.split(',') if _.strip()]
    migrate_apps = migrate_apps or ''
    # Keep only the trailing component of dotted app paths (e.g. a.b.c -> c).
    migrate_apps = [
        _.strip().split('.')[-1]
        for _ in migrate_apps.strip().split(',')
        if _.strip()
    ]
    if app:
        migrate_apps.append(app)
    render_remote_paths()
    # Shallow copy of env so per-site tweaks don't leak into the global env.
    _env = type(env)(env)
    _env.django_migrate_migration = migration or ''
    _env.django_migrate_fake_str = '--fake' if int(fake) else ''
    _env.django_migrate_database = '--database=%s' % database if database else ''
    _env.delete_ghosts = '--delete-ghost-migrations' if delete_ghosts else ''
    for site, site_data in iter_unique_databases(site=site):
        print('-'*80, file=sys.stderr)
        print('site:', site, file=sys.stderr)
        # Skip sites not hosted on the current machine, if a host map is set.
        if env.available_sites_by_host:
            hostname = common.get_current_hostname()
            sites_on_host = env.available_sites_by_host.get(hostname, [])
            if sites_on_host and site not in sites_on_host:
                print('skipping site:', site, sites_on_host, file=sys.stderr)
                continue
        print('migrate_apps:', migrate_apps, file=sys.stderr)
        if migrate_apps:
            _env.django_migrate_app = ' '.join(migrate_apps)
        else:
            _env.django_migrate_app = ''
        _env.SITE = site
        cmd = (
            'export SITE=%(SITE)s; export ROLE=%(ROLE)s; cd %(remote_manage_dir)s; '
            '%(django_manage)s migrate --noinput --traceback %(django_migrate_database)s %(delete_ghosts)s %(django_migrate_app)s %(django_migrate_migration)s '
            '%(django_migrate_fake_str)s'
        ) % _env
        cmd = cmd.strip()
        with settings(warn_only=ignore_errors):
            run_or_dryrun(cmd)
def createsuperuser(username='******', email=None, password=None, site=None):
    """
    Runs the Django createsuperuser management command on the remote host.

    Parameters:
        username: superuser name; also used as the email when email is None.
        email: superuser email address.
        password: accepted but UNUSED — createsuperuser prompts interactively.
            NOTE(review): confirm whether this parameter was meant to be wired in.
        site: site whose settings/paths should be activated first.
    """
    from burlap.dj import render_remote_paths
    set_site(site)
    render_remote_paths()
    env.db_createsuperuser_username = username
    env.db_createsuperuser_email = email or username
    run_or_dryrun('export SITE=%(SITE)s; export ROLE=%(ROLE)s; cd %(remote_manage_dir)s; %(django_manage)s createsuperuser --username=%(db_createsuperuser_username)s --email=%(db_createsuperuser_email)s' % env)
def update(package='', ignore_errors=0, no_deps=0, all=0, mirrors=1):
    """
    Updates the local cache of pip packages.

    If all=1, skips check of host and simply updates everything.

    Parameters:
        package: single package to download; empty downloads from requirements.
        ignore_errors: when truthy, pip failures do not abort the run.
        no_deps: when truthy, adds --no-deps to the pip command.
        all: when truthy, updates every entry in the requirements file.
        mirrors: when falsy, strips --use-mirrors from the pip command.
    """
    assert env[ROLE]
    ignore_errors = int(ignore_errors)
    env.pip_path_versioned = env.pip_path % env
    env.pip_local_cache_dir = env.pip_local_cache_dir_template % env
    env.pip_cache_dir = env.pip_local_cache_dir
    if not os.path.isdir(env.pip_cache_dir):
        os.makedirs(env.pip_cache_dir)
    env.pip_package = (package or '').strip()
    env.pip_no_deps = '--no-deps' if int(no_deps) else ''
    # NOTE(review): a fresh temp dir is stored as pip_build_dir, but the
    # rm command below interpolates pip_build_directory (a different env
    # key) — confirm which key is intended; as written the new temp dir
    # is never referenced again.
    env.pip_build_dir = tempfile.mkdtemp()
    # Clear build directory in case it wasn't properly cleaned up previously.
    cmd = 'rm -Rf %(pip_build_directory)s' % env
    if env.is_local:
        run_or_dryrun(cmd)
    else:
        sudo_or_dryrun(cmd)
    with settings(warn_only=ignore_errors):
        if package:
            # Download a single specific package.
            cmd = env.pip_update_command % env
            if not int(mirrors):
                cmd = cmd.replace('--use-mirrors', '')
            local_or_dryrun(cmd)
        else:
            # Download each package in a requirements file.
            # Note, specifying the requirements file in the command isn't properly
            # supported by pip, thus we have to parse the file itself and send each
            # to pip separately.
            if int(all):
                packages = list(iter_pip_requirements())
            else:
                packages = [k for k, v in check()]
            for package in packages:
                env.pip_package = package.strip()
                cmd = env.pip_update_command % env
                if not int(mirrors):
                    cmd = cmd.replace('--use-mirrors', '')
                local_or_dryrun(cmd)
def dump(dest_dir=None, to_local=None, from_local=0, archive=0, dump_fn=None):
    """
    Exports the target database to a single transportable file on the
    localhost, appropriate for loading using load().

    Parameters:
        dest_dir: overrides env.db_dump_dest_dir when given.
        to_local: when truthy, rsyncs the remote dump back to localhost;
            defaults to 1 when the target host is remote.
        from_local: when truthy, runs the dump locally instead of via sudo.
        archive: when truthy (and to_local), moves the dump into
            env.db_dump_archive_dir.
        dump_fn: optional filename template overriding env.db_dump_fn_template.

    Returns:
        The dump filename (env.db_dump_fn).

    Raises:
        NotImplementedError: when the database engine is not postgres/mysql
            and no custom env.db_dump_command is configured.
    """
    from burlap.dj import set_db
    from_local = int(from_local)
    set_db()
    if dest_dir:
        env.db_dump_dest_dir = dest_dir
    env.db_date = datetime.date.today().strftime('%Y%m%d')
    env.db_dump_fn = get_default_db_fn(dump_fn or env.db_dump_fn_template).strip()
    # Default to pulling the dump back when the target host is remote.
    if to_local is None and not env.is_local:
        to_local = 1
    if env.db_dump_command:
        run_or_dryrun(env.db_dump_command % env)
    elif 'postgres' in env.db_engine or 'postgis' in env.db_engine:
        assert env.db_schemas, \
            'Please specify the list of schemas to dump in db_schemas.'
        env.db_schemas_str = ' '.join('-n %s' % _ for _ in env.db_schemas)
        cmd = env.db_postgresql_dump_command % env
        if env.is_local or from_local:
            local_or_dryrun(cmd)
        else:
            sudo_or_dryrun(cmd)
    elif 'mysql' in env.db_engine:
        cmd = env.db_mysql_dump_command % env
        if env.is_local:
            local_or_dryrun(cmd)
        else:
            sudo_or_dryrun(cmd)
    else:
        # Fixed: was `raise NotImplemented`, which raises a TypeError because
        # NotImplemented is not an exception. Matches the NotImplementedError
        # usage elsewhere in this module.
        raise NotImplementedError('Unsupported database engine: %s' % env.db_engine)
    # Download the database dump file on the remote host to localhost.
    if not from_local and (0 if to_local is None else int(to_local)) and not env.is_local:
        cmd = ('rsync -rvz --progress --recursive --no-p --no-g --rsh "ssh -o StrictHostKeyChecking=no -i %(key_filename)s" %(user)s@%(host_string)s:%(db_dump_fn)s %(db_dump_fn)s') % env
        local_or_dryrun(cmd)
    if to_local and int(archive):
        db_fn = render_fn(env.db_dump_fn)
        env.db_archive_fn = '%s/%s' % (env.db_dump_archive_dir, os.path.split(db_fn)[-1])
        local_or_dryrun('mv %s %s' % (db_fn, env.db_archive_fn))
    return env.db_dump_fn
def uninstall(package):
    """
    Uninstalls the named package from the virtual environment via pip.
    """
    from burlap.dj import render_remote_paths
    render_remote_paths()
    # Render the virtualenv and cache paths from their templates.
    if env.pip_virtual_env_dir_template:
        env.pip_virtual_env_dir = env.pip_virtual_env_dir_template % env
    env.pip_local_cache_dir = env.pip_local_cache_dir_template % env
    env.pip_package = package
    # Run directly on a local target, escalate on a remote one.
    runner = run_or_dryrun if env.is_local else sudo_or_dryrun
    runner(env.pip_uninstall_command % env)
def update_install(clean=0, pip_requirements_fn=None, virtualenv_dir=None, user=None, group=None, perms=None):
    """
    Bootstraps pip, (re)creates the virtualenv, uploads the requirements
    file, and installs everything it lists into the virtualenv.

    Parameters:
        clean: when truthy, forces bootstrap and rebuilds the virtualenv.
        pip_requirements_fn: requirements filename; defaults to
            env.pip_requirements_fn.
        virtualenv_dir: overrides the templated virtualenv location.
        user, group, perms: ownership/permissions applied afterwards on
            remote hosts (see set_virtualenv_permissions).
    """
    # render_remote_paths is optional; tolerate its absence.
    try:
        from burlap.dj import render_remote_paths
    except ImportError:
        render_remote_paths = None
    # Shallow copy so our path tweaks don't leak into the shared env.
    _env = type(env)(env)
    pip_requirements_fn = pip_requirements_fn or env.pip_requirements_fn
    bootstrap(force=clean)
    init(clean=clean, virtualenv_dir=virtualenv_dir, check_permissions=False)
    req_fn = find_template(pip_requirements_fn)
    assert req_fn, 'Could not find file: %s' % pip_requirements_fn
    _env.pip_remote_requirements_fn = '/tmp/pip-requirements.txt'
    put_or_dryrun(
        local_path=req_fn,
        remote_path=_env.pip_remote_requirements_fn,
    )
    if render_remote_paths:
        render_remote_paths()
    # Explicit dir wins over the template-derived one.
    if virtualenv_dir:
        _env.virtualenv_dir = virtualenv_dir
    elif _env.pip_virtual_env_dir_template:
        _env.virtualenv_dir = _env.pip_virtual_env_dir_template % _env
    _env.pip_update_install_command = "%(virtualenv_dir)s/bin/pip install -r %(pip_remote_requirements_fn)s"
    if _env.is_local:
        run_or_dryrun(_env.pip_update_install_command % _env)
    else:
        sudo_or_dryrun(_env.pip_update_install_command % _env)
    # Fix ownership/permissions on remote hosts when requested or configured.
    if not _env.is_local and (_env.pip_check_permissions or user or group or perms):
        set_virtualenv_permissions(
            user=user,
            group=group,
            perms=perms,
            virtualenv_dir=virtualenv_dir,
        )
def database_files_dump(site=None):
    """
    Runs the Django management command to export files stored in the
    database to the filesystem.

    Assumes the app django_database_files is installed.
    """
    from burlap.dj import render_remote_paths
    set_site(site or env.SITE)
    render_remote_paths()
    command = (
        'export SITE=%(SITE)s; export ROLE=%(ROLE)s; cd %(remote_manage_dir)s; '
        '%(django_manage)s database_files_dump'
    ) % env
    # Local targets execute directly; remote targets go through run.
    if env.is_local:
        local_or_dryrun(command)
    else:
        run_or_dryrun(command)
def set_max_mysql_packet_size(do_set=1):
    """
    Raises MySQL's net_buffer_length and max_allowed_packet server
    variables to the values configured in env.

    Parameters:
        do_set: when falsy, the function is a no-op.
    """
    from burlap.dj import set_db
    # Removed an unused local (`verbose = common.get_verbose()`) that was
    # never read.
    do_set = int(do_set)
    if do_set:
        set_db(site=env.SITE, role=env.ROLE)
        # Raise max packet limitation.
        run_or_dryrun(
            ('mysql -v -h %(db_host)s -D %(db_name)s -u %(db_root_user)s '
            '-p"%(db_root_password)s" --execute="SET global '
            'net_buffer_length=%(db_mysql_net_buffer_length)s; SET global '
            'max_allowed_packet=%(db_mysql_max_allowed_packet)s;"') % env)
def createsuperuser(username='******', email=None, password=None, site=None):
    """
    Runs the Django createsuperuser management command on the remote host.

    Parameters:
        username: superuser name; also used as the email when email is None.
        email: superuser email address.
        password: accepted but UNUSED by this implementation.
            NOTE(review): confirm whether it was meant to be passed through.
        site: site whose settings/paths should be activated first.
    """
    from burlap.dj import render_remote_paths
    set_site(site)
    render_remote_paths()
    env.db_createsuperuser_username = username
    env.db_createsuperuser_email = email or username
    run_or_dryrun(
        'export SITE=%(SITE)s; export ROLE=%(ROLE)s; cd %(remote_manage_dir)s; %(django_manage)s createsuperuser --username=%(db_createsuperuser_username)s --email=%(db_createsuperuser_email)s' % env)
def database_files_dump(site=None):
    """
    Runs the Django management command to export files stored in the
    database to the filesystem.

    Assumes the app django_database_files is installed.

    Parameters:
        site: site to activate; defaults to env.SITE.
    """
    from burlap.dj import render_remote_paths
    set_site(site or env.SITE)
    render_remote_paths()
    cmd = 'export SITE=%(SITE)s; export ROLE=%(ROLE)s; cd %(remote_manage_dir)s; %(django_manage)s database_files_dump' % env
    # Execute locally when targeting the local machine, otherwise remotely.
    if env.is_local:
        local_or_dryrun(cmd)
    else:
        run_or_dryrun(cmd)
def write_pgpass(self, name=None, use_sudo=0, verbose=1, commands_only=0):
    """
    Write the file used to store login credentials for PostgreSQL
    (~/.pgpass-style file at env.db_postgresql_pgass_path).

    Parameters:
        name: optional connection name passed to set_db().
        use_sudo: when truthy, commands are run with sudo.
        verbose: NOTE(review): this parameter is immediately overwritten by
            common.get_verbose() below, so the argument has no effect —
            confirm whether that is intentional.
        commands_only: when truthy, only returns the command list without
            executing anything.

    Returns:
        The list of shell commands (touch/chmod/append) that build the file.
    """
    from burlap.dj import set_db
    from burlap.file import appendline
    use_sudo = int(use_sudo)
    verbose = common.get_verbose()
    commands_only = int(commands_only)
    if name:
        set_db(name=name)
    cmds = []
    # NOTE(review): env key is spelled "pgass" (not "pgpass") throughout —
    # presumably a long-standing typo in the env key name; do not "fix" it
    # here without updating the env defaults.
    cmds.append(
        'touch {db_postgresql_pgass_path}'.format(
            db_postgresql_pgass_path=env.db_postgresql_pgass_path))
    cmds.append(
        'chmod {db_postgresql_pgpass_chmod} {db_postgresql_pgass_path}'.format(
            db_postgresql_pgass_path=env.db_postgresql_pgass_path,
            db_postgresql_pgpass_chmod=env.db_postgresql_pgpass_chmod))
    pgpass_kwargs = dict(
        db_host=env.db_host,
        db_port=env.db_postgresql_port,
        db_user=env.db_user,
        db_password=env.db_password,
    )
    # Standard pgpass line: host:port:database:user:password ("*" = any db).
    pgpass_line = '{db_host}:{db_port}:*:{db_user}:{db_password}'\
        .format(**pgpass_kwargs)
    # appendline with commands_only=1 returns the idempotent append command.
    cmds.extend(appendline(
        fqfn=env.db_postgresql_pgass_path,
        line=pgpass_line,
        use_sudo=use_sudo,
        commands_only=1,
        verbose=0))
    if not commands_only:
        for cmd in cmds:
            if verbose:
                print(cmd)
            if use_sudo:
                sudo_or_dryrun(cmd)
            else:
                run_or_dryrun(cmd)
    return cmds
def run_paths(paths, cmd_template, max_retries=3):
    """
    Uploads each SQL file in ``paths`` and executes ``cmd_template`` against
    it, retrying failed files up to ``max_retries`` times before reporting
    them as permanently failed.

    NOTE(review): this references ``apps`` and ``env`` that are not defined
    in this function — presumably it was defined as a closure inside a task
    that provides them; verify against the original call site.

    Parameters:
        paths: iterable of file paths of the form .../<app>/sql/...
        cmd_template: command template interpolated with env.
        max_retries: attempts per file before giving up.
    """
    paths = list(paths)
    error_counts = defaultdict(int) # {path:count}
    terminal = set()
    # Process the queue; failed paths are re-appended for retry.
    while paths:
        path = paths.pop(0)
        # Derive the Django app name from the .../<app>/sql/... path segment.
        app_name = re.findall(r'/([^/]+)/sql/', path)[0]
        if apps and app_name not in apps:
            continue
        with settings(warn_only=True):
            put_or_dryrun(local_path=path)
            cmd = cmd_template % env
            error_code = run_or_dryrun(cmd)
        if error_code:
            error_counts[path] += 1
            if error_counts[path] < max_retries:
                paths.append(path)
            else:
                terminal.add(path)
    # Report files that never loaded successfully.
    if terminal:
        print('%i files could not be loaded.' % len(terminal), file=sys.stderr)
        for path in sorted(list(terminal)):
            print(path, file=sys.stderr)
        print(file=sys.stderr)
def execute_sql(fn, name='default', site=None):
    """
    Executes an arbitrary SQL file against each site's database.

    Parameters:
        fn: local path of the SQL file (must exist).
        name: database connection name passed to set_db/load_db_set.
        site: site to run against; None iterates all non-secure sites.
    """
    from burlap.dj import set_db
    from burlap.db import load_db_set
    assert os.path.isfile(fn), 'Missing file: %s' % fn
    site_summary = {} # {site: ret}
    for site, site_data in common.iter_sites(site=site, no_secure=True):
        try:
            set_db(name=name, site=site)
            load_db_set(name=name)
            env.SITE = site
            put_or_dryrun(local_path=fn)
            # warn_only so one site's SQL failure doesn't abort the loop.
            with settings(warn_only=True):
                ret = None
                if 'postgres' in env.db_engine or 'postgis' in env.db_engine:
                    ret = run_or_dryrun(
                        "psql --host=%(db_host)s --user=%(db_user)s -d %(db_name)s -f %(put_remote_path)s" % env)
                elif 'mysql' in env.db_engine:
                    ret = run_or_dryrun(
                        "mysql -h %(db_host)s -u %(db_user)s -p'%(db_password)s' %(db_name)s < %(put_remote_path)s" % env)
                else:
                    raise NotImplementedError('Unknown database type: %s' % env.db_engine)
            print('ret:', ret)
            site_summary[site] = ret
        except KeyError as e:
            # Sites missing database settings are recorded, not fatal.
            site_summary[site] = 'Error: %s' % str(e)
            pass
    # Per-site result summary.
    print('-' * 80)
    print('Site Summary:')
    for site, ret in sorted(site_summary.items(), key=lambda o: o[0]):
        print(site, ret)
def deploy_cura(): """ Updates files for the Printrbot manager. e.g. fab printer deploy_cura """ # Ensure our 3d configuration options are up-to-date. run_or_dryrun( 'mkdir -p ~/git; cd ~/git; git clone https://github.com/chrisspen/3d-printer-profiles.git; cd 3d-printer-profiles; git pull' ) # Ensure our 3d models are up-to-date. sudo_or_dryrun('mkdir -p %(project_home)s/models/printable' % env) sudo_or_dryrun('chown -R %(user)s:%(user)s %(project_home)s' % env) local_or_dryrun( 'rsync -avz --delete --rsh "ssh -t -o StrictHostKeyChecking=no -i %(key_filename)s" models/printable %(user)s@%(host_string)s:%(project_home)s/models/' % env)
def appendline(fqfn, line, use_sudo=0, verbose=1, commands_only=0):
    """
    Appends the given line to the given file only if the line does not
    already exist in the file.

    Returns the single-element list of shell commands that perform the
    idempotent append; executes it unless commands_only is truthy.
    """
    verbose = int(verbose)
    commands_only = int(commands_only)
    use_sudo = int(use_sudo)
    # grep -qF succeeds (and short-circuits the echo) when the exact
    # line is already present.
    cmd = 'grep -qF "{line}" {fqfn} || echo "{line}" >> {fqfn}'.format(
        fqfn=fqfn, line=line)
    if verbose:
        print(cmd)
    if not commands_only:
        executor = sudo_or_dryrun if use_sudo else run_or_dryrun
        executor(cmd)
    return [cmd]
def execute_sql(fn, name='default', site=None):
    """
    Executes an arbitrary SQL file against each site's database.

    Parameters:
        fn: local path of the SQL file (must exist).
        name: database connection name passed to set_db/load_db_set.
        site: site to run against; None iterates all non-secure sites.
    """
    from burlap.dj import set_db
    from burlap.db import load_db_set
    assert os.path.isfile(fn), 'Missing file: %s' % fn
    site_summary = {} # {site: ret}
    for site, site_data in common.iter_sites(site=site, no_secure=True):
        try:
            set_db(name=name, site=site)
            load_db_set(name=name)
            env.SITE = site
            put_or_dryrun(local_path=fn)
            # warn_only so a failure on one site doesn't abort the rest.
            with settings(warn_only=True):
                ret = None
                if 'postgres' in env.db_engine or 'postgis' in env.db_engine:
                    ret = run_or_dryrun("psql --host=%(db_host)s --user=%(db_user)s -d %(db_name)s -f %(put_remote_path)s" % env)
                elif 'mysql' in env.db_engine:
                    ret = run_or_dryrun("mysql -h %(db_host)s -u %(db_user)s -p'%(db_password)s' %(db_name)s < %(put_remote_path)s" % env)
                else:
                    raise NotImplementedError('Unknown database type: %s' % env.db_engine)
            print('ret:', ret)
            site_summary[site] = ret
        except KeyError as e:
            # Missing database settings are recorded per-site, not fatal.
            site_summary[site] = 'Error: %s' % str(e)
            pass
    print('-'*80)
    print('Site Summary:')
    for site, ret in sorted(site_summary.items(), key=lambda o: o[0]):
        print(site, ret)
def appendline(fqfn, line, use_sudo=0, verbose=1, commands_only=0):
    """
    Appends the given line to the given file only if the line does not
    already exist in the file.

    Parameters:
        fqfn: fully-qualified file name to append to.
        line: the line to append.
        use_sudo: when truthy, runs the command via sudo.
        verbose: when truthy, prints the generated command.
        commands_only: when truthy, only returns the command, never runs it.

    Returns:
        Single-element list containing the generated shell command.
    """
    verbose = int(verbose)
    commands_only = int(commands_only)
    use_sudo = int(use_sudo)
    kwargs = dict(fqfn=fqfn, line=line)
    # grep -qF exits 0 when the exact line exists, short-circuiting the echo.
    cmd = 'grep -qF "{line}" {fqfn} || echo "{line}" >> {fqfn}'.format(
        **kwargs)
    if verbose:
        print(cmd)
    if not commands_only:
        if use_sudo:
            sudo_or_dryrun(cmd)
        else:
            run_or_dryrun(cmd)
    return [cmd]
def syncdb(site=None, all=0, database=None): """ Runs the standard Django syncdb command for one or more sites. """ #print 'Running syncdb...' _env = type(env)(env) _env.db_syncdb_all_flag = '--all' if int(all) else '' _env.db_syncdb_database = '' if database: _env.db_syncdb_database = ' --database=%s' % database _env = render_remote_paths(e=_env) for site, site_data in iter_unique_databases(site=site): cmd = ( 'export SITE=%(SITE)s; export ROLE=%(ROLE)s; cd %(remote_manage_dir)s; ' '%(django_manage)s syncdb --noinput %(db_syncdb_all_flag)s %(db_syncdb_database)s') % _env run_or_dryrun(cmd)
def syncdb(site=None, all=0, database=None): """ Runs the standard Django syncdb command for one or more sites. """ #print 'Running syncdb...' _env = type(env)(env) _env.db_syncdb_all_flag = '--all' if int(all) else '' _env.db_syncdb_database = '' if database: _env.db_syncdb_database = ' --database=%s' % database _env = render_remote_paths(e=_env) for site, site_data in iter_unique_databases(site=site): cmd = ( 'export SITE=%(SITE)s; export ROLE=%(ROLE)s; cd %(remote_manage_dir)s; ' '%(django_manage)s syncdb --noinput %(db_syncdb_all_flag)s %(db_syncdb_database)s' ) % _env run_or_dryrun(cmd)
def get_free_space():
    """
    Return free space in bytes on the root device.

    Runs df, filters out pseudo-filesystems, and expects exactly one
    remaining /dev/... device line.

    Raises:
        AssertionError: when zero or multiple device lines remain.
    """
    # Fixed: the awk snippet was missing the closing paren on print()
    # ('{ print($1 " " $4 }'), which is an awk syntax error, so df output
    # was never parsed.
    cmd = "df -k | grep -vE '^Filesystem|tmpfs|cdrom|none|udev|cgroup' | awk '{ print($1 \" \" $4) }'"
    lines = [_ for _ in run_or_dryrun(cmd).strip().split('\n') if _.startswith('/')]
    assert len(lines) == 1, 'Ambiguous devices: %s' % str(lines)
    device, kb = lines[0].split(' ')
    # df -k reports 1K blocks; convert to bytes.
    free_space = int(kb) * 1024
    # Fixed: `verbose` was an undefined name (NameError); resolve it the
    # same way the rest of this module does.
    verbose = common.get_verbose()
    if int(verbose):
        print('free_space (bytes):', free_space)
    return free_space
def init(clean=0, check_global=0, virtualenv_dir=None, check_permissions=None):
    """
    Creates the virtual environment.

    Parameters:
        clean: when truthy, deletes any pre-existing environment first.
        check_global: when truthy, upgrades the system-wide pip first.
        virtualenv_dir: target directory; defaults to env.pip_virtual_env_dir.
        check_permissions: when None, falls back to env.pip_check_permissions;
            controls the chown/chmod pass on remote hosts.
    """
    assert env[ROLE]
    render_paths()
    # Delete any pre-existing environment.
    if int(clean):
        clean_virtualenv(virtualenv_dir=virtualenv_dir)
    # Idempotent: bail out if the environment already exists.
    if virtualenv_exists(virtualenv_dir=virtualenv_dir):
        print('virtualenv exists')
        return
    # Important. Default Ubuntu 12.04 package uses Pip 1.0, which
    # is horribly buggy. Should use 1.3 or later.
    if int(check_global):
        print('Ensuring the global pip install is up-to-date.')
        sudo_or_dryrun('pip install --upgrade pip')
    virtualenv_dir = virtualenv_dir or env.pip_virtual_env_dir
    print('Creating new virtual environment...')
    # NOTE(review): --no-site-packages was removed in virtualenv>=20;
    # this assumes an old virtualenv — confirm target hosts.
    with settings(warn_only=True):
        cmd = 'virtualenv --no-site-packages %s' % virtualenv_dir
        if env.is_local:
            run_or_dryrun(cmd)
        else:
            sudo_or_dryrun(cmd)
    if check_permissions is None:
        check_permissions = env.pip_check_permissions
    # Fix ownership/permissions on remote hosts.
    if not env.is_local and check_permissions:
        sudo_or_dryrun(
            'chown -R %(pip_user)s:%(pip_group)s %(remote_app_dir)s' % env)
        sudo_or_dryrun('chmod -R %(pip_chmod)s %(remote_app_dir)s' % env)
def manage(cmd, *args, **kwargs):
    """
    A generic wrapper around Django's manage command.

    Parameters:
        cmd: the management command name.
        *args: positional arguments appended to the command.
        **kwargs: converted to --key=value options (bare --key for True);
            the special key 'environs' is a CSV of NAME=VALUE pairs exported
            into the shell before running the command.
    """
    render_remote_paths()
    environs = kwargs.pop('environs', '').strip()
    if environs:
        # Turn "A=1,B=2" into "export A=1; export B=2;".
        environs = ' '.join('export %s=%s;' % tuple(_.split('=')) for _ in environs.split(','))
        environs = ' ' + environs + ' '
    env.dj_cmd = cmd
    env.dj_args = ' '.join(map(str, args))
    # True-valued kwargs become bare flags; everything else --key=value.
    env.dj_kwargs = ' '.join(
        ('--%s' % _k if _v in (True, 'True') else '--%s=%s' % (_k, _v))
        for _k, _v in kwargs.iteritems())
    env.dj_environs = environs
    cmd = (
        'export SITE=%(SITE)s; export ROLE=%(ROLE)s;%(dj_environs)scd %(remote_manage_dir)s; '
        '%(django_manage)s %(dj_cmd)s %(dj_args)s %(dj_kwargs)s') % env
    run_or_dryrun(cmd)
def install_fixtures(name, site=None):
    """
    Installs a set of Django fixtures.

    Parameters:
        name: key into env.db_fixture_sets listing fixture paths.
        site: site whose settings/paths should be activated first.
    """
    from burlap.dj import render_remote_paths
    set_site(site)
    render_remote_paths()
    fixtures_paths = env.db_fixture_sets.get(name, [])
    for fixture_path in fixtures_paths:
        env.db_fq_fixture_path = os.path.join(env.remote_app_src_package_dir, fixture_path)
        print('Loading %s...' % (env.db_fq_fixture_path,))
        # Upload the fixture when the remote host doesn't have it yet.
        # NOTE(review): local_path here is the remote-rendered path —
        # confirm it also resolves on the local filesystem.
        if not env.is_local and not files.exists(env.db_fq_fixture_path):
            put_or_dryrun(
                local_path=env.db_fq_fixture_path,
                remote_path='/tmp/data.json',
                use_sudo=True,
            )
            env.db_fq_fixture_path = env.put_remote_path
        cmd = 'export SITE=%(SITE)s; export ROLE=%(ROLE)s; cd %(remote_manage_dir)s; %(django_manage)s loaddata %(db_fq_fixture_path)s' % env
        print(cmd)
        run_or_dryrun(cmd)
def install_fixtures(name, site=None):
    """
    Installs a set of Django fixtures.

    Parameters:
        name: key into env.db_fixture_sets listing fixture paths.
        site: site whose settings/paths should be activated first.
    """
    from burlap.dj import render_remote_paths
    set_site(site)
    render_remote_paths()
    fixtures_paths = env.db_fixture_sets.get(name, [])
    for fixture_path in fixtures_paths:
        env.db_fq_fixture_path = os.path.join(env.remote_app_src_package_dir, fixture_path)
        print('Loading %s...' % (env.db_fq_fixture_path, ))
        # Upload the fixture when it isn't already on the remote host.
        if not env.is_local and not files.exists(env.db_fq_fixture_path):
            put_or_dryrun(
                local_path=env.db_fq_fixture_path,
                remote_path='/tmp/data.json',
                use_sudo=True,
            )
            env.db_fq_fixture_path = env.put_remote_path
        cmd = 'export SITE=%(SITE)s; export ROLE=%(ROLE)s; cd %(remote_manage_dir)s; %(django_manage)s loaddata %(db_fq_fixture_path)s' % env
        print(cmd)
        run_or_dryrun(cmd)
def dumpload():
    """
    Dumps and loads a database snapshot simultaneously.
    Requires that the destination server has direct database access
    to the source server.

    This is better than a serial dump+load when:
    1. The network connection is reliable.
    2. You don't need to save the dump file.

    The benefits of this over a dump+load are:
    1. Usually runs faster, since the load and dump happen in parallel.
    2. Usually takes up less disk space since no separate dump file is
        downloaded.

    Raises:
        NotImplementedError: for non-PostgreSQL engines.
    """
    set_db(site=env.SITE, role=env.ROLE)
    if 'postgres' in env.db_engine or 'postgis' in env.db_engine:
        # Pipe pg_dump straight into pg_restore — no intermediate file.
        cmd = ('pg_dump -c --host=%(host_string)s --username=%(db_user)s '\
            '--blobs --format=c %(db_name)s -n public | '\
            'pg_restore -U %(db_postgresql_postgres_user)s --create '\
            '--dbname=%(db_name)s') % env
        run_or_dryrun(cmd)
    else:
        raise NotImplementedError
def manage(cmd, *args, **kwargs):
    """
    A generic wrapper around Django's manage command.

    Parameters:
        cmd: the management command name.
        *args: positional arguments appended to the command.
        **kwargs: converted to --key=value options (bare --key for True);
            the special key 'environs' is a CSV of NAME=VALUE pairs exported
            into the shell before running the command.
    """
    render_remote_paths()
    environs = kwargs.pop('environs', '').strip()
    if environs:
        # "A=1,B=2" -> "export A=1; export B=2;"
        environs = ' '.join('export %s=%s;' % tuple(_.split('=')) for _ in environs.split(','))
        environs = ' ' + environs + ' '
    env.dj_cmd = cmd
    env.dj_args = ' '.join(map(str, args))
    env.dj_kwargs = ' '.join(
        ('--%s' % _k if _v in (True, 'True') else '--%s=%s' % (_k, _v))
        for _k, _v in kwargs.iteritems())
    env.dj_environs = environs
    cmd = (
        'export SITE=%(SITE)s; export ROLE=%(ROLE)s;%(dj_environs)scd %(remote_manage_dir)s; '
        '%(django_manage)s %(dj_cmd)s %(dj_args)s %(dj_kwargs)s') % env
    run_or_dryrun(cmd)
def test_sudo(self):
    """
    Verifies that sudo_or_dryrun runs as root by default and as the
    requested user when one is given.
    """
    all_users = run_or_dryrun('cut -d: -f1 /etc/passwd')
    print('all users:', all_users)
    # Take the last output line to skip any shell banner/noise.
    ret = (sudo_or_dryrun('whoami') or '').split('\n')[-1]
    print('ret0:', ret)
    self.assertEqual(ret, 'root')
    # Pick a target user known to exist on this host.
    if 'travis' in all_users:
        target_user = '******'
    else:
        target_user = '******'
    ret = (sudo_or_dryrun('whoami', user=target_user) or '').split('\n')[-1]
    print('ret1:', ret)
    self.assertEqual(ret, target_user)
def test_sudo(self):
    """
    Verifies that sudo_or_dryrun runs as root by default and as the
    requested user when one is given.
    """
    all_users = run_or_dryrun('cut -d: -f1 /etc/passwd')
    print('all users:', all_users)
    ret = sudo_or_dryrun('whoami')
    print('ret0:', ret)
    self.assertEqual(ret, 'root')
    # Pick a target user known to exist on this host.
    if 'travis' in all_users:
        target_user = '******'
    else:
        target_user = '******'
    ret = sudo_or_dryrun('whoami', user=target_user)
    print('ret1:', ret)
    self.assertEqual(ret, target_user)
def get_size():
    """
    Retrieves the size of the database in bytes.

    Returns:
        int: the database size as reported by pg_database_size().

    Raises:
        NotImplementedError: for non-PostgreSQL engines.
    """
    from burlap.dj import set_db
    set_db(site=env.SITE, role=env.ROLE)
    if 'postgres' in env.db_engine or 'postgis' in env.db_engine:
        cmd = 'psql --user=%(db_postgresql_postgres_user)s --tuples-only -c "SELECT pg_database_size(\'%(db_name)s\');"' % env
        output = run_or_dryrun(cmd)
        # Take the last output line to skip any shell banner/noise.
        output = int(output.strip().split('\n')[-1].strip())
        # Fixed: `verbose` was an undefined name (NameError); resolve it
        # the same way the rest of this module does.
        verbose = common.get_verbose()
        if int(verbose):
            print('database size (bytes):', output)
        return output
    else:
        raise NotImplementedError
def upgrade_pip():
    """
    Upgrades setuptools, distribute and pip inside the virtual environment.
    """
    from burlap.dj import render_remote_paths
    render_remote_paths()
    if env.pip_virtual_env_dir_template:
        env.pip_virtual_env_dir = env.pip_virtual_env_dir_template % env
    # Make sure the virtualenv is writable by the pip user before upgrading.
    sudo_or_dryrun(
        "chown -R %(pip_user)s:%(pip_group)s %(pip_virtual_env_dir)s" % env)
    run_or_dryrun(
        ". %(pip_virtual_env_dir)s/bin/activate; pip install --upgrade setuptools" % env)
    run_or_dryrun(
        ". %(pip_virtual_env_dir)s/bin/activate; pip install --upgrade distribute" % env)
    # warn_only: upgrading pip in-place can fail harmlessly on some hosts.
    with settings(warn_only=True):
        run_or_dryrun(
            ". %(pip_virtual_env_dir)s/bin/activate; pip install --upgrade pip" % env)
def run_paths(paths, cmd_template, max_retries=3):
    """
    Uploads each SQL file in ``paths`` and executes ``cmd_template`` against
    it, retrying failed files up to ``max_retries`` times and reporting
    those that never succeed.

    NOTE(review): references ``apps`` and ``env`` not defined locally —
    presumably a closure over an enclosing task; verify at the call site.
    """
    paths = list(paths)
    error_counts = defaultdict(int) # {path:count}
    terminal = set()
    while paths:
        path = paths.pop(0)
        # App name comes from the .../<app>/sql/... path segment.
        app_name = re.findall(r'/([^/]+)/sql/', path)[0]
        if apps and app_name not in apps:
            continue
        with settings(warn_only=True):
            put_or_dryrun(local_path=path)
            cmd = cmd_template % env
            error_code = run_or_dryrun(cmd)
        if error_code:
            error_counts[path] += 1
            # Re-queue for retry until the retry budget is exhausted.
            if error_counts[path] < max_retries:
                paths.append(path)
            else:
                terminal.add(path)
    if terminal:
        print('%i files could not be loaded.' % len(terminal), file=sys.stderr)
        for path in sorted(list(terminal)):
            print(path, file=sys.stderr)
        print(file=sys.stderr)
def sync(sync_set, force=0):
    """
    Uploads media to an Amazon S3 bucket using s3cmd.

    Parameters:
        sync_set: key into env.s3_sync_sets listing path mappings to sync.
        force: when truthy, uses s3cmd's 'put' (re-upload everything)
            instead of 'sync', and adds --force.
    """
    from burlap.dj import get_settings, render_remote_paths
    force = int(force)
    env.s3_sync_force_flag = ' --force ' if force else ''
    _settings = get_settings(verbose=1)
    assert _settings, 'Unable to import settings.'
    # Copy all AWS_* settings from the Django settings module into env.
    for k in _settings.__dict__.iterkeys():
        if k.startswith('AWS_'):
            env[k] = _settings.__dict__[k]
    render_remote_paths()
    site_data = env.sites[env.SITE]
    env.update(site_data)
    rets = []
    for paths in env.s3_sync_sets[sync_set]:
        is_local = paths.get('is_local', True)
        local_path = paths['local_path'] % env
        remote_path = paths['remote_path']
        # Normalize the remote path into s3://bucket/... form.
        remote_path = remote_path.replace(':/', '/')
        if not remote_path.startswith('s3://'):
            remote_path = 's3://' + remote_path
        local_path = local_path % env
        if is_local:
            env.s3_local_path = os.path.abspath(local_path)
        else:
            env.s3_local_path = local_path
        # Preserve a trailing slash — it changes s3cmd's copy semantics.
        if local_path.endswith('/') and not env.s3_local_path.endswith('/'):
            env.s3_local_path = env.s3_local_path + '/'
        env.s3_remote_path = remote_path % env
        print('Syncing %s to %s...' % (env.s3_local_path, env.s3_remote_path))
        # Replaces an older Ruby s3sync-based implementation with s3cmd.
        if force:
            env.s3_sync_cmd = 'put'
        else:
            env.s3_sync_cmd = 'sync'
        cmd = (
            'export AWS_ACCESS_KEY_ID=%(AWS_ACCESS_KEY_ID)s; '\
            'export AWS_SECRET_ACCESS_KEY=%(AWS_SECRET_ACCESS_KEY)s; '\
            's3cmd %(s3_sync_cmd)s --progress --acl-public --guess-mime-type --no-mime-magic '\
            '--delete-removed --cf-invalidate --recursive %(s3_sync_force_flag)s '\
            '%(s3_local_path)s %(s3_remote_path)s') % env
        if is_local:
            local_or_dryrun(cmd)
        else:
            run_or_dryrun(cmd)
def list_server_specs(cpu=1, memory=1, hdd=1):
    """
    Displays a list of common servers characteristics, like number
    of CPU cores, amount of memory and hard drive capacity.

    Parameters:
        cpu: when truthy, reports CPU core counts and model names.
        memory: when truthy, reports DIMM totals/types via dmidecode (sudo).
        hdd: when truthy, reports drive counts/sizes and logical capacity.
    """
    cpu = int(cpu)
    memory = int(memory)
    hdd = int(hdd)

    # CPU: count occurrences of each "model name" in /proc/cpuinfo.
    if cpu:
        cmd = 'cat /proc/cpuinfo | grep -i "model name"'
        ret = run_or_dryrun(cmd)
        matches = map(str.strip, re.findall('model name\s+:\s*([^\n]+)', ret, re.DOTALL|re.I))
        cores = {}
        for match in matches:
            cores.setdefault(match, 0)
            cores[match] += 1

    # Memory: parse dmidecode's "Memory Device" stanzas into dicts.
    if memory:
        cmd = 'dmidecode --type 17'
        ret = sudo_or_dryrun(cmd)
        matches = re.findall('Memory\s+Device\r\n(.*?)(?:\r\n\r\n|$)', ret, flags=re.DOTALL|re.I)
        memory_slot_dicts = []
        for match in matches:
            attrs = dict([(_a.strip(), _b.strip()) for _a, _b in re.findall('^([^:]+):\s+(.*)$', match, flags=re.MULTILINE)])
            memory_slot_dicts.append(attrs)
        total_memory_gb = 0
        total_slots_filled = 0
        total_slots = len(memory_slot_dicts)
        memory_types = set()
        memory_forms = set()
        memory_speeds = set()
        for memory_dict in memory_slot_dicts:
            # Empty slots have no "<n> MB" size; IndexError means skip.
            try:
                size = int(round(float(re.findall('([0-9]+)\s+MB', memory_dict['Size'])[0])/1024.))
                total_memory_gb += size
                total_slots_filled += 1
            except IndexError:
                pass
            _v = memory_dict['Type']
            if _v != 'Unknown':
                memory_types.add(_v)
            _v = memory_dict['Form Factor']
            if _v != 'Unknown':
                memory_forms.add(_v)
            _v = memory_dict['Speed']
            if _v != 'Unknown':
                memory_speeds.add(_v)

    # Storage: enumerate /dev/[a-z]+d[a-z] devices and size each via udisks.
    if hdd:
        cmd = 'find /dev -maxdepth 1 | grep -E "/dev/[a-z]+d[a-z]$"'
        devices = map(str.strip, run_or_dryrun(cmd).split('\n'))
        total_drives = len(devices)
        total_physical_storage_gb = 0
        total_logical_storage_gb = 0
        drive_transports = set()
        for device in devices:
            cmd = 'udisks --show-info %s |grep -i " size:"' % (device)
            ret = run_or_dryrun(cmd)
            size_bytes = float(re.findall('size:\s*([0-9]+)', ret)[0].strip())
            size_gb = int(round(size_bytes/1024/1024/1024))
            total_physical_storage_gb += size_gb
            # hdparm may fail on virtual drives; warn_only tolerates that.
            with settings(warn_only=True):
                cmd = 'hdparm -I %s|grep -i "Transport:"' % device
                ret = sudo_or_dryrun(cmd)
                if ret and not ret.return_code:
                    drive_transports.add(ret.split('Transport:')[-1].strip())
        # Sum mounted filesystem sizes (KB) and convert to GB.
        cmd = "df | grep '^/dev/[mhs]d*' | awk '{s+=$2} END {print s/1048576}'"
        ret = run_or_dryrun(cmd)
        total_logical_storage_gb = float(ret)

    if cpu:
        print('-'*80)
        print('CPU')
        print('-'*80)
        type_str = ', '.join(['%s x %i' % (_type, _count) for _type, _count in cores.items()])
        print('Cores: %i' % sum(cores.values()))
        print('Types: %s' % type_str)
    if memory:
        print('-'*80)
        print('MEMORY')
        print('-'*80)
        print('Total: %s GB' % total_memory_gb)
        print('Type: %s' % list_to_str_or_unknown(memory_types))
        print('Form: %s' % list_to_str_or_unknown(memory_forms))
        print('Speed: %s' % list_to_str_or_unknown(memory_speeds))
        print('Slots: %i (%i filled, %i empty)' % (total_slots, total_slots_filled, total_slots - total_slots_filled))
    if hdd:
        print('-'*80)
        print('STORAGE')
        print('-'*80)
        print('Total physical drives: %i' % total_drives)
        print('Total physical storage: %s GB' % total_physical_storage_gb)
        print('Total logical storage: %s GB' % total_logical_storage_gb)
        print('Types: %s' % list_to_str_or_unknown(drive_transports))
def get_or_create_ec2_instance(name=None, group=None, release=None, verbose=0, backend_opts={}):
    """
    Creates a new EC2 instance.

    You should normally run get_or_create() instead of directly calling this.

    Parameters:
        name: required; stored as the instance's name tag and used to key
            the elastic-IP mapping.
        group: optional group tag value.
        release: optional release tag value.
        verbose: when truthy, prints extra diagnostics.
        backend_opts: extra kwargs for conn.run_instances() under the
            'run_instances' key. NOTE(review): mutable default argument —
            harmless here since it is only read, but worth cleaning up.

    Returns:
        The boto instance object, once reachable over SSH.
    """
    from burlap.common import shelf, OrderedDict
    from boto.exception import EC2ResponseError
    assert name, "A name must be specified."
    verbose = int(verbose)
    conn = get_ec2_connection()
    security_groups = get_or_create_ec2_security_groups()
    security_group_ids = [_.id for _ in security_groups]
    if verbose:
        print('security_groups:',security_group_ids)
    pem_path = get_or_create_ec2_key_pair()
    assert env.vm_ec2_ami, 'No AMI specified.'
    print('Creating EC2 instance from %s...' % (env.vm_ec2_ami,))
    print(env.vm_ec2_zone)
    opts = backend_opts.get('run_instances', {})
    reservation = conn.run_instances(
        env.vm_ec2_ami,
        key_name=env.vm_ec2_keypair_name,
        #security_groups=env.vm_ec2_selected_security_groups,#conflicts with subnet_id?!
        security_group_ids=security_group_ids,
        placement=env.vm_ec2_zone,
        instance_type=env.vm_ec2_instance_type,
        subnet_id=env.vm_ec2_subnet_id,
        **opts
    )
    instance = reservation.instances[0]

    # Name new instance.
    # Note, creation is not instantious, so we may have to wait for a moment
    # before we can access it.
    while 1:
        try:
            if name:
                instance.add_tag(env.vm_name_tag, name)
            if group:
                instance.add_tag(env.vm_group_tag, group)
            if release:
                instance.add_tag(env.vm_release_tag, release)
            break
        except EC2ResponseError as e:
            # Tagging fails until the instance exists; poll every 3s.
            print('Waiting for the instance to be created...')
            if verbose:
                print(e)
            time.sleep(3)

    # Assign IP.
    allocation_id = None
    if env.vm_ec2_use_elastic_ip:
        # Initialize name/ip mapping since we can't tag elastic IPs.
        shelf.setdefault('vm_elastic_ip_mappings', OrderedDict())
        vm_elastic_ip_mappings = shelf.get('vm_elastic_ip_mappings')
        elastic_ip = vm_elastic_ip_mappings.get(name)
        if not elastic_ip:
            print('Allocating new elastic IP address...')
            addr = conn.allocate_address(domain=env.vm_ec2_allocate_address_domain)
            elastic_ip = addr.public_ip
            print('Allocated address %s.' % elastic_ip)
            vm_elastic_ip_mappings[name] = str(elastic_ip)
            shelf.set('vm_elastic_ip_mappings', vm_elastic_ip_mappings)

        # Lookup allocation_id.
        all_eips = conn.get_all_addresses()
        for eip in all_eips:
            if elastic_ip == eip.public_ip:
                allocation_id = eip.allocation_id
                break
        print('allocation_id:',allocation_id)

        # Association also fails until the instance is ready; poll.
        while 1:
            try:
                conn.associate_address(
                    instance_id=instance.id,
                    allocation_id=allocation_id, # needed for VPC instances
                )
                print('IP address associated!')
                break
            except EC2ResponseError as e:
                print('Waiting to associate IP address...')
                if verbose:
                    print(e)
                time.sleep(3)

    # Confirm public DNS name was assigned.
    while 1:
        try:
            instance = get_all_ec2_instances(instance_ids=[instance.id])[0]
            if instance.public_dns_name:
                break
        except Exception as e:
            print('error:',e)
        except SystemExit as e:
            print('systemexit:',e)
            pass
        print('Waiting for public DNS name to be assigned...')
        time.sleep(3)

    # Confirm we can SSH into the server.
    #TODO:better handle timeouts? try/except doesn't really work?
    env.connection_attempts = 10
    while 1:
        try:
            with settings(warn_only=True):
                env.host_string = instance.public_dns_name
                ret = run_or_dryrun('who -b')
                if not ret.return_code:
                    break
        except Exception as e:
            print('error:',e)
        except SystemExit as e:
            print('systemexit:',e)
            pass
        print('Waiting for sshd to accept connections...')
        time.sleep(3)
    print("")
    print("Login with: ssh -o StrictHostKeyChecking=no -i %s %s@%s" \
        % (pem_path, env.user, instance.public_dns_name))
    print("OR")
    print("fab %(ROLE)s:hostname=%(name)s shell" % dict(name=name, ROLE=env.ROLE))
    ip = socket.gethostbyname(instance.public_dns_name)
    print("")
    print("""Example hosts entry:)
%(ip)s    www.mydomain.com # %(name)s""" % dict(ip=ip, name=name))
    return instance
def load(db_dump_fn='', prep_only=0, force_upload=0, from_local=0):
    """
    Restores a database snapshot onto the target database server.

    Parameters:
        db_dump_fn -- path to the snapshot file; defaults to get_default_db_fn().
        prep_only -- if 1, generate the commands for preparing the load, but
            not the command to finally load the snapshot.
        force_upload -- if 1, upload the snapshot even if a copy appears to
            already exist on the target.
        from_local -- accepted and normalized for interface compatibility;
            not otherwise used by this implementation.

    Raises:
        NotImplementedError -- if the configured engine is neither
            PostgreSQL/PostGIS nor MySQL and no custom load command is set.
    """
    verbose = common.get_verbose()
    from burlap.dj import set_db
    from burlap.common import get_dryrun
    if not db_dump_fn:
        db_dump_fn = get_default_db_fn()
    env.db_dump_fn = render_fn(db_dump_fn).strip()
    set_db(site=env.SITE, role=env.ROLE)
    from_local = int(from_local)  # retained for backward compatibility
    prep_only = int(prep_only)
    missing_local_dump_error = (
        "Database dump file %(db_dump_fn)s does not exist."
    ) % env
    # Copy snapshot file to target.
    if env.is_local:
        env.db_remote_dump_fn = db_dump_fn
    else:
        env.db_remote_dump_fn = '/tmp/' + os.path.split(env.db_dump_fn)[-1]
    if not prep_only:
        # Upload the snapshot unless a copy is already present on the target.
        if int(force_upload) or (not get_dryrun() and not env.is_local and not files.exists(env.db_remote_dump_fn)):
            assert os.path.isfile(env.db_dump_fn), missing_local_dump_error
            if verbose:
                print('Uploading database snapshot...')
            put_or_dryrun(local_path=env.db_dump_fn, remote_path=env.db_remote_dump_fn)
    if env.is_local and not prep_only and not get_dryrun():
        assert os.path.isfile(env.db_dump_fn), missing_local_dump_error
    if env.db_load_command:
        # A custom load command overrides all engine-specific handling.
        cmd = env.db_load_command % env
        run_or_dryrun(cmd)
    elif 'postgres' in env.db_engine or 'postgis' in env.db_engine:
        set_root_login()
        # Drop and recreate the target database.
        with settings(warn_only=True):
            cmd = 'dropdb --user=%(db_postgresql_postgres_user)s %(db_name)s' % env
            run_or_dryrun(cmd)
        cmd = 'psql --user=%(db_postgresql_postgres_user)s -c "CREATE DATABASE %(db_name)s;"' % env
        run_or_dryrun(cmd)
        with settings(warn_only=True):
            if 'postgis' in env.db_engine:
                # PostGIS extensions must exist before the restore runs.
                cmd = 'psql --user=%(db_postgresql_postgres_user)s --no-password --dbname=%(db_name)s --command="CREATE EXTENSION postgis;"' % env
                run_or_dryrun(cmd)
                cmd = 'psql --user=%(db_postgresql_postgres_user)s --no-password --dbname=%(db_name)s --command="CREATE EXTENSION postgis_topology;"' % env
                run_or_dryrun(cmd)
            # warn_only: DROP OWNED fails if the user doesn't exist yet.
            cmd = 'psql --user=%(db_postgresql_postgres_user)s -c "DROP OWNED BY %(db_user)s CASCADE;"' % env
            run_or_dryrun(cmd)
        # Recreate the application user and grant it full access to the db.
        cmd = ('psql --user=%(db_postgresql_postgres_user)s -c "DROP USER IF EXISTS %(db_user)s; '
            'CREATE USER %(db_user)s WITH PASSWORD \'%(db_password)s\'; '
            'GRANT ALL PRIVILEGES ON DATABASE %(db_name)s to %(db_user)s;"') % env
        run_or_dryrun(cmd)
        for createlang in env.db_postgresql_createlangs:
            env.db_createlang = createlang
            # "|| true" because the language may already be installed.
            cmd = 'createlang -U %(db_postgresql_postgres_user)s %(db_createlang)s %(db_name)s || true' % env
            run_or_dryrun(cmd)
        if not prep_only:
            if env.db_postgresql_custom_load_cmd:
                cmd = env.db_postgresql_custom_load_cmd % env
            else:
                cmd = 'pg_restore --jobs=8 -U %(db_postgresql_postgres_user)s --create --dbname=%(db_name)s %(db_remote_dump_fn)s' % env
            run_or_dryrun(cmd)
    elif 'mysql' in env.db_engine:
        set_root_login()
        # Drop the database if it's there.
        cmd = ("mysql -v -h %(db_host)s -u %(db_root_user)s -p'%(db_root_password)s' "
            "--execute='DROP DATABASE IF EXISTS %(db_name)s'") % env
        run_or_dryrun(cmd)
        # Now, create the database.
        cmd = ("mysqladmin -h %(db_host)s -u %(db_root_user)s -p'%(db_root_password)s' "
            "create %(db_name)s") % env
        run_or_dryrun(cmd)
        # Create the application user; warn_only because it may already exist.
        with settings(warn_only=True):
            cmd = ("mysql -v -h %(db_host)s -u %(db_root_user)s -p'%(db_root_password)s' "
                "--execute=\"CREATE USER '%(db_user)s'@'%%' IDENTIFIED BY '%(db_password)s'; GRANT ALL PRIVILEGES ON *.* TO '%(db_user)s'@'%%' WITH GRANT OPTION; FLUSH PRIVILEGES;\"") % env
            run_or_dryrun(cmd)
        # Set collation.
        set_collation_mysql()
        # Raise max packet limitation (do_set=0 passed through as before).
        set_max_mysql_packet_size(do_set=0)
        # Run any server-specific commands (e.g. to setup permissions) before
        # we load the data.
        for command in env.db_mysql_preload_commands:
            run_or_dryrun(command % env)
        # Restore the database content from the dump file.
        env.db_dump_fn = db_dump_fn
        cmd = ('gunzip < %(db_remote_dump_fn)s | mysql -u %(db_root_user)s '
            '--password=%(db_root_password)s --host=%(db_host)s '
            '-D %(db_name)s') % env
        run_or_dryrun(cmd)
        set_collation_mysql()
    else:
        # Fix: was `raise NotImplemented`, which raises a TypeError since
        # NotImplemented is not an exception class.
        raise NotImplementedError('Unsupported database engine: %s' % env.db_engine)
def exists(self, name='default', site=None): """ Returns true if a database with the given name exists. False otherwise. """ if self.verbose: print('!'*80) print('db.exists:', name) if name and self.env.connection_handler == CONNECTION_HANDLER_DJANGO: from burlap.dj import set_db set_db(name=name, site=site, verbose=verbose) load_db_set(name=name) self.set_root_login() ret = None if 'postgres' in env.db_engine or 'postgis' in env.db_engine: kwargs = dict( db_user=env.db_root_user, db_password=env.db_root_password, db_host=env.db_host, db_name=env.db_name, ) env.update(kwargs) # Set pgpass file. if env.db_password: self.write_pgpass(verbose=verbose, name=name) # cmd = ('psql --username={db_user} --no-password -l '\ # '--host={db_host} --dbname={db_name}'\ # '| grep {db_name} | wc -l').format(**env) cmd = ('psql --username={db_user} --host={db_host} -l '\ '| grep {db_name} | wc -l').format(**env) if verbose: print(cmd) with settings(warn_only=True): ret = run_or_dryrun(cmd) #print 'ret:', ret if ret is not None: if 'password authentication failed' in ret: ret = False else: ret = int(ret) >= 1 elif 'mysql' in env.db_engine: kwargs = dict( db_user=env.db_root_user, db_password=env.db_root_password, db_host=env.db_host, db_name=env.db_name, ) env.update(kwargs) cmd = ('mysql -h {db_host} -u {db_user} '\ '-p"{db_password}" -N -B -e "SELECT IF(\'{db_name}\''\ ' IN(SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA), '\ '\'exists\', \'notexists\') AS found;"').format(**env) if verbose: print(cmd) ret = run_or_dryrun(cmd) if ret is not None: ret = 'notexists' not in (ret or 'notexists') else: raise NotImplementedError if ret is not None: print('%s database on site %s %s exist' % (name, env.SITE, 'DOES' if ret else 'DOES NOT')) return ret
def install(package='', clean=0, no_deps=1, all=0, upgrade=1): """ Installs the local cache of pip packages. """ from burlap.dj import render_remote_paths print('Installing pip requirements...') assert env[ROLE] require('is_local') # Delete any pre-existing environment. if int(clean): clean_virtualenv() render_remote_paths() if env.pip_virtual_env_dir_template: env.pip_virtual_env_dir = env.pip_virtual_env_dir_template % env env.pip_local_cache_dir = env.pip_local_cache_dir_template % env env.pip_path_versioned = env.pip_path % env if env.is_local: env.pip_cache_dir = os.path.abspath(env.pip_local_cache_dir % env) else: env.pip_cache_dir = env.pip_remote_cache_dir % env print('env.host_string:', env.host_string) print('env.key_filename:', env.key_filename) run_or_dryrun('mkdir -p %(pip_cache_dir)s' % env) if not env.pip_cache_dir.endswith('/'): env.pip_cache_dir = env.pip_cache_dir + '/' env.pip_key_filename = os.path.abspath(env.key_filename) local_or_dryrun( 'rsync -avz --progress --rsh "ssh -o StrictHostKeyChecking=no -i %(pip_key_filename)s" %(pip_local_cache_dir)s/* %(user)s@%(host_string)s:%(pip_cache_dir)s' % env) env.pip_upgrade_flag = '' if int(upgrade): env.pip_upgrade_flag = ' -U ' env.pip_no_deps = '' if int(no_deps): env.pip_no_deps = '--no-deps' if int(all): packages = list(iter_pip_requirements()) elif package: packages = [package] else: packages = [k for k, v in check()] env.pip_build_dir = tempfile.mkdtemp() for package in packages: env.pip_package = package if env.is_local: run_or_dryrun(env.pip_install_command % env) else: sudo_or_dryrun(env.pip_install_command % env) if not env.is_local: sudo_or_dryrun( 'chown -R %(pip_user)s:%(pip_group)s %(remote_app_dir)s' % env) sudo_or_dryrun('chmod -R %(pip_chmod)s %(remote_app_dir)s' % env)
def shell(self, name='default', user=None, password=None, root=0,
    verbose=1, write_password=1, no_db=0, no_pw=0):
    """
    Opens a SQL shell to the given database, assuming the configured
    database and user supports this feature.

    Parameters:
        name -- logical database connection name to load credentials for.
        user/password -- optional credential overrides (ignored if root=1).
        root -- if true, connect with the root credentials instead.
        write_password -- if true, write a ~/.pgpass entry (PostgreSQL only).
        no_db -- if true, connect to the server without selecting a database.
        no_pw -- if true, blank out the password before connecting.

    Raises:
        NotImplementedError -- for unsupported database engines.
    """
    from burlap.dj import set_db
    # NOTE(review): this clobbers the `verbose` parameter with the
    # satchel-level setting — confirm the parameter is intentionally unused.
    verbose = self.verbose
    root = int(root)
    write_password = int(write_password)
    no_db = int(no_db)
    no_pw = int(no_pw)
    # Load database credentials.
    set_db(name=name, verbose=verbose)
    load_db_set(name=name, verbose=verbose)
    set_root_login()
    if root:
        env.db_user = env.db_root_user
        env.db_password = env.db_root_password
    else:
        if user is not None:
            env.db_user = user
        if password is not None:
            env.db_password = password
    # Switch relative to absolute host name.
    env.db_shell_host = env.db_host
    # if env.db_shell_host in ('localhost', '127.0.0.1'):
    #     env.db_shell_host = env.host_string
    if no_pw:
        env.db_password = ''
    cmds = []
    env.db_name_str = ''
    if 'postgres' in env.db_engine or 'postgis' in env.db_engine:
        # Note, psql does not support specifying password at the command line.
        # If you don't want to manually type it at the command line, you must
        # add the password to your local ~/.pgpass file.
        # Each line in that file should be formatted as:
        # host:port:username:password
        # Set pgpass file.
        if write_password and env.db_password:
            cmds.extend(write_postgres_pgpass(verbose=0, commands_only=1, name=name))
        if not no_db:
            env.db_name_str = ' --dbname=%(db_name)s' % env
        # Launch inside an interactive bash so shell rc/profile apply.
        cmds.append(('/bin/bash -i -c \"psql --username=%(db_user)s '
            '--host=%(db_shell_host)s%(db_name_str)s\"') % env)
    elif 'mysql' in env.db_engine:
        if not no_db:
            env.db_name_str = ' %(db_name)s' % env
        if env.db_password:
            cmds.append(('/bin/bash -i -c \"mysql -u %(db_user)s '
                '-p\'%(db_password)s\' -h %(db_shell_host)s%(db_name_str)s\"') % env)
        else:
            cmds.append(('/bin/bash -i -c \"mysql -u %(db_user)s '
                '-h %(db_shell_host)s%(db_name_str)s\"') % env)
    else:
        raise NotImplementedError
    if cmds:
        for cmd in cmds:
            if verbose:
                print(cmd)
            if env.is_local:
                local_or_dryrun(cmd)
            else:
                run_or_dryrun(cmd)
def create(self, drop=0, name='default', site=None, post_process=0,
        db_engine=None, db_user=None, db_host=None, db_password=None,
        db_name=None):
    """
    Creates the target database.

    Parameters:
        drop -- if true, drop any existing database of the same name first.
        name -- logical database connection name to load settings for.
        site -- site whose settings should be loaded, if any.
        db_engine/db_user/db_host/db_password/db_name -- explicit overrides
            that take precedence over the loaded settings.

    Raises:
        NotImplementedError -- for unsupported database engines.
    """
    from burlap.dj import set_db, render_remote_paths
    assert env[ROLE]
    require('app_name')
    drop = int(drop)
    # Do nothing if we're not dropping and the database already exists.
    print('Checking to see if database already exists...')
    if self.exists(name=name, site=site) and not drop:
        print('Database already exists. Aborting creation. '
            'Use drop=1 to override.')
        return
    env.db_drop_flag = '--drop' if drop else ''
    if name:
        set_db(name=name, site=site)
        load_db_set(name=name)
    # Explicit parameter overrides win over the loaded settings.
    if db_engine:
        env.db_engine = db_engine
    if db_user:
        env.db_user = db_user
    if db_host:
        env.db_host = db_host
    if db_password:
        env.db_password = db_password
    if db_name:
        env.db_name = db_name
    if 'postgres' in env.db_engine or 'postgis' in env.db_engine:
        # Intentional no-op: PostgreSQL creation is not handled here.
        pass
    elif 'mysql' in env.db_engine:
        set_root_login()
        if drop:  # already an int; the original re-cast redundantly
            cmd = "mysql -v -h %(db_host)s -u %(db_root_user)s -p'%(db_root_password)s' --execute='DROP DATABASE IF EXISTS %(db_name)s'" % env
            sudo_or_dryrun(cmd)
        cmd = "mysqladmin -h %(db_host)s -u %(db_root_user)s -p'%(db_root_password)s' create %(db_name)s" % env
        sudo_or_dryrun(cmd)
        set_collation_mysql()
        # Create user (GRANT USAGE ensures the user exists so DROP USER
        # cannot fail before the re-grant below).
        cmd = "mysql -v -h %(db_host)s -u %(db_root_user)s -p'%(db_root_password)s' --execute=\"GRANT USAGE ON *.* TO %(db_user)s@'%%'; DROP USER %(db_user)s@'%%';\"" % env
        run_or_dryrun(cmd)
        # Grant user access to the database.
        cmd = ("mysql -v -h %(db_host)s -u %(db_root_user)s "
            "-p'%(db_root_password)s' --execute=\"GRANT ALL PRIVILEGES "
            "ON %(db_name)s.* TO %(db_user)s@'%%' IDENTIFIED BY "
            "'%(db_password)s'; FLUSH PRIVILEGES;\"") % env
        run_or_dryrun(cmd)
        # Also grant via the explicit db_host, since the '%' wildcard
        # grant above does not cover localhost connections.
        cmd = ("mysql -v -h %(db_host)s -u %(db_root_user)s "
            "-p'%(db_root_password)s' --execute=\"GRANT ALL PRIVILEGES "
            "ON %(db_name)s.* TO %(db_user)s@%(db_host)s IDENTIFIED BY "
            "'%(db_password)s'; FLUSH PRIVILEGES;\"") % env
        run_or_dryrun(cmd)
    else:
        # Fix: was `raise NotImplemented`, which raises a TypeError since
        # NotImplemented is not an exception class.
        raise NotImplementedError('Unsupported database engine: %s' % env.db_engine)
def test_dryrun(self): set_dryrun(True) run_or_dryrun('touch ~/abc.txt') assert not is_file('~/abc.txt') set_dryrun(1) run_or_dryrun('touch ~/def.txt') assert not is_file('~/def.txt') set_dryrun(False) run_or_dryrun('touch ~/mno.txt') assert not is_file('~/mno.txt') run_or_dryrun('rm -f ~/mno.txt') set_dryrun(0) run_or_dryrun('touch ~/xyz.txt') assert not is_file('~/xyz.txt') run_or_dryrun('rm -f ~/xyz.txt')
def disk(): """ Display percent of disk usage. """ run_or_dryrun(env.disk_usage_command % env)
def run(command): with settings(warn_only=True): run_or_dryrun(command)
def check(return_type=PENDING):
    """
    Lists the packages that are missing or obsolete on the target.

    return_type := pending|installed

    Returns either the dict of installed package versions (INSTALLED) or a
    list of (requirement_line, 'install'|'update') tuples (PENDING, default).
    """
    # from burlap.plan import get_original
    # run0 = get_original('run')
    # import inspect
    # print('run0:',run0, inspect.getsourcefile(run0)
    assert env[ROLE]
    # Packages never reported as missing/obsolete.
    ignored_packages = set(['pip', 'argparse'])
    env.pip_path_versioned = env.pip_path % env
    init()
    def get_version_nums(v):
        # Returns a comparable tuple of ints for purely-numeric dotted
        # versions; returns None for anything else (callers fall back to
        # comparing the raw string).
        if re.findall('^[0-9\.]+$', v):
            return tuple(int(_) for _ in v.split('.') if _.strip().isdigit())
    use_virt = env.pip_use_virt
    if use_virt:
        # Run `pip freeze` inside the virtualenv.
        cmd_template = ". %(pip_virtual_env_dir)s/bin/activate; %(pip_path_versioned)s freeze; deactivate"
    else:
        cmd_template = "%(pip_path_versioned)s freeze"
    cmd = cmd_template % env
    result = run_or_dryrun(cmd)
    # Parse the `pip freeze` output into {package: version}.
    installed_package_versions = {}
    for line in result.split('\n'):
        line = line.strip()
        if '#' in line:
            line = line.split('#')[0].strip()
        if not line:
            continue
        elif line.startswith('#'):
            continue
        elif ' ' in line:
            # Skip malformed lines (e.g. editable installs, warnings).
            continue
        try:
            k, v = line.split('==')
        except ValueError as e:
            # print('Malformed line: %s' % line
            # sys.exit()
            continue
        if not k.strip() or not v.strip():
            continue
        print('Installed:', k, v)
        if k.strip().lower() in ignored_packages:
            continue
        installed_package_versions[k.strip()] = v.strip()
    desired_package_version = get_desired_package_versions()
    for k, v in desired_package_version.iteritems():
        print('Desired:', k, v)
    pending = [] # (package_line, type)]
    # Find desired packages that are not installed at all.
    not_installed = {}
    for k, (v, line) in desired_package_version.iteritems():
        if k not in installed_package_versions:
            not_installed[k] = (v, line)
    if not_installed:
        print('!' * 80)
        print('Not installed:')
        for k, (v, line) in sorted(not_installed.iteritems(), key=lambda o: o[0]):
            if k.lower() in ignored_packages:
                continue
            print(k, v)
            pending.append((line, 'install'))
    else:
        print('-' * 80)
        print('All are installed.')
    # Find installed packages whose version differs from the desired one.
    obsolete = {}
    for k, (v, line) in desired_package_version.iteritems():
        #line
        if v != 'current' and v != installed_package_versions.get(k, v):
            obsolete[k] = (v, line)
    if obsolete:
        print('!' * 80)
        print('Obsolete:')
        for k, (v0, line) in sorted(obsolete.iteritems(), key=lambda o: o[0]):
            v0nums = get_version_nums(v0) or v0
            v1 = installed_package_versions[k]
            v1nums = get_version_nums(v1) or v1
            # NOTE(review): if one side parses to a tuple and the other falls
            # back to a string, this comparison relies on Python 2 mixed-type
            # ordering — confirm before porting to Python 3.
            installed_is_newer = v1nums > v0nums
            newer_str = ''
            if installed_is_newer:
                newer_str = ', this is newer!!! Update pip-requirements.txt???'
            print(k, v0, '(Installed is %s%s)' % (v1, newer_str))
            pending.append((line, 'update'))
    else:
        print('-' * 80)
        print('None are obsolete.')
    if return_type == INSTALLED:
        return installed_package_versions
    return pending
def sync(sync_set, force=0):
    """
    Uploads media to an Amazon S3 bucket using s3sync.

    Requires the s3sync gem: sudo gem install s3sync

    Parameters:
        sync_set -- key into env.s3_sync_sets selecting which path pairs
            to synchronize.
        force -- if true, use `s3cmd put --force` instead of `s3cmd sync`.
    """
    from burlap.dj import get_settings, render_remote_paths
    force = int(force)
    env.s3_sync_force_flag = ' --force ' if force else ''
    # Pull every AWS_* setting from the Django settings into the env so the
    # command templates below can reference them.
    _settings = get_settings(verbose=1)
    assert _settings, 'Unable to import settings.'
    for k in _settings.__dict__.iterkeys():
        if k.startswith('AWS_'):
            env[k] = _settings.__dict__[k]
    render_remote_paths()
    site_data = env.sites[env.SITE]
    env.update(site_data)
    rets = []  # NOTE(review): appears unused — results are never collected.
    for paths in env.s3_sync_sets[sync_set]:
        is_local = paths.get('is_local', True)
        local_path = paths['local_path'] % env
        remote_path = paths['remote_path']
        remote_path = remote_path.replace(':/', '/')
        if not remote_path.startswith('s3://'):
            remote_path = 's3://' + remote_path
        local_path = local_path % env
        if is_local:
            env.s3_local_path = os.path.abspath(local_path)
        else:
            env.s3_local_path = local_path
        # Preserve a trailing slash so s3cmd syncs directory contents
        # rather than the directory itself.
        if local_path.endswith('/') and not env.s3_local_path.endswith('/'):
            env.s3_local_path = env.s3_local_path + '/'
        env.s3_remote_path = remote_path % env
        print('Syncing %s to %s...' % (env.s3_local_path, env.s3_remote_path))
        # `put` forces a full upload; `sync` only transfers changed files.
        if force:
            env.s3_sync_cmd = 'put'
        else:
            env.s3_sync_cmd = 'sync'
        cmd = (
            'export AWS_ACCESS_KEY_ID=%(AWS_ACCESS_KEY_ID)s; '
            'export AWS_SECRET_ACCESS_KEY=%(AWS_SECRET_ACCESS_KEY)s; '
            's3cmd %(s3_sync_cmd)s --progress --acl-public --guess-mime-type --no-mime-magic '
            '--delete-removed --cf-invalidate --recursive %(s3_sync_force_flag)s '
            '%(s3_local_path)s %(s3_remote_path)s') % env
        if is_local:
            local_or_dryrun(cmd)
        else:
            run_or_dryrun(cmd)