def __init__(self, container_maps=None, docker_client=None, clients=None, **kwargs):
    """
    Initialize the fabric container handler.

    :param container_maps: A single container map or a list/tuple of maps;
        falls back to ``env.docker_maps``.
    :param docker_client: Default docker client configuration; falls back to
        the default entry of the client configurations.
    :param clients: Dictionary of client configurations; falls back to
        ``env.docker_clients``.
    :raises ValueError: If a map references a client that is not configured,
        or a configured client has no ``fabric_host`` entry.
    """
    all_maps = container_maps or env.get('docker_maps', ())
    # Normalize a single map into a one-element tuple so iteration below
    # works uniformly.
    if not isinstance(all_maps, (list, tuple)):
        env_maps = all_maps,
    else:
        env_maps = all_maps
    all_configs = clients or env.get('docker_clients', dict())
    current_clients = dict()
    default_client = docker_client or _get_default_config(all_configs)
    for c_map in env_maps:
        # Collect client names referenced by the map itself and by each of
        # its container configurations.
        map_clients = set(c_map.clients)
        for config_name, c_config in c_map:
            if c_config.clients:
                map_clients.update(c_config.clients)
        # Validate each referenced client exactly once.
        for map_client in map_clients:
            if map_client not in current_clients:
                client_config = all_configs.get(map_client)
                if not client_config:
                    raise ValueError("Client '{0}' used in map '{1}' not configured.".format(map_client, c_map.name))
                client_host = client_config.get('fabric_host')
                if not client_host:
                    raise ValueError("Client '{0}' is configured, but has no 'fabric_host' definition.".format(map_client))
                current_clients[map_client] = client_config
    if not (default_client or clients):
        # No explicit client configuration anywhere: fall back to a default
        # configuration instance.
        default_client = self.configuration_class()
    super(ContainerFabric, self).__init__(container_maps=all_maps, docker_client=default_client, clients=current_clients, **kwargs)
def add_to_bacula_master(shortname=None, path=None, bacula_host_string=None):
    """Add this server's Bacula client configuration to Bacula master.

    All parameters fall back to the corresponding ``env`` values and are
    required (``err`` aborts when a value is missing).
    """
    opts = dict(
        shortname=shortname or env.get("shortname") or err("env.shortname must be set"),
        path=path or env.get("path") or err("env.path must be set"),
        bacula_host_string=bacula_host_string or env.get("bacula_host_string") or err("env.bacula_host_string must be set"),
    )
    # Run the commands below against the Bacula master host, not the host
    # currently being provisioned.
    with settings(host_string=opts["bacula_host_string"]):
        # upload project-specific configuration
        upload_template(
            "%(path)s/etc/bacula-client.conf" % opts,
            "/etc/bacula/clients/%(shortname)s.conf" % opts,
            use_sudo=True
        )
        # Create a file that will contain a list of files to backup for this
        # server (a fileset) - this file is updated automatically by every
        # project installed on this server (check add_files_to_backup in
        # project.py)
        fileset_path = "/etc/bacula/clients/%(shortname)s-fileset.txt" % opts
        if not exists(fileset_path):
            sudo("touch %s" % fileset_path)
        sudo("chown bacula %s" % fileset_path)
        # reload bacula master configuration
        sudo("service bacula-director restart")
def push():
    """
    Push image to registry and cleanup previous release.

    Deleting the previous release tag from ECR is best-effort; the push of
    the versioned tag and the release tag always runs.
    """
    deploy_version = _read_tag()
    # Delete previous master from ECR (the tag may legitimately not exist).
    try:
        delete_args = {
            "region": env.get("AWS_REGION", "us-east-1"),
            "profile": env.get("AWS_PROFILE"),
        }
        delete_command = "aws ecr batch-delete-image --repository-name %s " \
            "--image-ids imageTag=%s" % (env.IMAGE_NAME, env.RELEASE_TAG)
        for arg in delete_args:
            # Skip unset options (e.g. no AWS_PROFILE configured).
            if not delete_args[arg]:
                continue
            delete_command += " --%s=%s" % (arg, delete_args[arg])
        local(delete_command)
    # Bug fix: was a bare "except:", which also swallowed SystemExit and
    # KeyboardInterrupt.
    except Exception:
        print("Remote image tag not found")
    # Push image to repository
    local("docker push %s:%s" % (env.WEB_REPOSITORY, deploy_version))
    local("docker push %s:%s" % (env.WEB_REPOSITORY, env.RELEASE_TAG))
def upload_blobs(prod_user=None, path=None):
    """Upload BLOB part of Zope's data to the server.

    :param prod_user: Production user owning the data; falls back to env.
    :param path: Local buildout path; falls back to env or the CWD.
    """
    opts = dict(
        prod_user=prod_user or env.get('prod_user'),
        path=path or env.get('path') or os.getcwd(),
        # Bug fix: the cd() path below interpolates %(shortname)s, which was
        # missing from opts and raised KeyError at runtime.
        shortname=env.get('shortname'),
    )
    if not env.get('confirm'):
        # Bug fix: the confirm() answer was previously discarded, so the
        # destructive upload ran even when the operator answered "no".
        if not confirm("This will destroy all current BLOB files on the server. "
                       "Are you sure you want to continue?"):
            return
    with cd('/home/%(prod_user)s/niteoweb.%(shortname)s/var' % opts):
        # backup current BLOBs
        if exists('blobstorage'):
            sudo('mv blobstorage blobstorage.bak')
        # remove temporary BLOBs from previous uploads
        if exists('/tmp/blobstorage'):
            sudo('rm -rf /tmp/blobstorage')
        # upload BLOBs to the server and move them to their place
        rsync_project('/tmp', local_dir='%(path)s/var/blobstorage' % opts)
        sudo('mv /tmp/blobstorage ./')
        sudo('chown -R %(prod_user)s:%(prod_user)s blobstorage' % opts)
        sudo('chmod -R 700 blobstorage')
def configure_hetzner_backup(duplicityfilelist=None, duplicitysh=None):
    """Hetzner gives us 100GB of backup storage. Let's use it with
    Duplicity to backup the whole disk.

    :param duplicityfilelist: Local path of the include/exclude list; falls
        back to ``env.duplicityfilelist`` or ``./etc/duplicityfilelist.conf``.
    :param duplicitysh: Local path of the duplicity driver script; falls
        back to ``env.duplicitysh`` or ``./etc/duplicity.sh``.
    """
    opts = dict(
        duplicityfilelist=duplicityfilelist or env.get('duplicityfilelist') or '%s/etc/duplicityfilelist.conf' % os.getcwd(),
        duplicitysh=duplicitysh or env.get('duplicitysh') or '%s/etc/duplicity.sh' % os.getcwd(),
    )
    # install duplicity and dependencies
    sudo('add-apt-repository ppa:duplicity-team/ppa')
    sudo('apt-get update')
    sudo('apt-get -yq install duplicity ncftp')
    # what to exclude
    upload_template(opts['duplicityfilelist'], '/etc/duplicityfilelist.conf', use_sudo=True)
    # script for running Duplicity
    upload_template(opts['duplicitysh'], '/usr/sbin/duplicity.sh', use_sudo=True)
    sudo('chmod +x /usr/sbin/duplicity.sh')
    # cronjob: run the backup daily at 08:00
    sudo("echo '0 8 * * * root /usr/sbin/duplicity.sh' > /etc/cron.d/duplicity ")
    # NOTE(review): the confirm() answer is discarded here - this only acts
    # as an informational pause unless env.confirm is set.
    if not env.get('confirm'):
        confirm("You need to manually run a full backup first time. Noted?")
def compare_manifest(old):
    """Compare a previously recorded manifest against the current ``env``
    and return the queued commands needed to reconcile the differences.

    :param old: Previously recorded manifest dict (may be None/empty).
    :return: List of ``QueuedCommand`` instances to run.
    """
    old = old or {}
    methods = []
    pre = []
    # Handle SSH key specification change.
    old_key = (old.get('user_key_type'), old.get('user_key_bits'), old.get('user_key_filename'))
    new_key = (env.get('user_key_type'), env.get('user_key_bits'), env.get('user_key_filename'))
    if old_key != new_key:
        methods.append(QueuedCommand('user.generate_keys', pre=pre))
    # Handle username change.
    force_togroups = False
    force_passwordless = env.user_passwordless and old.get('user_passwordless') != env.user_passwordless
    if old.get('user') != env.user:
        # A new username implies group membership and (optionally) the
        # passwordless setup must be re-applied as well.
        force_togroups = True
        force_passwordless = env.user_passwordless
        methods.append(QueuedCommand('user.create', kwargs=dict(username=env.user), pre=pre))
    # Handle user group change.
    if force_togroups or old.get('user_groups') != env.user_groups:
        methods.append(QueuedCommand('user.togroups', kwargs=dict(user=env.user, groups=env.user_groups), pre=pre))
    # Handle switch to passwordless access.
    #TODO:support different username used for creating passworless user?
    if force_passwordless:
        methods.append(QueuedCommand('user.passwordless', kwargs=dict(username=env.user, pubkey=env.key_filename), pre=pre))
    #TODO: Handle switch from passwordless access? Remove old SSH key from remote and local caches?
    return methods
def setup_ubuntu():
    """Configure Ubuntu apt sources and Java defaults on ``env``.

    Uses ``env.dist_name``/``env.dist_version`` when present, otherwise
    defaults to maverick/10.10. Populates ``env.std_sources``,
    ``env.python_version_ext`` and ``env.java_home``.
    """
    default_dist = "maverick"
    default_version = "10.10"
    env.sources_file = "/etc/apt/sources.list"
    version = (env.get("dist_name", default_dist), env.get("dist_version", default_version))
    sources = [
        "deb http://us.archive.ubuntu.com/ubuntu/ %s universe",
        "deb-src http://us.archive.ubuntu.com/ubuntu/ %s universe",
        "deb http://us.archive.ubuntu.com/ubuntu/ %s-updates universe",
        "deb-src http://us.archive.ubuntu.com/ubuntu/ %s-updates universe",
        "deb http://us.archive.ubuntu.com/ubuntu/ %s multiverse",
        "deb-src http://us.archive.ubuntu.com/ubuntu/ %s multiverse",
        "deb http://us.archive.ubuntu.com/ubuntu/ %s-updates multiverse",
        "deb-src http://us.archive.ubuntu.com/ubuntu/ %s-updates multiverse",
        "ppa:sun-java-community-team/sun-java6",  # sun-java
        # bug fix: was "% s" (stray space inside the conversion specifier)
        "deb http://downloads.mongodb.org/distros/ubuntu %s 10gen",  # mongodb
        "deb http://cran.stat.ucla.edu/bin/linux/ubuntu %s/",  # lastest R versions
        "deb http://nebc.nox.ac.uk/bio-linux/ unstable bio-linux",  # Bio-Linux
        "deb http://archive.cloudera.com/debian %s-cdh3 contrib",  # Hadoop
        "deb http://ppa.launchpad.net/freenx-team/ppa/ubuntu lucid main",  # FreeNX PPA
        "deb http://download.virtualbox.org/virtualbox/debian %s contrib",  # virtualbox
    ]
    env.std_sources = _add_source_versions(version, sources)
    env.python_version_ext = ""
    # "in" instead of dict.has_key(), which was removed in Python 3.
    if "java_home" not in env:
        # XXX look for a way to find JAVA_HOME automatically
        env.java_home = "/usr/lib/jvm/java-6-openjdk"
def upload_zodb(prod_user=None, path=None):
    """Upload ZODB part of Zope's data to the server.

    :param prod_user: Production user owning the data; falls back to env.
    :param path: Local buildout path; falls back to env or the CWD.
    """
    opts = dict(
        prod_user=prod_user or env.get('prod_user'),
        path=path or env.get('path') or os.getcwd(),
        # Bug fix: the cd() path below interpolates %(shortname)s, which was
        # missing from opts and raised KeyError at runtime.
        shortname=env.get('shortname'),
    )
    if not env.get('confirm'):
        # Bug fix: the confirm() answer was previously discarded, so the
        # destructive upload ran even when the operator answered "no".
        if not confirm("This will destroy the current Data.fs file on the server. "
                       "Are you sure you want to continue?"):
            return
    with cd('/home/%(prod_user)s/niteoweb.%(shortname)s/var/filestorage' % opts):
        # remove a temporary Data.fs left over from previous uploads
        if exists('/tmp/Data.fs'):
            sudo('rm -rf /tmp/Data.fs')
        # upload Data.fs to server and set production user as its owner
        upload_template(
            filename='%(path)s/var/filestorage/Data.fs' % opts,
            destination='Data.fs',
            use_sudo=True
        )
        sudo('chown -R %(prod_user)s:%(prod_user)s Data.fs' % opts)
def upload_blobs(prod_user=None, path=None, blob_folders=None):
    """Upload BLOB part of Zope's data to the server.

    :param prod_user: Production user owning the data; falls back to env.
    :param path: Local buildout path; falls back to env or the CWD.
    :param blob_folders: Folder names under var/ to sync; falls back to
        env or ``['blobstorage']``.
    """
    opts = dict(
        prod_user=prod_user or env.get('prod_user'),
        path=path or env.get('path') or os.getcwd(),
    )
    blob_folders = blob_folders or env.get('blob_folders') or ['blobstorage']
    # env.confirm short-circuits the interactive prompt for unattended runs.
    confirmed = env.get('confirm') or confirm("This will destroy all current" \
        " BLOB files on the server. Are you sure you want to continue?")
    if not confirmed:
        return
    with cd('/home/%(prod_user)s/var' % opts):
        for folder in blob_folders:
            # opts is reused for %-interpolation of the current folder name.
            opts['folder'] = folder
            # backup current BLOBs
            if exists(folder):
                # remove the previous backup
                sudo('rm -rf %(folder)s.bak' % opts)
                # create a backup
                sudo('mv %(folder)s %(folder)s.bak' % opts)
            # remove temporary BLOBs from previous uploads
            if exists('/tmp/%(folder)s' % opts):
                sudo('rm -rf /tmp/%(folder)s' % opts)
            # upload BLOBs to the server and move them to their place
            rsync_project('/tmp', local_dir='%(path)s/var/%(folder)s' % opts)
            sudo('mv /tmp/%(folder)s ./' % opts)
            sudo('chown -R %(prod_user)s:%(prod_user)s %(folder)s' % opts)
            sudo('chmod -R 700 %(folder)s' % opts)
def debootstrap(release=None, mirror=None, target_arch=None):
    """
    Debootstraps debian based image

    :param release: [Debian](http://www.debian.org/releases/)/[Ubuntu](https://wiki.ubuntu.com/DevelopmentCodeNames) release name
    :type release: str
    :param mirror: Url of the mirror (default http://de.archive.ubuntu.com/ubuntu/")
    :type mirror: str
    :param target_arch: architecture name like x86 or amd64
    :type target_arch: str
    """
    # Resolve each setting: explicit argument, then env, then default.
    params = {
        "release": release or env.get("release") or "oneiric",
        "mirror": mirror or env.get("mirror") or "http://de.archive.ubuntu.com/ubuntu/",
        "target_arch": target_arch or env.get("target_arch") or "amd64",
    }
    with root():
        params["target"] = "debootstrap/%(release)s_%(target_arch)s" % params
        if not exists(params["target"]):
            run("mkdir -p %s" % params["target"])
        puts("""Debootstraping release=%(release)s target=%(target)s mirror=%(mirror)s target_arch=%(target_arch)s to %(target)s""" % params)
        sudo("debootstrap --arch %(target_arch)s %(release)s %(target)s %(mirror)s" % params)
def rsync_update_from_git():
    """Update project via rsync based on settings from
    rsync_deploy.setup_rsync(), git_deploy.setup_git()
    and project.setup_project_path().
    """
    if not env.get('git_url'):
        raise AssertionError('Please set env.git_url.')
    # create a temporary directory name (timestamp-based, hidden dir)
    tmp_dir = datetime.utcnow().strftime('%Y%m%d%H%M%S')
    # the code below really should be in a well-grained try-except block
    # to cleanup tmp_dir directory
    # and clone to it
    local('git clone %(git_url)s .{0}'.format(tmp_dir) % env)
    # do branch switching and pull changes from it, if needed
    with lcd('.{0}'.format(tmp_dir)):
        if env.get('git_branch'):
            local('git checkout %(git_branch)s' % env)
        elif env.get('git_tag'):
            local('git checkout %(git_tag)s' % env)
        local('git pull')
    # do rsync: temporarily point env.rsync_local at the fresh clone and
    # restore the original value afterwards.
    _old_local = env.rsync_local
    env.rsync_local = os.path.join('.{0}'.format(tmp_dir), '*')
    rsync_update_project()
    env.rsync_local = _old_local
    # cleanup the temporary dir
    local('rm -rf .{0}'.format(tmp_dir))
def query(query, use_sudo=True, **kwargs):
    """
    Run a MySQL query.

    Connection settings are taken from kwargs first, then from env
    (mysql_user, mysql_password, mysql_host, mysql_defaults_extra_file).
    """
    runner = run_as_root if use_sudo else run
    user = kwargs.get('mysql_user') or env.get('mysql_user')
    password = kwargs.get('mysql_password') or env.get('mysql_password')
    mysql_host = kwargs.get('mysql_host') or env.get('mysql_host')
    defaults_extra_file = kwargs.get('mysql_defaults_extra_file') or env.get('mysql_defaults_extra_file')
    # Assemble the command-line options in a fixed order.
    option_parts = []
    if defaults_extra_file:
        option_parts.append('--defaults-extra-file=%s' % quote(defaults_extra_file))
    if user:
        option_parts.append('--user=%s' % quote(user))
    if password:
        option_parts.append('--password=%s' % quote(password))
    if mysql_host:
        option_parts.append('--host=%s' % quote(mysql_host))
    option_parts.extend([
        '--batch',
        '--raw',
        '--skip-column-names',
    ])
    options = ' '.join(option_parts)
    return runner('mysql %(options)s --execute=%(query)s' % {
        'options': options,
        'query': quote(query),
    })
def install_mysql(default_password=None):
    """Install MySQL database server.

    :param default_password: Initial root password; falls back to
        ``env.default_password`` or 'secret'.
    """
    opts = dict(
        default_password=default_password or env.get('default_password') or 'secret'
    )
    # first set root password in advance so we don't get the package
    # configuration dialog
    sudo('echo "mysql-server-5.0 mysql-server/root_password password %(default_password)s" | debconf-set-selections' % opts)
    sudo('echo "mysql-server-5.0 mysql-server/root_password_again password %(default_password)s" | debconf-set-selections' % opts)
    # install MySQL along with php drivers for it
    # (bug fix: dropped the redundant nested 'sudo' inside the command -
    # the call already runs through sudo())
    sudo('apt-get -yq install mysql-server mysql-client')
    if not env.get('confirm'):
        # NOTE(review): the confirm() answer is discarded, so this acts only
        # as an informational pause - confirm whether declining should abort.
        confirm("You will now start with interactive MySQL secure installation."
                " Current root password is '%(default_password)s'. Change it "
                "and save the new one to your password manager. Then answer "  # typo fix: "managere"
                "with default answers to all other questions. Ready?" % opts)
    sudo('/usr/bin/mysql_secure_installation')
    # restart mysql and php-fastcgi
    sudo('service mysql restart')
    sudo('/etc/init.d/php-fastcgi restart')
    # configure daily dumps of all databases (07:00 via cron)
    sudo('mkdir /var/backups/mysql')
    password = prompt('Please enter your mysql root password so I can configure daily backups:')
    sudo("echo '0 7 * * * mysqldump -u root -p%s --all-databases | gzip > /var/backups/mysql/mysqldump_$(date +%%Y-%%m-%%d).sql.gz' > /etc/cron.d/mysqldump" % password)
def upload_template_jinja2(template, destination, use_sudo=True):
    """
    Uploads template using jinja2

    :param template: Local path to template
    :type template: str
    :param destination: Remote destination path
    :type destination: str
    :param use_sudo: Should we use sudo
    :type use_sudo: bool
    :returns: Whatever upload_template returns
    """
    # env.path is the template search root and must be configured.
    env.get("path") or err("env.path not set")
    environment = Environment(loader=FileSystemLoader(env.path))
    environment.globals["get_envvar"] = get_envvar
    rendered = environment.get_template(template).render(**env.get("settings") or {})
    put(
        local_path=StringIO(rendered),
        remote_path=destination,
        use_sudo=use_sudo,
    )
def _set_target(target_type):
    """Set host to staging"""
    # Resolve target-specific env keys, aborting via _err when required
    # settings are missing.
    host_key = '%s_host' % target_type
    target_host = env.get(host_key) or _err('%s need to be set' % host_key)
    env.hosts = [target_host]
    env.user = env.get('%s_user' % target_type) or 'plone'
    code_dir = env.get('%s_dir' % target_type) or _err('%s_dir need to be set' % target_type)
    env.code_dir = code_dir
def check_stage_and_role():
    """Abort unless both env.stage and env.role are configured."""
    # Both values must be present and truthy.
    if not (env.get('stage') and env.get('role')):
        abort('stage or role not set!')
def install_plugins( name='' ):
    """
    Installs plugins and initialize according to the settings.json file.

    :param name: This is an argument for install one specific plugin if
        this is null install all plugins
    """
    require('public_dir', 'wpworkflow_dir')
    check_plugins()
    # Install all plugins
    if( not name ):
        print "Installing plugins..."
        # Installs custom (project-local) plugins first
        for custom_plugin in env.get("custom_plugins", []):
            install_custom_plugin( custom_plugin )
        # Installs 3rd party plugins
        with cd(env.public_dir):
            for plugin in env.get("plugins", []):
                install_plugin( plugin )
    # Install one plugin
    else:
        # check_if_plugin_exist returns (found, type, settings.json entry)
        flag, plugin_type, plugin_data = check_if_plugin_exist( name )
        if flag:
            if plugin_type == 1:
                # Custom plugin install
                install_custom_plugin( plugin_data )
            if plugin_type == 2:
                # Plugin install
                install_plugin( plugin_data )
        else:
            print "The plugin: " + blue( name, bold=True) + "does not find in setting.json ..."
def hammer(command):
    """Run hammer -u <admin_user> -p <admin_password> --output json <command>.

    This method has a dependency on set_hammer_config function.

    :param str command: The hammer subcommand to run.
    :return: Return a JSON decoded object containing the result of the
        command. The returned object will exhibit ``failed`` and
        ``succeeded`` boolean attributes specifying whether the command
        failed or succeeded, and will also include the return code as the
        ``return_code`` attribute.
    """
    command_result = run(
        'hammer --username {0} --password {1} --output json {2}'
        .format(env.get('hammer_user'), env.get('hammer_password'), command),
        quiet=True
    )
    try:
        data = json.loads(command_result)
    except ValueError:
        # Output was not valid JSON (e.g. an error message); keep the raw
        # command output instead.
        data = command_result
    result = _lower_dict_keys(data)
    # Wrap containers so attribute-style access works, then carry the
    # fabric run() metadata through on the wrapper.
    if isinstance(result, list):
        result = _AttributeList(result)
    elif isinstance(result, dict):
        result = _AttributeDict(result)
    result.succeeded = command_result.succeeded
    result.failed = command_result.failed
    result.return_code = command_result.return_code
    return result
def check_if_plugin_exist( name ): print "Check if " + name + " exists in settings.json" """ This function chek if a plugin exist within settings.json file This function returns: bolean True if plugin exists integer 0 for null | 1 for custom_plugin | 2 for plugin dic setings.json plugin data """ flag = False plugin_type = 0 plugin_data = "" custom_plugins = env.get("custom_plugins", []) plugins = env.get("plugins") for custom_plugin in custom_plugins: if name == custom_plugin['name']: flag = True plugin_type = 1 plugin_data = custom_plugin for custom_plugin in plugins: if name == custom_plugin['name']: flag = True plugin_type = 2 plugin_data = custom_plugin return ( flag, plugin_type, plugin_data )
def render_template(filename, remote_path=None, context=None, use_sudo=False):
    """Render a local template and upload it to the remote host.

    :param filename: Template name, resolved by template_to_string().
    :param remote_path: Remote destination; relative paths are resolved
        against ``env.configs_path``, and paths that look like directories
        get ``filename`` appended.
    :param context: Extra template context passed to the renderer.
    :param use_sudo: Upload with sudo.
    :return: The final remote destination path.
    """
    # One-time (per host) creation of the remote directory skeleton; the
    # flag is cached in env so repeated calls skip the sudo round-trips.
    key = 'tdm{0}'.format(env.host_string)
    if not env.get(key):
        sudo('mkdir -p {0}'.format(env.base_remote_path))
        sudo('mkdir -p {0}'.format(env.configs_path))
        sudo('chown {0} {1}'.format(env.user, env.configs_path))
        sudo('chown {0} {1}'.format(env.user, env.base_remote_path))
        env[key] = True
    if not remote_path:
        remote_path = env.configs_path
    else:
        if not os.path.isabs(remote_path):
            remote_path = os.path.join(env.configs_path, remote_path)
    basename = os.path.basename(remote_path)
    # Heuristic: a basename without a dot is treated as a directory, so the
    # template keeps its own file name there.
    if not basename or not '.' in basename:
        dest_path = os.path.join(remote_path, filename)
    else:
        dest_path = remote_path
    # Ensure the destination directory exists (also cached per host+dir).
    bkey = '{0}{1}'.format(env.host_string, os.path.dirname(dest_path))
    if not env.get(bkey):
        # Make sure dir exists
        run('mkdir -p {0}'.format(os.path.dirname(dest_path)))
        env[bkey] = True
    # Render template
    template = template_to_string(filename, context=context)
    put(local_path=io.StringIO(template), remote_path = dest_path, use_sudo=use_sudo)
    return dest_path
def up(typ, count, bid=None):
    """Launch EC2 instances (spot when ``bid`` is given, on-demand
    otherwise) and tag them with sequential node names.

    :param typ: Node type used for the Name tag encoding.
    :param count: Number of instances to launch.
    :param bid: Optional spot price; when set, spot requests are issued.
    """
    conn = _get_connection(env)
    max_ind = _get_max_index(conn, typ)
    # Drop empty entries; boto expects None for "no security groups".
    groups = filter(None, env.group.split(',')) or None
    if bid is not None:
        results = conn.request_spot_instances(
            image_id=env.image,
            price=bid,
            count=int(count),
            key_name=env.key_pair,
            security_groups=groups,
            instance_type=env.instance,
            placement=env.get('zone'),
            subnet_id=env.get('subnet'))
        printer = _print_bid_requests
    else:
        reservation = conn.run_instances(
            image_id=env.image,
            min_count=int(count),
            max_count=int(count),
            key_name=env.key_pair,
            security_groups=groups,
            instance_type=env.instance,
            placement=env.get('zone'),
            subnet_id=env.get('subnet'))
        results = reservation.instances
        printer = _print_instances
    # Re-read the max index right before tagging to narrow the race window
    # with concurrent launches.
    max_ind = _get_max_index(conn, typ)
    for r in results:
        max_ind += 1
        r.add_tag('Name', _encode_nodename(typ, max_ind))
    printer(results)
def login(self, **kwargs):
    """
    Identical to :meth:`dockermap.map.base.DockerClientWrapper.login` with
    two enhancements:

    * additional logging;
    * login parameters can be passed through ``kwargs``, or set as default
      using the following ``env`` variables:

      * ``env.docker_registry_user`` (kwarg: ``username``),
      * ``env.docker_registry_password`` (kwarg: ``password``),
      * ``env.docker_registry_mail`` (kwarg: ``email``),
      * ``env.docker_registry_repository`` (kwarg: ``registry``),
      * ``env.docker_registry_insecure`` (kwarg: ``insecure_registry``).
    """
    # pop() removes the recognized kwargs so only unrecognized extras are
    # forwarded to the superclass.
    c_user = kwargs.pop('username', env.get('docker_registry_user'))
    c_pass = kwargs.pop('password', env.get('docker_registry_password'))
    c_mail = kwargs.pop('email', env.get('docker_registry_mail'))
    c_registry = kwargs.pop('registry', env.get('docker_registry_repository'))
    c_insecure = kwargs.pop('insecure_registry', env.get('docker_registry_insecure'))
    if super(DockerFabricClient, self).login(c_user, password=c_pass, email=c_mail, registry=c_registry,
                                             insecure_registry=c_insecure, **kwargs):
        self.push_log("Login at registry '{0}' succeeded.".format(c_registry))
        return True
    self.push_log("Login at registry '{0}' failed.".format(c_registry))
    return False
def root():
    """Return a context manager that cd's into the configured root dir.

    On the first call the root directory (``env.root`` or "ubuntu") is
    created and entered; ``env.noroot`` is then set so subsequent calls
    stay in the current directory instead of descending again.
    NOTE(review): reusing ``noroot`` as a "already entered" flag is
    surprising - confirm the intent before relying on it.
    """
    if not env.get("noroot"):
        root = env.get("root") or "ubuntu"
        if not exists(root):
            run("mkdir -p %s" % root)
        # Mark as entered so nested calls do not descend a second time.
        env.noroot = True
        return cd(root)
    return cd(".")
def deploy(actor=None):
    """Deploy current master to production server.

    :param actor: Who is deploying; falls back to ``env.actor`` or the
        local username.
    """
    # NOTE(review): opts is built (and resolves actor) but is never used
    # below - presumably intended for logging/notification; confirm before
    # removing.
    opts = dict(
        sitename=env.get('sitename'),
        hostname=env.get('hostname'),
        actor=actor or env.get('actor') or getpass.getuser(),
    )
    project.site.update()
def run(self, **kwargs):
    """Provision a new Joyent machine for the given setup type, wait for
    it to come up, then execute its setup task against it.

    Recognized kwargs: ``type`` (selects the setup task), ``data_center``,
    ``name``, ``data_set``, ``metadata``, ``tags``, ``package``.
    """
    assert not env.hosts
    if not env.get('joyent_account'):
        print "To use the joyent api you must add a joyent_account value to your env"
        sys.exit(1)
    setup_name = 'setup.%s' % kwargs.get('type')
    task = functions.get_task_instance(setup_name)
    default_dataset = DEFAULT_DATASET
    default_package = DEFAULT_PACKAGE
    if task:
        # The setup task may override the machine image and size.
        if hasattr(task, 'dataset'):
            default_dataset = task.dataset
        if hasattr(task, 'server_size'):
            default_package = task.server_size
    else:
        print "I don't know how to add a %s server" % kwargs.get('type')
        sys.exit(1)
    # Data center: explicit argument wins, then the env default.
    location = kwargs.get('data_center')
    if not location and env.get('joyent_default_data_center'):
        location = env.joyent_default_data_center
    elif not location:
        print "You must supply an data_center argument or add a joyent_default_data_center attribute to your env"
        sys.exit(1)
    key_name = raw_input('Enter your ssh key name: ')
    key_id = '/%s/keys/%s' % ( env.joyent_account, key_name)
    sdc = DataCenter(location=location, key_id=key_id)
    name = functions.get_remote_name(None, task.config_section, name=kwargs.get('name'))
    new_args = {
        'name' : name,
        'dataset' : kwargs.get('data_set', default_dataset),
        'metadata' : kwargs.get('metadata', {}),
        'tags' : kwargs.get('tags', {}),
        'package' : kwargs.get('package', default_package)
    }
    machine = sdc.create_machine(**new_args)
    public_ip = machine.public_ips[0]
    print "added machine %s" % public_ip
    host_string = 'admin@%s' % public_ip
    print "waiting for machine to be ready"
    # Poll every 5s until the machine reports running.
    while machine.status() != 'running':
        print '.'
        time.sleep(5)
    print 'done'
    execute(setup_name, name=name, hosts=[host_string])
def run(self, **kwargs):
    """Provision a new Joyent machine for the given setup type, wait for
    it to come up, then execute its setup task against it.

    Variant that also honors ``env.allow_agent`` when connecting to the
    Joyent data center. Recognized kwargs: ``type`` (selects the setup
    task), ``data_center``, ``name``, ``data_set``, ``metadata``,
    ``tags``, ``package``.
    """
    assert not env.hosts
    if not env.get("joyent_account"):
        print "To use the joyent api you must add a joyent_account value to your env"
        sys.exit(1)
    setup_name = "setup.%s" % kwargs.get("type")
    task = functions.get_task_instance(setup_name)
    default_dataset = DEFAULT_DATASET
    default_package = DEFAULT_PACKAGE
    if task:
        # The setup task may override the machine image and size.
        if hasattr(task, "dataset"):
            default_dataset = task.dataset
        if hasattr(task, "server_size"):
            default_package = task.server_size
    else:
        print "I don't know how to add a %s server" % kwargs.get("type")
        sys.exit(1)
    # Data center: explicit argument wins, then the env default.
    location = kwargs.get("data_center")
    if not location and env.get("joyent_default_data_center"):
        location = env.joyent_default_data_center
    elif not location:
        print "You must supply an data_center argument or add a joyent_default_data_center attribute to your env"
        sys.exit(1)
    key_name = raw_input("Enter your ssh key name: ")
    key_id = "/%s/keys/%s" % (env.joyent_account, key_name)
    allow_agent = env.get("allow_agent", False)
    sdc = DataCenter(location=location, key_id=key_id, allow_agent=allow_agent)
    name = functions.get_remote_name(None, task.config_section, name=kwargs.get("name"))
    new_args = {
        "name": name,
        "dataset": kwargs.get("data_set", default_dataset),
        "metadata": kwargs.get("metadata", {}),
        "tags": kwargs.get("tags", {}),
        "package": kwargs.get("package", default_package),
    }
    machine = sdc.create_machine(**new_args)
    public_ip = machine.public_ips[0]
    print "added machine %s" % public_ip
    host_string = "admin@%s" % public_ip
    print "waiting for machine to be ready"
    # Poll every 5s until the machine reports running.
    while machine.status() != "running":
        print "."
        time.sleep(5)
    print "done"
    execute(setup_name, name=name, hosts=[host_string])
def deploy(self, node, first_run=False):
    """Deploy the application to the given EC2 instance.

    Configures DB connection settings on ``env`` (RDS endpoint when a
    master DB node exists, localhost otherwise), points fabric at the
    node, performs first-run EBS/backup setup, then runs the provisioner
    update.

    :param node: Deployment node wrapper exposing ``boto_instance``.
    :param first_run: Whether this is the node's first deployment.
    """
    logger.info("Deploying to instance: %s" % node)
    self.wait_until_created(node)
    # RDS vs non-RDS configuration
    env.db_host = 'localhost'
    env.db_master_user = '******'
    env.db_master_password = ''
    rds_node = self._get_masterdb()
    if rds_node:
        env.db_host, _ = rds_node.boto_instance.endpoint  # (Host, Port)
        env.db_master_user = rds_node.boto_instance.master_username
        env.db_master_password = env.rds_master_password
    # Set fab parameters based on the ec2 instance
    env.hosts = [node.boto_instance.public_dns_name]
    env.host_string = node.boto_instance.public_dns_name
    env.host = node.boto_instance.public_dns_name
    env.hostname = '%s-%s' % (env.get('env'), env.get('node_name'))
    env.ec2_instance_id = node.boto_instance.id
    if first_run:
        ebs_confs = self.ebs_confs
        ebs_debug = ebs_confs.get('debug', {})
        if ebs_debug.get('attach_ebs_volumes', True):
            # Create the .ec2tools configuration for snapshotting/backups
            self._configure_ec2tools(
                self.deployment.ec2conn,
                node.boto_instance.id,
                self.ebs_confs['vols'])
            if ebs_confs.get('do_snapshot_backups', False):
                self._configure_ebs_backup(
                    '/opt/pstat/versions/current/pstat',
                    '/home/policystat/env/bin/python',
                    '/vol/fs/pstat_storage',
                )
        else:
            logger.warning(
                "Debug ebs_conf found with 'attach_ebs_volumes' False")
            logger.warning("Not configuring ec2tools or backups.")
        # Fix file permissions
        logger.info(
            "Fixing file permissions and starting stopped services")
        self.provisioner.stop_services()
        self.provisioner.fix_folder_perms()
        self.provisioner.start_services()
    node_role_map = self._get_node_role_map()
    self.provisioner.do_update(
        node_role_map=node_role_map,
        node_roles=node._deployment_info.get('roles', []),
        first_run=not self.initial_deploy_complete,
    )
def deploy_template(env):
    """Re-clone the configured template repository on the remote host.

    Does nothing when ``env.template`` is not set; otherwise wipes
    ``env.template_repo`` and clones (optionally a specific branch).
    """
    template = env.get('template')
    if not template:
        return
    # Recreate the checkout directory from scratch.
    run("mkdir -p %s" % env.template_repo)
    run("ls -al %s" % env.template_repo)
    run("rm -rf %s" % env.template_repo)
    branch = env.get('template_branch')
    if branch:
        run("git clone -b %s %s %s" % (branch, template, env.template_repo))
    else:
        run("git clone %s %s" % (template, env.template_repo))
def run(self, name):
    """Configure the test-server settings for ``name`` and update it.

    :param name: Instance name; its directory is env.site_root + name.
    """
    env.roles = ['testserver']
    self.env = env
    self.instance_name = name
    self.instance_dir = self.env.site_root + self.instance_name
    # Optional env overrides with sensible defaults.
    self.virtualenv_name = env.get('virtualenv_name', 'virtualenv')
    self.run_migrations = env.get('run_migrations', True)
    self.trusted_pypi_hosts = env.get('trusted_pypi_hosts', DEFAULT_TRUSTED_HOSTS)
    self.update_test_instance()
def run(self, db_name):
    """Create a UTF-8 PostgreSQL database owned by the configured user."""
    self.env = env
    self.username = env.get('db_user', 'ngdm_wpf')
    self.port = env.get('db_port', '5432')
    self.db_name = db_name
    # Build the createdb command once, then execute it remotely.
    command = ("createdb %s -h localhost -U postgres "
               "--lc-collate 'en_US.UTF-8' --lc-ctype 'en_US.UTF-8' "
               "-E UTF8 -O %s -p %s" % (db_name, self.username, self.port))
    run(command)
    self._post_database_setup()
def func_with_setup(*args, **kwargs):
    """Copy per-host group settings into env (if any), then call the
    wrapped function."""
    # If `s:server` was run before the current command - then we should copy values to
    # `env`. Otherwise, hosts were passed through command line with `fab -H host1,host2
    # command` and we skip.
    if env.get("group", None):
        for key, val in env.group[env.host].items():
            setattr(env, key, val)
            if fabric.state.output['debug']:
                puts("[env] %s : %s" % (key, val))
    func(*args, **kwargs)
def execute(task):
    """
    Perform vagrant checking before executing task
    """
    # setup env from a VM name when running against vagrant
    vagrant_conf = env.get('vagrant', False)
    if vagrant_conf:
        # Use KeyError to fail - ip_map is necessary
        _vagrant(vagrant_conf['ip_map'][env.host])
    _execute(task)
def query(query, use_sudo=True, **kwargs):
    """
    Run a MySQL query.

    Connection settings come from kwargs first, then env (mysql_user,
    mysql_password, mysql_host).
    """
    family = distrib_family()
    # Import the right package-management helpers for the distro family.
    if family == 'debian':
        from fabtools.deb import install, is_installed
    elif family == 'redhat':
        from fabtools.rpm import install, is_installed
    else:
        raise UnsupportedFamily(supported=['debian', 'redhat'])
    func = use_sudo and run_as_root or run
    user = kwargs.get('mysql_user') or env.get('mysql_user')
    password = kwargs.get('mysql_password') or env.get('mysql_password')
    func_mysql = 'mysql'
    mysql_host = kwargs.get('mysql_host') or env.get('mysql_host')
    options = [
        '--batch',
        '--raw',
        '--skip-column-names',
    ]
    if user:
        options.append('--user=%s' % quote(user))
    if password:
        # sshpass supplies the password non-interactively.
        if not is_installed('sshpass'):
            install('sshpass')
        func_mysql = 'sshpass -p %(password)s mysql' % {'password': password}
        options.append('--password')
        # NOTE(review): both the bare '--password' and '--password=...' are
        # passed here; the explicit form should take precedence, which makes
        # the bare flag (and possibly sshpass) redundant - confirm the
        # intent before changing.
        options.append('--password=%s' % quote(password))
    if mysql_host:
        options.append('--host=%s' % quote(mysql_host))
    options = ' '.join(options)
    return func('%(cmd)s %(options)s --execute=%(query)s' % {
        'cmd': func_mysql,
        'options': options,
        'query': quote(query),
    })
def hammer(command, output='json'):
    """Run hammer -u <admin_user> -p <admin_password> --output <output> <command>.

    This method has a dependency on set_hammer_config function.

    :param str command: The hammer subcommand to run
    :param str output: The command output type which hammer supports, by
        default json
    :return: if output is json, then returns a JSON decoded object
        containing the result of the command. The returned object will
        exhibit ``failed`` and ``succeeded`` boolean attributes specifying
        whether the command failed or succeeded, and will also include the
        return code as the ``return_code`` attribute. Else, returns a
        string of given output type representation of hammer command
        output.
    """
    output = output.lower()
    command_result = run(
        'hammer --username {0} --password {1} --output {2} {3}'.format(
            env.get('hammer_user'), env.get('hammer_password'), output, command),
        quiet=True)
    if output == 'json':
        try:
            data = json.loads(command_result)
        except ValueError:
            # Output was not valid JSON; keep the raw command output.
            data = command_result
        result = _lower_dict_keys(data)
        # Wrap containers for attribute-style access and carry the fabric
        # run() metadata through.
        if isinstance(result, list):
            result = _AttributeList(result)
        elif isinstance(result, dict):
            result = _AttributeDict(result)
        result.succeeded = command_result.succeeded
        result.failed = command_result.failed
        result.return_code = command_result.return_code
        return result
    elif output in ['base', 'table', 'silent', 'csv', 'yaml', 'json']:
        return command_result
    else:
        raise ValueError('Invalid output type \'{}\' has provided to get '
                         'hammer output.'.format(output))
def get_template_context(self):
    """Extend the parent template context with the remote SSH key
    fingerprint, Joyent account and mail settings."""
    fingerprint = run("ssh-keygen -l -f ~/.ssh/id_rsa.pub | awk '{print $2}' | tr -d '\n'")
    if not fingerprint:
        raise Exception("Couldn't get key id")
    context = super(Backups, self).get_template_context()
    context['account'] = env.joyent_account
    context['key_id'] = fingerprint
    context['mail'] = env.get('mail', {})
    context['mail']['subject'] = '{0} {1} backups'.format(env.host_string, self.bucket)
    return context
def current_role():
    """Return the role name whose host list matches ``env.host_string``.

    Roles named 'all*' are skipped; raises ValueError when no role's hosts
    contain the current host string.
    """
    host_string = env.get('host_string')
    if host_string:
        for role_name, role_def in env['roledefs'].items():
            # 'all*' pseudo-roles never identify a single host.
            if role_name.startswith('all'):
                continue
            if any(role_host in host_string for role_host in role_def['hosts']):
                return role_name
    raise ValueError(
        "Host '{}' not found in role definitions.".format(host_string))
def deploy_static():
    """Collect static files and optionally run offline compression."""
    require('path')

    def _manage_py(subcommand):
        # Prefer the freshly-uploaded release; fall back to the 'current'
        # symlink when env has no 'release' key (KeyError from %-format).
        try:
            run_or_sudo('%(path)s/venv/bin/python %(path)s/releases/%(release)s/manage.py ' % env + subcommand)
        except KeyError:
            run_or_sudo('%(path)s/venv/bin/python %(path)s/releases/current/manage.py ' % env + subcommand)

    _manage_py('collectstatic --noinput')
    if env.get('compress_offline', True):
        _manage_py('compress')
def install():
    """
    Install Puppet and its configs without any agent or master.
    """
    sudo('apt-get update -qq')
    # rubygems may be unavailable (merged into ruby on newer Ubuntu);
    # warn_only keeps the run going either way.
    # NOTE(review): reconstructed nesting - confirm whether the ruby install
    # below was also meant to be inside the warn_only block.
    with settings(warn_only=True):
        sudo('apt-get -y -q install rubygems')
    sudo('apt-get -y -q install ruby ruby-dev git build-essential')
    # Gem versions are pinned from env when provided (None installs latest).
    puppet_version = env.get('loom_puppet_version')
    sudo(_gem_install('puppet', version=puppet_version))
    librarian_version = env.get('loom_librarian_version')
    sudo(_gem_install('librarian-puppet', version=librarian_version))
    # http://docs.puppetlabs.com/guides/installation.html
    sudo('puppet resource group puppet ensure=present')
    sudo(
        "puppet resource user puppet ensure=present gid=puppet shell='/sbin/nologin'"
    )
    execute(update_configs)
def template_to_string(filename, context=None):
    """Render *filename* through Jinja2 and return the resulting text.

    Templates are looked up in order: deploy-local templates, the
    platform-specific directory, the base directory, then the templates
    root.
    """
    search_paths = (
        os.path.join(env.deploy_path, 'templates'),
        os.path.join(env.configs_dir, 'templates',
                     env.get('current_platform', 'base')),
        os.path.join(env.configs_dir, 'templates', 'base'),
        os.path.join(env.configs_dir, 'templates'),
    )
    jinja_env = Environment(loader=FileSystemLoader(search_paths))
    return jinja_env.get_template(filename).render(**get_context(context))
def created_key(self, pub_key_path):
    """Upload the remote public key at *pub_key_path* to the Joyent data
    center, registered under a name the operator is prompted for."""
    key_name = raw_input('Enter your ssh key name: ')
    key_id = '/%s/keys/%s' % (env.joyent_account, key_name)
    sdc = DataCenter(key_id=key_id,
                     allow_agent=env.get('allow_agent', False))
    with tempfile.TemporaryFile() as tmp:
        # Fetch the remote public key into a scratch file, then read it
        # back so it can be pushed to the data center API.
        get(pub_key_path, tmp)
        tmp.seek(0)
        key_material = tmp.read()
        sdc.add_key(env.host_string, key_material)
def set_system_time(timezone=None):
    """Set timezone and install ``ntp`` to keep time accurate."""
    zone = timezone or env.get('timezone') or '/usr/share/zoneinfo/UTC'
    # set timezone
    sudo('cp %s /etc/localtime' % zone)
    # install NTP
    sudo('apt-get -yq install ntp')
def collect_static_files(self):
    """Collect Django static files, running the optional
    ``before``/``after`` hooks when they are defined in ``env``."""
    if 'before_collect_static_files' in env:
        env.before_collect_static_files(
            env, *self.task_args, **self.task_kwargs)
    print(green('\nCollecting static files.'))
    use_links = env.get('collect_static_link', True)
    commands.collect_static(
        self.virtualenv_path, self.source_path, use_links)
    if 'after_collect_static_files' in env:
        env.after_collect_static_files(
            env, *self.task_args, **self.task_kwargs)
def raid_monitoring(email=None):
    """Configure monitoring of our RAID-1 field. If anything goes wrong,
    send an email!"""
    notify_address = email or env.get('email') or err('env.email must be set')
    # enable email notifications from mdadm raid monitor
    append('/etc/mdadm/mdadm.conf',
           'MAILADDR %s' % notify_address,
           use_sudo=True)
    # enable email notification for SMART disk monitoring
    sudo('apt-get -yq install smartmontools')
    uncomment('/etc/default/smartmontools', '#start_smartd=yes',
              use_sudo=True)
def pip_requirements():
    """
    Install project requirements using PIP into a Python virtual environment.
    """
    require(
        "virtualenv_path",
        "requirements_path",
        "http_proxy",
        "https_proxy",
        "sudo_user",
    )
    cmd = "pip install --quiet --requirement %s" % env.requirements_path
    # append packages url if specified
    packages_url = env.get("packages_url")
    if packages_url is not None:
        cmd = "%s -f %s" % (cmd, packages_url)
    with context_managers.proxy(env.http_proxy, env.https_proxy):
        with context_managers.virtualenv(env.virtualenv_path):
            sudo(cmd, user=env.sudo_user)
def distro_info(): """Task which figures out the distro information based on the /etc/redhat-release file A ``(distro, major_version)`` tuple is returned if called as a function. For RHEL X.Y.Z it will return ``('rhel', X)``. For Fedora X it will return ``('fedora', X)``. Be aware that the major_version is an integer. """ # Create/manage host cache cache = env.get('distro_info_cache') host = env['host'] if cache is None: cache = env['distro_info_cache'] = {} if host not in cache: # Grab the information and store on cache release_info = run('cat /etc/redhat-release', quiet=True) if release_info.failed: print('Failed to read /etc/redhat-release file') sys.exit(1) # Discover the distro if release_info.startswith('Red Hat Enterprise Linux'): distro = 'rhel' elif release_info.startswith('Fedora'): distro = 'fedora' else: distro = None # Discover the version match = re.search(r' ([0-9.]+) ', release_info) if match is not None: parts = match.group(1).split('.') # extract the major version major_version = int(parts[0]) # extract the minor version if len(parts) > 1: minor_version = int(parts[1]) else: minor_version = None else: major_version = minor_version = None if distro is None or major_version is None: print('Was not possible to fetch distro information') sys.exit(1) cache[host] = distro, major_version, minor_version distro, major_version, minor_version = cache[host] print('{0} {1} {2}'.format(distro, major_version, minor_version)) return distro, major_version, minor_version
def __init__(self, cmd_prefix=None, default_bin=None, base_url=None,
             tls=None, use_sudo=None, debug=None):
    """Build a docker command-line client.

    Every option left as ``None`` is resolved from the Fabric ``env``
    (``docker_base_url``, ``docker_tls``, ``docker_cli_prefix``,
    ``docker_cli_bin``, ``docker_cli_sudo``, ``docker_cli_debug``);
    an explicit argument always wins over the env setting.
    """
    super(DockerCliClient, self).__init__()
    base_url = base_url or env.get('docker_base_url')
    if base_url:
        cmd_args = ['-H {0}'.format(base_url)]
    else:
        cmd_args = []
    # ``tls is None`` means "not specified" — fall back to env;
    # an explicit False disables TLS regardless of env.
    if tls or (tls is None and env.get('docker_tls')):
        cmd_args.append('--tls')
    self._out = DockerCommandLineOutput(
        cmd_prefix or env.get('docker_cli_prefix'),
        default_bin or env.get('docker_cli_bin', 'docker'),
        cmd_args or None)
    # Same tri-state logic for sudo: explicit arg > env setting.
    if use_sudo or (use_sudo is None and env.get('docker_cli_sudo')):
        self._call_method = sudo
    else:
        self._call_method = run
    # Quiet unless debugging was requested explicitly or via env.
    self._quiet = not (debug or (debug is None and
                                 env.get('docker_cli_debug')))
    self.api_version = None
    # Probe the daemon once so later calls can adapt to its API version.
    self._update_api_version()
def download_code(shortname=None, prod_user=None, svn_params=None,
                  svn_url=None, svn_repo=None, svn_dir=None):
    """Pull project code from code repository."""
    # Resolve the two base options first: the svn defaults below are
    # interpolated from them.
    opts = dict(
        shortname=shortname or env.get('shortname'),
        prod_user=prod_user or env.get('prod_user'),
    )
    opts['svn_params'] = (svn_params or env.get('svn_params')
                         or '--force --no-auth-cache')
    opts['svn_url'] = (svn_url or env.get('svn_url')
                       or 'https://niteoweb.repositoryhosting.com/svn')
    opts['svn_repo'] = (svn_repo or env.get('svn_repo')
                        or 'niteoweb_%(shortname)s' % opts)
    opts['svn_dir'] = (svn_dir or env.get('svn_dir')
                       or 'niteoweb.%(shortname)s/trunk' % opts)
    with cd('/home/%(prod_user)s' % opts):
        sudo(
            'svn export %(svn_params)s %(svn_url)s/%(svn_repo)s/%(svn_dir)s ./' % opts,
            user=opts['prod_user'])
def deploy_ref(deployment_name, ref):
    """Deploy git ref *ref* of the codebase to *deployment_name*.

    Fetches, fast-forward-checks, checks out the ref, reinstalls
    requirements and the formpack dependency, runs migrations, collects
    static files and restarts the services.
    """
    setup_env(deployment_name)
    with cd(env.kc_path):
        run("git fetch --all --tags")
        # Make sure we're not moving to an older codebase
        git_output = run_no_pty(
            'git rev-list {}..HEAD --count 2>&1'.format(ref))
        if int(git_output) > 0:
            raise Exception("The server's HEAD is already in front of the "
                            "commit to be deployed.")
        # We want to check out a specific commit, but this does leave the HEAD
        # detached. Perhaps consider using `git reset`.
        run('git checkout {}'.format(ref))
        # Report if the working directory is unclean.
        git_output = run_no_pty('git status --porcelain')
        if len(git_output):
            run('git status')
            print('WARNING: The working directory is unclean. See above.')
        deploy_template(env)
        # Purge stale bytecode and now-empty directories left by the checkout.
        run('find . -name "*.pyc" -exec rm -rf {} \;')
        run('find . -type d -empty -delete')
    # numpy pip install from requirements file fails
    with kobo_workon(env.kc_virtualenv_name):
        run("pip install numpy")
        run("pip install --upgrade -r %s" % env.pip_requirements_file)
    formpack_path = os.path.join(env.home, 'formpack')
    formpack_branch = env.get('formpack_branch', False)
    # Clone formpack only if it is not already present.
    run("[ -d {fp} ] || git clone https://github.com/kobotoolbox/formpack.git "
        "{fp}".format(fp=formpack_path))
    with cd(formpack_path):
        with kobo_workon(env.kc_virtualenv_name):
            if formpack_branch:
                run("git checkout {b} && git pull origin {b}".format(
                    b=formpack_branch))
            run("python setup.py develop")
    # Touch a marker file so the deployment time is visible from the web root.
    with cd(os.path.join(env.kc_path, "onadata", "static")):
        run("date > LAST_UPDATE.txt")
    with cd(env.kc_path):
        with kobo_workon(env.kc_virtualenv_name):
            run("python manage.py syncdb")
            run("python manage.py migrate")
            run("python manage.py collectstatic --noinput")
    run("sudo restart kc_celeryd")
    run("sudo restart uwsgi")
def circus_pkg(update=False): """ Installs packages relatives to circus """ # install ubuntu ppa for libzmq-dev if ubuntu <= 10.04 if fabtools.system.distrib_id( ) == 'Ubuntu' and fabtools.system.distrib_release() == '10.04': fabtools.require.deb.packages(['python-software-properties'], update=update) fabtools.require.deb.ppa('ppa:chris-lea/zeromq') fabtools.require.deb.ppa('ppa:chris-lea/libpgm') fabtools.require.deb.packages(['libzmq-dev', 'libevent-dev'], update=update) # not used anymore installed in venv ! fabtools.require.python.install(env.get('circus_package_name', 'circus'), use_sudo=True, upgrade=update) if 'no_circus_web' not in env or not env.no_circus_web: fabtools.require.python.install('circus-web', use_sudo=True, upgrade=update) fabtools.require.python.install('gevent', use_sudo=True, upgrade=update) # install circus backend sets in fabfile if 'circus_backend' in env: fabtools.require.python.install(env.circus_backend, use_sudo=True, upgrade=update) # base configuration file for circus fabtools.files.upload_template('circus.ini.tpl', os.path.join(env.remote_home, '.circus.ini'), context=env, template_dir=os.path.join( env.lib_path, 'templates'), use_sudo=True, user=env.remote_owner, chown=True, mode='644', use_jinja=True) # root directory for circus applications configuration fabtools.require.files.directory(path=os.path.join(env.remote_home, '.circus.d'), use_sudo=True, owner=env.remote_owner, group=env.remote_group, mode='750')
def clone_or_update(target, repo):
    """Ensure that a directory contains an up-to-date git clone.

    `target` is the directory where the clone should live
    `repo` is the git URL to clone if needed

    When the clone already exists only ``git fetch`` is run; otherwise the
    directory is created and a fresh clone made. (The old docstring
    mentioned a ``branch`` parameter that this function never had.)
    """
    already_cloned = remotefile.exists(
        target + '/.git', verbose=env.get('verbose', False))
    if already_cloned:
        with cd(target):
            run('git fetch')
    else:
        run('mkdir -p %s' % target)
        run('git clone %s %s' % (repo, target))
def manage(cmd=''):
    """Run a management command in the app directory."""
    # Keep prompting until the operator supplies a non-empty command.
    while not cmd:
        sys.stdout.write(cyan("Command to run: "))
        cmd = raw_input().strip()
    sudo(
        '. /srv/myapp/conf/environ && /srv/myapp/env/bin/python '
        '/srv/myapp/releases/{release:s}/manage.py {cmd:s}'.format(
            cmd=cmd, release=env.get('release', 'current')),
        user=env.server_owner)
def template_context(vars):
    """
    Compiles a list of variables and their values from Fabric's env into a
    dictionary which can be used to render a template. Any values that
    aren't present in env are prompted for.
    """
    return dict(
        (var, env.get(var) or prompt('Enter settings var for %r:' % var))
        for var in vars)
def render_settings():
    "Render Django settings file"
    require('target', 'static_root')
    # Map of env key -> prompt message for every secret the settings need.
    secrets = {
        'db_password': '******',
        'postmark_api_key': 'Postmark API Key: ',
        'secret_key': 'Project-wide SECRET_KEY: ',
    }
    for key, message in secrets.items():
        # Only prompt for secrets that are not already set in env.
        if not env.get(key, False):
            prompt(message, key=key)
    return render_template(join(env.template_dir, 'settings.py'), env)
def load_config(path):
    """Load the configuration file at *path*, at most once per session."""
    if env.get('load_config'):
        # Already loaded by an earlier task in this session.
        log("The configuration has been loaded in the task(" + env.command + ")")
        return
    if not is_config_file(path):
        abort("Please specify the configuration file.")
    env_config = EnvConfig()
    env_config.load_config(path)
    env_config.pre_init()
    env['load_config'] = True
    log("load config '%s' successful!" % path)
def install_sendmail(email=None):
    """Prepare a localhost SMTP server for sending out system notifications
    to admins."""
    admin_email = email or env.get('email') or err('env.email must be set')
    # install sendmail
    sudo(
        'apt-get -yq install sendmail sendmail-base sendmail-bin sendmail-cf sensible-mda rmail'
    )
    # all email should be sent to maintenance email
    append('/etc/aliases', 'root: %s' % admin_email, use_sudo=True)
def copy_needed_files():
    """
    Copy needed file from local to remote nodes.
    """
    print(_yellow("--COPYING NEEDED FILES--"))
    # Copy encrypted chef data-bag secret to the default secret file
    # location on the server.
    if env.get("encrypted_data_bag_secret_key_path", None):
        if not exists("/etc/chef"):
            sudo('mkdir /etc/chef')
        # 0o600 == 0600: owner-only permissions for the secret. The
        # explicit ``0o`` prefix is valid on Python 2.6+ AND Python 3,
        # whereas the old ``0600`` literal is a syntax error on Python 3.
        put(env.encrypted_data_bag_secret_key_path,
            "/etc/chef/encrypted_data_bag_secret",
            mode=0o600)
def _get_module_obj(self, parent=None, name=None, depth=None):
    """Delegate module-object creation to the base class with the nesting
    depth incremented, recording this server's role name mapping when a
    ``config_section`` is declared."""
    # None (or 0) means "top level"; every call adds one level.
    depth = (depth or 0) + 1
    obj = super(BaseServer, self)._get_module_obj(
        parent=parent, name=name, depth=depth)
    if hasattr(self, 'config_section'):
        if not env.get('role_name_map'):
            env.role_name_map = {}
        env.role_name_map[self.config_section] = obj.__name__
    return obj
def upload_zodb(prod_user=None, path=None):
    """Upload ZODB part of Zope's data to the server.

    Aborts unless ``env.confirm`` is set or the operator confirms the
    destructive overwrite interactively.
    """
    opts = dict(prod_user=prod_user or env.get('prod_user'),
                path=path or env.get('path') or os.getcwd())
    # BUGFIX: previously the return value of confirm() was discarded, so
    # the destructive upload went ahead even when the operator answered
    # "no". Now we bail out on a negative answer (matching the sibling
    # upload_zodb implementation in this file's codebase).
    if not env.get('confirm'):
        if not confirm("This will destroy the current Data.fs file on the server. "
                       "Are you sure you want to continue?"):
            return
    with cd('/home/%(prod_user)s/var/filestorage' % opts):
        # remove temporary BLOBs from previous uploads
        if exists('/tmp/Data.fs'):
            sudo('rm -rf /tmp/Data.fs')
        # upload Data.fs to server and set production user as its owner
        upload_template(filename='%(path)s/var/filestorage/Data.fs' % opts,
                        destination='Data.fs',
                        use_sudo=True)
        sudo('chown -R %(prod_user)s:%(prod_user)s Data.fs' % opts)
def upload_zodb(prod_user=None, path=None, zodb_files=None):
    """Upload ZODB part of Zope's data to the server.

    Each existing database file is kept as a ``<name>.bak`` backup before
    being replaced; ownership is handed to the production user.
    """
    opts = dict(
        prod_user=prod_user or env.get('prod_user'),
        path=path or env.get('path') or os.getcwd(),
    )
    files_to_upload = zodb_files or env.get('zodb_files') or ['Data.fs']
    proceed = env.get('confirm') or confirm(
        "This will destroy the current zodb file(s) on the server. "
        "Are you sure you want to continue?")
    if not proceed:
        return
    with cd('/home/%(prod_user)s/var/filestorage' % opts):
        for zodb_file in files_to_upload:
            opts['filename'] = zodb_file
            # backup current database
            if exists(zodb_file):
                # remove the previous backup, then rotate the live file
                sudo('rm -rf %(filename)s.bak' % opts)
                sudo('mv %(filename)s %(filename)s.bak' % opts)
            # remove temporary zodb file(s) from previous uploads
            if exists('/tmp/%(filename)s' % opts):
                sudo('rm -rf /tmp/%(filename)s' % opts)
            # upload zodb file(s) to server; production user becomes owner
            upload_template(
                filename='%(path)s/var/filestorage/%(filename)s' % opts,
                destination=zodb_file,
                use_sudo=True)
            sudo('chown -R %(prod_user)s:%(prod_user)s %(filename)s' % opts)
            if exists('/home/%(prod_user)s/var/filestorage/%(filename)s.bak' % opts):
                sudo('chown -R %(prod_user)s:%(prod_user)s %(filename)s.bak' % opts)
def config_get_domain():
    """Populate ``env.domains``/``env.domain`` from the target config or an
    interactive prompt.

    Returns False immediately (leaving env untouched) when domains are
    already configured.
    """
    # DOMAIN for vhosts
    if env.get('domains'):
        return False
    if target.get('domains'):
        env.domains = target.get('domains')
    else:
        env.domains = prompt('Set a list of domain names (comma separated) :')
        env.domains = env.domains.split(',')
    # BUGFIX: the old loop ``for i in env.domains: i = i.strip().lower()``
    # only rebound its loop variable, discarding the normalized values.
    # Actually store the cleaned-up domain names.
    env.domains = [d.strip().lower() for d in env.domains]
    env.domain = env.domains[0]