def clean_master(): with settings(warn_only = True), hide('everything'): result = run("killall master") if result.failed: print red("Could not kill master %s!" % env.host) else: print green("Killed master %s!" % env.host)
def _standby_clone(): """ With "node1" server running, we want to use the clone standby command in repmgr to copy over the entire PostgreSQL database cluster onto the "node2" server. """ # manualy: # $ mkdir -p /var/lib/postgresql/9.1/testscluster/ # $ rsync -avz --rsh='ssh -p2222' [email protected]:/var/lib/postgresql/9.1/testscluster/ /var/lib/postgresql/9.1/testscluster/ with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True): puts(green('Start cloning the master')) repmgr_clone_command = 'repmgr -D %(slave_pgdata_path)s -d %(sync_db)s -p %(cluster_port)s -U %(sync_user)s -R postgres --verbose standby clone %(pgmaster_ip)s' % env puts(green(repmgr_clone_command)) puts("-" * 40) res = sudo(repmgr_clone_command, user='******') if 'Can not connect to the remote host' in res or 'Connection to database failed' in res: puts("-" * 40) puts(green(repmgr_clone_command)) puts("-" * 40) puts("Master server is %s reachable." % red("NOT")) puts("%s you can try to CLONE the slave manually [%s]:" % (green("BUT"), red("at your own risk"))) puts("On the slave server:") puts("$ sudo -u postgres rsync -avz --rsh='ssh -p%(master_ssh_port)s' postgres@%(pgmaster_ip)s:%(master_pgdata_path)s %(slave_pgdata_path)s --exclude=pg_xlog* --exclude=pg_control --exclude=*.pid" % env) puts("Here:") puts("$ fab <cluster_task_name> finish_configuring_slave") abort("STOP...")
def _configure_postgresql(env, delete_main_dbcluster=False):
    """
    This method is intended for cleaning up the installation when
    PostgreSQL is installed from a package. Basically, when PostgreSQL
    is installed from a package, it creates a default database cluster
    and splits the config file away from the data. This method can
    delete the default database cluster that was automatically created
    when the package is installed. Deleting the main database cluster
    also has the effect of stopping the auto-start of the postmaster
    server at machine boot. The method adds all of the PostgreSQL
    commands to the PATH.
    """
    pg_ver = sudo("dpkg -s postgresql | grep Version | cut -f2 -d':'")
    pg_ver = pg_ver.strip()[:3]  # Get first 3 chars of the version since that's all that's used for dir name
    got_ver = False
    # Keep asking the operator until we obtain a parsable major version
    # (e.g. 9.1 -- the float() call is only a validity check).
    while(not got_ver):
        try:
            pg_ver = float(pg_ver)
            got_ver = True
        except Exception:
            print(red("Problems trying to figure out PostgreSQL version."))
            pg_ver = raw_input(red("Enter the correct one (eg, 9.1; not 9.1.3): "))
    if delete_main_dbcluster:
        # NOTE(review): the run-as user looks redacted ('******') --
        # presumably 'postgres'; confirm before relying on this.
        env.safe_sudo('pg_dropcluster --stop %s main' % pg_ver, user='******')
    # Not sure why I ever added this to gvl, doesn't seem needed. -John
    #_put_installed_file_as_user("postgresql-%s.conf" % env.postgres_version, "/etc/postgresql/%s/main/postgresql.conf" % env.postgres_version, user='******')
    # Expose the version-specific PostgreSQL binaries on everyone's PATH.
    exp = "export PATH=/usr/lib/postgresql/%s/bin:$PATH" % pg_ver
    if not contains('/etc/bash.bashrc', exp):
        append('/etc/bash.bashrc', exp, use_sudo=True)
def load_db(filename=None):
    """Loads a dump into the database"""
    env.box_dump_filename = filename
    if not filename:
        abort(red('Dump missing. "fab server.load_db:filename"', bold=True))
    if not os.path.exists(filename):
        abort(red('"%(box_dump_filename)s" does not exist.' % env, bold=True))
    # NOTE(review): the %(...)s placeholders in the commands below are NOT
    # interpolated here -- this assumes the surrounding tooling's run/confirm
    # wrappers substitute from ``env`` (fh-fablib style). Confirm; with plain
    # fabric the literal template would be executed.
    if not confirm(
            'Completely replace the remote database'
            ' "%(box_database)s" (if it exists)?',
            default=False):
        return
    # Drop and recreate the remote database, then stream the dump through ssh.
    run(
        'psql -c "DROP DATABASE IF EXISTS %(box_database)s"')
    run(
        'createdb %(box_database)s --encoding=UTF8 --template=template0'
        ' --owner=%(box_database)s')
    run_local(
        'cat %(box_dump_filename)s |'
        'ssh %(host_string)s "source .profile && psql %(box_database)s"')
    # Hand ownership of restored objects over to the project role.
    run(
        'psql %(box_database)s -c "REASSIGN OWNED BY admin '
        ' TO %(box_database)s"')
def check_mysql_connection(user, password, host, database, port):
    """
    Check that the MySQL connection works.

    Connects with the supplied credentials, runs ``SELECT VERSION()``,
    and terminates the process when no connection can be established.
    Always returns False (kept for interface compatibility).
    """
    # MySQLdb is installed from the pip file so we don't want to
    # import it until now.
    import MySQLdb
    puts(green("Checking MySQL connection and packages"))
    try:
        dbase = MySQLdb.connect(
            host=host, port=int(port), user=user, passwd=password, db=database)
        cursor = dbase.cursor()
        cursor.execute("SELECT VERSION()")
        results = cursor.fetchone()
        # Check if anything at all is returned
        if results:
            puts(green("MySQL connection successful."))
    except Exception:
        # BUG FIX: was a bare ``except:`` which also swallowed SystemExit
        # and KeyboardInterrupt; narrowed to Exception.
        puts(red("ERROR IN CONNECTION"))
        puts(red("Install cannot continue without valid database connection."))
        puts(red("Please verify your database credentials and try again."))
        exit()
    puts(green("You are connected to MySQL Server"))
    return False
def XXXX_deploy():
    """ Deploy the packages in the deployment machines """
    print(green("Installing packages at %s" % str(env.host_string)))
    if confirm(red('Install the packages at the %s?' % (env.host_string)), default=False):
        print(yellow("... stopping XXXX"))
        # Stop and remove the running service before replacing the packages.
        if _exists('/etc/init.d/XXXX'):
            sudo('service XXXX stop')
            sudo('rm -f /etc/init.d/XXXX')
        with cd(env.admin.prefix):
            print(yellow("... cleaning up old RPMs"))
            if not _exists('tmp'):
                run('mkdir tmp')
            run('rm -rf tmp/*')
        directory = os.path.join(env.admin.prefix, 'tmp')
        with cd(directory):
            print(yellow("... uploading RPMs"))
            for f in env.packages.rpms:
                # NOTE(review): the *local* source path is built from the
                # remote prefix (env.admin.prefix) -- this assumes local and
                # remote directory layouts mirror each other; confirm.
                put(os.path.join(directory, f), '.')
            print(yellow("... installing software"))
            sudo('yum install -R 2 -q -y --nogpgcheck *.rpm')
        print(red("... XXXX is STOPPED at %s!" % env.host_string))
def deploy(project="", restart=False):
    """Deploy one project, or every project in PROJECTS when none is given.

    :param project: project name; empty string means "all of PROJECTS".
    :param restart: kept for interface compatibility (unused here; the
        jetty restart is driven interactively by confirm()).
    """
    # CONSISTENCY FIX: the all-projects and single-project branches were
    # copy-pasted duplicates, and the single-project "proceed" prompt was
    # missing the red() colouring. Unified into one loop.
    targets = PROJECTS if project == "" else [project]
    for p in targets:
        print(p)
        prepare_deploy(p)
        if confirm(red("Do you wish to proceed?")):
            upload(p)
            deploy_shared_static()
            # Restart jetty
            if confirm(red("Do you wish to restart jetty?")):
                sudo('service jetty restart')
def set_project(project=None):
    """
    Checks if project is set, and if not it will prompt you to enter
    a valid project
    """
    if 'project' in env.params:
        return
    config_folder = "%s/config" % (env.home_path)
    # A project is any sub-directory of the config folder.
    available_projects = [
        candidate for candidate in os.listdir(config_folder)
        if os.path.isdir(os.path.join(config_folder, candidate))
    ]
    if not available_projects:
        abort(red("No projects available."))
    if not project:
        print(green("Available projects :"))
        print("")
        for candidate in available_projects:
            print("- %s" % candidate)
        print("")
        project = prompt('Enter project name : ', default=available_projects[0])
    if project not in available_projects:
        # Invalid choice: complain and start over.
        print(red("`%s` is not a valid project !" % project))
        set_project()
    else:
        env.params['project'] = project
def set_environment(environment=None):
    """
    Checks if environment is set, and if not it will prompt you to enter
    a valid environment
    """
    if 'environment' not in env.params:
        if len(env.environments) == 0:
            # BUG FIX: message previously read "No environmens available."
            abort(red("No environments available."))
        if not environment:
            print(green("Available environments :"))
            print("")
            for environment in env.environments:
                print("- %s" % environment)
            print("")
            environment = prompt('Enter environment : ', default=env.environments[0])
        if environment not in env.environments:
            # Invalid choice: complain and start over.
            print("")
            print(red("`%s` is not a valid environment !" % environment))
            set_environment()
        else:
            # Set environment settings
            env.params['environment'] = environment
            config.environment()
def get_hosts_settings():
    """Load per-host settings, layering file defaults over base_settings.

    Returns the hosts dict after validating that every host in
    ``env.hosts`` has an entry and defines all ``required_settings``.
    Aborts with an error message otherwise.
    """
    # Load all the host settings.
    # BUG FIX: the file handle was previously left open; use a context
    # manager so it is closed deterministically.
    with open(hosts_file) as fh:
        hosts = json.load(fh)
    # Pop the default settings shared by all hosts ('_' entry)
    default_settings = hosts.pop('_', {})
    # Layering order: base_settings < file defaults < per-host overrides
    for host in hosts:
        base = base_settings.copy()
        base.update(default_settings)
        base.update(hosts[host])
        hosts[host] = base
    # Validate all hosts have an entry in the .hosts file
    for target in env.hosts:
        if target not in hosts:
            abort(red('Error: No settings have been defined for the "{0}" host'.format(target)))
        settings = hosts[target]
        for key in required_settings:
            if not settings[key]:
                abort(red('Error: The setting "{0}" is not defined for "{1}" host'.format(key, target)))
    return hosts
def pip_install(requirements):
    """Install some pip requirements

    ``requirements`` may be a single requirement string or a sequence of
    them. Returns True on success, False when pip is missing or any
    requirement fails to install.
    """
    # Accept a single requirement string as well as a sequence.
    if isinstance(requirements, basestring):
        requirements = (requirements,)
    pip_path = path(local("which pip", capture=True))
    if not pip_path.exists():
        print pip_path
        print red("Cannot find pip!")
        return False
    use_sudo = False
    if pip_path.get_owner() == "root":
        # If pip is owned by root, we are gonna need sudo
        use_sudo = True
    print "Installing python requirements..."
    with lcd(conf.PROJECT_ROOT):
        for req in requirements:
            if use_sudo:
                result = local("sudo pip install -q %s" % req)
            else:
                result = local("pip install -q %s" % req)
            # Stop at the first failing requirement.
            if not result.succeeded:
                print red("Failed to install %s" % req)
                return False
    return True
def _put_as_user(self, source, destination):
    """Upload *source* to *destination* via sudo, retrying on failure.

    Retries up to ``self.transfer_retries`` times, adjusting ownership
    through ``self._chown`` after each transfer. Raises an Exception when
    every attempt fails.
    """
    for attempt in range(self.transfer_retries):
        retry = False
        try:
            with settings(show('everything'), warn_only=False):
                pout = put(source, destination, use_sudo=True)
                self._chown(destination)
                if pout.failed:
                    raise Exception("Failed transfer: %s" % (pout))
                else:
                    basename = os.path.basename(destination)
                    print(yellow("Sent file %s" % (basename)))
        except BaseException as e:
            retry = True
            print(red("Failed to upload %s on attempt %d" % (source, attempt + 1)))
            print(red(e))
        except:
            # Should never get here, delete this block when more confident
            # (NOTE(review): this bare except is unreachable -- the
            # BaseException handler above already catches everything).
            retry = True
            print(red("Failed to upload %s on attempt %d with unknown exception" % (source, attempt + 1)))
        finally:
            # Success path: returning from ``finally`` ends the retry loop.
            if not retry:
                return
    e = Exception("Failed to transfer file %s, exiting..." % source)
    print(red(e))
    raise e
def deploy():
    """Pull the latest master into DEPLOY_PATH and reboot the server.

    Pre-flight checks require both the local (when REQUIRE_CLEAN) and the
    remote working trees to be clean before pulling.
    """
    print green("Begining update...")
    print ""
    print blue('Checking pre-requisites...')
    print cyan("Checking for local changes...")
    has_changes = local("git status --porcelain", capture=True)
    if REQUIRE_CLEAN and has_changes:
        abort(red("Your working directory is not clean."))
    print cyan("Ensuring remote working area is clean...")
    # Operate on the remote checkout without cd-ing into it.
    GIT_CMD = "git --work-tree={0} --git-dir={0}/.git".format(DEPLOY_PATH)
    has_changes = run(GIT_CMD + " status --porcelain")
    if has_changes:
        abort(red("Remote working directory is not clean."))
    print blue("Finished checking pre-requisites.")
    print ""
    print green("Starting deployment...")
    print ""
    print green("Updating environment...")
    with cd(DEPLOY_PATH):
        print cyan("Pulling from master")
        run('git pull')
        # Reboot so all services pick up the new code.
        sudo('reboot')
def migrate_settings(target):
    # Migrate this server's settings to another environment
    config = get_config()
    servers = get_roles()
    host = get_host()
    sudoer = servers[host]['sudo_user']
    environment = servers[host]['environment']
    # NOTE(review): settings_url is assigned but never used here -- confirm
    # whether it is vestigial.
    settings_url = config['Application']['WordPress']['settings']
    puts(cyan('It is recommended you export settings from %s first.' % environment))
    if not target:
        sys.exit(red('How am I supposed to migrate if I don\'t know to go?'))
    with settings(sudo_user=sudoer), cd('/tmp/wp-settings'):
        # Best-effort fetch: failures are reported but do not abort.
        try:
            sudo('git fetch origin')
        except:
            puts(red('The origin server could not be reached.'))
        if files.exists('.git/refs/heads/%s' % target):
            # Target branch exists locally: merge the source environment in.
            sudo('git checkout %s' % target)
            try:
                sudo('git merge origin/%s %s' % (environment, target))
            except:
                puts(red('The origin server could not be reached.'))
        else:
            # First migration to this target: branch off the source env.
            sudo('git checkout -b %s origin/%s' % (target, environment))
        try:
            sudo('git push origin %s' % target)
        except SystemExit:
            puts(red('The origin server could not be reached.'))
        # Leave the checkout on this server's own environment branch.
        sudo('git checkout %s' % environment)
def syntax_check():
    """Runs flake8 against the codebase."""
    with fab_settings(warn_only=True):
        for file_type, pattern in settings.SYNTAX_CHECK.items():
            found_problem = False
            # egrep exits 1 when nothing matches; accept that exit code
            # so fabric does not treat "no findings" as a failure.
            if 1 not in env.ok_ret_codes:
                env.ok_ret_codes.append(1)
            listing = local(
                'find -name "{}" -print'.format(file_type),
                capture=True,
            )
            for candidate in listing.split():
                if any(excl in candidate for excl in settings.SYNTAX_CHECK_EXCLUDES):
                    continue
                hits = local('egrep -i -n "{0}" {1}'.format(
                    pattern, candidate), capture=True)
                if hits:
                    warn(red("Syntax check found in '{0}': {1}".format(
                        candidate, hits)))
                    found_problem = True
            if found_problem:
                abort(red('There have been errors. Please fix them and run'
                          ' the check again.'))
            else:
                puts(green('Syntax check found no errors. Very good!'))
def install_extension_from_wp(type, name, version):
    """Install or update a WordPress extension via wp-cli.

    :param type: extension kind, 'plugin' or 'theme'.
    :param name: extension slug on wordpress.org.
    :param version: 'master' for the latest release, or a pinned version.
    """
    if version == 'master':
        if is_extension_installed(type, name):
            sudo('wp %s update %s --allow-root' % (type, name))
        else:
            install_cmd = sudo('wp %s install %s --allow-root' % (type, name))
            if install_cmd.return_code == 0:
                # BUG FIX: previously printed the raw template
                # '%s %s installed successfully.' without interpolation.
                puts(green('%s %s installed successfully.' % (type, name)))
            else:
                puts(red('%s %s could not install.' % (type, name)))
    else:
        if not is_extension_installed(type, name) or version != get_extension_version(type, name):
            puts(cyan('Plugin not installed or installed at the incorrect version, reinstalling'))
            uninstall_extension(type, name)
            # Build the download URL for the pinned version.
            if type == 'plugin':
                url = 'http://downloads.wordpress.org/plugin/%s.%s.zip' % (name, version)
            elif type == 'theme':
                url = 'http://wordpress.org/themes/download/%s.%s.zip' % (name, version)
            try:
                install_cmd = sudo('wp %s install %s --allow-root' % (type, url))
                if install_cmd.return_code == 0:
                    puts(green('%s %s installed successfully.' % (type, name)))
                else:
                    puts(red('Failed to update %s' % name))
            except SystemExit:
                puts(red('Failed to update %s' % name))
def _check_pyenv(py_versions):
    """
    Check that pyenv and pyenv-virtualenv are installed and set up the
    compilers/virtual envs in case they do not exist
    """
    # `which` exits non-zero when the binary is missing.
    if os.system('which pyenv'):
        print red("Can't find pyenv!")
        print yellow("Are you sure you have installed it?")
        sys.exit(-2)
    elif os.system('which pyenv-virtualenv'):
        print red("Can't find pyenv-virtualenv!")
        print yellow("Are you sure you have installed it?")
        sys.exit(-2)
    # list available pyenv versions
    av_versions = os.listdir(os.path.join(env.pyenv_dir, 'versions'))
    for py_version in py_versions:
        # Install any requested interpreter that pyenv does not have yet.
        if py_version not in av_versions:
            print green('Installing Python {0}'.format(py_version))
            pyenv_cmd('install {0}'.format(py_version), capture=True)
        # (Re)create the build virtualenv ('echo y' auto-confirms an
        # overwrite) and install the dev requirements into it.
        local("echo \'y\' | pyenv virtualenv {0} indico-build-{0}".format(py_version))
        with pyenv_env(py_version):
            local("pip install -r requirements.dev.txt")
def check_for_wp_cli(host):
    """Validate that *host* points at an existing wp-cli binary.

    :param host: per-host settings dict carrying a 'wp-cli' path.
    :returns: True when wp-cli is configured and present; otherwise the
        process exits with an error message.
    """
    cli = host['wp-cli']
    if cli is None:
        return sys.exit(red('No wp-cli specified in config.yaml. Please add the path to wp for this server.'))
    if not files.exists(cli):
        # BUG FIX: this message referenced an undefined name ``server``,
        # which raised NameError instead of showing the error; report the
        # configured wp-cli path instead.
        return sys.exit(red('WP does not exist in the %s directory. Please install wp-cli, it\'s damn handy!' % cli))
    return True
def install_sysv_init_script(nsd, nuser, cfgfile):
    """
    Install the init script for an operational deployment of RASVAMT.
    The init script is an old System V init system.
    In the presence of a systemd-enabled system we use the update-rc.d tool
    to enable the script as part of systemd (instead of the System V
    chkconfig tool which we use instead). The script is prepared to deal
    with both tools.

    NOTE(review): the nsd, nuser and cfgfile parameters are never used in
    this body -- confirm whether they are vestigial.
    """
    with settings(user=env.AWS_SUDO_USER):
        print(red("Initialising deployment"))
        sudo('usermod -a -G {} ec2-user'.format(env.APP_USER))
        sudo('mkdir -p /etc/supervisor/')
        sudo('mkdir -p /etc/supervisor/conf.d/')
        # copy nginx and supervisor conf files
        sudo('cp {0}/fabfile/init/sysv/nginx.conf /etc/nginx/.'.
             format(APP_source_dir()))
        sudo('cp {0}/fabfile/init/sysv/rasvama.conf /etc/supervisor/conf.d/.'.
             format(APP_source_dir()))
    # create the DB
    with settings(user=env.APP_USER):
        virtualenv('cd {0}/db; python create_db.py'.format(env.APP_SRC_DIR))
    # check if nginx is running else
    print(red("Server setup and ready to deploy"))
    # Think we have success("Init scripts installed")
def sysinitstart_RASVAMT_and_check_status():
    """
    Starts the APP daemon process and checks that the server is up and
    running then it shuts down the server
    """
    # We sleep 2 here as it was found on Mac deployment to docker container
    # that the shell would exit before the APPDaemon could detach, thus
    # resulting in no startup self.
    #
    # Please replace following line with something meaningful
    # virtualenv('ngamsDaemon start -cfg {0} && sleep 2'.format(tgt_cfg))
    env.APP_INSTALL_DIR = os.path.abspath(os.path.join(home(), APP_INSTALL_DIR_NAME))
    env.APP_ROOT_DIR = os.path.abspath(os.path.join(home(), APP_ROOT_DIR_NAME))
    env.APP_SRC_DIR = os.path.abspath(os.path.join(home(), APP_SRC_DIR_NAME))
    info('Start {0} and check'.format(APP))
    start_unicorn()
    with settings(user=env.AWS_SUDO_USER):
        sudo('service nginx start')
        try:
            u = urllib2.urlopen('http://{0}/static/html/index.html'.
                                format(env.host_string))
        except urllib2.URLError:
            # BUG FIX: the red() result was previously discarded (the call
            # only returns a coloured string); print it so the failure is
            # actually reported.
            print(red("RASVAMT NOT running!"))
            return
        r = u.read()
        u.close()
        # Sanity-check the served page content.
        assert r.find('rasvamt-s-user-documentation') > -1, red("RASVAMT NOT running")
def docker_tryrun(imgname, containername=None, opts='', mounts=None, cmd='', restart=True): # mounts is a list of (from, to, canwrite) path tuples. ``from`` is relative to the project root. # Returns True if the container was effectively ran (false if it was restarted or aborted) if not mounts: mounts = [] if containername and containername in docker_ps(running_only=True): print green("%s already running" % containername) return False if containername and containername in docker_ps(running_only=False): if restart: print green("%s already exists and is stopped. Restarting!" % containername) local('docker restart %s' % containername) return True else: print red("There's a dangling container %s! That's not supposed to happen. Aborting" % containername) print "Run 'docker rm %s' to remove that container" % containername return False for from_path, to_path, canwrite in mounts: abspath = from_path opt = ' -v %s:%s' % (abspath, to_path) if not canwrite: opt += ':ro' opts += opt if containername: containername_opt = '--name %s' % containername else: containername_opt = '' local('docker run %s %s %s %s' % (opts, containername_opt, imgname, cmd)) return True
def _configure_node(configfile): """Exectutes chef-solo to apply roles and recipes to a node""" with hide('running'): print "Uploading node.json..." remote_file = '/root/{0}'.format(configfile.split("/")[-1]) # Ensure secure permissions put(configfile, remote_file, use_sudo=True, mode=400) sudo('chown root:root {0}'.format(remote_file)), sudo('mv {0} /etc/chef/node.json'.format(remote_file)), # Remove local temporary node file os.remove(configfile) # Always configure Chef Solo solo.configure() print "\n== Cooking ==\n" with settings(hide('warnings'), warn_only=True): output = sudo( 'chef-solo -l {0} -j /etc/chef/node.json'.format(env.loglevel)) if output.failed: if 'chef-solo: command not found' in output: print( colors.red( "\nFAILED: Chef Solo is not installed on this node")) print( "Type 'cook nodes:{0} deploy_chef' to install it".format( env.host)) abort("") else: print(colors.red( "\nFAILED: A problem occurred while executing chef-solo")) abort("") else: print(colors.green("\nSUCCESS: Node correctly configured"))
def export_settings():
    """Export WordPress options as JSON and push them to the settings repo."""
    data = get_settings()
    config = get_config()
    host = get_host()
    servers = get_roles()
    sudoer = servers[host]['sudo_user']
    wp = servers[host]['wordpress']
    # NOTE(review): ``role`` is not defined anywhere in this function, so
    # this call raises NameError as written. It presumably should receive
    # the per-host settings dict (servers[host]) -- confirm and fix.
    wp_cli = check_for_wp_cli(role)
    settings_url = config['Application']['WordPress']['settings']
    environment = servers[host]['environment']
    # Ensure a local clone of the settings repository exists.
    if (not files.exists('/tmp/wp-settings')):
        with cd('/tmp/'):
            sudo('git clone %s wp-settings' % settings_url)
    with cd('/tmp/wp-settings'):
        # Best-effort pull; failures are reported but do not abort.
        try:
            sudo('git pull origin %s' % environment)
        except:
            puts(red('Could not reach the origin server.'))
    # Dump every requested option as JSON into the settings working tree.
    with settings(path=wp_cli, behavior='append', sudo_user=sudoer), cd(wp):
        for d in data:
            sudo('wp option get %s --format=json > /tmp/wp-settings/%s.json --allow-root' % (d, d))
    # Commit and push the exported settings on the environment's branch.
    with settings(sudo_user=sudoer), cd('/tmp/wp-settings'):
        sudo('git config core.fileMode 0')
        if (not files.exists('.git/refs/heads/%s' % environment)):
            sudo('git checkout -b %s' % environment)
        else:
            sudo('git checkout %s' % environment)
        sudo('git add .')
        sudo('git commit -a -m "Settings update: %s"' % (datetime.date.today()))
        try:
            sudo('git push origin %s' % environment)
        except:
            puts(red('Could not communicate with origin server'))
def deploy(config, host): # add -H command to set home directory of target user new_sudo_command = "sudo -H -S -p '%(sudo_prompt)s'" print colors.green('change sudo_prefix from "%s" to "%s"' % (env['sudo_prefix'], new_sudo_command)) with settings(sudo_prefix=new_sudo_command): fixtures_dir = tempfile.mkdtemp() print colors.green('create fixtures directory: "%s"' % fixtures_dir) try: print colors.green('merge fixtures') for source_fixtures_dir in reversed(config.fixtures): dir_util.copy_tree(source_fixtures_dir, fixtures_dir) config.fixtures_path = fixtures_dir host.prepair() host.check() host.setup() except Exception, e: traceback.print_exc() print colors.red('Exception while deploing: "%s"' % e) finally:
def auto(env_name):
    """Auto migration based on the entry in the versions table.

    Repeatedly applies the lowest-numbered migration above the currently
    recorded version (prompting before each one) until no migrations in
    MIGRATIONS remain applicable.
    """
    settings = get_settings(env_name)
    setup_db(settings)
    version = PersistentVersion(settings, 'service')
    migrated = True
    while migrated:
        migrated = False
        # Current version as a comparable tuple, e.g. '1.2' -> (1, 2).
        v = tuple([int(i) for i in version.version.split('.')])
        for mv in sorted(MIGRATIONS.keys()):
            if mv > v:
                version_str = '.'.join([str(i) for i in mv])
                migration = MIGRATIONS[mv]
                print migration.__doc__
                print red('You are about to migrate from '
                          'version %s to version %s!' % (
                              version.version, version_str)
                          )
                result = prompt("Run migration (y/N)?")
                if not result.lower().startswith('y'):
                    abort(red('Migration aborted by user request!'))
                execute(partial(migration, env_name), hosts=settings['hosts'])
                # update the version in the database
                version.version = version_str
                migrated = True
                # Re-scan from the new current version.
                break
def manifest(): """print manifest file(s) for packages """ for package, egg in eggs(): print yellow("building manifest for %s" % package) mf = make_manifest_file(package, egg) print red("manifest: " + mf)
def repo_setup(repo, ref):
    """ Clone repository

    Ensures the source root exists, clears any non-git directory in the
    way, clones the repo, prepares its workdir and checks out *ref*.
    """
    puts(cyan(">>> Setting up repository %s with ref %s..." % (repo, ref)))
    topsrcdir = repo_check(repo, check_path=False, workdir=False)
    workdir = repo_check(repo, check_path=False, workdir=True)
    gitdir = os.path.join(topsrcdir, '.git')
    if not os.path.exists(env.CFG_SRCDIR):
        res = confirm("Create repository root %s?" % env.CFG_SRCDIR)
        if not res:
            # FIX: dropped the spurious ``% env`` that was applied to a
            # message containing no placeholders.
            abort(red("Cannot continue"))
        else:
            local("mkdir -p %s" % env.CFG_SRCDIR)
    if not os.path.exists(gitdir) and os.path.exists(topsrcdir):
        res = confirm("Remove %s (it does not seem to be a git repository)?" % topsrcdir)
        if not res:
            abort(red("Cannot continue"))
        else:
            local("rm -Rf %s" % topsrcdir)
    if not os.path.exists(gitdir):
        git_clone(repo)
    if not os.path.exists(workdir):
        git_newworkdir(repo)
    git_checkout(repo, ref)
    repo_prepare(repo)
def git_clone(repo):
    """Clone *repo* (looked up in env.CFG_INVENIO_REPOS) into its source dir.

    Prompts before removing an existing checkout; aborts when the repo has
    no configured URL or the user declines removal.
    """
    topsrcdir = repo_check(repo, check_path=False, workdir=False)
    try:
        repo_url = dict(env.CFG_INVENIO_REPOS)[repo]['repository']
    except KeyError:
        abort(red("Repository URL for %s not defined" % repo))
    basename = os.path.basename(topsrcdir)
    parent = os.path.dirname(topsrcdir)
    if os.path.exists(topsrcdir):
        res = confirm("Remove existing source code in %s ?" % topsrcdir)
        if not res:
            # FIX: dropped the spurious ``% env`` that was applied to a
            # message containing no placeholders.
            abort(red("Cannot continue"))
        else:
            local("rm -Rf %s" % topsrcdir)
    else:
        if not os.path.exists(parent):
            local("mkdir -p %s" % parent)
    ctx = {
        'basename': basename,
        'parent': parent,
        'topsrcdir': topsrcdir,
        'url': repo_url,
    }
    local("cd %(parent)s; git clone %(url)s %(basename)s " % ctx)
def staging():
    """Deploy staging: pull a branch, install deps, collectstatic, migrate,
    then restart the web processes."""
    # path to the directory on the server where your vhost is set up
    home = '/home/web'
    path = "/home/web/webapps"
    # name of the restart shell script: This should include services restart
    # for gunicorn, nginx, apache or uwsgi processes.
    process = "restart.sh"
    print(red("Beginning Deploy:"))
    with cd("{path}/project_directory".format(path=path)):
        run("pwd")
        branch_name = prompt('Checkout to which branch? ')
        print(green("Pulling {branch_name} from GitHub...".format(branch_name=branch_name)))
        run("git pull origin {branch_name}".format(branch_name=branch_name))
        print(green("Installing requirements..."))
        run("source {home}/.virtualenvs/virtualenv_name/bin/activate && pip install -r requirements.txt".format(home=home))
        print(green("Collecting static files..."))
        run("source {home}/.virtualenvs/virtualenv_name/bin/activate && python manage.py collectstatic --noinput".format(home=home))
        print(green("Migrating the database..."))
        run("source {home}/.virtualenvs/virtualenv_name/bin/activate && python manage.py migrate".format(home=home))
    with cd('{path}'.format(path=path)):
        print(green("Restart the gunicorn and nginx process"))
        # FIX: the configured ``process`` variable was ignored in favour of
        # a hard-coded "./restart.sh"; use the variable so the comment at
        # the top stays authoritative.
        run("./{0}".format(process))
    print(red("DONE!"))
def install_wordpress(version, host):
    """Install/update WordPress core to *version* and copy in the host's
    configuration files.

    :param version: 'latest' or an explicit WordPress version string.
    :param host: per-host settings dict carrying a 'wp-config' path.
    """
    if version == 'latest':
        # Update wordpress to the latest version
        try:
            sudo("wp core update --allow-root")
            print(green('WordPress installed successfully, moving on to configuration.'))
        except SystemExit:
            return sys.exit(red('WordPress core failed to install. Usually this is a network problem.'))
    else:
        if is_correct_wordpress_version(version):
            puts(green('WordPress is installed at the correct version, no need to update.'))
        else:
            # Not the correct version, so upgrade/downgrade to the correct version
            try:
                sudo("wp core update --version=%s --force --allow-root" % version)
                # recheck version now, since we have no way of knowing if the update ended successfully
                if is_correct_wordpress_version(version):
                    print(green('WordPress installed successfully at version %s, moving on to configuration.' % version))
                else:
                    sys.exit(red('Something went wrong. Exepcted WordPress at %s but did not upgrade successfully.' % version))
            except SystemExit:
                return sys.exit(red('WordPress failed to update!'))
    # Move the configurations into the new wordpress installation
    wp_config = host['wp-config']
    try:
        sudo('cp -R %s configurations' % (wp_config))
        sudo('chmod -R +x configurations')
        sudo('find . -iname \*.php | xargs chmod +x')
        print(green('WordPress fully configured.'))
    except SystemExit:
        return red('WordPress was not properly configured!')
def _debug(msg):
    """Echo the command being run, coloured red for visibility."""
    line = 'Running: {0}'.format(msg)
    print(red(line))
def die(msg, error_code=1):
    """Print *msg* as an error and terminate with *error_code*."""
    prefix = red("error")
    print("{}: {}".format(prefix, msg))
    sys.exit(error_code)
def report():
    """Clear the screen and print the demo backend access details."""
    run("clear")
    print(red("-----------------------------------"))
    print(red("Visit %s/backend to continue") % env.domain)
    # BUG FIX: the label was misspelled "lodin".
    print(red("login - demo"))
    print(red("password - demo"))
def error(text, bold=True): print red(text, bold=bold)
def print_command(command):
    """Pretty-print a shell command: blue prompt, yellow command, red arrow."""
    prompt_part = blue("$ ", bold=True)
    command_part = yellow(command, bold=True)
    arrow_part = red(" ->", bold=True)
    _print(prompt_part + command_part + arrow_part)
def birth(name, ram_or_disk_size=None, wait=False, no_profile=False):
    """Make a new box called <name> with the wanted <ram_or_disk_size>.

    This may use the direct method (i.e. via pyrax) or it may use
    salt-cloud, depending on whether or not there is a saltmaster present,
    if that saltmaster knows what profile we are talking about, if we are
    using a profile.

    :param str name: (friendly) name of server.
    :param int ram_or_disk_size: size of either RAM wanted or disk.
    :param bool wait: set True to hold off returning until the box is up
    :param bool no_profile: set True to make a 'throwaway' box, and not
        use salt-cloud, or install salt.
    """
    saltmaster = NAMING_SCHEME['saltmaster']
    profiles = yaml.load(open('cloud.profiles', 'r')) or {}
    if saltmaster in env.boxen:
        # then salt-cloud can do this
        herd(name=saltmaster)  # use the saltmaster to do stuff
        # Refresh the local profile cache from the saltmaster's copy.
        execute(get, remote_path='/etc/salt/cloud.profiles',
                local_path='cloud.profiles')
        profiles = yaml.load(open('cloud.profiles', 'r')) or {}
        if name in profiles:
            # salt-cloud knows how to do this!
            execute(run, command="salt-cloud -p {0} {0}".format(name))
            herd(name)
            brand()
            return True
        else:
            print red("Unknown profile, creating new one.")
    # No usable profile: build one from the pyrax image/flavour catalogue.
    cs = pyrax.cloudservers
    ubuntu = [img for img in cs.images.list() if "Ubuntu 12.04" in img.name][0]
    # Pick the first flavour whose RAM or disk matches the requested size.
    flavour = [
        flav for flav in cs.flavors.list()
        if float(flav.ram) == float(ram_or_disk_size)
        or float(flav.disk) == float(ram_or_disk_size)
    ][0]
    profiles[name] = {
        'provider': 'rackspace-conf-{0}'.format(env.rackspace_user),
        'size': str(flavour.name),
        'image': str(ubuntu.name),
    }
    with open('cloud.profiles', 'w') as _profiles_file:
        _profiles_file.write(yaml.dump(profiles))
    if saltmaster in env.boxen:
        # update the profiles on the salt master
        execute(put, local_path='cloud.profiles',
                remote_path='/etc/salt/cloud.profiles')
    if no_profile or not saltmaster in env.boxen:
        # make the box
        env.box = cs.servers.create(name, ubuntu.id, flavour.id)
        print green("Ok, made server {0}:{1}".format(env.box.name, env.box.id))
        print green("Admin password (last chance!):"), red(env.box.adminPass)
        if wait:
            print green("Waiting until server is ready...")
            pyrax.utils.wait_for_build(env.box)
            herd(name, newborn=True)
            print green("Ok, server is ready!")
    else:
        # we want profile, so we probably want this done via salt-cloud.
        execute(run, command="salt-cloud -p {0} {0}".format(name))
        herd(name)
        brand()
put('/root/.ssh/authorized_keys', '/root/.ssh/authorized_keys.temp', use_sudo=True) run('cat /root/.ssh/authorized_keys.temp > /root/.ssh/authorized_keys && rm - f /root/.ssh/authorized_keys.temp') def config_ssh_connection(): #execute(reset_ssh_public_host_key) execute(inject_admin_ssh_public_key) local('rm -f /root/.ssh/authorized_keys && cat /root/.ssh/id_rsa.pub > /root/.ssh/authorized_keys') execute(scan_host_ssh_public_key) execute(put_authorized_keys) def terminal_debug_win32(func): command = "fab -f %s %s" % (__file__, func) os.system(command) def terminal_debug_posix(func): command = "fab -f %s %s" % (__file__, func) os.system(command) if __name__ == '__main__': import re if len(sys.argv) == 1: if is_windows(): terminal_debug_win32("config_ssh_connection") sys.exit(0) if is_linux(): terminal_debug_posix("config_ssh_connection") sys.exit(0) sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) print red("Please use 'fab -f %s'" % " ".join(str(x) for x in sys.argv[0:])) sys.exit(1)
def init_demo(fixture='n'): def ok(): print(green(' OK.')) fixture = fixture == 'y' # setup environment os.chdir('ca') os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ca.demosettings") sys.path.insert(0, os.getcwd()) base_url = 'http://*****:*****@example.com'], ca=child_ca) ok() # Revoke host1 and host2 if not fixture: print('Revoke host2.example.com and host3.example.com...', end='') cert = Certificate.objects.get(cn='host2.example.com') cert.revoke() cert.save() cert = Certificate.objects.get(cn='host3.example.com') cert.revoke('key_compromise') cert.save() print(ok()) print('Create CRL and OCSP index...', end='') crl_path = os.path.join(ca_settings.CA_DIR, 'crl.pem') ocsp_index = os.path.join(ca_settings.CA_DIR, 'ocsp_index.txt') manage('dump_crl', crl_path) manage('dump_ocsp_index', ocsp_index, ca=root_ca) ok() ca_crl_path = os.path.join(ca_settings.CA_DIR, 'ca_crl.pem') # Concat the CA certificate and the CRL, this is required by "openssl verify" with open(crl_path) as crl, open(ca_crl_path, 'w') as ca_crl: ca_crl.write(root_ca.pub) ca_crl.write(crl.read()) # create a few watchers Watcher.from_addr('First Last <*****@*****.**>') Watcher.from_addr('Second Last <*****@*****.**>') # create admin user for login User.objects.create_superuser('user', '*****@*****.**', 'nopass') # write public ca cert so it can be used by demo commands below ca_crt = os.path.join(ca_settings.CA_DIR, '%s.pem' % root_ca.serial) with open(ca_crt, 'w') as outstream: outstream.write(root_ca.pub) ca_crt = os.path.join(ca_settings.CA_DIR, '%s.pem' % child_ca.serial) with open(ca_crt, 'w') as outstream: outstream.write(child_ca.pub) os.chdir('../') cwd = os.getcwd() rel = lambda p: os.path.relpath(p, cwd) # NOQA ca_crt = rel(ca_crt) host1_pem = rel(os.path.join(ca_settings.CA_DIR, 'host1.example.com.pem')) print("") print(green('* All certificates are in %s' % rel(ca_settings.CA_DIR))) print(green('* Verify with CRL:')) print('\topenssl verify -CAfile %s -crl_check %s' % (rel(ca_crl_path), 
rel(host1_pem))) print(green('* Run OCSP responder:')) print( '\topenssl ocsp -index %s -port 8888 -rsigner %s -rkey %s -CA %s -text' % (rel(ocsp_index), rel(ocsp_pem), rel(ocsp_key), ca_crt)) print(green('* Verify certificate with OCSP:')) print('\topenssl ocsp -CAfile %s -issuer %s -cert %s -url %s -resp_text' % (ca_crt, ca_crt, host1_pem, base_url)) print( green('* Start webserver on %s (user: user, password: nopass) with:' % base_url)) print( '\tDJANGO_SETTINGS_MODULE=ca.demosettings python ca/manage.py runserver' )
def parse_int_or_exit(val): try: return int(val) except (ValueError, TypeError): print(red("Unable to parse '{}' into an integer".format(val))) exit()
def warn(message): """Print one line warning message """ print red("[%s] * WARNING: %s" % (env.host_string, message))
from fabric.api import task, put, run, env, local, cd, require, abort, get from fabric.network import prompt_for_password from fabric.colors import red, green, white from fabric.contrib.console import confirm from fabric.operations import prompt from fabric.tasks import execute import pyrax import yaml pyrax.set_setting("identity_type", "rackspace") try: from settings import DOMAIN, NAMING_SCHEME except ImportError: print red("You need to define a settings file with DOMAIN and " "NAMING_SCHEME") sys.exit(1) SALT_CLOUD_TEMPLATE = { 'apikey': None, 'compute_name': 'cloudServersOpenStack', 'compute_region': 'LON', 'identity_url': 'https://identity.api.rackspacecloud.com/v2.0/tokens', 'minion': { 'master': None }, 'protocol': 'ipv4', 'provider': 'openstack', 'tenant': None, 'user': None, }
def halt(msg): ''' Terminate the script execution with a message ''' raise SystemExit(red(msg))
def print_env_and_user(): """ Print the envirioment and user """ print(red("Executing on %s(%s) as %s" % (env.host, env.server, env.user)))
def check_kraken_jormun_after_deploy(show=False): headers = {'Host': env.jormungandr_url} request_str = 'http://{}{}/v1/status'.format(env.jormungandr_url, env.jormungandr_url_prefix) print("request_str: {}".format(request_str)) try: # Send HTTP GET requests response = requests.get(request_str, headers=headers, auth=HTTPBasicAuth(env.token, '')) # If HTTP status_code Erreur. if response.status_code != 200: print(red("Request not successful : {}".format(str(response)))) return result = response.json() except (ConnectionError, HTTPError) as e: print(red("HTTP Error {}: {}".format(e.code, e.readlines()[0]))) return except JSONDecodeError as e: print(red("cannot read json response : {}".format(e))) return except Exception as e: print( red("Error when connecting to {}: {}".format( env.jormungandr_url, e))) return warn_dict = {'jormungandr': None, 'kraken': []} if re.match(r"v{}".format(show_version(action='get')[1]), result['jormungandr_version']): warn_dict['jormungandr'] = result['jormungandr_version'] for item in result['regions']: kraken_warn = { 'status': item['status'], 'region_id': item['region_id'] } if item['status'] == "dead": kraken_warn['kraken_version'] = None elif item['kraken_version'] != warn_dict['jormungandr']: kraken_warn['kraken_version'] = item['kraken_version'] elif item['status'] == "no_data": kraken_warn['kraken_version'] = warn_dict['jormungandr'] if 'kraken_version' in kraken_warn.keys(): warn_dict['kraken'].append(kraken_warn) if show: if warn_dict['jormungandr']: print( yellow("Jormungandr version={}".format( warn_dict['jormungandr']))) for item in warn_dict['kraken']: print( yellow( "status={status} | region_id={region_id} | kraken_version={kraken_version}" .format(**item))) return warn_dict
def init_demo():
    """Bootstrap a complete django-ca demo environment.

    Working inside the ``ca/`` directory, this sets up django, creates the
    database, initializes a root and a child CA, issues OCSP/host/client
    certificates, revokes two hosts, dumps CRL and OCSP index files, creates
    watchers and an admin user, and finally prints openssl commands for
    trying everything out.

    NOTE(review): the '*****' fragments in email strings below look like
    redacted placeholder addresses from a secrets scrubber — confirm the
    real values against the original source. Relies on module-level
    ``os``/``sys``/``abort``/``red``/``green``/``create_cert`` being in
    scope (defined/imported elsewhere in this file).
    """
    # setup environment
    os.chdir('ca')
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ca.settings")
    sys.path.insert(0, os.getcwd())
    # setup django
    import django
    django.setup()
    # finally - imports! (must happen after django.setup())
    from django.conf import settings
    from django.contrib.auth import get_user_model
    from django.core.management import call_command as manage
    from django_ca import ca_settings
    from django_ca.models import Certificate
    from django_ca.models import CertificateAuthority
    from django_ca.models import Watcher
    User = get_user_model()
    # Safety guards: demo-only, and never clobber an existing CA database.
    if settings.DEBUG is not True:
        abort(red('Refusing to run if settings.DEBUG != True.'))
    if os.path.exists(os.path.join('ca', 'db.sqlite3')):
        abort(red('CA already set up.'))
    print(green('Creating database...'))
    manage('migrate', verbosity=0)
    print(green('Initiating CA...'))
    manage('init_ca', 'Root CA',
           '/C=AT/ST=Vienna/L=Vienna/O=example/OU=example/CN=ca.example.com',
           pathlen=1, ocsp_url='http://ocsp.ca.example.com',
           crl_url=['http://ca.example.com/crl'],
           issuer_url='http://ca.example.com/ca.crt',
           issuer_alt_name='https://ca.example.com')
    root_ca = CertificateAuthority.objects.get(name='Root CA')
    print(green('Initiating Child CA...'))
    manage(
        'init_ca', 'Child CA',
        '/C=AT/ST=Vienna/L=Vienna/O=example/OU=example/CN=sub.ca.example.com',
        parent=root_ca)
    child_ca = CertificateAuthority.objects.get(name='Child CA')
    # generate OCSP certificate
    print(green('Generate OCSP certificate...'))
    ocsp_key, ocsp_csr, ocsp_pem = create_cert('localhost', alt=['localhost'],
                                               profile='ocsp')
    # Create some client certificates (always trust localhost to ease testing)
    for i in range(1, 10):
        hostname = 'host%s.example.com' % i
        print(green('Generate certificate for %s...' % hostname))
        create_cert(hostname, cn=hostname, alt=['localhost'])
    # create stunnel.pem (private key + certificate concatenated, the format
    # stunnel expects)
    key_path = os.path.join(ca_settings.CA_DIR, 'host1.example.com.key')
    pem_path = os.path.join(ca_settings.CA_DIR, 'host1.example.com.pem')
    stunnel_path = os.path.join(ca_settings.CA_DIR, 'stunnel.pem')
    with open(key_path) as key, open(pem_path) as pem, \
            open(stunnel_path, 'w') as stunnel:
        stunnel.write(key.read())
        stunnel.write(pem.read())
    print(green('Creating client certificate...'))
    create_cert('client', cn='First Last', cn_in_san=False,
                alt=['*****@*****.**'], ca=child_ca)
    # Revoke host1 and host2
    print(green('Revoke host1.example.com and host2.example.com...'))
    cert = Certificate.objects.get(cn='host1.example.com')
    cert.revoke()
    cert.save()
    cert = Certificate.objects.get(cn='host2.example.com')
    cert.revoke('keyCompromise')
    cert.save()
    print(green('Create CRL and OCSP index...'))
    crl_path = os.path.join(ca_settings.CA_DIR, 'crl.pem')
    ocsp_index = os.path.join(ca_settings.CA_DIR, 'ocsp_index.txt')
    manage('dump_crl', crl_path)
    manage('dump_ocsp_index', ocsp_index, ca=root_ca)
    ca_crl_path = os.path.join(ca_settings.CA_DIR, 'ca_crl.pem')
    # Concat the CA certificate and the CRL, this is required by "openssl verify"
    with open(crl_path) as crl, open(ca_crl_path, 'w') as ca_crl:
        ca_crl.write(root_ca.pub)
        ca_crl.write(crl.read())
    # create a few watchers
    Watcher.from_addr('First Last <*****@*****.**>')
    Watcher.from_addr('Second Last <*****@*****.**>')
    # create admin user for login
    User.objects.create_superuser('user', '*****@*****.**', 'nopass')
    # write public ca cert so it can be used by demo commands below
    ca_crt = os.path.join(ca_settings.CA_DIR, '%s.pem' % root_ca.serial)
    with open(ca_crt, 'w') as outstream:
        outstream.write(root_ca.pub)
    ca_crt = os.path.join(ca_settings.CA_DIR, '%s.pem' % child_ca.serial)
    with open(ca_crt, 'w') as outstream:
        outstream.write(child_ca.pub)
    # Back to the project root; print all paths relative to it.
    os.chdir('../')
    cwd = os.getcwd()
    rel = lambda p: os.path.relpath(p, cwd)
    ca_crt = rel(ca_crt)
    host1_pem = rel(os.path.join(ca_settings.CA_DIR, 'host1.example.com.pem'))
    print("")
    print(green('* All certificates are in %s' % rel(ca_settings.CA_DIR)))
    print(green('* Verify with CRL:'))
    print('\topenssl verify -CAfile %s -crl_check %s' % (rel(ca_crl_path),
                                                         rel(host1_pem)))
    print(green('* Run OCSP responder:'))
    print(
        '\topenssl ocsp -index %s -port 8888 -rsigner %s -rkey %s -CA %s -text'
        % (rel(ocsp_index), rel(ocsp_pem), rel(ocsp_key), ca_crt))
    print(green('* Verify certificate with OCSP:'))
    print(
        '\topenssl ocsp -CAfile %s -issuer %s -cert %s -url http://localhost:8888 -resp_text'
        % (ca_crt, ca_crt, host1_pem))
    print(
        green(
            '* Start webserver on http://localhost:8000 (user: user, password: nopass) with:'
        ))
    print('\tpython ca/manage.py runserver')
from fabric.contrib.console import confirm from fabric.colors import red, green from datetime import date, timedelta import operator import time import config import git import pear import shell import tools try: import custom except ImportError: print( red('Cannot import custom. Any custom fabric task will not be available.' )) def load_config(): """ Load the yaml configuration file into the env dictionary. This depends on the deployment_target being set and should typically get called from either dev() or prod(). """ env.update( config.load_yaml_config('app/config/deployment/config.yml', env.deployment_target)) @task def target(target):
def test_jormungandr(server, instance=None, fail_if_error=True):
    """ Test jormungandr globally (/v1/coverage) or a given instance

    Note: we don't launch that with a role because we want to test the access
    from the outside of the server

    :param server: host (and optional port) jormungandr is reached on
    :param instance: when given, query that coverage's /status endpoint plus a
        couple of technical requests instead of the global coverage list
    :param fail_if_error: when True, any failure terminates the process with
        exit code 1; otherwise a warning is printed and False is returned
    :returns: True when everything is fine, False otherwise (may exit instead)
    """
    headers = {'Host': env.jormungandr_url}
    request_str = 'http://{}{}/v1/coverage'.format(server,
                                                   env.jormungandr_url_prefix)
    # Cheap sanity queries run against a specific instance only.
    technical_requests = {
        'vehicle_journeys':
        'http://{}{}/v1/coverage/{}/vehicle_journeys?count=1'.format(
            server, env.jormungandr_url_prefix, instance),
        'stop_points':
        'http://{}{}/v1/coverage/{}/stop_points?count=1'.format(
            server, env.jormungandr_url_prefix, instance)
    }
    result = {}
    if instance:
        request_str = 'http://{}{}/v1/coverage/{}/status'.format(
            server, env.jormungandr_url_prefix, instance)
    try:
        response = requests.get(request_str, headers=headers,
                                auth=HTTPBasicAuth(env.token, ''))
        response.raise_for_status()
        print("{} -> {}".format(response.url, green(response.status_code)))
        if instance:
            for query_type, url in technical_requests.items():
                r = requests.get(url, headers=headers,
                                 auth=HTTPBasicAuth(env.token, ''))
                # Raise error if status_code != 200
                if r.status_code != 200 or query_type not in r.json().keys(
                ) or 'error' in r.json().keys():
                    # NOTE(review): assumes a failing payload always carries an
                    # 'error' key — a 200 answer missing both keys would raise
                    # KeyError here; confirm against the API.
                    print("{} ({}) -> {}".format(query_type,
                                                 yellow(r.status_code),
                                                 r.json()['error']))
                else:
                    print("{} -> {}".format(query_type, green(r.status_code)))
        result = response.json()
    except (ConnectionError, HTTPError) as e:
        if fail_if_error:
            print(red("Connection or HTTP Error {}".format(e)))
            exit(1)
        else:
            print(
                yellow(
                    "WARNING: {} is running but problem found: {} (maybe no data ?)"
                    .format(instance, e)))
            exit(0)
    except JSONDecodeError as e:
        print(red("cannot read json response : {}".format(e)))
        exit(1)
    except Exception as e:
        print(red("Error when connecting to %s: %s" % (env.jormungandr_url,
                                                       e)))
        exit(1)
    # if result contain just a message, this indicate a problem
    if 'message' in result and fail_if_error:
        print(red("CRITICAL: Problem on result: '{}'".format(result)))
        exit(1)
    elif 'message' in result:
        print(yellow("WARNING: Problem on result: '{}'".format(result)))
        return False
    if instance:
        print(
            green("Kraken Version is {}".format(
                result['status']['kraken_version'])))
    # just check that there is one instance running
    else:
        regions = result['regions']
        active_instance = [
            i for i in env.instances.keys()
            if i not in env.excluded_instances
        ]
        if len(regions) != len(active_instance):
            # BUGFIX: these were Python 2 print statements (`print red(...)`)
            # in a function that otherwise uses print() calls — a syntax error
            # under Python 3. Normalized to the call form throughout.
            print(red(
                "there is not the right number of instances, "
                "we should have {ref} but we got {real} instances".format(
                    ref=len(active_instance), real=len(regions))))
            print(red('instances in diff: {}'.format(
                set(active_instance).symmetric_difference(
                    set([r['id'] for r in regions])))))
            if fail_if_error:
                exit(1)
            return False
        else:
            # We check that at least one is ok
            statuses = [(r['id'], r['status']) for r in regions]
            if all(map(lambda p: p[1] == 'running', statuses)):
                print(green('all instances are ok, everything is fine'))
                return True
            print(blue('running instances: {}'.format(
                [r[0] for r in statuses if r[1] == 'running'])))
            print(red('KO instances: {}'.format(
                [r for r in statuses if r[1] != 'running'])))
            if fail_if_error:
                exit(1)
            return False
    return True