def go_setup(stage="development"):
    """
    Install and configure the correct services on each machine.

    $ fab -i deploy/[your private SSH key here] set_hosts go_setup

    For every machine defined for ``stage`` in the provider config whose
    public IP matches the current fabric host, set the hostname, prepare
    the server, then install and set up each service listed for it.

    :param stage: deployment stage name (e.g. 'development')
    """
    stage_exists(stage)
    PROVIDER = get_provider_dict()
    machines = PROVIDER['machines'][stage]

    # Replication is on when any machine in this stage declares a 'slave'
    # setting for any of the database services.  (Single generator replaces
    # the old list-append + nested any() construction.)
    replication = any(
        'slave' in machines[name].get('services', {}).get(db, {})
        for db in ['mysql', 'postgresql', 'postgresql-client']
        for name in machines)

    # Begin installing and setting up services
    for name in machines:
        node_dict = machines[name]
        host = node_dict['public_ip'][0]
        if host != fabric.api.env.host:
            continue  # only act on the machine fabric is connected to
        set_hostname(name)
        prepare_server()
        # Iterate items() directly instead of keys() + re-indexing.
        for service, settings in node_dict['services'].items():
            if service == 'nginx':
                nginx_install()
                nginx_setup(stage=stage)
            elif service == 'redis':
                redis_install()
                redis_setup()
            elif service == 'uwsgi':
                uwsgi_install()
                uwsgi_setup(stage=stage)
            elif service == 'mysql':
                mysql_install()
                mysql_setup(stage=stage, replication=replication, **settings)
            elif service == 'postgresql':
                postgresql_install(name, node_dict, stage=stage, **settings)
                postgresql_setup(name, node_dict, stage=stage, **settings)
            elif service == 'postgresql-client':
                postgresql_client_install()
            elif service in ['apache']:
                fabric.api.warn(
                    fabric.colors.yellow("%s is not yet available" % service))
            else:
                fabric.api.warn('%s is not an available service' % service)
def go_setup(stage="development"):
    """
    Install the correct services on each machine.

    $ fab -i deploy/[your private SSH key here] set_hosts go_setup

    Walks the provider config for ``stage`` and, on the machine whose
    public IP matches the active fabric host, installs and configures
    every service listed for that machine.
    """
    stage_exists(stage)
    provider = get_provider_dict()
    stage_machines = provider['machines'][stage]

    # A master/slave relationship exists when any machine declares a
    # 'slave' entry for one of the database services.
    slave_flags = []
    for db in ['mysql', 'postgresql', 'postgresql-client']:
        has_slave = False
        for machine_name in stage_machines:
            machine_services = stage_machines[machine_name].get('services', {})
            if 'slave' in machine_services.get(db, {}):
                has_slave = True
        slave_flags.append(has_slave)
    replication = any(slave_flags)

    # Install and configure services on the machine we are connected to.
    for machine_name in stage_machines:
        machine = stage_machines[machine_name]
        if machine['public_ip'][0] != fabric.api.env.host:
            continue
        set_hostname(machine_name)
        prepare_server()
        for service_name, service_settings in machine['services'].items():
            if service_name == 'nginx':
                nginx_install()
                nginx_setup(stage=stage)
            elif service_name == 'redis':
                redis_install()
                redis_setup()
            elif service_name == 'nodejs':
                nodejs_install()
                nodejs_setup()
            elif service_name == 'uwsgi':
                uwsgi_install()
                uwsgi_setup(stage=stage)
            elif service_name == 'mysql':
                mysql_install()
                mysql_setup(stage=stage, replication=replication,
                            **service_settings)
            elif service_name == 'postgresql':
                postgresql_install(machine_name, machine, stage=stage,
                                   **service_settings)
                postgresql_setup(machine_name, machine, stage=stage,
                                 **service_settings)
            elif service_name == 'postgresql-client':
                postgresql_client_install()
            elif service_name in ['apache']:
                fabric.api.warn(
                    fabric.colors.yellow(
                        "%s is not yet available" % service_name))
            else:
                fabric.api.warn(
                    '%s is not an available service' % service_name)
def go_deploy(stage="development", tagname="trunk"):
    """
    Deploy the project and make it active on any machine with server software.

    $ fab -i deploy/[your private SSH key here] set_hosts go_deploy

    :param stage: deployment stage name (e.g. 'development')
    :param tagname: VCS tag to deploy; defaults to 'trunk'
    """
    stage_exists(stage)
    PROVIDER = get_provider_dict()
    # Services whose presence marks a machine as a deployment target.
    server_services = frozenset(['nginx', 'uwsgi', 'apache'])
    for name in PROVIDER['machines'][stage]:
        node_dict = PROVIDER['machines'][stage][name]
        host = node_dict['public_ip'][0]
        if host != fabric.api.env.host:
            continue  # only deploy on the machine fabric is connected to
        # If any server service is listed then deploy the project.
        # (isdisjoint avoids building a throwaway set+list just for a
        # truthiness test; the dead `service = node_dict['services']`
        # local is removed.)
        if not server_services.isdisjoint(node_dict['services']):
            deploy_full(tagname, force=True)
def go_deploy(stage="development", tagname="trunk"):
    """
    Deploy project and make active on any machine with server software.

    $ fab -i deploy/[your private SSH key here] set_hosts go_deploy
    """
    stage_exists(stage)
    provider_conf = get_provider_dict()
    stage_machines = provider_conf['machines'][stage]
    for machine_name in stage_machines:
        machine = stage_machines[machine_name]
        if machine['public_ip'][0] != fabric.api.env.host:
            continue
        services = machine['services']
        # If any of these services are listed then deploy the project
        if list(set(['nginx', 'uwsgi', 'apache']) & set(services)):
            deploy_full(tagname, force=True)
def postgresql_install(id, node_dict, stage, **options):
    """
    Install PostgreSQL on the current host and configure it.

    On an EC2 provider (and unless ``options['simple']`` is set) the data
    directory is built from two EBS volumes joined as RAID 0, formatted as
    XFS and mounted at /data, and the default cluster is moved there.
    Master/slave streaming replication is wired up from the 'slave'
    entries found in the provider config.

    :param id: machine identifier, used to tag the EBS volumes
               (shadows the builtin ``id``; kept for caller compatibility)
    :param node_dict: this machine's provider-config entry; the 'id' and
                      'public_ip' keys are read here
    :param stage: deployment stage name
    :param options: service settings; recognised keys include 'slave',
                    'simple', 'max-size', 'user' and 'password'
    """
    # Bail out early if a previous run already installed PostgreSQL.
    if _postgresql_is_installed():
        fabric.api.warn(fabric.colors.yellow('PostgreSQL is already installed.'))
        return
    config = get_provider_dict()
    if 'slave' in options:
        # This machine is a replication slave: inherit the master's settings.
        master = config['machines'][stage][options['slave']]
        options.update(master['services']['postgresql'])
    package_add_repository('ppa:pitti/postgresql')
    package_install(['postgresql', 'python-psycopg2'])

    # Figure out cluster name: first two whitespace-separated fields of
    # `pg_lsclusters -h` are the version and the cluster name.
    output = fabric.api.run('pg_lsclusters -h')
    version, cluster = output.split()[:2]

    if 'ec2' in fabric.api.env.conf['PROVIDER']:
        if not options.get('simple'):
            package_install('xfsprogs')
            package_install('mdadm', '--no-install-recommends')
            # Create two ebs volumes, each half of the requested max size.
            import boto.ec2
            # config['location'][:-1] strips the AZ letter to get the region.
            ec2 = boto.ec2.connect_to_region(
                config['location'][:-1],
                aws_access_key_id=fabric.api.env.conf['AWS_ACCESS_KEY_ID'],
                aws_secret_access_key=fabric.api.env.conf['AWS_SECRET_ACCESS_KEY'])
            tag1 = u'%s-1' % id
            tag2 = u'%s-2' % id
            # Only create/attach a volume when one with this Name tag
            # doesn't exist yet (idempotent re-runs).
            if not any(vol for vol in ec2.get_all_volumes()
                       if vol.tags.get(u'Name') == tag1):
                volume1 = ec2.create_volume(options.get('max-size', 10)/2,
                                            config['location'])
                volume1.add_tag('Name', tag1)
                volume1.attach(node_dict['id'], '/dev/sdf')
            if not any(vol for vol in ec2.get_all_volumes()
                       if vol.tags.get(u'Name') == tag2):
                volume2 = ec2.create_volume(options.get('max-size', 10)/2,
                                            config['location'])
                volume2.add_tag('Name', tag2)
                volume2.attach(node_dict['id'], '/dev/sdg')
            # Give the volumes a moment to attach before touching devices.
            time.sleep(10)

            # RAID 0 together the EBS volumes, and format the result as xfs.
            # Mount at /data.
            if not fabric.contrib.files.exists('/dev/md0', True):
                fabric.api.sudo('mdadm --create /dev/md0 --level=0 --raid-devices=2 /dev/sdf /dev/sdg')
                fabric.api.sudo('mkfs.xfs /dev/md0')
            # Add mountpoint
            if not fabric.contrib.files.exists('/data'):
                fabric.api.sudo('mkdir -p /data')
                fabric.api.sudo('chown postgres:postgres /data')
                fabric.api.sudo('chmod 644 /data')
            # Add to fstab and mount
            append('/etc/fstab', '/dev/md0 /data auto defaults 0 0', True)
            with fabric.api.settings(warn_only=True):
                # warn_only so a failing mount (e.g. already mounted)
                # doesn't abort the run.
                fabric.api.sudo('mount /data')
            # Move cluster/dbs to /data unless the cluster is already there
            # (compares the data-directory field of pg_lsclusters).
            if fabric.api.run('pg_lsclusters -h').split()[5] != '/data':
                fabric.api.sudo('pg_dropcluster --stop %s %s' % (version, cluster))
                fabric.api.sudo('pg_createcluster --start -d /data -e UTF-8 %s %s' % (version, cluster))
    else:
        fabric.api.warn(fabric.colors.yellow('PostgreSQL advanced drive setup (RAID 0 + XFS) is not currently supported on non-ec2 instances'))

    fabric.api.sudo('service postgresql stop')

    # Set up postgres config files - Allow global listening (have a
    # firewall!) and local ubuntu->your user connections.
    pg_dir = '/etc/postgresql/%s/%s/' % (version, cluster)
    fabric.contrib.files.comment(pg_dir + 'postgresql.conf', 'listen_addresses', True)
    append(pg_dir + 'postgresql.conf', "listen_addresses = '*'", True)
    append(pg_dir + 'pg_hba.conf', "host all all 0.0.0.0/0 md5", True)
    fabric.contrib.files.sed(pg_dir + 'pg_hba.conf', "ident", "trust", use_sudo=True)

    # Figure out if we're a master: we have no 'slave' option ourselves,
    # but some machine in this stage declares a postgresql 'slave' entry.
    if 'slave' not in options and any(
            'slave' in values.get('services', {}).get('postgresql', {})
            for name, values in config['machines'][stage].iteritems()):
        # We're a master!
        append(pg_dir + 'postgresql.conf', [
            'wal_level = hot_standby',
            'max_wal_senders = 1',
            'checkpoint_segments = 8',
            'wal_keep_segments = 8'], True)
        append(pg_dir + 'pg_hba.conf', "host replication all 0.0.0.0/0 md5", True)
    elif 'slave' in options:
        # We're a slave!
        append(pg_dir + 'postgresql.conf', [
            'hot_standby = on',
            'checkpoint_segments = 8',
            'wal_keep_segments = 8'], True)
        #fabric.api.sudo('rm -rf /data/*')
        append('/data/recovery.conf', [
            "standby_mode = 'on'",
            "primary_conninfo = 'host=%s port=5432 user=%s password=%s'" % (master['public_ip'][0], options['user'], options['password']),
            "trigger_file = '/data/failover'"], True)
        # Seed this slave's /data by streaming a tarball of the master's
        # data directory across via ssh.
        # NOTE(review): the 'deploy/nbc-west.pem' key path is hard-coded
        # -- verify it matches this deployment.
        fabric.api.local('''ssh -i %s ubuntu@%s sudo tar czf - /data | ssh -i deploy/nbc-west.pem ubuntu@%s sudo tar xzf - -C /''' % (fabric.api.env.key_filename[0], master['public_ip'][0], node_dict['public_ip'][0]))
        fabric.api.sudo('chown -R postgres:postgres /data')
    fabric.api.sudo('service postgresql start')
def mysql_setup(**kwargs):
    """
    Set up MySQL on the current host.

    Keyword arguments can define the stage, the name of the database to
    create, the user and password to enable, or the slave database record.
    When 'slave' is given the master's record is looked up in the provider
    config and this machine is configured as a replication slave; when
    ``replication`` is True without 'slave', this machine becomes the
    replication master.

    Recognised kwargs:
        stage           -- deployment stage name
        name            -- database to create
        user, password  -- application credentials to enable
        slave           -- name of the master machine (marks us as a slave)
        slave_user, slave_password -- replication credentials
        replication     -- whether to configure master/slave replication
    """
    if not _mysql_is_installed():
        fabric.api.warn(fabric.colors.yellow("MySQL must be installed."))
        return

    # Get Parameters
    stage = kwargs.get("stage", None)
    name = kwargs.get("name", None)
    user = kwargs.get("user", None)
    password = kwargs.get("password", None)
    slave = kwargs.get("slave", None)  # Name of the master database
    slave_user = kwargs.get("slave_user", "slave_user")
    slave_password = kwargs.get("slave_password", "password")
    replication = kwargs.get("replication", False)  # Do Master/Slave replication

    # The root db password for setup
    root_passwd = ""
    if "DB_PASSWD" in fabric.api.env.conf:
        root_passwd = fabric.api.env.conf["DB_PASSWD"]

    # Private IP is necessary for bind-address.
    # NOTE(review): currently unused -- the bind-address below is opened
    # to 0.0.0.0 instead of being pinned to the private IP; call kept so
    # behavior is unchanged.
    private_ip = get_internal_ip()

    # Update the bind-address so other machines can reach the server.
    mysql_conf = "/etc/mysql/my.cnf"
    before = "bind-address[[:space:]]*=[[:space:]]*127.0.0.1"
    after = "bind-address = 0.0.0.0"  # "bind-address = %s" % private_ip
    if not fabric.contrib.files.contains(mysql_conf, after):
        fabric.contrib.files.sed(mysql_conf, before, after,
                                 use_sudo=True, backup=".bkp")

    # Create the database user (skipped on slaves: credentials come from
    # the master's record during replication setup below).
    # NOTE(review): the "******" admin login looks like a redacted value
    # -- verify the real account name expected by mysql_create_user.
    if user and password and not slave:
        mysql_create_user(user="******", password=root_passwd,
                          new_user=user, new_password=password)
    # Create the database
    if name:
        mysql_create_db(user="******", password=root_passwd, database=name)

    # If replication is True then we must do the setup
    if replication:
        PROVIDER = get_provider_dict()
        # With 'slave' given, configure this machine as a slave: look up
        # name/user/password from the master's record in the conf file.
        if slave:
            if slave in PROVIDER["machines"][stage]:
                # Get the private IP of the master database
                master_ip = PROVIDER["machines"][stage][slave]["private_ip"][0]
                # Get the settings of the master database
                settings = PROVIDER["machines"][stage][slave]["services"]["mysql"]
                name = settings.get("name", None)
                user = settings.get("user", None)
                password = settings.get("password", None)
                # Create the database
                mysql_create_db(user="******", password=root_passwd, database=name)
                # Set up a slave conf file and restart
                context = {
                    "db_name": name,
                    "db_password": slave_password,
                    "db_user": slave_user,
                    "master_ip": master_ip,
                }
                template = os.path.join(fabric.api.env.conf["FILES"],
                                        "mysqld_slave.cnf")
                fabric.contrib.files.upload_template(template, "/etc/mysql/conf.d/",
                                                     context=context, use_sudo=True)
                mysql_restart()
                # Load the data from master
                mysql_execute("""LOAD DATA FROM MASTER;""", "root", root_passwd)
                mysql_restart()
                log_file = fabric.api.prompt("Enter the master log file name:")
                log_pos = fabric.api.prompt("Enter the master log position:")
                # BUG FIX: MASTER_USER/MASTER_PASSWORD were the literal
                # placeholder "******", leaving only 3 "%s" conversions for
                # the 5-tuple of arguments -- the %-format raised TypeError
                # before any SQL was sent.  They now take slave_user and
                # slave_password as the argument order always intended.
                mysql_execute(
                    """STOP SLAVE;CHANGE MASTER TO MASTER_HOST="%s", MASTER_USER="%s", MASTER_PASSWORD="%s", MASTER_LOG_FILE="%s", MASTER_LOG_POS=%s;START SLAVE;"""
                    % (master_ip, slave_user, slave_password, log_file, log_pos),
                    "root", root_passwd)
            else:
                fabric.api.warn(fabric.colors.yellow(
                    "The server %s is not available in %s" % (slave, stage)))
        # Without 'slave', configure this machine as the master.
        else:
            # Accepted source IP for the slave user; defaults to '%%'
            # (open).  Relies on the hostname being set correctly,
            # otherwise the default is used.
            slave_ip = "%%"
            for hostname in PROVIDER["machines"][stage]:
                if "slave" in PROVIDER["machines"][stage][hostname]["services"]["mysql"]:
                    if PROVIDER["machines"][stage][hostname]["services"]["mysql"]["slave"] == get_hostname():
                        slave_ip = PROVIDER["machines"][stage][hostname]["private_ip"][0]
                        break
            # Set up a master conf file and restart
            context = {"db_name": name}
            template = os.path.join(fabric.api.env.conf["FILES"],
                                    "mysqld_master.cnf")
            fabric.contrib.files.upload_template(template, "/etc/mysql/conf.d/",
                                                 context=context, use_sudo=True)
            mysql_restart()
            # Create the slave user for replication and restart
            mysql_execute(
                """GRANT REPLICATION SLAVE,RELOAD,SUPER ON *.* TO "%s"@"%s" IDENTIFIED BY "%s";FLUSH PRIVILEGES;"""
                % (slave_user, slave_ip, slave_password),
                "root", root_passwd)
            mysql_restart()
            # Get the master status: hold a read lock while the operator
            # writes down the binlog file/position needed on the slave.
            mysql_execute("""USE %s;FLUSH TABLES WITH READ LOCK;SHOW MASTER STATUS;""" % (name),
                          "root", root_passwd)
            fabric.contrib.console.confirm("Write down the log file name and position. Do you want to continue?")
            mysql_execute("""UNLOCK TABLES;""", "root", root_passwd)
def postgresql_install(id, node_dict, stage, **options):
    """
    Install PostgreSQL on this host and prepare it for use.

    Unless ``options['simple']`` is set, EC2 providers get a data directory
    assembled from two EBS volumes in RAID 0, formatted XFS and mounted at
    /data, with the default cluster moved there.  Master/slave streaming
    replication is derived from 'slave' entries in the provider config.

    :param id: machine identifier for tagging the EBS volumes
               (shadows the builtin ``id``; name kept for compatibility)
    :param node_dict: this machine's provider-config entry ('id' and
                      'public_ip' are read)
    :param stage: deployment stage name
    :param options: service settings ('slave', 'simple', 'max-size',
                    'user', 'password', ...)
    """
    # Idempotence guard: don't reinstall.
    if _postgresql_is_installed():
        fabric.api.warn(
            fabric.colors.yellow('PostgreSQL is already installed.'))
        return
    config = get_provider_dict()
    if 'slave' in options:
        # Slave machines inherit the master's postgresql settings.
        master = config['machines'][stage][options['slave']]
        options.update(master['services']['postgresql'])
    package_add_repository('ppa:pitti/postgresql')
    package_install(['postgresql', 'python-psycopg2'])

    # Figure out cluster name: version and cluster are the first two
    # whitespace-separated fields of `pg_lsclusters -h`.
    output = fabric.api.run('pg_lsclusters -h')
    version, cluster = output.split()[:2]

    if 'ec2' in fabric.api.env.conf['PROVIDER']:
        if not options.get('simple'):
            package_install('xfsprogs')
            package_install('mdadm', '--no-install-recommends')
            # Create two ebs volumes (each half of 'max-size').
            import boto.ec2
            # Region = availability zone minus its trailing letter.
            ec2 = boto.ec2.connect_to_region(
                config['location'][:-1],
                aws_access_key_id=fabric.api.env.conf['AWS_ACCESS_KEY_ID'],
                aws_secret_access_key=fabric.api.env.conf['AWS_SECRET_ACCESS_KEY'])
            tag1 = u'%s-1' % id
            tag2 = u'%s-2' % id
            # Skip creation when a volume with this Name tag already exists.
            if not any(vol for vol in ec2.get_all_volumes()
                       if vol.tags.get(u'Name') == tag1):
                volume1 = ec2.create_volume(
                    options.get('max-size', 10) / 2, config['location'])
                volume1.add_tag('Name', tag1)
                volume1.attach(node_dict['id'], '/dev/sdf')
            if not any(vol for vol in ec2.get_all_volumes()
                       if vol.tags.get(u'Name') == tag2):
                volume2 = ec2.create_volume(
                    options.get('max-size', 10) / 2, config['location'])
                volume2.add_tag('Name', tag2)
                volume2.attach(node_dict['id'], '/dev/sdg')
            # Let the attachments settle before using the devices.
            time.sleep(10)

            # RAID 0 together the EBS volumes, and format the result as
            # xfs.  Mount at /data.
            if not fabric.contrib.files.exists('/dev/md0', True):
                fabric.api.sudo(
                    'mdadm --create /dev/md0 --level=0 --raid-devices=2 /dev/sdf /dev/sdg'
                )
                fabric.api.sudo('mkfs.xfs /dev/md0')
            # Add mountpoint
            if not fabric.contrib.files.exists('/data'):
                fabric.api.sudo('mkdir -p /data')
                fabric.api.sudo('chown postgres:postgres /data')
                fabric.api.sudo('chmod 644 /data')
            # Add to fstab and mount
            append('/etc/fstab', '/dev/md0 /data auto defaults 0 0', True)
            with fabric.api.settings(warn_only=True):
                # warn_only: a failed mount (e.g. already mounted) must
                # not abort the task.
                fabric.api.sudo('mount /data')
            # Move cluster/dbs to /data unless already located there.
            if fabric.api.run('pg_lsclusters -h').split()[5] != '/data':
                fabric.api.sudo('pg_dropcluster --stop %s %s' % (version, cluster))
                fabric.api.sudo(
                    'pg_createcluster --start -d /data -e UTF-8 %s %s' %
                    (version, cluster))
    else:
        fabric.api.warn(
            fabric.colors.yellow(
                'PostgreSQL advanced drive setup (RAID 0 + XFS) is not currently supported on non-ec2 instances'
            ))

    fabric.api.sudo('service postgresql stop')

    # Set up postgres config files - Allow global listening (have a
    # firewall!) and local ubuntu->your user connections.
    pg_dir = '/etc/postgresql/%s/%s/' % (version, cluster)
    fabric.contrib.files.comment(pg_dir + 'postgresql.conf',
                                 'listen_addresses', True)
    append(pg_dir + 'postgresql.conf', "listen_addresses = '*'", True)
    append(pg_dir + 'pg_hba.conf', "host all all 0.0.0.0/0 md5", True)
    fabric.contrib.files.sed(pg_dir + 'pg_hba.conf', "ident", "trust",
                             use_sudo=True)

    # Figure out if we're a master: no 'slave' option here, but another
    # machine in this stage declares a postgresql 'slave' entry.
    if 'slave' not in options and any(
            'slave' in values.get('services', {}).get('postgresql', {})
            for name, values in config['machines'][stage].iteritems()):
        # We're a master!
        append(pg_dir + 'postgresql.conf', [
            'wal_level = hot_standby', 'max_wal_senders = 1',
            'checkpoint_segments = 8', 'wal_keep_segments = 8'
        ], True)
        append(pg_dir + 'pg_hba.conf', "host replication all 0.0.0.0/0 md5",
               True)
    elif 'slave' in options:
        # We're a slave!
        append(pg_dir + 'postgresql.conf', [
            'hot_standby = on', 'checkpoint_segments = 8',
            'wal_keep_segments = 8'
        ], True)
        #fabric.api.sudo('rm -rf /data/*')
        append('/data/recovery.conf', [
            "standby_mode = 'on'",
            "primary_conninfo = 'host=%s port=5432 user=%s password=%s'" %
            (master['public_ip'][0], options['user'], options['password']),
            "trigger_file = '/data/failover'"
        ], True)
        # Seed /data from the master by piping a tarball between the two
        # machines over ssh.
        # NOTE(review): hard-coded key path 'deploy/nbc-west.pem' --
        # confirm for this deployment.
        fabric.api.local(
            '''ssh -i %s ubuntu@%s sudo tar czf - /data | ssh -i deploy/nbc-west.pem ubuntu@%s sudo tar xzf - -C /'''
            % (fabric.api.env.key_filename[0], master['public_ip'][0],
               node_dict['public_ip'][0]))
        fabric.api.sudo('chown -R postgres:postgres /data')
    fabric.api.sudo('service postgresql start')
def mysql_setup(**kwargs):
    """
    Method to set up mysql.

    This method takes kwargs that can define the stage, the name of the
    database to create, the user and password to enable, or the slave
    database record.  In the case of slave it will look up the record
    from the conf file.

    Recognised kwargs: stage, name, user, password, slave, slave_user,
    slave_password, replication.
    """
    if not _mysql_is_installed():
        fabric.api.warn(fabric.colors.yellow('MySQL must be installed.'))
        return
    # Get Parameters
    stage = kwargs.get('stage', None)
    name = kwargs.get('name', None)
    user = kwargs.get('user', None)
    password = kwargs.get('password', None)
    slave = kwargs.get('slave', None)  # Name of the master database
    slave_user = kwargs.get('slave_user', 'slave_user')
    slave_password = kwargs.get('slave_password', 'password')
    replication = kwargs.get('replication', False)  # Do Master/Slave replication
    # The root db password for setup
    root_passwd = ''
    if 'DB_PASSWD' in fabric.api.env.conf:
        root_passwd = fabric.api.env.conf['DB_PASSWD']
    # Private IP is necessary for bind-address
    private_ip = get_internal_ip()
    # Update the bind-address to the internal ip so machines on the
    # private network can connect.
    mysql_conf = '/etc/mysql/my.cnf'
    before = "bind-address[[:space:]]*=[[:space:]]*127.0.0.1"
    after = "bind-address = %s" % private_ip
    if not fabric.contrib.files.contains(mysql_conf, after):
        fabric.contrib.files.sed(mysql_conf, before, after,
                                 use_sudo=True, backup='.bkp')
    # Create the database user
    # NOTE(review): the '******' admin login looks like a redacted value
    # -- confirm the real account name expected by mysql_create_user.
    if user and password and not slave:
        mysql_create_user(user='******', password=root_passwd,
                          new_user=user, new_password=password)
    # Create the database
    if name:
        mysql_create_db(user='******', password=root_passwd, database=name)
    # If replication is True then we must do the setup
    if replication:
        PROVIDER = get_provider_dict()
        # If the replication is True and the 'slave' key is given then this
        # should be set up as a slave database and we should do a conf file
        # lookup to return values for name, user and password to use in setup
        if slave:
            if slave in PROVIDER['machines'][stage]:
                # Get the private IP of the master database
                master_ip = PROVIDER['machines'][stage][slave]['private_ip'][0]
                # Get the settings of the master database
                settings = PROVIDER['machines'][stage][slave]['services']['mysql']
                name = settings.get('name', None)
                user = settings.get('user', None)
                password = settings.get('password', None)
                # Create the database
                mysql_create_db(user='******', password=root_passwd,
                                database=name)
                # Set up a slave conf file and restart
                context = {
                    'db_name': name,
                    'db_password': slave_password,
                    'db_user': slave_user,
                    'master_ip': master_ip,
                }
                template = os.path.join(fabric.api.env.conf['FILES'],
                                        'mysqld_slave.cnf')
                fabric.contrib.files.upload_template(template, '/etc/mysql/conf.d/',
                                                     context=context, use_sudo=True)
                mysql_restart()
                # Load the data from master
                mysql_execute("""LOAD DATA FROM MASTER;""", 'root', root_passwd)
                mysql_restart()
                log_file = fabric.api.prompt('Enter the master log file name:')
                log_pos = fabric.api.prompt('Enter the master log position:')
                # NOTE(review): this %-format supplies 5 arguments but the
                # statement contains only 3 "%s" conversions (MASTER_USER and
                # MASTER_PASSWORD are the literal '******', apparently a
                # redaction artifact) -- as written this raises TypeError at
                # runtime.  Verify the intended replication credentials.
                mysql_execute("""STOP SLAVE;CHANGE MASTER TO MASTER_HOST="%s", MASTER_USER="******", MASTER_PASSWORD="******", MASTER_LOG_FILE="%s", MASTER_LOG_POS=%s;START SLAVE;""" % (master_ip, slave_user, slave_password, log_file, log_pos), 'root', root_passwd)
            else:
                fabric.api.warn(fabric.colors.yellow(
                    'The server %s is not available in %s' % (slave, stage)))
        # If the replication is True and the 'slave' key is not given then
        # this should be set up as a master database and we should correctly
        # set the values in the my.cnf file
        else:
            # Set up the accepted IP for the slave user; default to using
            # '%%', which will open all.  Note that this relies on having
            # the hostname set correctly, else default is used.
            slave_ip = '%%'
            for hostname in PROVIDER['machines'][stage]:
                if 'slave' in PROVIDER['machines'][stage][hostname]['services']['mysql']:
                    if PROVIDER['machines'][stage][hostname]['services']['mysql']['slave'] == get_hostname():
                        slave_ip = PROVIDER['machines'][stage][hostname]['private_ip'][0]
                        break
            # Set up a master conf file and restart
            context = {
                'db_name': name,
            }
            template = os.path.join(fabric.api.env.conf['FILES'],
                                    'mysqld_master.cnf')
            fabric.contrib.files.upload_template(template, '/etc/mysql/conf.d/',
                                                 context=context, use_sudo=True)
            mysql_restart()
            # Create the slave user for replication and restart
            mysql_execute("""GRANT REPLICATION SLAVE,RELOAD,SUPER ON *.* TO "%s"@"%s" IDENTIFIED BY "%s";FLUSH PRIVILEGES;""" % (slave_user, slave_ip, slave_password), 'root', root_passwd)
            mysql_restart()
            # Get the master status: lock tables so the operator can copy
            # the binlog file/position needed on the slave, then unlock.
            mysql_execute("""USE %s;FLUSH TABLES WITH READ LOCK;SHOW MASTER STATUS;""" % (name), 'root', root_passwd)
            fabric.contrib.console.confirm('Write down the log file name and position. Do you want to continue?')
            mysql_execute("""UNLOCK TABLES;""", 'root', root_passwd)