示例#1
0
def create_volume(module, ec2, zone):
    """Create an EBS volume in *zone* unless a matching one already exists.

    module -- AnsibleModule-like object supplying params and fail_json().
    ec2    -- boto EC2 connection used to create the volume and its tags.
    zone   -- availability zone for the new volume.

    Returns (volume, changed). The original fell off the end of the function
    and implicitly returned None, losing the created volume; the sibling
    variant of this function in this file returns the tuple.
    """
    changed = False
    name = module.params.get("name")
    iops = module.params.get("iops")
    encrypted = module.params.get("encrypted")
    volume_size = module.params.get("volume_size")
    volume_type = module.params.get("volume_type")
    snapshot = module.params.get("snapshot")
    # If custom iops is defined we use volume_type "io1" rather than the default of "standard"
    if iops:
        volume_type = "io1"

    volume = get_volume(module, ec2)
    if volume is None:
        try:
            if boto_supports_volume_encryption():
                volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops, encrypted)
            else:
                volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops)
            changed = True

            # create_volume returns before provisioning completes; poll until
            # EC2 reports the volume usable.
            while volume.status != "available":
                time.sleep(3)
                volume.update()

            if name:
                ec2.create_tags([volume.id], {"Name": name})
        # Python-2-only "except X, e" replaced with the portable "as" form.
        except boto.exception.BotoServerError as e:
            module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

    return volume, changed
示例#2
0
def create_volume(module, ec2, zone):
    """Ensure an EBS volume exists in *zone*; return (volume, changed)."""
    params = module.params
    changed = False
    name = params.get('name')
    iops = params.get('iops')
    encrypted = params.get('encrypted')
    volume_size = params.get('volume_size')
    volume_type = params.get('volume_type')
    snapshot = params.get('snapshot')
    # A custom iops value implies the provisioned-iops volume type.
    if iops:
        volume_type = 'io1'

    volume = get_volume(module, ec2)
    if volume is not None:
        # Nothing to do: an existing volume is returned unchanged.
        return volume, changed

    try:
        if boto_supports_volume_encryption():
            args = (volume_size, zone, snapshot, volume_type, iops, encrypted)
        else:
            args = (volume_size, zone, snapshot, volume_type, iops)
        volume = ec2.create_volume(*args)
        changed = True

        # Block until the new volume has finished provisioning.
        while volume.status != 'available':
            time.sleep(3)
            volume.update()

        if name:
            ec2.create_tags([volume.id], {"Name": name})
    except boto.exception.BotoServerError as e:
        module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))

    return volume, changed
示例#3
0
def create_volume(module, ec2, zone):
    """Create an EBS volume in *zone* if one matching the module parameters
    does not already exist.

    Returns (volume, changed). The original version fell off the end of the
    function and implicitly returned None, losing the created volume.
    """
    changed = False
    name = module.params.get('name')
    iops = module.params.get('iops')
    encrypted = module.params.get('encrypted')
    volume_size = module.params.get('volume_size')
    volume_type = module.params.get('volume_type')
    snapshot = module.params.get('snapshot')
    # If custom iops is defined we use volume_type "io1" rather than the default of "standard"
    if iops:
        volume_type = 'io1'

    volume = get_volume(module, ec2)
    if volume is None:
        try:
            if boto_supports_volume_encryption():
                volume = ec2.create_volume(volume_size, zone, snapshot,
                                           volume_type, iops, encrypted)
            else:
                volume = ec2.create_volume(volume_size, zone, snapshot,
                                           volume_type, iops)
            changed = True

            # create_volume returns before the volume is usable; poll status.
            while volume.status != 'available':
                time.sleep(3)
                volume.update()

            if name:
                ec2.create_tags([volume.id], {"Name": name})
        # Python-2-only "except X, e" replaced with the portable "as" form.
        except boto.exception.BotoServerError as e:
            module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

    return volume, changed
示例#4
0
def create_volume(module, ec2, zone):
    """Find or create an EBS volume according to the module parameters.

    With "name"/"id" the existing volume is looked up and its attachment
    state validated against "instance"; otherwise a new volume is created
    in *zone*. Returns the volume object (the original implicitly returned
    None on the creation path, losing the new volume).
    """
    name = module.params.get('name')
    id = module.params.get('id')  # NOTE: shadows builtin id(); name dictated by module params
    instance = module.params.get('instance')
    iops = module.params.get('iops')
    encrypted = module.params.get('encrypted')
    volume_size = module.params.get('volume_size')
    volume_type = module.params.get('volume_type')
    snapshot = module.params.get('snapshot')
    # If custom iops is defined we use volume_type "io1" rather than the default of "standard"
    if iops:
        volume_type = 'io1'

    # Normalise "no instance" spellings coming from playbooks.
    if instance == 'None' or instance == '':
        instance = None

    # If no instance supplied, try volume creation based on module parameters.
    if name or id:
        if iops or volume_size:
            module.fail_json(
                msg=
                "Parameters are not compatible: [id or name] and [iops or volume_size]"
            )

        # NOTE(review): assumes get_volume() never returns None when name/id
        # is supplied (it would fail_json instead) — confirm before relying on it.
        volume = get_volume(module, ec2)
        if volume.attachment_state() is not None:
            if instance is None:
                return volume
            adata = volume.attach_data
            if adata.instance_id != instance:
                module.fail_json(
                    msg="Volume %s is already attached to another instance: %s"
                    % (name or id, adata.instance_id))
            else:
                module.exit_json(
                    msg="Volume %s is already mapped on instance %s: %s" %
                    (name or id, adata.instance_id, adata.device),
                    volume_id=id,
                    device=adata.device,
                    changed=False)
    else:
        try:
            if boto_supports_volume_encryption():
                volume = ec2.create_volume(volume_size, zone, snapshot,
                                           volume_type, iops, encrypted)
            else:
                volume = ec2.create_volume(volume_size, zone, snapshot,
                                           volume_type, iops)

            # Wait for provisioning to complete before returning.
            while volume.status != 'available':
                time.sleep(3)
                volume.update()
        # Python-2-only "except X, e" replaced with the portable "as" form.
        except boto.exception.BotoServerError as e:
            module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

    return volume
示例#5
0
	def create_and_attach_volume(self, instance, volume_size, device_name):
		"""Create a new EBS volume and attach it to *instance*.

		instance    -- boto instance object the volume will be attached to.
		volume_size -- size (in GB) of the new volume.
		device_name -- device name the volume will be referred to by on
		               the instance.
		"""
		# str(instance.region) looks like "RegionInfo:us-east-1"; the part
		# after the colon is the region name used for the API connection.
		region_name = str(instance.region).split(":")[1]
		ec2 = boto.ec2.connect_to_region(region_name)

		# The volume must live in the same availability zone as the instance.
		volume = ec2.create_volume(volume_size, instance.placement)

		# create_volume returns before provisioning completes; poll status.
		while volume.status != 'available':
			time.sleep(5)
			volume.update()

		volume.attach(instance.id, device_name)
		return volume
def create_volume(module, ec2, zone):
    """Look up or create an EBS volume based on the module parameters.

    When "name" or "id" is given the existing volume is fetched and its
    attachment checked against "instance"; otherwise a fresh volume is
    created in *zone*. Returns the volume object (previously the creation
    path implicitly returned None).
    """
    name = module.params.get('name')
    id = module.params.get('id')  # NOTE: shadows builtin id(); name dictated by module params
    instance = module.params.get('instance')
    iops = module.params.get('iops')
    encrypted = module.params.get('encrypted')
    volume_size = module.params.get('volume_size')
    volume_type = module.params.get('volume_type')
    snapshot = module.params.get('snapshot')
    # If custom iops is defined we use volume_type "io1" rather than the default of "standard"
    if iops:
        volume_type = 'io1'

    # Normalise "no instance" spellings coming from playbooks.
    if instance == 'None' or instance == '':
        instance = None

    # If no instance supplied, try volume creation based on module parameters.
    if name or id:
        if iops or volume_size:
            module.fail_json(msg = "Parameters are not compatible: [id or name] and [iops or volume_size]")

        # NOTE(review): assumes get_volume() never returns None when name/id
        # is supplied — confirm before relying on it.
        volume = get_volume(module, ec2)
        if volume.attachment_state() is not None:
            if instance is None:
                return volume
            adata = volume.attach_data
            if adata.instance_id != instance:
                module.fail_json(msg = "Volume %s is already attached to another instance: %s"
                                 % (name or id, adata.instance_id))
            else:
                module.exit_json(msg="Volume %s is already mapped on instance %s: %s" %
                                 (name or id, adata.instance_id, adata.device),
                                 volume_id=id,
                                 device=adata.device,
                                 changed=False)
    else:
        try:
            if boto_supports_volume_encryption():
                volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops, encrypted)
            else:
                volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops)

            # Poll until the new volume is usable.
            while volume.status != 'available':
                time.sleep(3)
                volume.update()
        # Python-2-only "except X, e" replaced with the portable "as" form.
        except boto.exception.BotoServerError as e:
            module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))

    return volume
示例#7
0
def restore_volumes():
    """Create a new volume from the most recent snapshot of the bad volume.

    Returns the newly created volume; the original discarded the result.
    NOTE(review): "region" is read from an outer scope and the volume id is
    hard-coded — confirm both against the surrounding script.
    """
    ec2 = boto.ec2.connect_to_region(region)
    volume_id = 'vol-f95619ec'
    bad_volume = ec2.get_all_volumes([volume_id])[0]
    snaps = bad_volume.snapshots()
    # Sort chronologically so the last element is the newest snapshot.
    snaps.sort(key=lambda snap: snap.start_time)
    latest_snap = snaps[-1]
    new_volume = ec2.create_volume(bad_volume.size, bad_volume.zone, latest_snap)
    return new_volume
示例#8
0
def create_volume(
        region='us-west-2', zone='us-west-2a', size_gb=None, snapshot=None,
        volume_type='standard', iops=None, dry_run=False):
    """Create and return an EBS volume of *size_gb* GB in *zone*.

    Optionally seeds the volume from *snapshot*; *iops* applies only to
    provisioned-iops volume types.
    """
    connection = boto.ec2.connect_to_region(region)
    return connection.create_volume(
        size_gb,
        zone,
        snapshot=snapshot,
        volume_type=volume_type,
        iops=iops,
        dry_run=dry_run,
    )
示例#9
0
def restore_volumes():
    """Recreate a volume from the newest snapshot of the hard-coded volume.

    Returns the new volume (the original discarded it).
    NOTE(review): "region" comes from an outer scope — confirm it is defined
    in the surrounding script.
    """
    ec2 = boto.ec2.connect_to_region(region)
    volume_id = 'vol-f95619ec'
    bad_volume = ec2.get_all_volumes([volume_id])[0]
    snaps = bad_volume.snapshots()
    # Oldest first; the last element is the most recent snapshot.
    snaps.sort(key=lambda snap: snap.start_time)
    latest_snap = snaps[-1]
    new_volume = ec2.create_volume(bad_volume.size, bad_volume.zone,
                                   latest_snap)
    return new_volume
示例#10
0
def create_volume(module, ec2, zone):
    """Create an EBS volume in *zone* unless one already exists.

    Supports encryption with an optional KMS key, and applies the "tags"
    module parameter (plus a "Name" tag) to the new volume.
    Returns (volume, changed).
    """
    changed = False
    name = module.params.get('name')
    iops = module.params.get('iops')
    encrypted = module.params.get('encrypted')
    kms_key_id = module.params.get('kms_key_id')
    volume_size = module.params.get('volume_size')
    volume_type = module.params.get('volume_type')
    snapshot = module.params.get('snapshot')
    # Guard against "tags" being absent/None: tags["Name"] = name below would
    # otherwise raise TypeError whenever only "name" is supplied.
    tags = module.params.get('tags') or {}
    # If custom iops is defined we use volume_type "io1" rather than the default of "standard"
    if iops:
        volume_type = 'io1'

    volume = get_volume(module, ec2)
    if volume is None:
        try:
            if boto_supports_volume_encryption():
                if kms_key_id is not None:
                    volume = ec2.create_volume(volume_size, zone, snapshot,
                                               volume_type, iops, encrypted,
                                               kms_key_id)
                else:
                    volume = ec2.create_volume(volume_size, zone, snapshot,
                                               volume_type, iops, encrypted)
                changed = True
            else:
                volume = ec2.create_volume(volume_size, zone, snapshot,
                                           volume_type, iops)
                changed = True

            # Poll until the volume has finished provisioning.
            while volume.status != 'available':
                time.sleep(3)
                volume.update()

            if name:
                tags["Name"] = name
            if tags:
                ec2.create_tags([volume.id], tags)
        except boto.exception.BotoServerError as e:
            module.fail_json_aws(e)

    return volume, changed
示例#11
0
def create_volume(ec2, region, volume_size):
    """Create a *volume_size* GB EBS volume and wait until it is available.

    NOTE(review): "device_name" below is read from an outer scope, and
    *region* is passed where boto's create_volume expects an availability
    zone — confirm both against the callers.
    """
    # Python-2 print statement converted to the form valid on both 2 and 3.
    print('Create and attach %s EBS volume to %s at %s' % (volume_size, region, device_name))
    # Determine the Availability Zone of the instance

    volume = ec2.create_volume(volume_size, region)
    # Wait for the volume to be created.
    while volume.status != 'available':
        time.sleep(5)
        volume.update()
    return volume
示例#12
0
def create_volume(volume_size, device_name):
    """Create an EBS volume in the first instance's AZ and attach it.

    NOTE(review): "region" comes from an outer scope and the attach is gated
    on one hard-coded instance id — confirm both are intended.
    """
    # Fixed the duplicated "ec2 = ec2 =" assignment in the original.
    ec2 = boto.ec2.connect_to_region(region)
    azone = ec2.get_only_instances()[0].placement
    volume = ec2.create_volume(volume_size, azone)
    # Progress marker printed while waiting for provisioning to finish.
    while volume.status != 'available':
        print('#')
        time.sleep(5)
        volume.update()
    instance_id = ec2.get_only_instances()[0].id
    if instance_id == u'i-6b46fc53':
        volume.attach(instance_id, device_name)
示例#13
0
def create_volume(volume_size, device_name):
    """Create an EBS volume next to the first instance and attach it to it.

    NOTE(review): relies on an outer-scope "region" and a hard-coded
    instance id guard — confirm against the surrounding script.
    """
    # The original assigned "ec2 = ec2 = ..."; the duplicate is removed.
    ec2 = boto.ec2.connect_to_region(region)
    azone = ec2.get_only_instances()[0].placement
    volume = ec2.create_volume(volume_size, azone)
    # Print a progress marker while provisioning completes.
    while volume.status != 'available':
        print('#')
        time.sleep(5)
        volume.update()
    instance_id = ec2.get_only_instances()[0].id
    if instance_id == u'i-6b46fc53':
        volume.attach(instance_id, device_name)
示例#14
0
def create_and_attach_volume(instance, volume_size, device_name):
    """Create a volume in *instance*'s AZ, wait for it, attach, return it."""
    connection = instance.connection
    # The new volume must be created in the instance's availability zone.
    new_volume = connection.create_volume(volume_size, instance.placement)

    # Poll until creation has finished on the EC2 side.
    while new_volume.status != 'available':
        time.sleep(5)
        new_volume.update()

    new_volume.attach(instance.id, device_name)
    return new_volume
示例#15
0
def create_and_attach_volume(instance, volume_size, device_name):
    """Create an EBS volume in *instance*'s availability zone, wait until it
    is ready, attach it to the instance, and return the volume object.
    """
    def _wait_until_available(vol):
        # create_volume returns before provisioning completes.
        while vol.status != 'available':
            time.sleep(5)
            vol.update()

    conn = instance.connection
    # Volume and instance have to share an availability zone.
    vol = conn.create_volume(volume_size, instance.placement)
    _wait_until_available(vol)
    vol.attach(instance.id, device_name)
    return vol
def create_attach_app_vol(ec2, instance_id, region, zone, app_vol):
    """Create an application EBS volume from a snapshot, attach and mount it.

    app_vol must carry 'dev_name', 'mount_point', 'snapshot_id' and
    'vol_size'; 'delete_on_terminate' is optional and defaults to 'false'.
    """
    # check that we have enough information to continue
    if 'dev_name' in app_vol and 'mount_point' in app_vol and 'snapshot_id' in app_vol and 'vol_size' in app_vol:
        dev_name    = app_vol['dev_name']
        mount_point = app_vol['mount_point']
        snapshot_id = app_vol['snapshot_id']
        vol_size    = app_vol['vol_size']
    else:
        raise Exception("Not enough information to create application volume")

    # Default added: the original left delete_on_terminate unbound when the
    # key was missing, causing a NameError at the check further down.
    delete_on_terminate = app_vol.get('delete_on_terminate', 'false')

    # TODO: check that we don't already have a volume created and attached

    # create a volume based off the snapshot and attach it
    print("   creating volume...")
    vol = ec2.create_volume(vol_size, zone, snapshot_id)
    print("   attaching volume...")
    vol.attach(instance_id, dev_name)

    # try up to 5 times to mount (giving the attach time to finish)
    for retries in range(0, 5):
        # give the attach time to finish
        time.sleep(5)

        print("Trying to mount...")
        # mount the volume and break if we're successful
        retcode = subprocess.call(['mount', mount_point])
        if retcode == 0:
            break

    # NOTE(review): this prints success even if every mount attempt failed —
    # consider checking the final retcode here.
    print("Mounted app volume successfully")

    # set volume to delete when we terminate the instance
    # run something like:
    #   ec2-modify-instance-attribute --region us-west-1 --block-device-mapping /dev/sdi=:true i-dba8139c
    if delete_on_terminate == 'true':
        print("Setting delete on terminate")

        # a vain attempt to make sure we can change the instance attribute to delete our app volume
        # on termination of the instance
        time.sleep(10)

        retcode = subprocess.call(['ec2-modify-instance-attribute',
                                   '--region', region,
                                   '--block-device-mapping', dev_name + '=:true',
                                   instance_id])
示例#17
0
def create_volume(region='us-west-2',
                  zone='us-west-2a',
                  size_gb=None,
                  snapshot=None,
                  volume_type='standard',
                  iops=None,
                  dry_run=False):
    """Create and return an EBS volume of *size_gb* GB in *zone*.

    The volume may optionally be seeded from *snapshot*; *iops* is only
    meaningful for provisioned-iops volume types.
    """
    conn = boto.ec2.connect_to_region(region)
    volume = conn.create_volume(size_gb, zone, snapshot=snapshot,
                                volume_type=volume_type, iops=iops,
                                dry_run=dry_run)
    return volume
示例#18
0
 def create(cls, **params):
     """Interactively gather volume parameters, create the EBS volume,
     and persist and return a new logical-volume record for it.
     """
     CommandLineGetter().get(cls, params)
     region = params.get('region')
     zone = params.get('zone')
     size = params.get('size')
     ec2 = region.connect()
     ebs_volume = ec2.create_volume(size, zone.name)

     vol = cls()
     vol.ec2 = ec2
     vol.volume_id = ebs_volume.id
     vol.name = params.get('name')
     vol.mount_point = params.get('mount_point')
     vol.device = params.get('device')
     vol.region_name = region.name
     vol.put()
     return vol
示例#19
0
 def create(cls, **params):
     """Prompt for any missing parameters, create the backing EBS volume,
     then store and return the new logical volume record.
     """
     getter = CommandLineGetter()
     getter.get(cls, params)

     region = params.get('region')
     ec2 = region.connect()
     zone = params.get('zone')
     size = params.get('size')
     ebs_volume = ec2.create_volume(size, zone.name)

     record = cls()
     record.ec2 = ec2
     record.volume_id = ebs_volume.id
     record.name = params.get('name')
     record.mount_point = params.get('mount_point')
     record.device = params.get('device')
     record.region_name = region.name
     record.put()
     return record
示例#20
0
 def create(cls, **params):
     """Collect missing parameters from the command line, create the EBS
     volume, and persist a record of it (including its zone name).
     """
     getter = CommandLineGetter()
     getter.get(cls, params)
     region = params.get("region")
     zone = params.get("zone")
     size = params.get("size")
     conn = region.connect()
     ebs = conn.create_volume(size, zone.name)

     record = cls()
     record.ec2 = conn
     record.volume_id = ebs.id
     record.name = params.get("name")
     record.mount_point = params.get("mount_point")
     record.device = params.get("device")
     record.region_name = region.name
     record.zone_name = zone.name
     record.put()
     return record
示例#21
0
 def create_from_snapshot(self, name, snapshot, size=None):
     """Create a new logical volume from *snapshot*.

     The new volume is at least as large as this one (a smaller or omitted
     *size* is rounded up to self.size). Returns the new Volume record.
     """
     # Explicit None check: the old "if size < self.size" relied on Python 2
     # ordering None below ints, and raises TypeError on Python 3.
     if size is None or size < self.size:
         size = self.size
     ec2 = self.get_ec2_connection()
     # "is None" instead of "== None" (identity check for the singleton).
     if self.zone_name is None or self.zone_name == '':
         # deal with the migration case where the zone is not set in the logical volume:
         current_volume = ec2.get_all_volumes([self.volume_id])[0]
         self.zone_name = current_volume.zone
     ebs_volume = ec2.create_volume(size, self.zone_name, snapshot)
     v = Volume()
     v.ec2 = self.ec2
     v.volume_id = ebs_volume.id
     v.name = name
     v.mount_point = self.mount_point
     v.device = self.device
     v.region_name = self.region_name
     v.zone_name = self.zone_name
     v.put()
     return v
示例#22
0
 def create_from_snapshot(self, name, snapshot, size=None):
     """Clone this logical volume from *snapshot* into a new Volume record.

     *size* smaller than (or None, i.e. less than) self.size is bumped up
     to self.size so the clone can hold the snapshot.
     """
     # Explicit None handling: "size < self.size" with size=None only worked
     # under Python 2's None-before-int ordering; it raises on Python 3.
     if size is None or size < self.size:
         size = self.size
     ec2 = self.get_ec2_connection()
     # "is None" replaces the "== None" anti-idiom.
     if self.zone_name is None or self.zone_name == '':
         # deal with the migration case where the zone is not set in the logical volume:
         current_volume = ec2.get_all_volumes([self.volume_id])[0]
         self.zone_name = current_volume.zone
     ebs_volume = ec2.create_volume(size, self.zone_name, snapshot)
     v = Volume()
     v.ec2 = self.ec2
     v.volume_id = ebs_volume.id
     v.name = name
     v.mount_point = self.mount_point
     v.device = self.device
     v.region_name = self.region_name
     v.zone_name = self.zone_name
     v.put()
     return v
示例#23
0
def create_ebs(ec2, size, zone, max_timeout):
    """Create a gp2 EBS volume and poll until it is 'available'.

    Returns the volume id on success; raises if the volume is still
    unavailable after *max_timeout* seconds.
    """
    # timer variables
    interval = 5  # seconds between status polls

    # create a volume
    response = ec2.create_volume(size=size, volume_type='gp2', zone=zone)
    # Python-2 print statements converted to the form valid on both 2 and 3.
    print("Created volume {volume}".format(volume=response.id))

    # poll until status is 'available' or timeout
    if response is not None:
        start_time = time.time()
        while max_timeout >= (time.time() - start_time):
            status = get_ebs_status(ec2, response.id)
            print(' * Status of {0} is {1}'.format(response.id, status))

            if status == 'available':
                print('==================')
                return response.id
            else:
                time.sleep(interval)

        raise Exception('Timed out. Volume {0} is not available after {1} seconds.'.format(response.id, max_timeout))
    return
示例#24
0
def get_ebs_volumes(ebs_vol_list, volcount, volsize, volume_type='standard'):
    """Work with Amazon to create EBS volumes, tag them and attach them to the local host"""

    # How large will each volume be?
    individual_vol_size = int(volsize / volcount)

    # Some local instance ID info..
    zone = commands.getoutput("wget -q -O - http://169.254.169.254/latest/meta-data/placement/availability-zone")
    region = zone[:-1]  # strip trailing zone letter to get the region name
    instanceid = commands.getoutput("wget -q -O - http://169.254.169.254/latest/meta-data/instance-id")
    available_ebs_vol_list = []
    attached_ebs_vol_list = []

    # Open our EC2 connection
    print("INFO: Connecting to Amazon...")
    ec2 = boto.ec2.connect_to_region(region)

    # Make sure that the device list we got is good. If a device exists already,
    # remove it from the potential 'device targets'
    for potential_volume in ebs_vol_list:
        if os.path.exists(potential_volume):
            print("INFO: (%s) is already an attached EBS volume." % (potential_volume))
            attached_ebs_vol_list.append(potential_volume)
        else:
            print("INFO: (%s) is available as a disk target." % (potential_volume))
            available_ebs_vol_list.append(potential_volume)

    # Reverse our available_ebs_vol_list so that we can 'pop' from the beginning
    available_ebs_vol_list.reverse()

    # If we have any EBS volumes already mapped, then just pass them back. Do not create new ones,
    # and do not do anything with them. This script does not support handling multiple sets of EBS
    # volumes.
    if len(attached_ebs_vol_list) > 0:
        print("WARNING: EBS volumes are already attached to this host. Passing them back and not touching them.")
        return attached_ebs_vol_list

    # Make sure we have enough target devices available
    if volcount > len(available_ebs_vol_list):
        print("ERROR: Do not have enough local volume targets available to attach the drives.")
        sys.exit(1)

    # For each volume..
    for i in range(0, volcount):
        print("INFO: Requesting EBS volume creation (%s gb)..." % (individual_vol_size))

        # 30:1 GB:IOP ratio, with a max of 4000
        iops = min(individual_vol_size * 30, 4000)

        if volume_type == 'io1':
            print("INFO: Requesting %s provisioned IOPS..." % iops)
            vol = ec2.create_volume(individual_vol_size, zone,
                                    volume_type=volume_type,
                                    iops=iops)
        else:
            vol = ec2.create_volume(individual_vol_size, zone,
                                    volume_type=volume_type)

        # Wait until the volume is 'available' before attaching
        while vol.status != u'available':
            time.sleep(1)
            print("INFO: Waiting for %s to become available..." % vol)
            vol.update()

        print("INFO: Volume %s status is now: %s..." % (vol, vol.status))

        # Grab a volume off of our stack of available vols..
        dest = available_ebs_vol_list.pop()

        # Attach the volume and wait for it to fully attach
        print("INFO: (%s) Attaching EBS volume to our instance ID (%s) to %s" % (vol.id, instanceid, dest))
        try:
            vol.attach(instanceid, dest.replace('xvd', 'sd'))
        # Narrowed from a bare "except:", which also swallowed SystemExit
        # and KeyboardInterrupt. One retry after a short wait is kept.
        except Exception:
            time.sleep(5)
            vol.attach(instanceid, dest.replace('xvd', 'sd'))

        while not hasattr(vol.attach_data, 'instance_id'):
            time.sleep(1)
            vol.update()
        while str(vol.attach_data.instance_id) != instanceid or not os.path.exists(dest):
            print("INFO: (%s) Volume attaching..." % (vol.id))
            time.sleep(1)
            vol.update()

        # Sleep a few more seconds just to make sure the OS has seen the volume
        time.sleep(1)

        # Add the volume to our list of volumes that were created
        attached_ebs_vol_list.append(dest)
        print("INFO: (%s) Volume attached!" % (vol.id))

        # Now, tag the volumes and move on
        tags = {}
        tags["Name"] = "%s:%s" % (socket.gethostname(), dest)
        print("INFO: (%s) Tagging EBS volume with these tags: %s" % (vol.id, tags))
        ec2.create_tags(str(vol.id), tags)

    # All done. Return whatever volumes were created and attached.
    return attached_ebs_vol_list
示例#25
0
def postgresql_install(id, node_dict, stage, **options):
    """Install and configure PostgreSQL on the current fabric host.

    id        -- identifier used to tag the EBS volumes created for /data.
    node_dict -- provider description of this node; 'id' and 'public_ip'
                 are read here.
    stage     -- deployment stage key into config['machines'].
    options   -- service options; a 'slave' entry makes this node replicate
                 from the named master, 'simple' skips the RAID/XFS setup.

    NOTE(review): Python-2-era code ('iteritems' below); parameter *id*
    shadows the builtin of the same name.
    """

    if _postgresql_is_installed():
        fabric.api.warn(
            fabric.colors.yellow('PostgreSQL is already installed.'))
        return

    config = get_provider_dict()
    if 'slave' in options:
        # Inherit connection settings from the configured master machine.
        master = config['machines'][stage][options['slave']]
        options.update(master['services']['postgresql'])

    package_add_repository('ppa:pitti/postgresql')
    package_install(['postgresql', 'python-psycopg2'])

    # Figure out cluster name
    output = fabric.api.run('pg_lsclusters -h')
    version, cluster = output.split()[:2]

    if 'ec2' in fabric.api.env.conf['PROVIDER']:
        if not options.get('simple'):
            package_install('xfsprogs')
            package_install('mdadm', '--no-install-recommends')

            # Create two ebs volumes
            import boto.ec2
            # config['location'] ends with the zone letter; strip it to get
            # the region name for the API connection.
            ec2 = boto.ec2.connect_to_region(
                config['location'][:-1],
                aws_access_key_id=fabric.api.env.conf['AWS_ACCESS_KEY_ID'],
                aws_secret_access_key=fabric.api.env.
                conf['AWS_SECRET_ACCESS_KEY'])

            tag1 = u'%s-1' % id
            tag2 = u'%s-2' % id
            # Only create each volume when none with the same Name tag exists,
            # so the install can be re-run safely.
            if not any(vol for vol in ec2.get_all_volumes()
                       if vol.tags.get(u'Name') == tag1):
                volume1 = ec2.create_volume(
                    options.get('max-size', 10) / 2, config['location'])
                volume1.add_tag('Name', tag1)
                volume1.attach(node_dict['id'], '/dev/sdf')
            if not any(vol for vol in ec2.get_all_volumes()
                       if vol.tags.get(u'Name') == tag2):
                volume2 = ec2.create_volume(
                    options.get('max-size', 10) / 2, config['location'])
                volume2.add_tag('Name', tag2)
                volume2.attach(node_dict['id'], '/dev/sdg')

            # Give the attachments time to appear as block devices.
            time.sleep(10)

            # RAID 0 together the EBS volumes, and format the result as xfs.  Mount at /data.
            if not fabric.contrib.files.exists('/dev/md0', True):
                fabric.api.sudo(
                    'mdadm --create /dev/md0 --level=0 --raid-devices=2 /dev/sdf /dev/sdg'
                )
                fabric.api.sudo('mkfs.xfs /dev/md0')

            # Add mountpoint
            if not fabric.contrib.files.exists('/data'):
                fabric.api.sudo('mkdir -p /data')
                fabric.api.sudo('chown postgres:postgres /data')
                fabric.api.sudo('chmod 644 /data')

            # Add to fstab and mount
            append('/etc/fstab', '/dev/md0  /data  auto  defaults  0  0', True)
            with fabric.api.settings(warn_only=True):
                fabric.api.sudo('mount /data')

            # Move cluster/dbs to /data
            if fabric.api.run('pg_lsclusters -h').split()[5] != '/data':
                fabric.api.sudo('pg_dropcluster --stop %s %s' %
                                (version, cluster))
                fabric.api.sudo(
                    'pg_createcluster --start -d /data -e UTF-8 %s %s' %
                    (version, cluster))

    else:
        fabric.api.warn(
            fabric.colors.yellow(
                'PostgreSQL advanced drive setup (RAID 0 + XFS) is not currently supported on non-ec2 instances'
            ))

    fabric.api.sudo('service postgresql stop')

    # Set up postgres config files - Allow global listening (have a firewall!) and local ubuntu->your user connections
    pg_dir = '/etc/postgresql/%s/%s/' % (version, cluster)
    fabric.contrib.files.comment(pg_dir + 'postgresql.conf',
                                 'listen_addresses', True)
    append(pg_dir + 'postgresql.conf', "listen_addresses = '*'", True)

    append(pg_dir + 'pg_hba.conf', "host all all 0.0.0.0/0 md5", True)
    fabric.contrib.files.sed(pg_dir + 'pg_hba.conf',
                             "ident",
                             "trust",
                             use_sudo=True)

    # Figure out if we're a master
    if 'slave' not in options and any(
            'slave' in values.get('services', {}).get('postgresql', {})
            for name, values in config['machines'][stage].iteritems()):
        # We're a master!

        append(pg_dir + 'postgresql.conf', [
            'wal_level = hot_standby', 'max_wal_senders = 1',
            'checkpoint_segments = 8', 'wal_keep_segments = 8'
        ], True)

        append(pg_dir + 'pg_hba.conf', "host replication all 0.0.0.0/0 md5",
               True)

    elif 'slave' in options:
        # We're a slave!

        append(pg_dir + 'postgresql.conf', [
            'hot_standby = on', 'checkpoint_segments = 8',
            'wal_keep_segments = 8'
        ], True)

        #fabric.api.sudo('rm -rf /data/*')
        append('/data/recovery.conf', [
            "standby_mode = 'on'",
            "primary_conninfo = 'host=%s port=5432 user=%s password=%s'" %
            (master['public_ip'][0], options['user'], options['password']),
            "trigger_file = '/data/failover'"
        ], True)

        fabric.api.local(
            '''ssh -i %s ubuntu@%s sudo tar czf - /data | ssh -i deploy/nbc-west.pem ubuntu@%s sudo tar xzf - -C /'''
            % (fabric.api.env.key_filename[0], master['public_ip'][0],
               node_dict['public_ip'][0]))
        fabric.api.sudo('chown -R postgres:postgres /data')

    fabric.api.sudo('service postgresql start')
示例#26
0
def postgresql_install(id, node_dict, stage, **options):
	"""Install and configure PostgreSQL on the current fabric target host.

	On EC2 (unless options['simple'] is set) two EBS volumes are created,
	striped into a RAID 0 md device, formatted as XFS, mounted at /data,
	and the default cluster is recreated there.  The host is then
	configured as a replication master or slave depending on *options*.

	:param id: logical machine name; used to build the EBS volume Name tags.
	:param node_dict: provider description of this node; must contain
		'id' (instance id) and 'public_ip' (list of addresses).
	:param stage: deployment stage key into config['machines'].
	:param options: service options.  Keys read here: 'slave' (name of the
		master machine -- marks this node a slave), 'simple' (skip the
		EBS/RAID/XFS setup), 'max-size' (total data size in GB, default
		10), 'user'/'password' (replication credentials for recovery.conf).
	"""

	# Nothing to do if a previous run already installed PostgreSQL.
	if _postgresql_is_installed():
		fabric.api.warn(fabric.colors.yellow('PostgreSQL is already installed.'))
		return
	
	config = get_provider_dict()
	# A slave inherits its master's postgresql service settings so both
	# ends of replication agree on user/password etc.
	if 'slave' in options:
		master = config['machines'][stage][options['slave']]
		options.update(master['services']['postgresql'])
	
	package_add_repository('ppa:pitti/postgresql')
	package_install(['postgresql', 'python-psycopg2'])
	
	# Figure out cluster name; version and cluster are reused below for
	# pg_dropcluster/pg_createcluster and the config-file paths.
	output = fabric.api.run('pg_lsclusters -h')
	version, cluster = output.split()[:2]
	
	if 'ec2' in fabric.api.env.conf['PROVIDER']:
		if not options.get('simple'):
			package_install('xfsprogs')
			package_install('mdadm', '--no-install-recommends')
			
			# Create two ebs volumes (half of 'max-size' each), unless
			# volumes with the expected Name tags already exist.
			import boto.ec2
			# NOTE(review): config['location'][:-1] assumes the location is an
			# availability zone (e.g. 'us-east-1a') whose region name is
			# obtained by dropping the final letter -- confirm for all setups.
			ec2 = boto.ec2.connect_to_region(config['location'][:-1],
								aws_access_key_id = fabric.api.env.conf['AWS_ACCESS_KEY_ID'],
								aws_secret_access_key = fabric.api.env.conf['AWS_SECRET_ACCESS_KEY'])
			
			tag1 = u'%s-1' % id
			tag2 = u'%s-2' % id
			if not any(vol for vol in ec2.get_all_volumes() if vol.tags.get(u'Name') == tag1):
				volume1 = ec2.create_volume(options.get('max-size', 10)/2, config['location'])
				volume1.add_tag('Name', tag1)
				volume1.attach(node_dict['id'], '/dev/sdf')
			if not any(vol for vol in ec2.get_all_volumes() if vol.tags.get(u'Name') == tag2):
				volume2 = ec2.create_volume(options.get('max-size', 10)/2, config['location'])
				volume2.add_tag('Name', tag2)
				volume2.attach(node_dict['id'], '/dev/sdg')
			
			# Fixed delay to give the attachments time to show up as devices.
			time.sleep(10)
			
			# RAID 0 together the EBS volumes, and format the result as xfs.  Mount at /data.
			if not fabric.contrib.files.exists('/dev/md0', True):
				fabric.api.sudo('mdadm --create /dev/md0 --level=0 --raid-devices=2 /dev/sdf /dev/sdg')
				fabric.api.sudo('mkfs.xfs /dev/md0')
			
			# Add mountpoint
			if not fabric.contrib.files.exists('/data'):
				fabric.api.sudo('mkdir -p /data')
				fabric.api.sudo('chown postgres:postgres /data')
				fabric.api.sudo('chmod 644 /data')
			
			# Add to fstab and mount (warn_only: mount fails harmlessly when
			# /data is already mounted).
			append('/etc/fstab', '/dev/md0  /data  auto  defaults  0  0', True)
			with fabric.api.settings(warn_only = True):
				fabric.api.sudo('mount /data')
	
			# Move cluster/dbs to /data (field 5 of `pg_lsclusters -h` output
			# is the cluster's data directory).
			if fabric.api.run('pg_lsclusters -h').split()[5] != '/data':
				fabric.api.sudo('pg_dropcluster --stop %s %s' % (version, cluster))
				fabric.api.sudo('pg_createcluster --start -d /data -e UTF-8 %s %s' % (version, cluster))
	
	else:
		fabric.api.warn(fabric.colors.yellow('PostgreSQL advanced drive setup (RAID 0 + XFS) is not currently supported on non-ec2 instances'))

	# Stop the server while we edit its configuration files.
	fabric.api.sudo('service postgresql stop')

	# Set up postgres config files - Allow global listening (have a firewall!) and local ubuntu->your user connections
	pg_dir = '/etc/postgresql/%s/%s/' % (version, cluster)
	fabric.contrib.files.comment(pg_dir + 'postgresql.conf', 'listen_addresses', True)
	append(pg_dir + 'postgresql.conf', "listen_addresses = '*'", True)

	append(pg_dir + 'pg_hba.conf', "host all all 0.0.0.0/0 md5", True)
	fabric.contrib.files.sed(pg_dir + 'pg_hba.conf', "ident", "trust", use_sudo=True)
	
	# Figure out if we're a master: not marked as a slave ourselves, and at
	# least one machine in this stage names a 'slave' in its postgresql
	# service config.
	if 'slave' not in options and any('slave' in values.get('services', {}).get('postgresql', {})
									  for name, values in config['machines'][stage].iteritems()):
		# We're a master!  Enable WAL shipping for a hot-standby slave and
		# allow replication connections.
		
		append(pg_dir + 'postgresql.conf', [
			'wal_level = hot_standby',
			'max_wal_senders = 1',
			'checkpoint_segments = 8',
			'wal_keep_segments = 8'], True)
		
		append(pg_dir + 'pg_hba.conf', "host replication all 0.0.0.0/0 md5", True)
		
	elif 'slave' in options:
		# We're a slave!  Accept read-only queries while replaying WAL.
		
		append(pg_dir + 'postgresql.conf', [
			'hot_standby = on',
			'checkpoint_segments = 8',
			'wal_keep_segments = 8'], True)
		
		#fabric.api.sudo('rm -rf /data/*')
		# recovery.conf points streaming replication at the master; creating
		# the trigger_file later promotes this slave to master.
		append('/data/recovery.conf', [
			"standby_mode = 'on'",
			"primary_conninfo = 'host=%s port=5432 user=%s password=%s'" % (master['public_ip'][0], options['user'], options['password']),
			"trigger_file = '/data/failover'"], True)
		
		# Seed the slave's data directory by streaming a tar of /data from
		# the master over ssh (command runs on the local machine).
		# NOTE(review): the hard-coded deploy/nbc-west.pem key path looks
		# environment-specific -- confirm before reuse.
		fabric.api.local('''ssh -i %s ubuntu@%s sudo tar czf - /data | ssh -i deploy/nbc-west.pem ubuntu@%s sudo tar xzf - -C /''' % (fabric.api.env.key_filename[0], master['public_ip'][0], node_dict['public_ip'][0]))
		fabric.api.sudo('chown -R postgres:postgres /data')
	
	fabric.api.sudo('service postgresql start')
示例#27
0
def createVolume(size):
	"""Create an EBS volume of *size* GB in the module-level ``eucaZone``.

	Uses the module-level ``ec2`` connection.  Returns the result of
	``ec2.create_volume`` unchanged (presumably a boto Volume object --
	verify against the boto version in use).
	"""
	new_volume = ec2.create_volume(
		size=size,
		zone=eucaZone,
		snapshot=None,
		volume_type=None,
		iops=None,
	)
	return new_volume
示例#28
0
    if result.exit_code != 0:
        _err_exit(msg)

if __name__ == "__main__":
    # Script: build a reusable user-disk template volume.  Creates a gp2 EBS
    # volume, tags it, attaches it to this instance, formats it ext4 and
    # prepares a mount point.  Helper functions (_status_err_exit,
    # _wait_for_status, _wait_for_device, _sh_err_exit, _err_exit) are
    # defined elsewhere in this file.
    REGION = 'us-east-1'
    ZONE = 'us-east-1a'
    DEVICE = '/dev/xvdcz'
    MOUNT = '/tmp/xvdcz'
    DISK_SZ = 10  # volume size in GB

    # Must run on an EC2 instance: the instance id comes from the
    # instance metadata service.
    instance_id = boto.utils.get_instance_metadata()['instance-id']
    print("running on instance " + instance_id)

    ec2 = boto.ec2.connect_to_region(REGION)
    print("creating volume...")
    vol = ec2.create_volume(DISK_SZ, ZONE, volume_type='gp2')
    # Abort with an error message unless the volume reaches 'available'.
    _status_err_exit(vol, 'available', 'creating volume')
    print("created volume " + vol.id)

    print("adding tags...")
    ec2.create_tags([vol.id], {"Name": 'jbox_user_disk_template'})

    print("attaching at " + DEVICE + " ...")
    ec2.attach_volume(vol.id, instance_id, DEVICE)
    # Wait for both the API-side status and the local device node.
    if (not _wait_for_status(vol, 'in-use')) or (not _wait_for_device(DEVICE)):
        _err_exit("attaching at " + DEVICE)

    # Format the new volume as ext4 via `sudo mkfs -t ext4` (sh module).
    _sh_err_exit(lambda: sh.sudo.mkfs(DEVICE, t="ext4"), 'making ext4 file system')

    if not os.path.exists(MOUNT):
        os.makedirs(MOUNT)
示例#29
0
        _err_exit(msg)


if __name__ == "__main__":
    REGION = 'us-east-1'
    ZONE = 'us-east-1a'
    DEVICE = '/dev/xvdcz'
    MOUNT = '/tmp/xvdcz'
    DISK_SZ = 10

    instance_id = boto.utils.get_instance_metadata()['instance-id']
    print("running on instance " + instance_id)

    ec2 = boto.ec2.connect_to_region(REGION)
    print("creating volume...")
    vol = ec2.create_volume(DISK_SZ, ZONE, volume_type='gp2')
    _status_err_exit(vol, 'available', 'creating volume')
    print("created volume " + vol.id)

    print("adding tags...")
    ec2.create_tags([vol.id], {"Name": 'jbox_user_disk_template'})

    print("attaching at " + DEVICE + " ...")
    ec2.attach_volume(vol.id, instance_id, DEVICE)
    if (not _wait_for_status(vol, 'in-use')) or (not _wait_for_device(DEVICE)):
        _err_exit("attaching at " + DEVICE)

    _sh_err_exit(lambda: sh.sudo.mkfs(DEVICE, t="ext4"),
                 'making ext4 file system')

    if not os.path.exists(MOUNT):