def __mount_ebs_volume(self):
    """
    Attach, format (if necessary) and mount the EBS volume with the same cluster ordinal as
    this node.

    If the 'ebs_volume_size' instance tag is missing or zero, no volume is attached and
    persistent data is placed on the ephemeral volume instead (self.persistent_dir is
    redirected to self.ephemeral_dir).

    :raises AssertionError: if the attached device is not a block device, or if a
            non-empty volume carries a file system label other than the one derived
            from the volume name
    :raises RuntimeError: if the device is already mounted somewhere other than
            self.persistent_dir
    """
    ebs_volume_size = self.instance_tag('ebs_volume_size') or '0'
    ebs_volume_size = int(ebs_volume_size)
    if ebs_volume_size:
        instance_name = self.instance_tag('Name')
        cluster_ordinal = int(self.instance_tag('cluster_ordinal'))
        volume_name = '%s__%d' % (instance_name, cluster_ordinal)
        volume = EC2VolumeHelper(ec2=self.ec2,
                                 availability_zone=self.availability_zone,
                                 name=volume_name,
                                 size=ebs_volume_size,
                                 volume_type="gp2")
        # TODO: handle case where volume is already attached
        device_ext = '/dev/sdf'  # device name as seen by the EC2 API
        device = '/dev/xvdf'  # device name as seen by the guest kernel
        volume.attach(self.instance_id, device_ext)

        # Wait for the inode to appear and make sure it's a block device. Note that
        # attach() returning does not guarantee that the kernel has created the device
        # node yet, hence the polling loop.
        while True:
            try:
                # Explicit raise instead of `assert` so the check survives `python -O`,
                # which strips assert statements.
                if not stat.S_ISBLK(os.stat(device).st_mode):
                    raise AssertionError("Expected '%s' to be a block device" % device)
                break
            except OSError as e:
                if e.errno == errno.ENOENT:
                    time.sleep(1)
                else:
                    raise

        # Only format empty volumes ('file -sL' reports bare 'data' for an empty device)
        volume_label = volume_label_hash(volume_name)
        if check_output(['file', '-sL', device]).strip() == device + ': data':
            check_call(['mkfs', '-t', 'ext4', device])
            check_call(['e2label', device, volume_label])
        else:
            # If the volume is not empty, verify the file system label
            actual_label = check_output(['e2label', device]).strip()
            if actual_label != volume_label:
                raise AssertionError(
                    "Expected volume label '%s' (derived from '%s') but got '%s'" % (
                        volume_label, volume_name, actual_label))
        current_mount_point = self.__mount_point(device)
        if current_mount_point is None:
            mkdir_p(self.persistent_dir)
            check_call(['mount', device, self.persistent_dir])
        elif current_mount_point == self.persistent_dir:
            # Already mounted where we want it — nothing to do.
            pass
        else:
            raise RuntimeError(
                "Can't mount device %s on '%s' since it is already mounted on '%s'" % (
                    device, self.persistent_dir, current_mount_point))
    else:
        # No persistent volume is attached and the root volume is off limits, so we will need
        # to place persistent data on the ephemeral volume.
        self.persistent_dir = self.ephemeral_dir
def __mount_ebs_volume(self):
    """
    Attach, format (if necessary) and mount the EBS volume with the same cluster ordinal as
    this node.

    If the 'ebs_volume_size' instance tag is missing or zero, no volume is attached and
    persistent data is placed on the ephemeral volume instead (self.persistent_dir is
    redirected to self.ephemeral_dir).

    :raises AssertionError: if the attached device is not a block device, or if a
            non-empty volume carries a file system label other than the one derived
            from the volume name
    :raises RuntimeError: if the device is already mounted somewhere other than
            self.persistent_dir
    """
    ebs_volume_size = self.__get_instance_tag(self.instance_id, 'ebs_volume_size') or '0'
    ebs_volume_size = int(ebs_volume_size)
    if ebs_volume_size:
        instance_name = self.__get_instance_tag(self.instance_id, 'Name')
        cluster_ordinal = int(self.__get_instance_tag(self.instance_id, 'cluster_ordinal'))
        volume_name = '%s__%d' % (instance_name, cluster_ordinal)
        volume = EC2VolumeHelper(ec2=self.ec2,
                                 availability_zone=self.availability_zone,
                                 name=volume_name,
                                 size=ebs_volume_size,
                                 volume_type="gp2")
        # TODO: handle case where volume is already attached
        device_ext = '/dev/sdf'  # device name as seen by the EC2 API
        device = '/dev/xvdf'  # device name as seen by the guest kernel
        volume.attach(self.instance_id, device_ext)

        # Wait for the inode to appear and make sure it's a block device. Note that
        # attach() returning does not guarantee that the kernel has created the device
        # node yet, hence the polling loop.
        while True:
            try:
                # Explicit raise instead of `assert` so the check survives `python -O`,
                # which strips assert statements.
                if not stat.S_ISBLK(os.stat(device).st_mode):
                    raise AssertionError("Expected '%s' to be a block device" % device)
                break
            except OSError as e:
                if e.errno == errno.ENOENT:
                    time.sleep(1)
                else:
                    raise

        # Only format empty volumes ('file -sL' reports bare 'data' for an empty device)
        volume_label = volume_label_hash(volume_name)
        if check_output(['file', '-sL', device]).strip() == device + ': data':
            check_call(['mkfs', '-t', 'ext4', device])
            check_call(['e2label', device, volume_label])
        else:
            # If the volume is not empty, verify the file system label
            actual_label = check_output(['e2label', device]).strip()
            if actual_label != volume_label:
                raise AssertionError(
                    "Expected volume label '%s' (derived from '%s') but got '%s'" % (
                        volume_label, volume_name, actual_label))
        current_mount_point = self.__mount_point(device)
        if current_mount_point is None:
            mkdir_p(self.persistent_dir)
            check_call(['mount', device, self.persistent_dir])
        elif current_mount_point == self.persistent_dir:
            # Already mounted where we want it — nothing to do.
            pass
        else:
            raise RuntimeError(
                "Can't mount device %s on '%s' since it is already mounted on '%s'" % (
                    device, self.persistent_dir, current_mount_point))
    else:
        # No persistent volume is attached and the root volume is off limits, so we will need
        # to place persistent data on the ephemeral volume.
        self.persistent_dir = self.ephemeral_dir
def __mount_ebs_volume(self):
    """
    Attach, format (if necessary) and mount the EBS volume with the same cluster ordinal as
    this node.

    If the 'ebs_volume_size' instance tag is missing or zero, no volume is attached and
    persistent data is placed on the ephemeral volume instead (self.persistent_dir is
    redirected to self.ephemeral_dir).

    :raises AssertionError: if the attached device is not a block device, or if a
            non-empty volume carries a file system label other than the one derived
            from the volume name
    :raises RuntimeError: if the device is already mounted somewhere other than
            self.persistent_dir
    """
    ebs_volume_size = self.__get_instance_tag(self.instance_id, 'ebs_volume_size') or '0'
    ebs_volume_size = int(ebs_volume_size)
    if ebs_volume_size:
        instance_name = self.__get_instance_tag(self.instance_id, 'Name')
        cluster_ordinal = int(self.__get_instance_tag(self.instance_id, 'cluster_ordinal'))
        volume_name = '%s__%d' % (instance_name, cluster_ordinal)
        volume = EC2VolumeHelper(ec2=self.ec2,
                                 availability_zone=self.availability_zone,
                                 name=volume_name,
                                 size=ebs_volume_size)
        # TODO: handle case where volume is already attached
        device_ext = '/dev/sdf'  # device name as seen by the EC2 API
        device = '/dev/xvdf'  # device name as seen by the guest kernel
        volume.attach(self.instance_id, device_ext)

        # attach() returning does not guarantee that the kernel has created the device
        # node yet, so wait for the inode to appear and make sure it's a block device
        # before probing it. An explicit raise is used instead of `assert` so the check
        # survives `python -O`.
        while True:
            try:
                if not stat.S_ISBLK(os.stat(device).st_mode):
                    raise AssertionError("Expected '%s' to be a block device" % device)
                break
            except OSError as e:
                if e.errno == errno.ENOENT:
                    time.sleep(1)
                else:
                    raise

        # Only format empty volumes ('file -sL' reports bare 'data' for an empty device)
        volume_label = volume_label_hash(volume_name)
        if check_output(['file', '-sL', device]).strip() == device + ': data':
            check_call(['mkfs', '-t', 'ext4', device])
            check_call(['e2label', device, volume_label])
        else:
            # If the volume is not empty, verify the file system label
            actual_label = check_output(['e2label', device]).strip()
            if actual_label != volume_label:
                raise AssertionError(
                    "Expected volume label '%s' (derived from '%s') but got '%s'" % (
                        volume_label, volume_name, actual_label))
        current_mount_point = self.__mount_point(device)
        if current_mount_point is None:
            check_call(['mount', device, self.persistent_dir])
        elif current_mount_point == self.persistent_dir:
            # Already mounted where we want it — nothing to do.
            pass
        else:
            raise RuntimeError(
                "Can't mount device %s on '%s' since it is already mounted on '%s'" % (
                    device, self.persistent_dir, current_mount_point))
    else:
        # No persistent volume is attached and the root volume is off limits, so we will need
        # to place persistent data on the ephemeral volume.
        self.persistent_dir = self.ephemeral_dir