def check(self): storage_filename = self._get_storage_filename() volume_manager = JsonVolumeManager(storage_filename) all_mountable_volumes = [] roles = volume_manager.get_roles() for r in roles: all_mountable_volumes.extend(sum(self._get_mountable_volumes(r),[])) if not all_mountable_volumes: print "No EBS volumes found. Have you executed 'create-storage' first?" return error = False # disable boto ERROR logging for now boto_logging = logging.getLogger('boto') level = boto_logging.level boto_logging.setLevel(logging.FATAL) for vid in [v.volume_id for v in all_mountable_volumes]: try: self.cluster.ec2Connection.get_all_volumes([vid]) except: error = True print "Volume does not exist: %s" % vid if not error: print "Congrats! All volumes exist!" # reset boto logging boto_logging.setLevel(level)
def create(self, role, number_of_instances, availability_zone, spec_filename):
    """Create EBS volumes for a number of instances of the given role.

    Volume sizes, snapshot ids, devices and mount points are read from the
    JSON spec file; each instance's newly created volume set is recorded in
    the storage metadata file via the volume manager.
    """
    spec_file = open(spec_filename, 'r')
    try:
        volume_spec_manager = JsonVolumeSpecManager(spec_file)
        volume_manager = JsonVolumeManager(self._get_storage_filename())
        for dummy in range(number_of_instances):
            mountable_volumes = []
            volume_specs = volume_spec_manager.volume_specs_for_role(role)
            for spec in volume_specs:
                logger.info("Creating volume of size %s in %s from snapshot %s" % \
                    (spec.size, availability_zone, spec.snapshot_id))
                volume = self.cluster.ec2Connection.create_volume(spec.size,
                                                                  availability_zone,
                                                                  spec.snapshot_id)
                mountable_volumes.append(MountableVolume(volume.id,
                                                         spec.mount_point,
                                                         spec.device))
            volume_manager.add_instance_storage_for_role(role, mountable_volumes)
    finally:
        # Bug fix: the spec file was previously never closed.
        spec_file.close()
def get_volumes(self, roles=None, volumes=None):
    """Return a list of (role, EC2 volume) pairs.

    When *volumes* is given, its (role, volume) pairs are echoed back
    unchanged.  Otherwise the volumes recorded for *roles* (all recorded
    roles when *roles* is None) are looked up in EC2.
    """
    if volumes is not None:
        return [(role, volume) for role, volume in volumes]
    if roles is None:
        manager = JsonVolumeManager(self._get_storage_filename())
        roles = manager.get_roles()
    pairs = []
    for role in roles:
        volume_lists = self._get_mountable_volumes(role)
        by_id = self._get_ec2_volumes_dict(volume_lists)
        for volume_list in volume_lists:
            for mountable in volume_list:
                pairs.append((role, by_id[mountable.volume_id]))
    return pairs
def delete(self, roles=None):
    """Delete the EBS volumes recorded for the given roles.

    All volumes for a role must be in the 'available' state; otherwise a
    VolumesStillInUseException is raised and nothing is deleted.  After a
    successful delete the role's storage records are removed.

    Raises:
        VolumesStillInUseException: if any volume is not 'available'.
    """
    # Bug fix: the default was a mutable list ([]), which is shared across
    # calls.  None is used as the sentinel instead; behavior is unchanged.
    if roles is None:
        roles = []
    storage_filename = self._get_storage_filename()
    volume_manager = JsonVolumeManager(storage_filename)
    for role in roles:
        mountable_volumes_list = volume_manager.get_instance_storage_for_role(role)
        ec2_volumes = self._get_ec2_volumes_dict(mountable_volumes_list)
        all_available = True
        for volume in ec2_volumes.itervalues():
            if volume.status != 'available':
                all_available = False
                logger.warning("Volume %s is not available.", volume)
        if not all_available:
            msg = "Some volumes are still in use. Aborting delete."
            logger.warning(msg)
            raise VolumesStillInUseException(msg)
        for volume in ec2_volumes.itervalues():
            volume.delete()
        volume_manager.remove_instance_storage_for_role(role)
def get_roles(self):
    """Return the roles that have instance storage recorded."""
    return JsonVolumeManager(self._get_storage_filename()).get_roles()
def _get_mountable_volumes(self, role):
    """Return the recorded instance storage (list of volume lists) for a role."""
    manager = JsonVolumeManager(self._get_storage_filename())
    return manager.get_instance_storage_for_role(role)