def _ensure(self):
    """
    Ensure a Cinder volume exists, lives in the configured availability
    zone and is attached to this server; then sync ``self._config``.

    Flow:
      - if ``self.id`` is set, look the volume up (raises
        ``storage2.VolumeNotExistsError`` on 404). If it sits in a
        different availability zone, snapshot it and drop the id so a
        fresh volume is created in the right zone.
      - otherwise, if ``self.snap`` is set, create from that snapshot.
      - attach the volume to this server unless it is already in-use here.
    """
    self._cinder = self._new_cinder_connection()
    assert self._cinder or self.id, self.error_messages['no_id_or_conn']
    if self._cinder:
        volume = None
        if self.id:
            try:
                volume = self._cinder.volumes.get(self.id)
            except cinder_exc.NotFound:
                raise storage2.VolumeNotExistsError(self.id)
            if volume.availability_zone != self.avail_zone:
                # Volume can't be attached cross-AZ: snapshot it and
                # recreate in the desired zone.
                LOG.warn('Cinder volume %s is in the different '
                        'availability zone (%s). Snapshoting it '
                        'and create a new Cinder volume in %s',
                        volume.id, volume.availability_zone,
                        self.avail_zone)
                self.snapshot_id = self._create_snapshot(self.id).id
                self.id = None
                volume = None
            else:
                self.size = volume.size
        elif self.snap:
            self.snapshot_id = self.snap['id']
            #TODO: take tags from snapshot, if they exist
            # if not self.tags:
            #     self.tags = self.snap.get('tags', {})

        if not self.id:
            volume = self._create_volume(size=self.size,
                                         snapshot_id=self.snapshot_id,
                                         avail_zone=self.avail_zone,
                                         volume_type=self.volume_type)
            self.size = volume.size
            self.id = volume.id

        server_ids = map(lambda info: info['server_id'], volume.attachments)
        my_server_id = __openstack__['server_id']
        # Fix: seed `device` with the currently known device so it is
        # defined even when the volume is already in-use by this server
        # and self.device is set (previously that path raised NameError
        # at the _config.update() below).
        device = self.device
        if not (volume.status == 'in-use' and my_server_id in server_ids):
            self._wait_status_transition()
            if len(volume.attachments) > 0:
                # Attached somewhere else: detach before attaching here.
                self._detach_volume()
            device = self._attach_volume(server_id=my_server_id)
        elif not self.device:
            # In-use by this server but device unknown: read it back
            # from the attachment record.
            device = volume.attachments[0]['device']

        self._config.update({
            'id': volume.id,
            'avail_zone': volume.availability_zone,
            'size': volume.size,
            'volume_type': volume.volume_type,
            'device': device})
def resource(self): connection = __node__['gce'].connect_compute() project_id = __node__['gce']['project_id'] try: return connection.disks().get(disk=self.name, project=project_id, zone=self.zone).execute() except HttpError, e: code = int(e.resp['status']) if code == 404: raise storage2.VolumeNotExistsError('Volume %s not found in %s zone' % (self.name, self.zone)) else: raise
def _ensure(self):
    '''
    Ensure an EBS volume exists in this instance's availability zone and
    is attached to this instance, then sync ``self._config``.

    Algo:
        if id:
            ebs = get volume
            if ebs in different zone:
                create snapshot
                del id
        if not id:
            ebs = create volume
        if not ebs is in-use by this server:
            if attaching or detaching:
                wait for state change
            if in-use:
                detach volume
            attach volume
    '''
    self._conn = self._connect_ec2()
    assert self._conn or self.id, self.error_messages['no_id_or_conn']
    if self._conn:
        zone = self._avail_zone()
        snap = name = None
        # self.size may be a callable (deferred sizing) — resolve it here.
        size = self.size() if callable(self.size) else self.size
        encrypted = self.encrypted
        if self.id:
            try:
                ebs = self._conn.get_all_volumes([self.id])[0]
            except boto.exception.BotoServerError, e:
                # Translate AWS "no such volume" into the storage2 error
                # callers catch; everything else propagates as-is.
                if e.code == 'InvalidVolume.NotFound':
                    raise storage2.VolumeNotExistsError(self.id)
                raise
            if ebs.zone != zone:
                # EBS can't attach cross-AZ: snapshot the old volume and
                # drop the id so a new one is created in this zone.
                LOG.warn(
                        'EBS volume %s is in the different '
                        'availability zone (%s). Snapshoting it '
                        'and create a new EBS volume in %s',
                        ebs.id, ebs.zone, zone)
                snap = self._create_snapshot(self.id).id
                self.id = ebs = None
            else:
                size = ebs.size
        elif self.snap:
            snap = self.snap['id']
            # Inherit snapshot tags when the volume has none of its own.
            if not self.tags:
                self.tags = self.snap.get('tags', {})
        if not self.id:
            # NOTE(review): passes self.size rather than the locally
            # resolved `size` — presumably intentional (callable handled
            # inside _create_volume?), but worth confirming.
            ebs = self._create_volume(zone=zone, size=self.size,
                                      snapshot=snap,
                                      volume_type=self.volume_type,
                                      iops=self.iops,
                                      tags=self.tags,
                                      encrypted=self.encrypted)
            size = ebs.size
            encrypted = ebs.encrypted

        if not (ebs.volume_state() == 'in-use' and
                ebs.attach_data.instance_id == self._instance_id()):
            # Not attached to this instance: wait out transitional
            # states, detach from wherever it is, then attach here.
            if ebs.attachment_state() in ('attaching', 'detaching'):
                self._wait_attachment_state_change(ebs)
            if ebs.attachment_state() == 'attached':
                self._detach_volume(ebs)
            device, name = self._attach_volume(ebs)
        else:
            # Already attached to this instance: recover the device
            # name from the attachment info.
            name = ebs.attach_data.device
            device = name2device(name)

        self._config.update({
            'id': ebs.id,
            'name': name,
            'device': device,
            'avail_zone': zone,
            'size': size,
            'snap': None,
            'encrypted': encrypted
        })
def _ensure(self):
    """
    Ensure a CloudStack volume exists and is attached to this instance,
    then sync ``self._config``. On any failure during construction the
    volume is best-effort detached and the original exception is
    re-raised wrapped in ``storage2.StorageError`` (with the original
    traceback, via the Python 2 three-argument raise).
    """
    self._native_vol = None
    snapshot_id = None
    self._conn = self._new_conn()
    devname = self.device
    if self._conn:
        try:
            if self.snap:
                # Restoring from snapshot: discard id/size so a fresh
                # volume is created from the snapshot below.
                snapshot_id = self.snap['id']
                self.id = None
                self.size = None
            if self.id:
                LOG.debug('Volume %s has been already created', self.id)
                vol_list = self._conn.listVolumes(id=self.id)
                if len(vol_list) == 0:
                    raise storage2.VolumeNotExistsError(self.id)
                self._native_vol = vol_list[0]
                self._check_attachement()
            if not self.id:
                LOG.debug('Creating new volume')
                if not self.disk_offering_id:
                    # Pick the first custom-size offering
                    # (no fixed disksize): any size you want.
                    for dskoffer in self._conn.listDiskOfferings():
                        if not dskoffer.disksize and dskoffer.iscustomized:
                            self.disk_offering_id = dskoffer.id
                            break
                # Reserve a free device letter for the lifetime of the
                # create+attach sequence.
                with self._free_device_letter_mgr:
                    letter = self._free_device_letter_mgr.get()
                    devname = get_system_devname(letter)
                    self._native_vol = self._create_volume(
                            name='%s-%02d' % (
                                    __cloudstack__['instance_id'],
                                    devname_to_deviceid(devname)),
                            zone_id=__cloudstack__['zone_id'],
                            size=self.size,
                            disk_offering_id=self.disk_offering_id,
                            snap_id=snapshot_id)
                    self.id = self._native_vol.id
                    devname = self._attach(
                            __cloudstack__['instance_id'],
                            devname_to_deviceid(devname))[1]
                # Re-read the volume so size/zone/offering reflect the
                # attached state.
                self._native_vol = self._conn.listVolumes(
                        id=self.id)[0]
        except:
            exc_type, exc_value, exc_trace = sys.exc_info()
            if self._native_vol:
                # Best-effort rollback: detach whatever we attached.
                LOG.debug('Detaching volume')
                try:
                    self._conn.detachVolume(id=self._native_vol.id)
                except:
                    pass
            # Python 2 three-arg raise: keep the original traceback.
            raise storage2.StorageError, \
                    'Volume construction failed: %s' % exc_value, \
                    exc_trace
        self._config.update({
                # size reported by the API is in bytes; store GiB
                'id': self._native_vol.id,
                'size': self._native_vol.size / (1024 * 1024 * 1024),
                'device': devname,
                'zone_id': self._native_vol.zoneid,
                'disk_offering_id': getattr(
                        self._native_vol, 'diskofferingid', None)
        })
        self._native_vol = None
def _ensure(self):
    """
    Ensure a CloudStack volume exists and is attached to this instance.

    For an existing volume id it first forces a detach in a retry loop:
    volumes attached to terminated instances are invisible to
    listVolumes, and detachVolume makes them visible again. Transient
    'Internal error executing command' responses are retried up to 10
    times with a 30s pause.
    """
    self._native_vol = None
    snapshot_id = None
    self._conn = self._new_conn()
    devname = self.device
    if self._conn:
        try:
            if self.snap:
                # Restoring from snapshot: discard id/size so a fresh
                # volume is created from the snapshot below.
                snapshot_id = self.snap['id']
                self.id = None
                self.size = None
            if self.id:
                LOG.debug('Volume %s has been already created', self.id)
                int_errs = 0
                while True:
                    try:
                        LOG.debug('XXX: Volumes attached to terminated instances ' \
                                'are not visible in listVolumes view. ' \
                                'Calling detachVolume to force volume be visibile')
                        self._conn.detachVolume(id=self.id)
                    except Exception, e:
                        msg = str(e)
                        if 'does not exist' in msg:
                            raise storage2.VolumeNotExistsError(self.id)
                        if 'not attached' in msg:
                            # Already detached — nothing to do.
                            break
                        if 'Internal error executing command' in msg:
                            # Transient backend error: retry up to 10x.
                            int_errs += 1
                            if int_errs >= 10:
                                raise
                            time.sleep(30)
                            continue
                        # pass other errors
                    break
                try:
                    vol_list = self._conn.listVolumes(id=self.id)
                except:
                    # Some CloudStack backends answer with this message
                    # instead of an empty list when the id is unknown.
                    if 'Expected list, got null' in str(sys.exc_info()[1]):
                        raise storage2.VolumeNotExistsError(self.id)
                    raise
                else:
                    if len(vol_list) == 0:
                        raise storage2.VolumeNotExistsError(self.id)
                self._native_vol = vol_list[0]
                devname = self._check_attachement()
            if not self.id:
                LOG.debug('Creating new volume')
                if not self.disk_offering_id:
                    # Pick the first custom-size offering
                    # (no fixed disksize): any size you want.
                    for dskoffer in self._conn.listDiskOfferings():
                        if not dskoffer.disksize and dskoffer.iscustomized:
                            self.disk_offering_id = dskoffer.id
                            break
                self._native_vol = self._create_volume(
                        name=getattr(self, 'scalr_storage_id',
                                str(uuid.uuid4())),
                        zone_id=__cloudstack__['zone_id'],
                        size=self.size,
                        disk_offering_id=self.disk_offering_id,
                        snap_id=snapshot_id)
                self.id = self._native_vol.id
            devname = self._attach(__cloudstack__['instance_id'])
            # Re-read the volume so the native record reflects the
            # attached state.
            self._native_vol = self._conn.listVolumes(id=self.id)[0]
        # NOTE(review): a re-raise-only except clause is a no-op on its
        # own, and unlike the sibling _ensure implementations this one
        # never updates self._config — the original may have had further
        # except/cleanup clauses after this point; confirm against the
        # full file.
        except storage2.StorageError:
            raise
def _ensure(self):
    """
    Ensure a GCE persistent disk exists in this instance's zone and is
    attached to this instance.

    Without a GCE connection only an implicit local check is done (the
    device for ``self.name`` must already be present). With a
    connection: create the disk (optionally from ``self.snap``), or —
    if it exists in a different zone — snapshot it and recreate it in
    this zone; then attach it and wait for the block device to appear.
    Temporary snapshots are destroyed in the ``finally`` cleanup.
    """
    garbage_can = []  # temporary objects to destroy on exit
    zone = os.path.basename(__node__['gce']['zone'])
    project_id = __node__['gce']['project_id']
    server_name = __node__['server_id']

    try:
        connection = __node__['gce'].connect_compute()
    except:
        e = sys.exc_info()[1]
        LOG.debug('Can not get GCE connection: %s' % e)
        """ No connection, implicit check """
        # Offline fallback: the disk must already be created and
        # attached; otherwise we can do nothing without the API.
        try:
            self._check_attr('name')
        except:
            raise storage2.StorageError(
                'Disk is not created yet, and GCE connection is unavailable'
            )
        device = gce_util.devicename_to_device(self.name)
        if not device:
            raise storage2.StorageError(
                "Disk is not attached and GCE connection is unavailable")
        self.device = device
    else:
        LOG.debug('Successfully created connection to cloud engine')
        try:
            create = False
            if not self.link:
                # Disk does not exist, create it first
                create_request_body = dict(name=self.name)
                if self.snap:
                    # Rehydrate the snapshot dict into a storage2
                    # snapshot object so we can wait on its readiness.
                    snap_dict = dict(self.snap)
                    snap_dict['type'] = STORAGE_TYPE
                    self.snap = storage2.snapshot(snap_dict)
                    LOG.debug(
                        'Ensuring that snapshot is ready, before creating disk from it'
                    )
                    gce_util.wait_snapshot_ready(self.snap)
                    create_request_body[
                        'sourceSnapshot'] = to_current_api_version(
                            self.snap.link)
                else:
                    create_request_body['sizeGb'] = self.size
                create = True
            else:
                self._check_attr('zone')
                LOG.debug('Checking that disk already exists')
                try:
                    disk_dict = connection.disks().get(
                        disk=self.name, project=project_id,
                        zone=zone).execute()
                    self.link = disk_dict['selfLink']
                except HttpError, e:
                    code = int(e.resp['status'])
                    if code == 404:
                        raise storage2.VolumeNotExistsError(self.name)
                    else:
                        raise

                if self.zone != zone:
                    # Volume is in different zone, snapshot it,
                    # create new volume from this snapshot, then attach
                    temp_snap = self.snapshot('volume')
                    garbage_can.append(temp_snap)
                    new_name = self.name + zone
                    create_request_body = dict(
                        name=new_name,
                        sourceSnapshot=to_current_api_version(
                            temp_snap.link))
                    create = True

            attach = False
            if create:
                disk_name = create_request_body['name']
                if "pd-standard" != self.disk_type:
                    # Non-default disk type: resolve its selfLink for
                    # the insert request.
                    disk_type = gce_util.get_disktype(
                        conn=connection,
                        project_id=project_id,
                        zone=zone,
                        disktype=self.disk_type)
                    create_request_body.update(
                        {'type': disk_type['selfLink']})

                LOG.debug('Creating new GCE disk %s' % disk_name)
                op = connection.disks().insert(
                    project=project_id,
                    zone=zone,
                    body=create_request_body).execute()
                gce_util.wait_for_operation(connection, project_id,
                                            op['name'], zone)
                # Re-fetch to capture the authoritative id/selfLink.
                disk_dict = connection.disks().get(disk=disk_name,
                                                   project=project_id,
                                                   zone=zone).execute()
                self.id = disk_dict['id']
                self.link = disk_dict['selfLink']
                self.zone = zone
                self.name = disk_name
                attach = True
            else:
                if self.last_attached_to and self.last_attached_to != server_name:
                    LOG.debug(
                        "Making sure that disk %s detached from previous attachment place."
                        % self.name)
                    try:
                        gce_util.ensure_disk_detached(
                            connection, project_id, zone,
                            self.last_attached_to, self.link)
                    except:
                        e = sys.exc_info()[1]
                        if 'resource was not found' in str(e):
                            raise storage2.VolumeNotExistsError(self.link)
                        raise

                attachment_inf = self._attachment_info(connection)
                if attachment_inf:
                    # Already attached here: reuse the device name.
                    disk_devicename = attachment_inf['deviceName']
                else:
                    attach = True

            if attach:
                LOG.debug('Attaching disk %s to current instance' % self.name)
                try:
                    op = connection.instances().attachDisk(
                        instance=server_name,
                        project=project_id,
                        zone=zone,
                        body=dict(deviceName=self.name,
                                  source=self.link,
                                  mode="READ_WRITE",
                                  type="PERSISTENT")).execute()
                except:
                    e = sys.exc_info()[1]
                    if 'resource was not found' in str(e):
                        raise storage2.VolumeNotExistsError(self.link)
                    raise
                gce_util.wait_for_operation(connection, project_id,
                                            op['name'], zone=zone)
                disk_devicename = self.name

            # Poll up to 10s for the kernel block device to show up;
            # the for/else fires only when no attempt succeeded.
            for i in range(10):
                device = gce_util.devicename_to_device(disk_devicename)
                if device:
                    break
                LOG.debug('Device not found in system. Retrying in 1s.')
                time.sleep(1)
            else:
                raise storage2.StorageError(
                    "Disk should be attached, but corresponding device not found in system"
                )

            self.device = device
            self.last_attached_to = server_name
            self.snap = None

        finally:
            # Perform cleanup
            for garbage in garbage_can:
                try:
                    garbage.destroy(force=True)
                except:
                    e = sys.exc_info()[1]
                    LOG.debug(
                        'Failed to destroy temporary storage object %s: %s',
                        garbage, e)