def create(self, request):
    """Create a volume.

    PUT /v1.0/{account_id}/volumes/{id}?size=X&volume_type_name=Z

    Validates the request parameters, picks candidate nodes, persists a
    new Volume row, and asks the backend to place it.  When a row with
    this id already exists, it is reclaimed only if it sits in a
    terminal state (ERROR or DELETED); otherwise a 409 conflict is
    raised.
    """
    params = request.params
    volume_type = self._validate_volume_type(params)
    backup = self._validate_backup(params)
    source = self._validate_source(params)
    size = self._validate_size(params, volume_type, backup, source)
    affinity = self._validate_affinity(params)
    image_id = params.get('image_id')

    # Creating from an image starts in IMAGING rather than NEW so the
    # scheduler never races against an in-flight image on a node.
    imaging = bool(image_id)
    status = 'IMAGING' if imaging else 'NEW'

    candidates = self.get_recommended_nodes(volume_type.name, size,
                                            imaging=imaging,
                                            affinity=affinity)

    volume = Volume(id=self.id, account_id=self.account_id,
                    status=status, volume_type=volume_type,
                    size=size, image_id=image_id)
    self.db.add(volume)

    # Issue the backend request(s) to place the volume.
    try:
        node_info = self._assign_node(volume, backup, source, candidates)
    except IntegrityError:
        # Duplicate id: roll back the insert and try to reclaim the
        # existing row via an optimistic lock — the UPDATE only matches
        # when the old volume is already ERROR or DELETED.
        self.db.rollback()
        reclaimed = self.db.query(Volume).\
            filter(and_(Volume.id == self.id,
                        Volume.account_id == self.account_id,
                        Volume.status.in_(['ERROR', 'DELETED']))).\
            update({
                'status': status,
                'size': size,
                'volume_type_name': volume_type.name,
                'image_id': image_id,
            }, synchronize_session=False)
        if not reclaimed:
            raise HTTPConflict("Volume '%s' already exists" % self.id)
        # Still inside the uncommitted update transaction.
        volume = self.db.query(Volume).get(self.id)
        node_info = self._assign_node(volume, backup, source, candidates)

    volume.status = node_info['status']
    self.db.commit()

    response = dict(volume)
    response['cinder_host'] = volume.node.cinder_host
    return Response(response)
def create(self, request):
    """Create a volume.

    PUT /v1.0/{account_id}/volumes/{id}?size=X&volume_type_name=Z

    Validates the request parameters (including an optional name and a
    forced target node), picks candidate nodes, persists a new Volume
    row, and asks the backend to place it.  When a row with this id
    already exists, it is reclaimed only if it sits in a terminal state
    (ERROR or DELETED); otherwise a 409 conflict is raised.
    """
    params = request.params
    name = self._validate_name(params)
    volume_type = self._validate_volume_type(params)
    backup = self._validate_backup(params)
    source = self._validate_source(params)
    size = self._validate_size(params, volume_type, backup, source)
    affinity = self._validate_affinity(params)
    force_node = self._validate_force_node(params)
    image_id = params.get('image_id')

    # Creating from an image starts in IMAGING rather than NEW so the
    # scheduler never races against an in-flight image on a node.
    imaging = bool(image_id)
    status = 'IMAGING' if imaging else 'NEW'

    candidates = self.get_recommended_nodes(volume_type.name, size,
                                            imaging=imaging,
                                            affinity=affinity,
                                            force_node=force_node)

    volume = Volume(id=self.id, account_id=self.account_id,
                    status=status, volume_type=volume_type,
                    size=size, image_id=image_id, name=name)
    self.db.add(volume)

    # Issue the backend request(s) to place the volume.
    try:
        node_info = self._assign_node(volume, backup, source, candidates)
    except IntegrityError:
        # Duplicate id: roll back the insert and try to reclaim the
        # existing row via an optimistic lock — the UPDATE only matches
        # when the old volume is already ERROR or DELETED.
        self.db.rollback()
        reclaimed = self.db.query(Volume).\
            filter(and_(Volume.id == self.id,
                        Volume.account_id == self.account_id,
                        Volume.status.in_(['ERROR', 'DELETED']))).\
            update({
                'status': status,
                'size': size,
                'volume_type_name': volume_type.name,
                'image_id': image_id,
                'name': name,
            }, synchronize_session=False)
        if not reclaimed:
            raise HTTPConflict("Volume '%s' already exists" % self.id)
        # Still inside the uncommitted update transaction.
        volume = self.db.query(Volume).get(self.id)
        node_info = self._assign_node(volume, backup, source, candidates)

    volume.status = node_info['status']
    self.db.commit()

    response = dict(volume)
    response['cinder_host'] = volume.node.cinder_host
    return Response(response)