def clean_group_avatar(self):
    """Validate the uploaded group avatar: extension, size and per-group count.

    Raises ``forms.ValidationError`` on any violation.
    BUG FIX: the original ended with a bare ``return``, which made Django
    store ``None`` in ``cleaned_data`` for this field; return the file.
    """
    data = self.cleaned_data['group_avatar']
    if GROUP_AVATAR_ALLOWED_FILE_EXTS:
        (root, ext) = os.path.splitext(data.name.lower())
        if ext not in GROUP_AVATAR_ALLOWED_FILE_EXTS:
            raise forms.ValidationError(
                _("%(ext)s is an invalid file extension. "
                  "Authorized extensions are : %(valid_exts_list)s") %
                {'ext': ext,
                 'valid_exts_list': ", ".join(GROUP_AVATAR_ALLOWED_FILE_EXTS)})
    if data.size > GROUP_AVATAR_MAX_SIZE:
        error = _("Your file is too big (%(size)s), "
                  "the maximum allowed size is %(max_valid_size)s")
        raise forms.ValidationError(error % {
            'size': filesizeformat(data.size),
            'max_valid_size': filesizeformat(GROUP_AVATAR_MAX_SIZE)
        })
    count = GroupAvatar.objects.filter(group=self.group).count()
    # The count cap only applies when more than one avatar is allowed.
    if GROUP_AVATAR_MAX_AVATARS_PER_USER > 1 and count >= GROUP_AVATAR_MAX_AVATARS_PER_USER:
        error = _("You already have %(nb_avatars)d avatars, "
                  "and the maximum allowed is %(nb_max_avatars)d.")
        raise forms.ValidationError(error % {
            'nb_avatars': count,
            'nb_max_avatars': GROUP_AVATAR_MAX_AVATARS_PER_USER,
        })
    return data
def image_clean(self):
    """Validate the uploaded image: content type, subtype and size.

    Fixes over the original:
    - the cleaned file was bound to ``image`` but validated through the
      undefined names ``content``/``context`` (NameError on every call);
    - ``MAX_UPLOAD_SIZE`` was a *string*, making the ``>`` size comparison
      meaningless;
    - JPEG uploads carry MIME subtype ``jpeg`` (not only ``jpg``), so valid
      JPEGs were rejected;
    - the size error's named placeholders were fed a positional tuple.

    Returns the validated file object.
    """
    # Accepted: image/jpeg, image/png, image/gif
    CONTENT_TYPES = ['image']
    CONTENT_SUBTYPES = ['jpeg', 'jpg', 'png', 'gif']
    MAX_UPLOAD_SIZE = 2621440  # 2.5 MB

    image = self.cleaned_data['image']
    content_type, _sep, subtype = image.content_type.partition('/')
    if content_type not in CONTENT_TYPES:
        raise forms.ValidationError(_('File type not supported.'))
    if image.size > MAX_UPLOAD_SIZE:
        raise forms.ValidationError(
            _('Please keep filesize under %(bytes)s. Current filesize is %(currsize)s') % {
                'bytes': filesizeformat(MAX_UPLOAD_SIZE),
                'currsize': filesizeformat(image.size),
            })
    if subtype not in CONTENT_SUBTYPES:
        raise forms.ValidationError(_("Only 'jpg', 'gif' and 'png' image files allowed!"))
    return image
def __call__(self, data):
    """File validator: check max/min size and (via libmagic) content type.

    Collects all failures and raises one ``ValidationError`` listing them.
    BUG FIX: the original raised unconditionally at the end, so perfectly
    valid files failed with an empty error list.
    """
    validation_errors = []
    if self.max_size is not None and data.size > self.max_size:
        params = {
            'max_size': filesizeformat(self.max_size),
            'size': filesizeformat(data.size),
        }
        validation_errors.append(ValidationError(self.error_messages['max_size'], 'max_size', params))
    if self.min_size is not None and data.size < self.min_size:
        params = {
            'min_size': filesizeformat(self.min_size),
            'size': filesizeformat(data.size)
        }
        validation_errors.append(ValidationError(self.error_messages['min_size'], 'min_size', params))
    if self.content_types:
        try:
            data.seek(0)  # reset file position
            content_type = magic.from_buffer(data.read(), mime=True).decode("utf-8")
            data.seek(0)  # reset file position (again)
            if content_type not in self.content_types:
                params = {'content_type': content_type}
                validation_errors.append(ValidationError(self.error_messages['content_type'], 'content_type', params))
        except AttributeError:
            # data was not a readable/seekable file-like object
            validation_errors.append(ValidationError(self.error_messages['validation_error'], 'validation_error'))
    # Only raise when something actually failed.
    if validation_errors:
        raise ValidationError(validation_errors)
def __call__(self, data):
    """Validate file size bounds and MIME type; log and raise on failure."""
    if self.max_size is not None and data.size > self.max_size:
        # Lazy %-args: message is only built when actually emitted.
        logger.error("file upload - file too large (%s)", data.size)
        params = {
            'max_size': filesizeformat(self.max_size),
            'size': filesizeformat(data.size),
        }
        raise ValidationError(self.error_messages['max_size'], 'max_size', params)
    if self.min_size is not None and data.size < self.min_size:
        logger.error("file upload - file too small (%s)", data.size)
        params = {
            # BUG FIX: was ``self.mix_size`` (AttributeError whenever a
            # too-small file was reported).
            'min_size': filesizeformat(self.min_size),
            'size': filesizeformat(data.size)
        }
        raise ValidationError(self.error_messages['min_size'], 'min_size', params)
    if self.content_types:
        content_type = magic.from_buffer(data.read(), mime=True)
        if content_type not in self.content_types:
            logger.error("file upload - content type not allowed (%s)", content_type)
            params = {'content_type': content_type}
            raise ValidationError(self.error_messages['content_type'], 'content_type', params)
def clean_logo_file(self):
    """Validate the logo upload: extension, real image header, and size.

    Returns the file (or ``None`` when nothing was uploaded or it was
    unreadable).
    """
    ALLOWED_LOGO_EXT = ('.jpg', '.jpeg', '.gif', '.png')
    logo_file = self.cleaned_data['logo_file']
    if logo_file:
        try:
            extension = splitext(logo_file.name)[1]
            # check the extension
            if extension.lower() not in ALLOWED_LOGO_EXT:
                raise forms.ValidationError(_('The logo must be of jpg, gif, or png image type.'))
            # check the image header (sniff the actual file content)
            image_type = '.%s' % imghdr.what('', logo_file.read())
            # BUG FIX: rewind after read(); otherwise the stream is at EOF
            # and a later save() would store an empty file.
            logo_file.seek(0)
            if image_type not in ALLOWED_LOGO_EXT:
                raise forms.ValidationError(_('The logo is an invalid image. Try uploading another logo.'))
            max_upload_size = get_max_file_upload_size()
            if logo_file.size > max_upload_size:
                raise forms.ValidationError(_('Please keep filesize under %(max_upload_size)s. Current filesize %(logo_size)s') % {
                    'max_upload_size': filesizeformat(max_upload_size),
                    'logo_size': filesizeformat(logo_file.size)})
        except IOError:
            # Unreadable upload: treat as "no file".
            logo_file = None
    return logo_file
def clean(self, *args, **kwargs):
    """Restrict uploads to the configured content types and max size.

    BUG FIX: the original never returned ``data``, so the cleaned value was
    silently replaced by ``None``. The dead commented-out
    ``redirect_for_login`` helper that trailed the method was removed.
    """
    data = super(ContentTypeRestrictedFileField, self).clean(*args, **kwargs)
    file = data.file
    try:
        content_type = file.content_type
        if content_type in self.content_types:
            if file._size > self.max_upload_size:
                raise forms.ValidationError(_(
                    u'Файл не должен быть больше %(max_size)s. '
                    u'Текущий размер %(cur_size)s') %
                    {'max_size': filesizeformat(self.max_upload_size),
                     'cur_size': filesizeformat(file._size)})
        else:
            raise forms.ValidationError(_(
                u'Расширение файла не поддерживается.'))
    except AttributeError:
        # Already-saved File objects have no content_type; let them pass.
        pass
    return data
def __download_func(current, total, width=None):
    # Progress callback for a file transfer: ``current``/``total`` are byte
    # counts supplied by the caller; ``width`` is accepted but unused.
    # ``self`` comes from the enclosing scope (this is a nested function).
    #
    # The raw integers are stored first so update_percent() can compute a
    # ratio, then the same slots are overwritten with human-readable
    # strings — presumably for display; note the numeric values are lost
    # after this call (TODO confirm no later code expects ints here).
    self.update_status['file']['current'] = current
    self.update_status['file']['total'] = total
    self.update_percent(self.update_status['file'])
    self.update_status['file']['current'] = filesizeformat(current)
    self.update_status['file']['total'] = filesizeformat(total)
    return ''
def clean_header_image(self):
    """Validate the header image: extension, real image header, and size."""
    header_image = self.cleaned_data["header_image"]
    if header_image:
        extension = splitext(header_image.name)[1]
        # check the extension
        if extension.lower() not in ALLOWED_IMG_EXT:
            raise forms.ValidationError(_("The header image must be of jpg, gif, or png image type."))
        # check the actual image content, not just the file name
        image_type = ".%s" % imghdr.what("", header_image.read())
        # BUG FIX: rewind after read(); otherwise the upload is saved from
        # an exhausted stream (empty file).
        header_image.seek(0)
        if image_type not in ALLOWED_IMG_EXT:
            raise forms.ValidationError(_("The header image is an invalid image. Try uploading another image."))
        max_upload_size = get_max_file_upload_size()
        if header_image.size > max_upload_size:
            raise forms.ValidationError(
                _("Please keep filesize under %(max_upload_size)s. Current filesize %(header_image)s") % {
                    "max_upload_size": filesizeformat(max_upload_size),
                    "header_image": filesizeformat(header_image.size),
                }
            )
    return header_image
def clean(self, data):
    """Validate a multi-file upload.

    Every file must be non-empty and under the per-file limit, the combined
    size must stay under the total limit, and in strict mode exactly
    ``self.count`` files are required. Returns the list of files.
    """
    super(MultiFileField, self).clean(data)
    if not self.required and data in EMPTY_VALUES:
        return None
    combined = 0
    for uploaded in data:
        if uploaded.size == 0:
            raise ValidationError(ugettext(u"The submitted file is empty."))
        if uploaded.size > int(settings.MAX_A_FILE_SIZE):
            raise ValidationError(u"파일의 크기가 너무 큽니다. 최대 %s. 현재 %s." % (filesizeformat(int(settings.MAX_A_FILE_SIZE)), filesizeformat(uploaded.size)))
        combined += uploaded.size
    if combined > int(settings.MAX_TOTAL_FILE_SIZE):
        raise ValidationError(u"모든 파일의 총합이 너무 큽니다. 최대 %s. 현재 %s." % (filesizeformat(int(settings.MAX_TOTAL_FILE_SIZE)), filesizeformat(combined)))
    if self.strict and len(data) != self.count:
        raise ValidationError(ugettext(u"An incorrect number of files were uploaded."))
    return data
def __call__(self, file):
    """Validate size bounds and MIME type of an uploaded file.

    BUG FIX: the original guarded with ``isinstance(file, models.FileField)``,
    but validators receive the uploaded *value* (a File/FieldFile), never the
    field class itself, so every call raised 'value_type'. Duck-type on the
    ``size`` attribute instead.
    """
    if not hasattr(file, 'size'):
        params = {
            'type': type(file),
        }
        raise ValidationError(self.error_messages['value_type'], 'type', params)
    if self.max_size is not None and file.size > self.max_size:
        params = {
            'max_size': filesizeformat(self.max_size),
            'size': filesizeformat(file.size),
        }
        raise ValidationError(self.error_messages['max_size'], 'max_size', params)
    if self.min_size is not None and file.size < self.min_size:
        params = {
            'min_size': filesizeformat(self.min_size),
            'size': filesizeformat(file.size),
        }
        raise ValidationError(self.error_messages['min_size'], 'min_size', params)
    if self.content_types:
        content_type = magic.from_buffer(file.read(), mime=True)
        if content_type not in self.content_types:
            params = {'content_type': content_type}
            raise ValidationError(self.error_messages['content_type'], 'content_type', params)
def clean(self):
    """Validate the uploaded test_plan file (type and size)."""
    allowed = ['application/pdf', 'application/vnd.ms-excel']
    size_cap = 2621440  # 2.5MB

    if 'test_plan' not in self.files:
        raise forms.ValidationError(_('Test plan required.'))

    upload = self.files['test_plan']
    if upload.content_type not in allowed:
        message = (_('Invalid file type. Only %s files are supported.')
                   % ', '.join(allowed))
        self._errors['test_plan'] = self.error_class([message])
        raise forms.ValidationError(message)
    if upload._size > size_cap:
        message = _('File too large. Keep size under %s. Current size %s.')
        message = message % (filesizeformat(size_cap), filesizeformat(upload._size))
        self._errors['test_plan'] = self.error_class([message])
        raise forms.ValidationError(message)
    return self.cleaned_data
def clean_files(form):
    """Validate every file field on ``form``: required-ness, allowed
    extension and per-file size, then the combined upload size.

    Errors are attached to ``form._errors``; returns ``form.cleaned_data``.
    """
    combined_size = 0
    for file_field in form.file_fields:
        value = form.cleaned_data.get(file_field.name, None)
        error_text = None
        if value is None:
            if not file_field.required:
                continue
            error_text = _('This field is required.')
        else:
            combined_size += value._size
            extension = os.path.splitext(value.name)[1].lstrip('.').lower()
            if extension not in app_settings.ALLOWED_FILE_TYPES:
                error_text = _('This file type is not allowed.')
            elif value._size > app_settings.MAX_UPLOAD_SIZE:
                error_text = _('Please keep file size under %(max_size)s. Current size is %(size)s.') % \
                    {'max_size': filesizeformat(app_settings.MAX_UPLOAD_SIZE),
                     'size': filesizeformat(value._size)}
        if error_text:
            form._errors[file_field.name] = form.error_class([error_text])
    if combined_size > app_settings.MAX_UPLOAD_TOTAL_SIZE:
        error_text = _('Please keep total file size under %(max)s. Current total size is %(current)s.') % \
            {"max": filesizeformat(app_settings.MAX_UPLOAD_TOTAL_SIZE),
             "current": filesizeformat(combined_size)}
        # Combined-size failure is a form-wide error, not tied to one field.
        if NON_FIELD_ERRORS in form._errors:
            form._errors[NON_FIELD_ERRORS].append(error_text)
        else:
            form._errors[NON_FIELD_ERRORS] = form.error_class([error_text])
    return form.cleaned_data
def __call__(self, file):
    """Reject the upload when it would push the user's quota over its limit."""
    incoming = file.size
    if self.quota.exceeds(incoming):
        would_be = self.quota.current_usage + incoming
        raise forms.ValidationError(
            _('Please keep the total uploaded files under %(total_size)s. With this file, the total would be %(exceed_size)s.'
              % {'total_size': filesizeformat(self.quota.max_usage),
                 'exceed_size': filesizeformat(would_be)})
        )
def clean_content(self):
    """Validate and rename the uploaded answer file.

    Checks the extension whitelist and the size limit, then rewrites the
    file name to a per-event/per-answer path under MEDIA_ROOT (removing any
    stale file of the same name — Django saves the new one afterwards).

    Fixes: dropped a leftover debug ``print`` and replaced the deprecated
    ``dict.has_key`` with the ``in`` operator (works on Python 2 and 3).
    """
    t = ExtFileField(ext_whitelist=FILES_WHITELIST)
    if self.prefix:
        field_name = "%s-content" % self.prefix
    else:
        field_name = "content"
    if field_name not in self.files:
        return
    file_field = self.files[field_name]
    t.clean(file_field)
    if file_field._size > settings.MAX_UPLOAD_SIZE:
        raise forms.ValidationError(
            _("Please keep filesize under %s. Current filesize is %s")
            % (filesizeformat(settings.MAX_UPLOAD_SIZE), filesizeformat(file_field._size))
        )
    fname = file_field.name
    extn = fname[(fname.rfind(".") + 1):]
    fname = "%s/files/%s-%d-%d.%s" % (
        settings.MEDIA_ROOT,
        util.camelize(self.question.event.name),
        self.instance.answered_by.id,
        self.instance.question.id,
        extn,
    )
    file_field.name = fname
    if os.path.isfile(fname):
        os.remove(fname)
    # Django takes care of saving the file
    return file_field
def clean(self, *args, **kwargs):
    """Restrict uploads by content type and size.

    Fixes: removed leftover debug ``print`` statements (Python 2 print
    syntax, a SyntaxError under Python 3).
    """
    file = super(ContentTypeRestrictedFileField, self).clean(*args, **kwargs)
    try:
        content_type = file.content_type
        if content_type in self.content_types:
            if file._size > self.max_upload_size:
                raise forms.ValidationError(
                    _('Please keep filesize under %(size_limit)s. Current filesize %(current_size)s'),
                    params={
                        'size_limit': filesizeformat(self.max_upload_size),
                        'current_size': filesizeformat(file._size)
                    }
                )
        else:
            raise forms.ValidationError(
                _("Filetype '%(current_type)s' is not supported."),
                params={
                    'current_type': file.content_type
                }
            )
    except AttributeError:
        # Saved File objects lack content_type/_size; surface the error to
        # the caller rather than swallowing it.
        raise
    return file
def clean(self, *args, **kwargs):
    """Validate an uploaded image's file size and pixel dimensions."""
    from django.core.files.images import get_image_dimensions
    data = super(ExtendedImageField, self).clean(*args, **kwargs)
    uploaded = data.file

    # File-size limit (only enforced when the backend exposes a size).
    too_big = (self.max_upload_size and hasattr(uploaded, 'size')
               and uploaded.size > self.max_upload_size)
    if too_big:
        raise ValidationError(
            _('Files of size greater than {} are not allowed. Your file is {}').format(
                filesizeformat(self.max_upload_size),
                filesizeformat(uploaded.size)
            )
        )

    # Pixel-dimension limits (each only enforced when both bounds are set).
    width, height = get_image_dimensions(data)
    width_ok = not (self.min_width and self.max_width) or self.min_width <= width <= self.max_width
    if not width_ok:
        raise ValidationError(
            _('Images of width lesser than {}px or greater than {}px or are not allowed. The width of your image is {}px').format(
                self.min_width, self.max_width, width))
    height_ok = not (self.min_height and self.max_height) or self.min_height <= height <= self.max_height
    if not height_ok:
        raise ValidationError(
            _('Images of height lesser than {}px or greater than {}px or are not allowed. The height of your image is {}px').format(
                self.min_height, self.max_height, height))
    return data
def clean(self):
    """Validate the test_plan upload: MIME type (guessed from name) and size."""
    accepted_types = [
        'application/pdf',
        'application/vnd.pdf',
        'application/ms-excel',
        'application/vnd.ms-excel',
        'application/vnd.openxmlformats-officedocument.spreadsheetml.'
        'sheet'
    ]
    size_cap = 2621440  # 2.5MB

    if 'test_plan' not in self.files:
        raise forms.ValidationError(_('Test plan required.'))

    upload = self.files['test_plan']
    guessed = mimetypes.guess_type(upload.name)[0]
    if guessed not in accepted_types:
        message = (_('Invalid file type {0}. Only {1} files are supported.')
                   .format(guessed, ', '.join(accepted_types)))
        self._errors['test_plan'] = self.error_class([message])
        raise forms.ValidationError(message)
    if upload._size > size_cap:
        message = _('File too large. Keep size under %s. Current size %s.')
        message = message % (filesizeformat(size_cap), filesizeformat(upload._size))
        self._errors['test_plan'] = self.error_class([message])
        raise forms.ValidationError(message)
    return self.cleaned_data
def clean_avatar(self):
    """Validate the avatar upload: extension, size, and per-user count.

    BUG FIX: the original ended with a bare ``return``, replacing the
    avatar in ``cleaned_data`` with ``None``; return the cleaned file.
    """
    data = self.cleaned_data['avatar']
    if settings.AVATAR_ALLOWED_FILE_EXTS:
        root, ext = os.path.splitext(data.name.lower())
        if ext not in settings.AVATAR_ALLOWED_FILE_EXTS:
            valid_exts = ", ".join(settings.AVATAR_ALLOWED_FILE_EXTS)
            error = _("%(ext)s is an invalid file extension. "
                      "Authorized extensions are : %(valid_exts_list)s")
            raise forms.ValidationError(error %
                                        {'ext': ext,
                                         'valid_exts_list': valid_exts})
    if data.size > settings.AVATAR_MAX_SIZE:
        error = _("Your file is too big (%(size)s), "
                  "the maximum allowed size is %(max_valid_size)s")
        raise forms.ValidationError(error % {
            'size': filesizeformat(data.size),
            'max_valid_size': filesizeformat(settings.AVATAR_MAX_SIZE)
        })
    count = Avatar.objects.filter(user=self.user).count()
    # The count cap only applies when more than one avatar is allowed.
    if (settings.AVATAR_MAX_AVATARS_PER_USER > 1) and (count >= settings.AVATAR_MAX_AVATARS_PER_USER):
        error = _("You already have %(nb_avatars)d avatars, "
                  "and the maximum allowed is %(nb_max_avatars)d.")
        raise forms.ValidationError(error % {
            'nb_avatars': count,
            'nb_max_avatars': settings.AVATAR_MAX_AVATARS_PER_USER,
        })
    return data
def clean_document(self):
    """Validate the uploaded document's size against ``MAX_UPLOAD_SIZE``.

    BUG FIX: the original mixed the deprecated ``_size`` attribute with
    ``size`` (guarded on ``_size``, compared ``size``, formatted ``_size``),
    so the guard, the check and the message could disagree. Use ``size``
    consistently.
    """
    document = self.cleaned_data['document']
    if hasattr(document, 'size'):
        if document.size > settings.MAX_UPLOAD_SIZE:
            raise forms.ValidationError(u'Bitte Dateigrösse unter %s halten. Aktuelle Dateigrösse ist %s' % (filesizeformat(settings.MAX_UPLOAD_SIZE), filesizeformat(document.size)))
    return document
def _max_upload_size(file, _max_size):
    """Raise ``ValidationError`` when ``file`` exceeds ``_max_size`` bytes."""
    if file.size <= _max_size:
        return
    error_msg = 'Please keep filesize under %(max_filesize)s. Current filesize %(cur_filesize)s.'
    raise ValidationError(ugettext(error_msg) % {
        'max_filesize': filesizeformat(_max_size),
        'cur_filesize': filesizeformat(file.size)
    })
def clean_photo_upload(self):
    """Validate the photo upload: extension, real image header, and size."""
    photo_upload = self.cleaned_data['photo_upload']
    if photo_upload:
        extension = splitext(photo_upload.name)[1]
        # check the extension
        if extension.lower() not in ALLOWED_LOGO_EXT:
            raise forms.ValidationError(
                _('The photo must be of jpg, gif, or png image type.'))
        # check the image header (sniff actual content)
        image_type = '.%s' % imghdr.what('', photo_upload.read())
        # BUG FIX: rewind after read() so a later save() does not write an
        # empty file.
        photo_upload.seek(0)
        if image_type not in ALLOWED_LOGO_EXT:
            raise forms.ValidationError(
                _('The photo is an invalid image. Try uploading another photo.'
                  ))
        max_upload_size = get_max_file_upload_size()
        if photo_upload.size > max_upload_size:
            raise forms.ValidationError(
                _('Please keep filesize under %(max_upload_size)s. Current filesize %(upload_size)s'
                  ) % {
                    'max_upload_size': filesizeformat(max_upload_size),
                    'upload_size': filesizeformat(photo_upload.size)
                })
    return photo_upload
def monitoring_size():
    """Check stored-data sizes against their configured limits.

    For each tracked model, report to Sentry when its total size exceeds
    the limit (configured in GiB) and cache the current size.

    Refactored from five copy-pasted blocks into one data-driven loop;
    messages, cache keys and limit keys are unchanged.
    """
    GIB = 1024 * 1024 * 1024
    checks = (
        (OmahaVersion, 'Version__limit_size', 'omaha versions', 'omaha_version_size'),
        (SparkleVersion, 'SparkleVersion__limit_size', 'sparkle versions', 'sparkle_version_size'),
        (Feedback, 'Feedback__limit_size', 'feedbacks', 'feedbacks_size'),
        (Crash, 'Crash__limit_size', 'crashes', 'crashes_size'),
        (Symbols, 'Symbols__limit_size', 'symbols', 'symbols_size'),
    )
    for model, limit_key, label, cache_key in checks:
        size = model.objects.get_size()
        if size > gpm[limit_key] * GIB:
            # filesizeformat emits a non-breaking space; normalize for Sentry.
            raven.captureMessage(
                "[Limitation]Size limit of %s is exceeded. Current size is %s [%d]" %
                (label, filters.filesizeformat(size).replace(u'\xa0', u' '), time.time()),
                data={'level': 30, 'logger': 'limitation'})
        cache.set(cache_key, size)
def clean(self, *args, **kwargs):
    """Validate the upload's content type and size, when both are known.

    BUG FIX: the original raised the size error when
    ``self.max_size is None`` *or* the file was too big, so a field
    configured without a max size rejected every upload (and would have
    formatted ``filesizeformat(None)`` into the message). The condition is
    now ``max_size is not None and size > max_size``.
    """
    data = super(ContentTypeRestrictedFileField, self).clean(*args, **kwargs)
    file_obj = data.file

    # Already-saved File objects expose no content_type; accept them as-is.
    content_type = getattr(file_obj, 'content_type', None)
    if not content_type:
        return data

    # Otherwise check the content_type and size
    if self.content_types is None or content_type in self.content_types:
        if self.max_size is not None and file_obj._size > int(self.max_size):
            raise forms.ValidationError(
                _('Please keep filesize under %s. Current filesize %s')
                % (filesizeformat(self.max_size),
                   filesizeformat(file_obj._size)))
    else:
        raise forms.ValidationError(_('Filetype not supported.'))
    return data
def clean(self, *args, **kwargs):
    """Validate upload MIME type (header *and* sniffed) and size bounds.

    BUG FIX: the min-size error built its message with
    ``.format(max_size=...)`` while the template referenced ``{min_size}``,
    raising ``KeyError`` instead of the intended validation error.
    """
    data = super(RestrictedFileField, self).clean(*args, **kwargs)
    file = data.file
    if self.content_types is not None:
        content_type_headers = getattr(file, 'content_type', '')
        file_magic = magic.Magic(mime=True)
        file_content_type = file_magic.from_buffer(file.read(1024))
        file.seek(0)
        # Both the browser-declared and the sniffed type must be allowed.
        if content_type_headers not in self.content_types or file_content_type not in self.content_types:
            raise forms.ValidationError(_('Files of type {type} are not supported.'.format(type=file_content_type)))
    if self.min_upload_size is not None:
        if file._size < self.min_upload_size:
            raise forms.ValidationError(
                _('Files of size less than {min_size} are not allowed. Your file is {current_size}'.format(
                    min_size=filesizeformat(self.min_upload_size),
                    current_size=filesizeformat(file._size)
                )))
    if self.max_upload_size is not None:
        if file._size > self.max_upload_size:
            raise forms.ValidationError(
                _('Files of size greater than {max_size} are not allowed. Your file is {current_size}'.format(
                    max_size=filesizeformat(self.max_upload_size),
                    current_size=filesizeformat(file._size)
                )))
    return data
def clean_receipt(self):
    """Validate the receipt upload size.

    BUG FIX: the error message referred to a "resume" (copy-paste from
    another form); this field holds a receipt.
    """
    receipt = self.cleaned_data['receipt']
    # Files without _size (e.g. nothing uploaded) default to 0 and pass.
    size = getattr(receipt, '_size', 0)
    if size > settings.MAX_UPLOAD_SIZE:
        raise forms.ValidationError("Please keep receipt under %s. Current filesize %s" % (
            filesizeformat(settings.MAX_UPLOAD_SIZE), filesizeformat(size)))
    return receipt
def __call__(self, data):
    """Validate size bounds and libmagic-sniffed content type."""
    if self.max_size is not None and data.size > self.max_size:
        params = {
            'max_size': filesizeformat(self.max_size),
            'size': filesizeformat(data.size),
        }
        raise ValidationError(self.error_messages['max_size'], 'max_size', params)
    if self.min_size is not None and data.size < self.min_size:
        params = {
            # BUG FIX: was ``self.mix_size`` — AttributeError whenever a
            # too-small file should have been reported.
            'min_size': filesizeformat(self.min_size),
            'size': filesizeformat(data.size)
        }
        raise ValidationError(self.error_messages['min_size'], 'min_size', params)
    if self.content_types:
        content_type = magic.from_buffer(data.read(), mime=True)
        if content_type not in self.content_types:
            params = {
                'content_type': content_type
            }
            raise ValidationError(self.error_messages['content_type'], 'content_type', params)

def __eq__(self, other):
    # Validators compare equal whenever both are FileValidator instances
    # (configuration is deliberately ignored).
    return isinstance(other, FileValidator)
def clean_content(self):
    """Validate the three optional upload fields (docfile, docfile1,
    docfile2): the content type family must be accepted and each file must
    stay under its own size limit. Mirrors the jQuery-side validation.

    Refactored from three copy-pasted blocks into one helper; also repairs
    the Poster size message whose literal had been broken in two.
    """
    def _check(upload, max_size, size_template, bad_type_msg):
        # One upload's checks: type family first, then the size cap.
        if upload is None:
            return
        if upload.content_type.split('/')[0] in settings.CONTENT_TYPES:
            if upload._size > max_size:
                raise forms.ValidationError(
                    _(size_template) % (filesizeformat(max_size),
                                        filesizeformat(upload._size)))
        else:
            raise forms.ValidationError(_(bad_type_msg), code='Wrong File Format')

    main_video = self.cleaned_data['docfile'] or None
    preview_video = self.cleaned_data['docfile1'] or None
    poster = self.cleaned_data['docfile2'] or None

    _check(main_video, settings.MAX_UPLOAD_SIZE,
           'Please keep Full Shabingo Video Filesize under %s. Current filesize %s',
           'File type is not supported, Preferred videos formats: .mp4 or .mov')
    _check(preview_video, settings.MAX_UPLOAD_1_SIZE,
           'Please keep Video Preview filesize under %s. Current filesize %s',
           'File type is not supported, Preferred videos formats: .mp4 or .mov')
    _check(poster, settings.MAX_UPLOAD_2_SIZE,
           'Please keep Poster filesize under %s. Current filesize %s',
           'File type is not supported, Preferred videos formats: .png or .jpg')
def clean_program(self):
    """Reject program uploads that exceed ``MAX_UPLOAD_SIZE``."""
    program = self.cleaned_data['program']
    limit = settings.MAX_UPLOAD_SIZE
    if program._size > limit:
        raise forms.ValidationError(
            'Please keep filesize under %s, yours was %s.'
            % (filesizeformat(limit), filesizeformat(program._size))
        )
    return program
def validate_size(content):
    """Validate if the size of the content in not too big (100 kB cap)."""
    MAX_UPLOAD_SIZE = 102400  # 100kB
    actual = content.file.size
    if actual <= MAX_UPLOAD_SIZE:
        return
    message = _("Please keep filesize under %(limit)s. Current filesize %(current)s") % {
        'limit': filesizeformat(MAX_UPLOAD_SIZE),
        'current': filesizeformat(actual)}
    raise ValidationError(message, code='file-size')
def validate_size(content):
    """Validate if the size of the content in not too big (100 kB cap)."""
    limit = 102400  # 100kB
    size = content.file.size
    if size > limit:
        raise ValidationError(_('Please keep filesize under %s. Current filesize %s') % (
            filesizeformat(limit), filesizeformat(size))
        )
def scan_size(value):
    """Validator: reject scans larger than ``settings.ALLOWED_SCAN_SIZE``."""
    cap = settings.ALLOWED_SCAN_SIZE
    if value.size <= cap:
        return
    raise ValidationError(
        ugettext_lazy('Scan size exceeded. Allowed are:') + ' ' + filesizeformat(cap)
    )
def process(self):
    """
    This method contains the logic for processing tasks asynchronously
    from a background thread or from a worker. Here tasks that are ready
    to be processed execute some logic. This could be communication
    with a processing node or executing a pending action.
    """
    try:
        # Local-only pending action: resize images/GCP before anything else.
        if self.pending_action == pending_actions.RESIZE:
            resized_images = self.resize_images()
            self.refresh_from_db()
            self.resize_gcp(resized_images)
            self.pending_action = None
            self.save()

        if self.auto_processing_node and not self.status in [status_codes.FAILED, status_codes.CANCELED]:
            # No processing node assigned and need to auto assign
            if self.processing_node is None:
                # Assign first online node with lowest queue count
                self.processing_node = ProcessingNode.find_best_available_node()
                if self.processing_node:
                    # Doesn't have to be accurate, it will get overridden later
                    self.processing_node.queue_count += 1
                    self.processing_node.save()

                    logger.info("Automatically assigned processing node {} to {}".format(self.processing_node, self))
                    self.save()

            # Processing node assigned, but is offline and no errors
            if self.processing_node and not self.processing_node.is_online():
                # If we are queued up
                # detach processing node, and reassignment
                # will be processed at the next tick
                if self.status == status_codes.QUEUED:
                    logger.info("Processing node {} went offline, reassigning {}...".format(self.processing_node, self))
                    self.uuid = ''
                    self.processing_node = None
                    self.status = None
                    self.save()

                elif self.status == status_codes.RUNNING:
                    # Task was running and processing node went offline
                    # It could have crashed due to low memory
                    # or perhaps it went offline due to network errors.
                    # We can't easily differentiate between the two, so we need
                    # to notify the user because if it crashed due to low memory
                    # the user might need to take action (or be stuck in an infinite loop)
                    raise ProcessingError("Processing node went offline. This could be due to insufficient memory or a network error.")

        if self.processing_node:
            # Need to process some images (UUID not yet set and task doesn't have pending actions)?
            if not self.uuid and self.pending_action is None and self.status is None:
                logger.info("Processing... {}".format(self))

                images = [image.path() for image in self.imageupload_set.all()]

                # Track upload progress, but limit the number of DB updates
                # to every 2 seconds (and always record the 100% progress)
                last_update = 0
                def callback(progress):
                    nonlocal last_update
                    if time.time() - last_update >= 2 or (progress >= 1.0 - 1e-6 and progress <= 1.0 + 1e-6):
                        Task.objects.filter(pk=self.id).update(upload_progress=progress)
                        last_update = time.time()

                # This takes a while
                uuid = self.processing_node.process_new_task(images, self.name, self.options, callback)

                # Refresh task object before committing change
                self.refresh_from_db()
                self.uuid = uuid
                self.save()

                # TODO: log process has started processing

        if self.pending_action is not None:
            if self.pending_action == pending_actions.CANCEL:
                # Do we need to cancel the task on the processing node?
                logger.info("Canceling {}".format(self))
                if self.processing_node and self.uuid:
                    # Attempt to cancel the task on the processing node
                    # We don't care if this fails (we tried)
                    try:
                        self.processing_node.cancel_task(self.uuid)
                    except ProcessingException:
                        logger.warning("Could not cancel {} on processing node. We'll proceed anyway...".format(self))

                    self.status = status_codes.CANCELED
                    self.pending_action = None
                    self.save()
                else:
                    raise ProcessingError("Cannot cancel a task that has no processing node or UUID")

            elif self.pending_action == pending_actions.RESTART:
                logger.info("Restarting {}".format(self))
                if self.processing_node:
                    # Check if the UUID is still valid, as processing nodes purge
                    # results after a set amount of time, the UUID might have been eliminated.
                    uuid_still_exists = False
                    if self.uuid:
                        try:
                            info = self.processing_node.get_task_info(self.uuid)
                            uuid_still_exists = info['uuid'] == self.uuid
                        except ProcessingException:
                            pass

                    need_to_reprocess = False

                    if uuid_still_exists:
                        # Good to go
                        try:
                            self.processing_node.restart_task(self.uuid, self.options)
                        except ProcessingError as e:
                            # Something went wrong
                            logger.warning("Could not restart {}, will start a new one".format(self))
                            need_to_reprocess = True
                    else:
                        need_to_reprocess = True

                    if need_to_reprocess:
                        logger.info("{} needs to be reprocessed".format(self))

                        # Task has been purged (or processing node is offline)
                        # Process this as a new task
                        # Removing its UUID will cause the scheduler
                        # to process this the next tick
                        self.uuid = ''

                        # We also remove the "rerun-from" parameter if it's set
                        self.options = list(filter(lambda d: d['name'] != 'rerun-from', self.options))
                        self.upload_progress = 0
                        self.console_output = ""
                        self.processing_time = -1
                        self.status = None
                        self.last_error = None
                        self.pending_action = None
                        self.running_progress = 0
                        self.save()
                else:
                    raise ProcessingError("Cannot restart a task that has no processing node")

            elif self.pending_action == pending_actions.REMOVE:
                logger.info("Removing {}".format(self))
                if self.processing_node and self.uuid:
                    # Attempt to delete the resources on the processing node
                    # We don't care if this fails, as resources on processing nodes
                    # Are expected to be purged on their own after a set amount of time anyway
                    try:
                        self.processing_node.remove_task(self.uuid)
                    except ProcessingException:
                        pass

                # What's more important is that we delete our task properly here
                self.delete()

                # Stop right here!
                return

        if self.processing_node:
            # Need to update status (first time, queued or running?)
            if self.uuid and self.status in [None, status_codes.QUEUED, status_codes.RUNNING]:
                # Update task info from processing node
                info = self.processing_node.get_task_info(self.uuid)

                self.processing_time = info["processingTime"]
                self.status = info["status"]["code"]

                current_lines_count = len(self.console_output.split("\n"))
                console_output = self.processing_node.get_task_console_output(self.uuid, current_lines_count)
                if len(console_output) > 0:
                    self.console_output += "\n".join(console_output) + '\n'

                    # Update running progress
                    for line in console_output:
                        for line_match, value in self.TASK_OUTPUT_MILESTONES.items():
                            if line_match in line:
                                self.running_progress = value
                                break

                if "errorMessage" in info["status"]:
                    self.last_error = info["status"]["errorMessage"]

                # Has the task just been canceled, failed, or completed?
                if self.status in [status_codes.FAILED, status_codes.COMPLETED, status_codes.CANCELED]:
                    logger.info("Processing status: {} for {}".format(self.status, self))

                    if self.status == status_codes.COMPLETED:
                        # Since we're downloading/extracting results, set temporarely the status back to running
                        self.status = status_codes.RUNNING

                        assets_dir = self.assets_path("")

                        # Remove previous assets directory
                        if os.path.exists(assets_dir):
                            logger.info("Removing old assets directory: {} for {}".format(assets_dir, self))
                            shutil.rmtree(assets_dir)

                        os.makedirs(assets_dir)

                        logger.info("Downloading all.zip for {}".format(self))

                        # Download all assets
                        zip_stream = self.processing_node.download_task_asset(self.uuid, "all.zip")
                        zip_path = os.path.join(assets_dir, "all.zip")

                        # Keep track of download progress (if possible)
                        content_length = zip_stream.headers.get('content-length')
                        total_length = int(content_length) if content_length is not None else None
                        downloaded = 0
                        last_update = 0

                        self.console_output += "Downloading results (%s). Please wait...\n" % (filesizeformat(total_length) if total_length is not None else 'unknown size')
                        self.save()

                        with open(zip_path, 'wb') as fd:
                            for chunk in zip_stream.iter_content(4096):
                                downloaded += len(chunk)

                                # Track progress if we know the content header length
                                # every 2 seconds
                                # NOTE(review): when the server sends no
                                # content-length, total_length is None and
                                # ``total_length > 0`` raises TypeError on
                                # Python 3 — confirm upstream always sets it.
                                if total_length > 0 and time.time() - last_update >= 2:
                                    Task.objects.filter(pk=self.id).update(running_progress=(self.TASK_OUTPUT_MILESTONES_LAST_VALUE + (float(downloaded) / total_length) * 0.1))
                                    last_update = time.time()
                                fd.write(chunk)

                        logger.info("Done downloading all.zip for {}".format(self))
                        self.refresh_from_db()

                        self.console_output += "Extracting results. This could take a few minutes...\n"
                        self.save()

                        # Extract from zip
                        with zipfile.ZipFile(zip_path, "r") as zip_h:
                            zip_h.extractall(assets_dir)

                        logger.info("Extracted all.zip for {}".format(self))

                        # Populate *_extent fields
                        extent_fields = [
                            (os.path.realpath(self.assets_path("odm_orthophoto", "odm_orthophoto.tif")),
                             'orthophoto_extent'),
                            (os.path.realpath(self.assets_path("odm_dem", "dsm.tif")),
                             'dsm_extent'),
                            (os.path.realpath(self.assets_path("odm_dem", "dtm.tif")),
                             'dtm_extent'),
                        ]

                        for raster_path, field in extent_fields:
                            if os.path.exists(raster_path):
                                # Read extent and SRID
                                raster = GDALRaster(raster_path)
                                extent = OGRGeometry.from_bbox(raster.extent)

                                # It will be implicitly transformed into the SRID of the model's field
                                # self.field = GEOSGeometry(...)
                                setattr(self, field, GEOSGeometry(extent.wkt, srid=raster.srid))

                                logger.info("Populated extent field with {} for {}".format(raster_path, self))

                        self.update_available_assets_field()
                        self.running_progress = 1.0
                        self.console_output += "Done!\n"
                        self.status = status_codes.COMPLETED
                        self.save()

                        from app.plugins import signals as plugin_signals
                        plugin_signals.task_completed.send_robust(sender=self.__class__, task_id=self.id)
                    else:
                        # FAILED, CANCELED
                        self.save()
                else:
                    # Still waiting...
                    self.save()

    except ProcessingError as e:
        self.set_failure(str(e))
    except (ConnectionRefusedError, ConnectionError) as e:
        logger.warning("{} cannot communicate with processing node: {}".format(self, str(e)))
    except ProcessingTimeout as e:
        logger.warning("{} timed out with error: {}. We'll try reprocessing at the next tick.".format(self, str(e)))
def download_all_missing(self, archives: Iterable[Archive] = None) -> None:
    """Download every archive that is pending a remote (FTPS) download.

    Candidates are split by ``Archive.match_type`` into Hath downloads
    (a remote folder of images that is zipped locally) and torrent
    downloads (either a remote folder or a single remote file). Each
    finished archive is handed to ``process_downloaded_archive``.

    :param archives: explicit archives to fetch; when falsy, the
        ``Archive.objects.filter_by_dl_remote()`` queryset is used.
    :return: None. Returns early if there is nothing to do or the FTPS
        connection could not be initialized.
    """
    files_torrent = []
    files_hath = []
    if not archives:
        found_archives: Iterable[Archive] = list(Archive.objects.filter_by_dl_remote())
    else:
        found_archives = archives
    if not found_archives:
        return
    # Partition the work by download mechanism.
    for archive in found_archives:
        if 'torrent' in archive.match_type:
            files_torrent.append(archive)
        elif 'hath' in archive.match_type:
            files_hath.append(archive)
    if len(files_torrent) + len(files_hath) == 0:
        return
    self.start_connection()
    if not self.ftps:
        self.logger.error(
            "Cannot download the archives, the FTP connection is not initialized."
        )
        return None
    # Hath downloads
    if len(files_hath) > 0:
        self.set_current_dir(self.settings.providers['panda'].remote_hath_dir)
        # self.ftps.encoding = 'utf8'
        files_matched_hath = []
        # Remote Hath folders end in "[<gid>]"; match that gid against each
        # candidate archive's gallery gid.
        for line in self.ftps.mlsd(facts=["type"]):
            if line[1]["type"] != 'dir':
                continue
            m = re.search(r'.*?\[(\d+)\]$', line[0])
            if m:
                for archive in files_hath:
                    if m.group(1) == archive.gallery.gid:
                        # (remote folder, local zip path, expected size, archive)
                        files_matched_hath.append(
                            (line[0], archive.zipped.path, int(archive.filesize), archive))
        for matched_file_hath in files_matched_hath:
            # Sum the remote image sizes (galleryinfo.txt excluded) to check
            # whether the remote side has finished downloading the gallery.
            total_remote_size = 0
            remote_ftp_tuples = []
            for img_file_tuple in self.ftps.mlsd(path=matched_file_hath[0], facts=["type", "size"]):
                if img_file_tuple[1]["type"] != 'file' or img_file_tuple[0] == 'galleryinfo.txt':
                    continue
                total_remote_size += int(img_file_tuple[1]["size"])
                remote_ftp_tuples.append((img_file_tuple[0], img_file_tuple[1]["size"]))
            if total_remote_size != matched_file_hath[2]:
                self.logger.info(
                    "For archive: {archive}, remote folder: {folder} "
                    "has not completed the download ({current}/{total}), skipping".format(
                        archive=matched_file_hath[3],
                        folder=matched_file_hath[0],
                        current=filesizeformat(total_remote_size),
                        total=filesizeformat(matched_file_hath[2])
                    )
                )
                continue
            # NOTE(review): the format string prints "(unknown)" yet a
            # 'filename' kwarg is supplied and never referenced — the message
            # probably meant "{filename}"; confirm against the original intent.
            self.logger.info(
                "For archive: {archive}, downloading and creating zip "
                "for folder (unknown), {image_count} images".format(
                    archive=matched_file_hath[3],
                    filename=matched_file_hath[1],
                    image_count=len(remote_ftp_tuples)
                ))
            dir_path = mkdtemp()
            self.current_download['total'] = len(remote_ftp_tuples)
            # 1-based progress counter for the UI.
            for count, remote_file in enumerate(sorted(remote_ftp_tuples), start=1):
                # Retry each file up to 10 times, rebuilding the FTPS
                # connection after a timeout/reset; 'else: break' leaves the
                # retry loop on the first clean transfer.
                for retry_count in range(10):
                    try:
                        with open(os.path.join(dir_path, remote_file[0]), "wb") as file:
                            self.current_download['index'] = count
                            self.write_file_update_progress(
                                'RETR %s' % (str(matched_file_hath[0]) + "/" + remote_file[0]),
                                file.write, int(remote_file[1])
                            )
                    except (ConnectionResetError, socket.timeout, TimeoutError):
                        self.logger.error("Hath download failed for file {} of {}, restarting connection...".format(
                            count, len(remote_ftp_tuples))
                        )
                        self.ftps.close()
                        self.start_connection()
                        self.set_current_dir(self.settings.providers['panda'].remote_hath_dir)
                    else:
                        break
            # Flatten the downloaded folder into the archive's zip file.
            with ZipFile(os.path.join(self.settings.MEDIA_ROOT, matched_file_hath[1]), 'w') as archive_file:
                for (root_path, _, file_names) in os.walk(dir_path):
                    for current_file in file_names:
                        archive_file.write(
                            os.path.join(root_path, current_file), arcname=os.path.basename(current_file))
            shutil.rmtree(dir_path, ignore_errors=True)
            self.process_downloaded_archive(matched_file_hath[3])
    # Torrent downloads
    if len(files_torrent) > 0:
        self.set_current_dir(self.settings.ftps['remote_torrent_dir'])
        self.ftps.encoding = 'utf8'
        files_matched_torrent = []
        # Match remote entries against each archive's zip basename (with the
        # " [<gid>]" suffix stripped when the archive has a gallery).
        for line in self.ftps.mlsd(facts=["type", "size"]):
            if not line[0]:
                continue
            if 'type' not in line[1]:
                continue
            if line[1]["type"] != 'dir' and line[1]["type"] != 'file':
                continue
            for archive in files_torrent:
                if archive.gallery:
                    cleaned_torrent_name = os.path.splitext(
                        os.path.basename(archive.zipped.path))[0].replace(' [' + archive.gallery.gid + ']', '')
                else:
                    cleaned_torrent_name = os.path.splitext(os.path.basename(archive.zipped.path))[0]
                if replace_illegal_name(os.path.splitext(line[0])[0]) in cleaned_torrent_name:
                    # (remote name, entry type, remote size or 0 for dirs, archive)
                    if line[1]["type"] == 'dir':
                        files_matched_torrent.append((line[0], line[1]["type"], 0, archive))
                    else:
                        files_matched_torrent.append((line[0], line[1]["type"], int(line[1]["size"]), archive))
        for matched_file_torrent in files_matched_torrent:
            if matched_file_torrent[1] == 'dir':
                # Folder torrent: download every file then zip it locally.
                dir_path = mkdtemp()
                remote_ftp_files = list(self.ftps.mlsd(path=matched_file_torrent[0], facts=["type", "size"]))
                self.current_download['total'] = len(remote_ftp_files)
                # NOTE(review): same "(unknown)" / unused 'filename' kwarg
                # oddity as the Hath branch above — confirm intended message.
                self.logger.info(
                    "For archive: {archive}, downloading and creating zip "
                    "for folder (unknown), {image_count} images".format(
                        archive=matched_file_torrent[3],
                        filename=matched_file_torrent[0],
                        image_count=len(remote_ftp_files)
                    ))
                # NOTE(review): this counter is 0-based while the Hath branch
                # uses start=1 — progress display differs by one; confirm.
                for count, img_file_tuple in enumerate(remote_ftp_files):
                    if img_file_tuple[1]["type"] != 'file':
                        continue
                    # Same 10-attempt retry-with-reconnect pattern as above.
                    for retry_count in range(10):
                        try:
                            with open(os.path.join(dir_path, img_file_tuple[0]), "wb") as file:
                                self.current_download['index'] = count
                                self.write_file_update_progress(
                                    'RETR %s' % (str(matched_file_torrent[0]) + "/" + img_file_tuple[0]),
                                    file.write, int(img_file_tuple[1]["size"])
                                )
                        except (ConnectionResetError, socket.timeout, TimeoutError):
                            self.logger.error("Torrent download failed for folder, restarting connection...")
                            self.ftps.close()
                            self.start_connection()
                            self.set_current_dir(self.settings.ftps['remote_torrent_dir'])
                        else:
                            break
                with ZipFile(matched_file_torrent[3].zipped.path, 'w') as archive_file:
                    for (root_path, _, file_names) in os.walk(dir_path):
                        for current_file in file_names:
                            archive_file.write(
                                os.path.join(root_path, current_file), arcname=os.path.basename(current_file))
                shutil.rmtree(dir_path, ignore_errors=True)
            else:
                # Single-file torrent: stream straight into the local zip path.
                self.logger.info(
                    "For archive: {archive} downloading remote file: {remote} to local file: {local}".format(
                        archive=matched_file_torrent[3],
                        remote=matched_file_torrent[0],
                        local=matched_file_torrent[3].zipped.path
                    )
                )
                self.current_download['total'] = 1
                for retry_count in range(10):
                    try:
                        with open(matched_file_torrent[3].zipped.path, "wb") as file:
                            self.current_download['index'] = 1
                            self.write_file_update_progress(
                                'RETR %s' % matched_file_torrent[0],
                                file.write, matched_file_torrent[2])
                    except (ConnectionResetError, socket.timeout, TimeoutError):
                        self.logger.error("Torrent download failed for archive, restarting connection...")
                        self.ftps.close()
                        self.start_connection()
                        self.set_current_dir(self.settings.ftps['remote_torrent_dir'])
                    else:
                        break
            # Optionally convert .rar torrents to zip before post-processing.
            if self.settings.convert_rar_to_zip and os.path.splitext(matched_file_torrent[0])[1].lower() == ".rar":
                self.logger.info(
                    "For archive: {}, converting rar: {} to zip".format(
                        matched_file_torrent[3], matched_file_torrent[3].zipped.path
                    )
                )
                convert_rar_to_zip(matched_file_torrent[3].zipped.path)
            self.process_downloaded_archive(matched_file_torrent[3])
    self.ftps.close()
def test_localized_formats(self):
    """filesizeformat under the German locale: comma decimal separator,
    NBSP before the unit, and '0 Bytes' for non-numeric input."""
    cases = [
        (1023, '1023\xa0Bytes'),
        (1024, '1,0\xa0KB'),
        (10 * 1024, '10,0\xa0KB'),
        (1024 * 1024 - 1, '1024,0\xa0KB'),
        (1024 * 1024, '1,0\xa0MB'),
        (1024 * 1024 * 50, '50,0\xa0MB'),
        (1024 * 1024 * 1024 - 1, '1024,0\xa0MB'),
        (1024 * 1024 * 1024, '1,0\xa0GB'),
        (1024 * 1024 * 1024 * 1024, '1,0\xa0TB'),
        (1024 * 1024 * 1024 * 1024 * 1024, '1,0\xa0PB'),
        (1024 * 1024 * 1024 * 1024 * 1024 * 2000, '2000,0\xa0PB'),
        # Invalid inputs all collapse to zero bytes.
        (complex(1, -1), '0\xa0Bytes'),
        ("", '0\xa0Bytes'),
        ("\N{GREEK SMALL LETTER ALPHA}", '0\xa0Bytes'),
    ]
    with self.settings(USE_L10N=True), translation.override('de'):
        for value, expected in cases:
            self.assertEqual(filesizeformat(value), expected)
def diagnose():
    """
    This command diagnoses an installation of KA Lite.

    It has to be able to work with instances of KA Lite that users do not
    actually own, however it's assumed that the path and the 'kalite'
    commands are configured and work.

    The function is currently non-robust, meaning that not all aspects of
    diagnose data collection is guaranteed to succeed, thus the command
    could potentially fail :(

    Example:

        KALITE_HOME=/home/otheruser/.kalite kalite diagnose --port=7007
    """
    print("")
    print("KA Lite diagnostics")
    print("")
    # Tell users we are calculating, because checking the size of the
    # content directory is slow. Flush immediately after.
    print("Calculating diagnostics...")
    sys.stdout.flush()
    print("")

    # Key, value store for diagnostics, kept as an ordered list of pairs.
    # (OrderedDict is avoided for Python 2.6 compatibility.)
    diagnostics = []

    def diag(key, value):
        """Record one diagnostic as a (key, value) pair."""
        diagnostics.append((key, value))

    diag("KA Lite version", kalite.__version__)
    diag("python", sys.version)
    diag("platform", platform.platform())

    status_code, urls = get_urls()
    for addr in urls:
        diag("server address", addr)
    for addr in get_urls_proxy():
        diag("server proxy", addr)
    diag("server status", status.codes[status_code])

    settings_imported = True

    # Diagnostics from settings. Import can fail on a broken install, so
    # record the traceback instead of crashing the whole command.
    try:
        from django.conf import settings
        from django.template.defaultfilters import filesizeformat
    except Exception:
        settings_imported = False
        diag("Settings failure", traceback.format_exc())

    if settings_imported:
        diag("installed in", os.path.dirname(kalite.__file__))
        diag("content root", settings.CONTENT_ROOT)
        diag("content size", filesizeformat(get_size(settings.CONTENT_ROOT)))
        diag("user database", settings.DATABASES['default']['NAME'])
        # Device/sync data is best-effort as well: an unregistered or broken
        # device must not abort the remaining diagnostics.
        try:
            from securesync.models import Device
            device = Device.get_own_device()
            sync_sessions = device.client_sessions.all()
            zone = device.get_zone()
            diag("device name", str(device.name))
            diag("device ID", str(device.id))
            diag("device registered", str(device.is_registered()))
            diag(
                "synced",
                str(sync_sessions.latest('timestamp').timestamp
                    if sync_sessions.exists() else "Never"))
            diag("sync result",
                 ("OK" if sync_sessions.latest('timestamp').errors == 0
                  else "Error") if sync_sessions.exists() else "-")
            diag("zone ID", str(zone.id) if zone else "Unset")
        except Exception:
            diag("Device failure", traceback.format_exc())

    for k, v in diagnostics:
        # Pad continuation lines of multi-line values so they align under
        # the value column. The list comprehension (rather than bare map())
        # is required on Python 3, where `list + map(...)` raises TypeError
        # because map() returns an iterator.
        values = str(v).split("\n")
        values = "\n".join([values[0]] + [(" " * 22) + x for x in values[1:]])
        print((k.upper() + ": ").ljust(21), values)
class CreateForm(forms.SelfHandlingForm):
    """Form for creating a cinder volume, optionally sourced from a
    snapshot or a Glance image.

    Fixes vs. previous revision:
    - ``max_length`` is an int (a string breaks length validation).
    - bare ``except:`` narrowed to ``except Exception:``.
    - ``%`` interpolation moved outside ``_()`` so translation lookup
      uses the untranslated template string.
    - loop variable no longer shadows the ``type`` builtin.
    """
    name = forms.CharField(max_length=255, label=_("Volume Name"))
    description = forms.CharField(widget=forms.Textarea,
                                  label=_("Description"), required=False)
    type = forms.ChoiceField(label=_("Type"), required=False)
    size = forms.IntegerField(min_value=1, label=_("Size (GB)"))
    encryption = forms.ChoiceField(label=_("Encryption"), required=False)
    volume_source_type = forms.ChoiceField(label=_("Volume Source"),
                                           required=False)
    snapshot_source = forms.ChoiceField(
        label=_("Use snapshot as a source"),
        widget=SelectWidget(
            attrs={'class': 'snapshot-selector'},
            data_attrs=('size', 'display_name'),
            transform=lambda x: ("%s (%sGB)" % (x.display_name, x.size))),
        required=False)
    image_source = forms.ChoiceField(
        label=_("Use image as a source"),
        widget=SelectWidget(
            attrs={'class': 'image-selector'},
            data_attrs=('size', 'name'),
            transform=lambda x: ("%s (%s)" % (x.name,
                                              filesizeformat(x.bytes)))),
        required=False)

    def __init__(self, request, *args, **kwargs):
        super(CreateForm, self).__init__(request, *args, **kwargs)
        volume_types = cinder.volume_type_list(request)
        self.fields['type'].choices = [("", "")] + \
            [(vol_type.name, vol_type.name) for vol_type in volume_types]

        # Hide the volume encryption field if the hypervisor doesn't support
        # it. NOTE: as of Grizzly this is not yet supported in Nova so
        # enabling this setting will not do anything useful.
        hypervisor_features = getattr(settings,
                                      "OPENSTACK_HYPERVISOR_FEATURES", {})
        can_encrypt_volumes = hypervisor_features.get("can_encrypt_volumes",
                                                      False)
        if can_encrypt_volumes:
            # TODO(laura-glendenning) get from api call in future
            encryption_options = {"LUKS": "dmcrypt LUKS"}
            self.fields['encryption'].choices = [("", "")] + \
                [(enc, display) for enc, display in
                 encryption_options.items()]
        else:
            self.fields['encryption'].widget = forms.widgets.HiddenInput()
            self.fields['encryption'].required = False

        if "snapshot_id" in request.GET:
            # Pre-populate the form from the snapshot the user came from.
            try:
                snapshot = self.get_snapshot(request,
                                             request.GET["snapshot_id"])
                self.fields['name'].initial = snapshot.display_name
                self.fields['size'].initial = snapshot.size
                self.fields['snapshot_source'].choices = ((snapshot.id,
                                                           snapshot),)
                try:
                    # Set the volume type from the original volume
                    orig_volume = cinder.volume_get(request,
                                                    snapshot.volume_id)
                    self.fields['type'].initial = orig_volume.volume_type
                except Exception:
                    pass
                self.fields['size'].help_text = _(
                    'Volume size must be equal '
                    'to or greater than the snapshot size (%sGB)'
                ) % snapshot.size
                del self.fields['image_source']
                del self.fields['volume_source_type']
            except Exception:
                exceptions.handle(request,
                                  _('Unable to load the specified snapshot.'))
        elif 'image_id' in request.GET:
            # Pre-populate the form from the image the user came from.
            try:
                image = self.get_image(request, request.GET["image_id"])
                image.bytes = image.size
                self.fields['name'].initial = image.name
                self.fields['size'].initial = bytes_to_gigabytes(image.size)
                self.fields['image_source'].choices = ((image.id, image),)
                self.fields['size'].help_text = _(
                    'Volume size must be equal '
                    'to or greater than the image size (%s)'
                ) % filesizeformat(image.size)
                del self.fields['snapshot_source']
                del self.fields['volume_source_type']
            except Exception:
                msg = _('Unable to load the specified image. %s')
                exceptions.handle(request, msg % request.GET['image_id'])
        else:
            # No preselected source: offer every available snapshot/image.
            source_type_choices = []

            try:
                snapshots = cinder.volume_snapshot_list(request)
                if snapshots:
                    source_type_choices.append(("snapshot_source",
                                                _("Snapshot")))
                    choices = [('', _("Choose a snapshot"))] + \
                        [(s.id, s) for s in snapshots]
                    self.fields['snapshot_source'].choices = choices
                else:
                    del self.fields['snapshot_source']
            except Exception:
                exceptions.handle(request, _("Unable to retrieve "
                                             "volume snapshots."))

            images = get_available_images(request, request.user.tenant_id)
            if images:
                source_type_choices.append(("image_source", _("Image")))
                choices = [('', _("Choose an image"))]
                for image in images:
                    image.bytes = image.size
                    image.size = bytes_to_gigabytes(image.bytes)
                    choices.append((image.id, image))
                self.fields['image_source'].choices = choices
            else:
                del self.fields['image_source']

            if source_type_choices:
                choices = ([('no_source_type',
                             _("No source, empty volume."))] +
                           source_type_choices)
                self.fields['volume_source_type'].choices = choices
            else:
                del self.fields['volume_source_type']

    def handle(self, request, data):
        """Validate quotas/sizes and create the volume.

        Returns the created volume on success, False on any failure
        (the error is surfaced through ``api_error``/``exceptions.handle``).
        """
        try:
            # FIXME(johnp): cinderclient currently returns a useless
            # error message when the quota is exceeded when trying to create
            # a volume, so we need to check for that scenario here before we
            # send it off to try and create.
            usages = cinder.tenant_absolute_limits(self.request)
            volumes = cinder.volume_list(self.request)
            total_size = sum(getattr(volume, 'size', 0)
                             for volume in volumes)
            usages['gigabytesUsed'] = total_size
            usages['volumesUsed'] = len(volumes)
            availableGB = usages['maxTotalVolumeGigabytes'] - \
                usages['gigabytesUsed']
            availableVol = usages['maxTotalVolumes'] - usages['volumesUsed']

            snapshot_id = None
            image_id = None
            source_type = data.get('volume_source_type', None)
            if (data.get("snapshot_source", None) and
                    source_type in [None, 'snapshot_source']):
                # Create from Snapshot
                snapshot = self.get_snapshot(request,
                                             data["snapshot_source"])
                snapshot_id = snapshot.id
                if data['size'] < snapshot.size:
                    error_message = _('The volume size cannot be less than '
                                      'the snapshot size (%sGB)'
                                      ) % snapshot.size
                    raise ValidationError(error_message)
            elif (data.get("image_source", None) and
                    source_type in [None, 'image_source']):
                # Create from Image
                image = self.get_image(request, data["image_source"])
                image_id = image.id
                image_size = bytes_to_gigabytes(image.size)
                if data['size'] < image_size:
                    error_message = _('The volume size cannot be less than '
                                      'the image size (%s)'
                                      ) % filesizeformat(image.size)
                    raise ValidationError(error_message)
            else:
                # Empty volume: only the quota checks apply.
                if isinstance(data['size'], str):
                    data['size'] = int(data['size'])
                if availableGB < data['size']:
                    error_message = _('A volume of %(req)iGB cannot be '
                                      'created as you only have %(avail)iGB '
                                      'of your quota available.')
                    params = {'req': data['size'], 'avail': availableGB}
                    raise ValidationError(error_message % params)
                elif availableVol <= 0:
                    error_message = _('You are already using all of your '
                                      'available volumes.')
                    raise ValidationError(error_message)

            metadata = {}
            if data['encryption']:
                metadata['encryption'] = data['encryption']

            volume = cinder.volume_create(request,
                                          data['size'],
                                          data['name'],
                                          data['description'],
                                          data['type'],
                                          snapshot_id=snapshot_id,
                                          image_id=image_id,
                                          metadata=metadata)
            message = _('Creating volume "%s"') % data['name']
            messages.info(request, message)
            return volume
        except ValidationError as e:
            self.api_error(e.messages[0])
            return False
        except Exception:
            exceptions.handle(request, ignore=True)
            self.api_error(_("Unable to create volume."))
            return False

    @memoized
    def get_snapshot(self, request, id):
        """Fetch (and memoize) a volume snapshot by id."""
        return cinder.volume_snapshot_get(request, id)

    @memoized
    def get_image(self, request, id):
        """Fetch (and memoize) a Glance image by id."""
        return glance.image_get(request, id)
def deferred_manual_cleanup(model, limit_size=None, limit_days=None,
                            limit_duplicated=None):
    """Run a manual cleanup pass for one model and report the result.

    Runs up to three deletion strategies (duplicates, age, total size),
    accumulates their results, sends a summary to Raven/Sentry and emits
    structured log lines for the summary and each deleted element.

    :param model: two-item sequence ``(model_class, model_name)``; the
        class is passed to the delete helpers, the name is used in
        messages and log keys.
    :param limit_size: size threshold for ``delete_size_is_exceeded``.
    :param limit_days: age threshold for ``delete_older_than``.
    :param limit_duplicated: threshold for ``delete_duplicate_crashes``.
    """
    logger = logging.getLogger('limitation')
    full_result = dict(count=0, size=0, elements=[])

    def _merge(result):
        # Fold one cleanup pass into the running totals; passes that
        # deleted nothing are ignored. (Previously this block was
        # copy-pasted three times.)
        if result.get('count', 0):
            full_result['count'] += result['count']
            full_result['size'] += result['size']
            full_result['elements'] += result['elements']

    if limit_duplicated:
        _merge(delete_duplicate_crashes(limit=limit_duplicated))
    if limit_days:
        _merge(delete_older_than(*model, limit=limit_days))
    if limit_size:
        _merge(delete_size_is_exceeded(*model, limit=limit_size))

    # Correlation id that ties the Sentry event to the Splunk/log entries.
    log_id = str(uuid.uuid4())
    params = dict(log_id=log_id)
    splunk_url = get_splunk_url(params)
    splunk_filter = 'log_id=%s' % log_id if splunk_url else None
    ids_list = sorted(element['id'] for element in full_result['elements'])
    # Human-readable size; strip the non-breaking space filesizeformat
    # inserts before the unit. Computed once and reused below.
    size_text = filters.filesizeformat(
        full_result['size']).replace(u'\xa0', u' ')
    raven_extra = {
        "id": log_id,
        "splunk_url": splunk_url,
        "splunk_filter": splunk_filter,
        "%s_list" % (model[1],): ids_list,
    }
    raven.captureMessage(
        "[Limitation]Manual cleanup freed %d %s, total size of cleaned "
        "space is %s [%s]" % (full_result['count'], model[1],
                              size_text, log_id),
        data=dict(level=20, logger='limitation'),
        extra=raven_extra)
    extra = dict(log_id=log_id, meta=True, count=full_result['count'],
                 size=size_text, model=model[1],
                 limit_duplicated=limit_duplicated, limit_size=limit_size,
                 limit_days=limit_days, reason='manual')
    logger.info(add_extra_to_log_message('Manual cleanup', extra=extra))
    for element in full_result['elements']:
        # Rename 'id' to '<model>_id' and tag with the correlation id.
        element.update({
            "log_id": log_id,
            "%s_id" % (model[1],): element.pop('id'),
        })
        logger.info(
            add_extra_to_log_message('Manual cleanup element',
                                     extra=element))
class SetInstanceDetailsAction(workflows.Action):
    """Launch-instance workflow step: name, flavor, count and boot source.

    Validates quota usage and the chosen boot source (image, instance
    snapshot, volume, volume snapshot, or image-to-new-volume) in clean().
    """
    availability_zone = forms.ThemableChoiceField(label=_("Availability Zone"),
                                                  required=False)
    name = forms.CharField(label=_("Instance Name"), max_length=255)
    flavor = forms.ThemableChoiceField(label=_("Flavor"),
                                       help_text=_("Size of image to launch."))
    count = forms.IntegerField(label=_("Number of Instances"),
                               min_value=1,
                               initial=1)
    source_type = forms.ThemableChoiceField(
        label=_("Instance Boot Source"),
        help_text=_("Choose Your Boot Source "
                    "Type."))
    instance_snapshot_id = forms.ThemableChoiceField(
        label=_("Instance Snapshot"),
        required=False)
    volume_id = forms.ThemableChoiceField(label=_("Volume"), required=False)
    volume_snapshot_id = forms.ThemableChoiceField(label=_("Volume Snapshot"),
                                                   required=False)
    image_id = forms.ChoiceField(
        label=_("Image Name"),
        required=False,
        widget=forms.ThemableSelectWidget(
            data_attrs=('volume_size',),
            transform=lambda x: ("%s (%s)" % (x.name,
                                              filesizeformat(x.bytes)))))
    volume_size = forms.IntegerField(label=_("Device size (GB)"),
                                     initial=1,
                                     min_value=0,
                                     required=False,
                                     help_text=_("Volume size in gigabytes "
                                                 "(integer value)."))
    device_name = forms.CharField(label=_("Device Name"),
                                  required=False,
                                  initial="vda",
                                  help_text=_("Volume mount point (e.g. 'vda' "
                                              "mounts at '/dev/vda'). Leave "
                                              "this field blank to let the "
                                              "system choose a device name "
                                              "for you."))
    vol_delete_on_instance_delete = forms.BooleanField(
        label=_("Delete Volume on Instance Delete"),
        initial=False,
        required=False,
        help_text=_("Delete volume when the instance is deleted"))

    class Meta(object):
        name = _("Details")
        help_text_template = ("project/instances/"
                              "_launch_details_help.html")

    def __init__(self, request, context, *args, **kwargs):
        """Build the boot-source choices based on available services."""
        self._init_images_cache()
        self.request = request
        self.context = context
        super(SetInstanceDetailsAction, self).__init__(
            request, context, *args, **kwargs)

        # Hide the device field if the hypervisor doesn't support it.
        if not nova.can_set_mount_point():
            self.fields['device_name'].widget = forms.widgets.HiddenInput()

        source_type_choices = [
            ('', _("Select source")),
            ("image_id", _("Boot from image")),
            ("instance_snapshot_id", _("Boot from snapshot")),
        ]
        # Volume-based boot sources are only offered when cinder is up.
        if cinder.is_volume_service_enabled(request):
            source_type_choices.append(("volume_id", _("Boot from volume")))
            try:
                if api.nova.extension_supported("BlockDeviceMappingV2Boot",
                                                request):
                    source_type_choices.append(
                        ("volume_image_id",
                         _("Boot from image (creates a new volume)")))
            except Exception:
                exceptions.handle(request, _('Unable to retrieve extensions '
                                             'information.'))
            source_type_choices.append(
                ("volume_snapshot_id",
                 _("Boot from volume snapshot (creates a new volume)")))
        self.fields['source_type'].choices = source_type_choices

    @memoized.memoized_method
    def _get_flavor(self, flavor_id):
        """Return the flavor object for flavor_id, or None if not found."""
        try:
            # We want to retrieve details for a given flavor,
            # however flavor_list uses a memoized decorator
            # so it is used instead of flavor_get to reduce the number
            # of API calls.
            flavors = instance_utils.flavor_list(self.request)
            flavor = [x for x in flavors if x.id == flavor_id][0]
        except IndexError:
            flavor = None
        return flavor

    @memoized.memoized_method
    def _get_image(self, image_id):
        """Return the image object for image_id, or None if not found."""
        try:
            # We want to retrieve details for a given image,
            # however get_available_images uses a cache of image list,
            # so it is used instead of image_get to reduce the number
            # of API calls.
            images = image_utils.get_available_images(
                self.request,
                self.context.get('project_id'),
                self._images_cache)
            image = [x for x in images if x.id == image_id][0]
        except IndexError:
            image = None
        return image

    def _check_quotas(self, cleaned_data):
        """Raise/record errors when the launch would exceed any quota."""
        count = cleaned_data.get('count', 1)
        # Prevent launching more instances than the quota allows
        usages = quotas.tenant_quota_usages(
            self.request,
            targets=('instances', 'cores', 'ram', 'volumes', ))
        available_count = usages['instances']['available']
        if available_count < count:
            msg = (_('The requested instance(s) cannot be launched '
                     'as your quota will be exceeded: Available: '
                     '%(avail)s, Requested: %(req)s.')
                   % {'avail': available_count, 'req': count})
            raise forms.ValidationError(msg)

        # Sources that create a new volume also consume volume quota.
        source_type = cleaned_data.get('source_type')
        if source_type in ('volume_image_id', 'volume_snapshot_id'):
            available_volume = usages['volumes']['available']
            if available_volume < count:
                msg = (_('The requested instance cannot be launched. '
                         'Requested volume exceeds quota: Available: '
                         '%(avail)s, Requested: %(req)s.')
                       % {'avail': available_volume, 'req': count})
                raise forms.ValidationError(msg)

        flavor_id = cleaned_data.get('flavor')
        flavor = self._get_flavor(flavor_id)
        count_error = []
        # Validate cores and ram.
        available_cores = usages['cores']['available']
        if flavor and available_cores < count * flavor.vcpus:
            count_error.append(_("Cores(Available: %(avail)s, "
                                 "Requested: %(req)s)")
                               % {'avail': available_cores,
                                  'req': count * flavor.vcpus})

        available_ram = usages['ram']['available']
        if flavor and available_ram < count * flavor.ram:
            count_error.append(_("RAM(Available: %(avail)s, "
                                 "Requested: %(req)s)")
                               % {'avail': available_ram,
                                  'req': count * flavor.ram})

        if count_error:
            value_str = ", ".join(count_error)
            msg = (_('The requested instance cannot be launched. '
                     'The following requested resource(s) exceed '
                     'quota(s): %s.') % value_str)
            # Attach the error to the field the user can actually change.
            if count == 1:
                self._errors['flavor'] = self.error_class([msg])
            else:
                self._errors['count'] = self.error_class([msg])

    def _check_flavor_for_image(self, cleaned_data):
        """Ensure the chosen flavor meets the image's minimum RAM/disk."""
        # Prevents trying to launch an image needing more resources.
        image_id = cleaned_data.get('image_id')
        image = self._get_image(image_id)
        flavor_id = cleaned_data.get('flavor')
        flavor = self._get_flavor(flavor_id)
        if not image or not flavor:
            return
        props_mapping = (("min_ram", "ram"), ("min_disk", "disk"))
        for iprop, fprop in props_mapping:
            # A 0 minimum on either side means "no constraint".
            if (getattr(image, iprop) > 0 and
                    getattr(flavor, fprop) > 0 and
                    getattr(image, iprop) > getattr(flavor, fprop)):
                msg = (_("The flavor '%(flavor)s' is too small "
                         "for requested image.\n"
                         "Minimum requirements: "
                         "%(min_ram)s MB of RAM and "
                         "%(min_disk)s GB of Root Disk.")
                       % {'flavor': flavor.name,
                          'min_ram': image.min_ram,
                          'min_disk': image.min_disk})
                self._errors['image_id'] = self.error_class([msg])
                break  # Not necessary to continue the tests.

    def _check_volume_for_image(self, cleaned_data):
        """Ensure the new volume is large enough to hold the image."""
        image_id = cleaned_data.get('image_id')
        image = self._get_image(image_id)
        volume_size = cleaned_data.get('volume_size')
        if not image or not volume_size:
            return
        volume_size = int(volume_size)
        img_gigs = functions.bytes_to_gigabytes(image.size)
        smallest_size = max(img_gigs, image.min_disk)
        if volume_size < smallest_size:
            msg = (_("The Volume size is too small for the"
                     " '%(image_name)s' image and has to be"
                     " greater than or equal to "
                     "'%(smallest_size)d' GB.")
                   % {'image_name': image.name,
                      'smallest_size': smallest_size})
            self._errors['volume_size'] = self.error_class([msg])

    def _check_source_image(self, cleaned_data):
        """Boot-from-image: an image must be selected and fit the flavor."""
        if not cleaned_data.get('image_id'):
            msg = _("You must select an image.")
            self._errors['image_id'] = self.error_class([msg])
        else:
            self._check_flavor_for_image(cleaned_data)

    def _check_source_volume_image(self, cleaned_data):
        """Boot-from-image-to-new-volume: validate size and image."""
        volume_size = self.data.get('volume_size', None)
        if not volume_size:
            msg = _("You must set volume size")
            self._errors['volume_size'] = self.error_class([msg])
        # NOTE(review): float('') raises ValueError, so an empty (but
        # present) volume_size would escape as an exception here rather
        # than as a form error — confirm whether that path is reachable.
        if float(volume_size) <= 0:
            msg = _("Volume size must be greater than 0")
            self._errors['volume_size'] = self.error_class([msg])
        if not cleaned_data.get('image_id'):
            msg = _("You must select an image.")
            self._errors['image_id'] = self.error_class([msg])
            return
        else:
            self._check_flavor_for_image(cleaned_data)
            self._check_volume_for_image(cleaned_data)

    def _check_source_instance_snapshot(self, cleaned_data):
        """Boot-from-snapshot: a snapshot must be selected."""
        # using the array form of get blows up with KeyError
        # if instance_snapshot_id is nil
        if not cleaned_data.get('instance_snapshot_id'):
            msg = _("You must select a snapshot.")
            self._errors['instance_snapshot_id'] = self.error_class([msg])

    def _check_source_volume(self, cleaned_data):
        """Boot-from-volume: a volume must be selected and count must be 1."""
        if not cleaned_data.get('volume_id'):
            msg = _("You must select a volume.")
            self._errors['volume_id'] = self.error_class([msg])
        # Prevent launching multiple instances with the same volume.
        # TODO(gabriel): is it safe to launch multiple instances with
        # a snapshot since it should be cloned to new volumes?
        count = cleaned_data.get('count', 1)
        if count > 1:
            msg = _('Launching multiple instances is only supported for '
                    'images and instance snapshots.')
            raise forms.ValidationError(msg)

    def _check_source_volume_snapshot(self, cleaned_data):
        """Boot-from-volume-snapshot: a snapshot must be selected."""
        if not cleaned_data.get('volume_snapshot_id'):
            msg = _("You must select a snapshot.")
            self._errors['volume_snapshot_id'] = self.error_class([msg])

    def _check_source(self, cleaned_data):
        """Dispatch to the validator matching the selected boot source."""
        # Validate our instance source.
        source_type = self.data.get('source_type', None)
        source_check_methods = {
            'image_id': self._check_source_image,
            'volume_image_id': self._check_source_volume_image,
            'instance_snapshot_id': self._check_source_instance_snapshot,
            'volume_id': self._check_source_volume,
            'volume_snapshot_id': self._check_source_volume_snapshot
        }
        check_method = source_check_methods.get(source_type)
        if check_method:
            check_method(cleaned_data)

    def clean(self):
        """Standard form cleaning plus quota and boot-source validation."""
        cleaned_data = super(SetInstanceDetailsAction, self).clean()
        self._check_quotas(cleaned_data)
        self._check_source(cleaned_data)
        return cleaned_data

    def populate_flavor_choices(self, request, context):
        """Choices for the flavor field."""
        return instance_utils.flavor_field_data(request, False)

    def populate_availability_zone_choices(self, request, context):
        """Choices for the availability-zone field (available zones only)."""
        try:
            zones = api.nova.availability_zone_list(request)
        except Exception:
            zones = []
            exceptions.handle(request,
                              _('Unable to retrieve availability zones.'))

        zone_list = [(zone.zoneName, zone.zoneName)
                     for zone in zones if zone.zoneState['available']]
        zone_list.sort()
        if not zone_list:
            zone_list.insert(0, ("", _("No availability zones found")))
        elif len(zone_list) > 1:
            zone_list.insert(0, ("", _("Any Availability Zone")))
        return zone_list

    def get_help_text(self, extra_context=None):
        """Extend the help-panel context with quota, flavor and image data."""
        extra = {} if extra_context is None else dict(extra_context)
        try:
            extra['usages'] = quotas.tenant_quota_usages(
                self.request,
                targets=('instances', 'cores', 'ram', 'volumes',
                         'gigabytes'))
            extra['usages_json'] = json.dumps(extra['usages'])
            extra['cinder_enabled'] = \
                base.is_service_enabled(self.request, 'volume')
            flavors = json.dumps([f._info for f in
                                  instance_utils.flavor_list(self.request)])
            extra['flavors'] = flavors
            images = image_utils.get_available_images(
                self.request, self.initial['project_id'],
                self._images_cache)
            if images is not None:
                attrs = [{'id': i.id,
                          'min_disk': getattr(i, 'min_disk', 0),
                          'min_ram': getattr(i, 'min_ram', 0),
                          'size': functions.bytes_to_gigabytes(i.size)}
                         for i in images]
                extra['images'] = json.dumps(attrs)
        except Exception:
            exceptions.handle(self.request,
                              _("Unable to retrieve quota information."))
        return super(SetInstanceDetailsAction, self).get_help_text(extra)

    def _init_images_cache(self):
        """Lazily create the per-action image cache dict."""
        if not hasattr(self, '_images_cache'):
            self._images_cache = {}

    def _get_volume_display_name(self, volume):
        """Return an ("<id>:vol|snap", label) choice tuple for a volume
        or volume snapshot (snapshots are detected via 'volume_id')."""
        if hasattr(volume, "volume_id"):
            vol_type = "snap"
            visible_label = _("Snapshot")
        else:
            vol_type = "vol"
            visible_label = _("Volume")
        return (("%s:%s" % (volume.id, vol_type)),
                (_("%(name)s - %(size)s GB (%(label)s)")
                 % {'name': volume.name,
                    'size': volume.size,
                    'label': visible_label}))

    def populate_image_id_choices(self, request, context):
        """Choices for image_id: non-snapshot images, annotated with a
        suggested volume size derived from min_disk/virtual size."""
        choices = []
        images = image_utils.get_available_images(request,
                                                  context.get('project_id'),
                                                  self._images_cache)
        for image in images:
            if image_tables.get_image_type(image) != "snapshot":
                image.bytes = getattr(
                    image, 'virtual_size', None) or image.size
                image.volume_size = max(
                    image.min_disk, functions.bytes_to_gigabytes(image.bytes))
                choices.append((image.id, image))
                # Pre-fill volume_size for the pre-selected image.
                if context.get('image_id') == image.id and \
                        'volume_size' not in context:
                    context['volume_size'] = image.volume_size
        if choices:
            choices.sort(key=lambda c: c[1].name or '')
            choices.insert(0, ("", _("Select Image")))
        else:
            choices.insert(0, ("", _("No images available")))
        return choices

    def populate_instance_snapshot_id_choices(self, request, context):
        """Choices for instance_snapshot_id: snapshot-type images only."""
        images = image_utils.get_available_images(request,
                                                  context.get('project_id'),
                                                  self._images_cache)
        choices = [(image.id, image.name)
                   for image in images
                   if image_tables.get_image_type(image) == "snapshot"]
        if choices:
            choices.sort(key=operator.itemgetter(1))
            choices.insert(0, ("", _("Select Instance Snapshot")))
        else:
            choices.insert(0, ("", _("No snapshots available")))
        return choices

    def populate_volume_id_choices(self, request, context):
        """Choices for volume_id: available, bootable volumes."""
        volumes = []
        try:
            if cinder.is_volume_service_enabled(request):
                available = api.cinder.VOLUME_STATE_AVAILABLE
                volumes = [self._get_volume_display_name(v)
                           for v in cinder.volume_list(
                               self.request,
                               search_opts=dict(status=available,
                                                bootable=True))]
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve list of volumes.'))
        if volumes:
            volumes.insert(0, ("", _("Select Volume")))
        else:
            volumes.insert(0, ("", _("No volumes available")))
        return volumes

    def populate_volume_snapshot_id_choices(self, request, context):
        """Choices for volume_snapshot_id: available snapshots whose
        source volume is bootable."""
        snapshots = []
        try:
            if cinder.is_volume_service_enabled(request):
                available = api.cinder.VOLUME_STATE_AVAILABLE
                volumes = [v.id for v in cinder.volume_list(
                    self.request, search_opts=dict(bootable=True))]
                snapshots = [self._get_volume_display_name(s)
                             for s in cinder.volume_snapshot_list(
                                 self.request,
                                 search_opts=dict(status=available))
                             if s.volume_id in volumes]
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve list of volume '
                                'snapshots.'))
        if snapshots:
            snapshots.insert(0, ("", _("Select Volume Snapshot")))
        else:
            snapshots.insert(0, ("", _("No volume snapshots available")))
        return snapshots
class CreateForm(forms.SelfHandlingForm):
    """Volume creation form.

    A volume can be created empty, or from a snapshot, an image, or an
    existing volume.  Depending on the source passed in the request's
    query string, irrelevant source fields are deleted in __init__.
    """
    name = forms.CharField(max_length=255, label=_("Volume Name"),
                           required=False)
    description = forms.CharField(max_length=255, widget=forms.Textarea(
        attrs={'rows': 4}), label=_("Description"), required=False)
    volume_source_type = forms.ChoiceField(label=_("Volume Source"),
                                           required=False,
                                           widget=forms.Select(attrs={
                                               'class': 'switchable',
                                               'data-slug': 'source'}))
    snapshot_source = forms.ChoiceField(
        label=_("Use snapshot as a source"),
        widget=forms.SelectWidget(
            attrs={'class': 'snapshot-selector'},
            data_attrs=('size', 'name'),
            transform=lambda x: "%s (%s GiB)" % (x.name, x.size)),
        required=False)
    image_source = forms.ChoiceField(
        label=_("Use image as a source"),
        widget=forms.SelectWidget(
            attrs={'class': 'image-selector'},
            data_attrs=('size', 'name', 'min_disk'),
            transform=lambda x: "%s (%s)" % (x.name,
                                             filesizeformat(x.bytes))),
        required=False)
    volume_source = forms.ChoiceField(
        label=_("Use a volume as source"),
        widget=forms.SelectWidget(
            attrs={'class': 'image-selector'},
            data_attrs=('size', 'name'),
            transform=lambda x: "%s (%s GiB)" % (x.name, x.size)),
        required=False)
    type = forms.ChoiceField(
        label=_("Type"),
        required=False,
        widget=forms.Select(
            attrs={'class': 'switched',
                   'data-switch-on': 'source',
                   'data-source-no_source_type': _('Type'),
                   'data-source-image_source': _('Type')}))
    size = forms.IntegerField(min_value=1, initial=1, label=_("Size (GiB)"))
    availability_zone = forms.ChoiceField(
        label=_("Availability Zone"),
        required=False,
        widget=forms.Select(
            attrs={'class': 'switched',
                   'data-switch-on': 'source',
                   'data-source-no_source_type': _('Availability Zone'),
                   'data-source-image_source': _('Availability Zone')}))

    def prepare_source_fields_if_snapshot_specified(self, request):
        """Preset fields for create-from-snapshot and drop other sources."""
        try:
            snapshot = self.get_snapshot(request,
                                         request.GET["snapshot_id"])
            self.fields['name'].initial = snapshot.name
            self.fields['size'].initial = snapshot.size
            self.fields['snapshot_source'].choices = ((snapshot.id,
                                                       snapshot),)
            try:
                # Set the volume type from the original volume
                orig_volume = cinder.volume_get(request,
                                                snapshot.volume_id)
                self.fields['type'].initial = orig_volume.volume_type
            except Exception:
                # Best-effort: fall back to the default type choice.
                pass
            self.fields['size'].help_text = (
                _('Volume size must be equal to or greater than the '
                  'snapshot size (%sGiB)') % snapshot.size)
            del self.fields['image_source']
            del self.fields['volume_source']
            del self.fields['volume_source_type']
            del self.fields['availability_zone']
        except Exception:
            exceptions.handle(request,
                              _('Unable to load the specified snapshot.'))

    def prepare_source_fields_if_image_specified(self, request):
        """Preset fields for create-from-image and drop other sources."""
        self.fields['availability_zone'].choices = \
            availability_zones(request)
        try:
            image = self.get_image(request,
                                   request.GET["image_id"])
            image.bytes = image.size
            self.fields['name'].initial = image.name
            min_vol_size = functions.bytes_to_gigabytes(
                image.size)
            size_help_text = (_('Volume size must be equal to or greater '
                                'than the image size (%s)')
                              % filesizeformat(image.size))
            properties = getattr(image, 'properties', {})
            min_disk_size = (getattr(image, 'min_disk', 0) or
                             properties.get('min_disk', 0))
            # The image's declared minimum disk overrides its raw size
            # when larger.
            if (min_disk_size > min_vol_size):
                min_vol_size = min_disk_size
                size_help_text = (_('Volume size must be equal to or '
                                    'greater than the image minimum '
                                    'disk size (%sGiB)')
                                  % min_disk_size)
            self.fields['size'].initial = min_vol_size
            self.fields['size'].help_text = size_help_text
            self.fields['image_source'].choices = ((image.id, image),)
            del self.fields['snapshot_source']
            del self.fields['volume_source']
            del self.fields['volume_source_type']
        except Exception:
            msg = _('Unable to load the specified image. %s')
            exceptions.handle(request, msg % request.GET['image_id'])

    def prepare_source_fields_if_volume_specified(self, request):
        """Preset fields for create-from-volume and drop other sources."""
        self.fields['availability_zone'].choices = \
            availability_zones(request)
        volume = None
        try:
            volume = self.get_volume(request, request.GET["volume_id"])
        except Exception:
            msg = _('Unable to load the specified volume. %s')
            exceptions.handle(request, msg % request.GET['volume_id'])

        if volume is not None:
            self.fields['name'].initial = volume.name
            self.fields['description'].initial = volume.description
            min_vol_size = volume.size
            size_help_text = (_('Volume size must be equal to or greater '
                                'than the origin volume size (%sGiB)')
                              % volume.size)
            self.fields['size'].initial = min_vol_size
            self.fields['size'].help_text = size_help_text
            self.fields['volume_source'].choices = ((volume.id, volume),)
            self.fields['type'].initial = volume.type
            del self.fields['snapshot_source']
            del self.fields['image_source']
            del self.fields['volume_source_type']

    def prepare_source_fields_default(self, request):
        """Populate all source choice fields; delete those with no options."""
        source_type_choices = []
        self.fields['availability_zone'].choices = \
            availability_zones(request)
        try:
            available = api.cinder.VOLUME_STATE_AVAILABLE
            snapshots = cinder.volume_snapshot_list(
                request, search_opts=dict(status=available))
            if snapshots:
                source_type_choices.append(("snapshot_source",
                                            _("Snapshot")))
                choices = [('', _("Choose a snapshot"))] + \
                          [(s.id, s) for s in snapshots]
                self.fields['snapshot_source'].choices = choices
            else:
                del self.fields['snapshot_source']
        except Exception:
            exceptions.handle(request,
                              _("Unable to retrieve volume snapshots."))

        images = utils.get_available_images(request,
                                            request.user.tenant_id)
        if images:
            source_type_choices.append(("image_source", _("Image")))
            choices = [('', _("Choose an image"))]
            for image in images:
                # Keep raw byte count for display; overwrite size with GiB.
                image.bytes = image.size
                image.size = functions.bytes_to_gigabytes(image.bytes)
                choices.append((image.id, image))
            self.fields['image_source'].choices = choices
        else:
            del self.fields['image_source']

        volumes = self.get_volumes(request)
        if volumes:
            source_type_choices.append(("volume_source", _("Volume")))
            choices = [('', _("Choose a volume"))]
            for volume in volumes:
                choices.append((volume.id, volume))
            self.fields['volume_source'].choices = choices
        else:
            del self.fields['volume_source']

        if source_type_choices:
            choices = ([('no_source_type',
                         _("No source, empty volume"))] +
                       source_type_choices)
            self.fields['volume_source_type'].choices = choices
        else:
            del self.fields['volume_source_type']

    def __init__(self, request, *args, **kwargs):
        super(CreateForm, self).__init__(request, *args, **kwargs)
        volume_types = cinder.volume_type_list(request)
        self.fields['type'].choices = [("no_type", _("No volume type"))] + \
                                      [(type.name, type.name)
                                       for type in volume_types]
        if 'initial' in kwargs and 'type' in kwargs['initial']:
            # if there is a default volume type to select, then remove
            # the first "No volume type" entry
            self.fields['type'].choices.pop(0)
        # Choose the source-specific field preparation based on which id
        # (if any) is present in the query string.
        if "snapshot_id" in request.GET:
            self.prepare_source_fields_if_snapshot_specified(request)
        elif 'image_id' in request.GET:
            self.prepare_source_fields_if_image_specified(request)
        elif 'volume_id' in request.GET:
            self.prepare_source_fields_if_volume_specified(request)
        else:
            self.prepare_source_fields_default(request)

    def clean(self):
        """Require the source field that matches the chosen source type."""
        cleaned_data = super(CreateForm, self).clean()
        source_type = self.cleaned_data.get('volume_source_type')
        if (source_type == 'image_source' and
                not cleaned_data.get('image_source')):
            msg = _('Image source must be specified')
            self._errors['image_source'] = self.error_class([msg])
        elif (source_type == 'snapshot_source' and
                not cleaned_data.get('snapshot_source')):
            msg = _('Snapshot source must be specified')
            self._errors['snapshot_source'] = self.error_class([msg])
        elif (source_type == 'volume_source' and
                not cleaned_data.get('volume_source')):
            msg = _('Volume source must be specified')
            self._errors['volume_source'] = self.error_class([msg])
        return cleaned_data

    def get_volumes(self, request):
        """Return available volumes, or [] when the listing fails."""
        volumes = []
        try:
            available = api.cinder.VOLUME_STATE_AVAILABLE
            volumes = cinder.volume_list(self.request,
                                         search_opts=dict(status=available))
        except Exception:
            exceptions.handle(request,
                              _('Unable to retrieve list of volumes.'))
        return volumes

    def handle(self, request, data):
        """Validate quotas/source constraints and create the volume.

        Returns the new volume on success, False on a validation error,
        and redirects to the volumes index on unexpected failure.
        """
        try:
            usages = quotas.tenant_limit_usages(self.request)
            availableGB = usages['maxTotalVolumeGigabytes'] - \
                usages['gigabytesUsed']
            availableVol = usages['maxTotalVolumes'] - usages['volumesUsed']

            snapshot_id = None
            image_id = None
            volume_id = None
            source_type = data.get('volume_source_type', None)
            az = data.get('availability_zone', None) or None
            if (data.get("snapshot_source", None) and
                    source_type in ['', None, 'snapshot_source']):
                # Create from Snapshot
                snapshot = self.get_snapshot(request,
                                             data["snapshot_source"])
                snapshot_id = snapshot.id
                if (data['size'] < snapshot.size):
                    error_message = (_('The volume size cannot be less than '
                                       'the snapshot size (%sGiB)')
                                     % snapshot.size)
                    raise ValidationError(error_message)
                az = None
            elif (data.get("image_source", None) and
                    source_type in ['', None, 'image_source']):
                # Create from image
                image = self.get_image(request,
                                       data["image_source"])
                image_id = image.id
                image_size = functions.bytes_to_gigabytes(image.size)
                if (data['size'] < image_size):
                    error_message = (_('The volume size cannot be less than '
                                       'the image size (%s)')
                                     % filesizeformat(image.size))
                    raise ValidationError(error_message)
                properties = getattr(image, 'properties', {})
                min_disk_size = (getattr(image, 'min_disk', 0) or
                                 properties.get('min_disk', 0))
                if (min_disk_size > 0 and
                        data['size'] < min_disk_size):
                    error_message = (_('The volume size cannot be less than '
                                       'the image minimum disk size (%sGiB)')
                                     % min_disk_size)
                    raise ValidationError(error_message)
            elif (data.get("volume_source", None) and
                    source_type in ['', None, 'volume_source']):
                # Create from volume
                volume = self.get_volume(request, data["volume_source"])
                volume_id = volume.id
                if data['size'] < volume.size:
                    error_message = (_('The volume size cannot be less than '
                                       'the source volume size (%sGiB)')
                                     % volume.size)
                    raise ValidationError(error_message)
            else:
                # Empty volume: only quota checks apply.
                if type(data['size']) is str:
                    data['size'] = int(data['size'])

                if availableGB < data['size']:
                    error_message = _('A volume of %(req)iGiB cannot be '
                                      'created as you only have %(avail)iGiB '
                                      'of your quota available.')
                    params = {'req': data['size'],
                              'avail': availableGB}
                    raise ValidationError(error_message % params)
                elif availableVol <= 0:
                    error_message = _('You are already using all of your '
                                      'available volumes.')
                    raise ValidationError(error_message)

            metadata = {}

            # "no_type" is a UI placeholder meaning "no volume type".
            if data['type'] == 'no_type':
                data['type'] = ''

            volume = cinder.volume_create(request,
                                          data['size'],
                                          data['name'],
                                          data['description'],
                                          data['type'],
                                          snapshot_id=snapshot_id,
                                          image_id=image_id,
                                          metadata=metadata,
                                          availability_zone=az,
                                          source_volid=volume_id)
            message = _('Creating volume "%s"') % data['name']
            messages.info(request, message)
            return volume
        except ValidationError as e:
            self.api_error(e.messages[0])
            return False
        except Exception:
            redirect = reverse("horizon:project:volumes:index")
            exceptions.handle(request,
                              _("Unable to create volume."),
                              redirect=redirect)

    @memoized
    def get_snapshot(self, request, id):
        return cinder.volume_snapshot_get(request, id)

    @memoized
    def get_image(self, request, id):
        return glance.image_get(request, id)

    @memoized
    def get_volume(self, request, id):
        return cinder.volume_get(request, id)
class SetInstanceDetailsAction(workflows.Action):
    """Launch-instance workflow step: name, flavor, count and boot source."""
    availability_zone = forms.ChoiceField(label=_("Availability Zone"),
                                          required=False)
    name = forms.CharField(label=_("Instance Name"),
                           max_length=255)
    flavor = forms.ChoiceField(label=_("Flavor"),
                               help_text=_("Size of image to launch."))
    count = forms.IntegerField(label=_("Instance Count"),
                               min_value=1,
                               initial=1,
                               help_text=_("Number of instances to launch."))
    source_type = forms.ChoiceField(label=_("Instance Boot Source"),
                                    help_text=_("Choose Your Boot Source "
                                                "Type."))
    instance_snapshot_id = forms.ChoiceField(label=_("Instance Snapshot"),
                                             required=False)
    volume_id = forms.ChoiceField(label=_("Volume"), required=False)
    volume_snapshot_id = forms.ChoiceField(label=_("Volume Snapshot"),
                                           required=False)
    image_id = forms.ChoiceField(
        label=_("Image Name"),
        required=False,
        widget=forms.SelectWidget(
            data_attrs=('volume_size', ),
            transform=lambda x: ("%s (%s)" % (x.name,
                                              filesizeformat(x.bytes)))))
    volume_size = forms.IntegerField(label=_("Device size (GB)"),
                                     min_value=1,
                                     required=False,
                                     help_text=_("Volume size in gigabytes "
                                                 "(integer value)."))
    device_name = forms.CharField(label=_("Device Name"),
                                  required=False,
                                  initial="vda",
                                  help_text=_("Volume mount point (e.g. 'vda' "
                                              "mounts at '/dev/vda')."))
    delete_on_terminate = forms.BooleanField(label=_("Delete on Terminate"),
                                             initial=False,
                                             required=False,
                                             help_text=_("Delete volume on "
                                                         "instance terminate"))

    class Meta:
        name = _("Details")
        help_text_template = ("project/instances/"
                              "_launch_details_help.html")

    def __init__(self, request, context, *args, **kwargs):
        self._init_images_cache()
        self.request = request
        self.context = context
        super(SetInstanceDetailsAction, self).__init__(request,
                                                       context,
                                                       *args,
                                                       **kwargs)
        # Volume-backed boot sources are only offered when the volume
        # service is enabled.
        source_type_choices = [
            ('', _("Select source")),
            ("image_id", _("Boot from image")),
            ("instance_snapshot_id", _("Boot from snapshot")),
        ]
        if base.is_service_enabled(request, 'volume'):
            source_type_choices.append(("volume_id", _("Boot from volume")))

            try:
                if api.nova.extension_supported("BlockDeviceMappingV2Boot",
                                                request):
                    source_type_choices.append(
                        ("volume_image_id",
                         _("Boot from image (creates a new volume)")))
            except Exception:
                exceptions.handle(request, _('Unable to retrieve extensions '
                                             'information.'))

            source_type_choices.append(
                ("volume_snapshot_id",
                 _("Boot from volume snapshot (creates a new volume)")))
        self.fields['source_type'].choices = source_type_choices

    def clean(self):
        """Validate count/flavor quotas and the selected boot source."""
        cleaned_data = super(SetInstanceDetailsAction, self).clean()

        count = cleaned_data.get('count', 1)
        # Prevent launching more instances than the quota allows
        usages = quotas.tenant_quota_usages(self.request)
        available_count = usages['instances']['available']
        if available_count < count:
            error_message = ungettext_lazy(
                'The requested instance '
                'cannot be launched as you only '
                'have %(avail)i of your quota '
                'available. ',
                'The requested %(req)i instances '
                'cannot be launched as you only '
                'have %(avail)i of your quota '
                'available.',
                count)
            params = {'req': count,
                      'avail': available_count}
            raise forms.ValidationError(error_message % params)

        try:
            flavor_id = cleaned_data.get('flavor')
            # We want to retrieve details for a given flavor,
            # however flavor_list uses a memoized decorator
            # so it is used instead of flavor_get to reduce the number
            # of API calls.
            flavors = instance_utils.flavor_list(self.request)
            flavor = [x for x in flavors if x.id == flavor_id][0]
        except IndexError:
            flavor = None

        count_error = []
        # Validate cores and ram.
        available_cores = usages['cores']['available']
        if flavor and available_cores < count * flavor.vcpus:
            count_error.append(_("Cores(Available: %(avail)s, "
                                 "Requested: %(req)s)")
                               % {'avail': available_cores,
                                  'req': count * flavor.vcpus})

        available_ram = usages['ram']['available']
        if flavor and available_ram < count * flavor.ram:
            count_error.append(_("RAM(Available: %(avail)s, "
                                 "Requested: %(req)s)")
                               % {'avail': available_ram,
                                  'req': count * flavor.ram})

        if count_error:
            value_str = ", ".join(count_error)
            msg = (_('The requested instance cannot be launched. '
                     'The following requested resource(s) exceed '
                     'quota(s): %s.') % value_str)
            # Attach the error to the field the user most likely changed.
            if count == 1:
                self._errors['flavor'] = self.error_class([msg])
            else:
                self._errors['count'] = self.error_class([msg])

        # Validate our instance source.
        source_type = self.data.get('source_type', None)

        if source_type in ('image_id', 'volume_image_id'):
            if source_type == 'volume_image_id':
                if not self.data.get('volume_size', None):
                    msg = _("You must set volume size")
                    self._errors['volume_size'] = self.error_class([msg])
                if not cleaned_data.get('device_name'):
                    msg = _("You must set device name")
                    self._errors['device_name'] = self.error_class([msg])
            if not cleaned_data.get('image_id'):
                msg = _("You must select an image.")
                self._errors['image_id'] = self.error_class([msg])
            else:
                # Prevents trying to launch an image needing more resources.
                try:
                    image_id = cleaned_data.get('image_id')
                    # We want to retrieve details for a given image,
                    # however get_available_images uses a cache of image list,
                    # so it is used instead of image_get to reduce the number
                    # of API calls.
                    images = image_utils.get_available_images(
                        self.request,
                        self.context.get('project_id'),
                        self._images_cache)
                    image = [x for x in images if x.id == image_id][0]
                except IndexError:
                    image = None

                if image and flavor:
                    props_mapping = (("min_ram", "ram"),
                                     ("min_disk", "disk"))
                    for iprop, fprop in props_mapping:
                        if getattr(image, iprop) > 0 and \
                                getattr(image, iprop) > getattr(flavor,
                                                                fprop):
                            msg = (_("The flavor '%(flavor)s' is too small "
                                     "for requested image.\n"
                                     "Minimum requirements: "
                                     "%(min_ram)s MB of RAM and "
                                     "%(min_disk)s GB of Root Disk.")
                                   % {'flavor': flavor.name,
                                      'min_ram': image.min_ram,
                                      'min_disk': image.min_disk})
                            self._errors['image_id'] = self.error_class([msg])
                            break  # Not necessary to continue the tests.

                    volume_size = cleaned_data.get('volume_size')
                    if volume_size and source_type == 'volume_image_id':
                        volume_size = int(volume_size)
                        img_gigs = functions.bytes_to_gigabytes(image.size)
                        smallest_size = max(img_gigs, image.min_disk)
                        if volume_size < smallest_size:
                            msg = (_("The Volume size is too small for the"
                                     " '%(image_name)s' image and has to be"
                                     " greater than or equal to "
                                     "'%(smallest_size)d' GB.")
                                   % {'image_name': image.name,
                                      'smallest_size': smallest_size})
                            self._errors['volume_size'] = self.error_class(
                                [msg])

        elif source_type == 'instance_snapshot_id':
            if not cleaned_data['instance_snapshot_id']:
                msg = _("You must select a snapshot.")
                self._errors['instance_snapshot_id'] = self.error_class([msg])

        elif source_type == 'volume_id':
            if not cleaned_data.get('volume_id'):
                msg = _("You must select a volume.")
                self._errors['volume_id'] = self.error_class([msg])
            # Prevent launching multiple instances with the same volume.
            # TODO(gabriel): is it safe to launch multiple instances with
            # a snapshot since it should be cloned to new volumes?
            if count > 1:
                msg = _('Launching multiple instances is only supported for '
                        'images and instance snapshots.')
                raise forms.ValidationError(msg)

        elif source_type == 'volume_snapshot_id':
            if not cleaned_data.get('volume_snapshot_id'):
                msg = _("You must select a snapshot.")
                self._errors['volume_snapshot_id'] = self.error_class([msg])
            if not cleaned_data.get('device_name'):
                msg = _("You must set device name")
                self._errors['device_name'] = self.error_class([msg])

        return cleaned_data

    def populate_flavor_choices(self, request, context):
        """Return flavors sorted per deployment policy, or []."""
        flavors = instance_utils.flavor_list(request)
        if flavors:
            return instance_utils.sort_flavor_list(request, flavors)
        return []

    def populate_availability_zone_choices(self, request, context):
        """Return available AZs; 'Any' entry only when more than one."""
        try:
            zones = api.nova.availability_zone_list(request)
        except Exception:
            zones = []
            exceptions.handle(request,
                              _('Unable to retrieve availability zones.'))

        zone_list = [(zone.zoneName, zone.zoneName)
                     for zone in zones if zone.zoneState['available']]
        zone_list.sort()
        if not zone_list:
            zone_list.insert(0, ("", _("No availability zones found")))
        elif len(zone_list) > 1:
            zone_list.insert(0, ("", _("Any Availability Zone")))
        return zone_list

    def get_help_text(self, extra_context=None):
        """Inject quota, flavor and image data for the help template."""
        extra = extra_context or {}
        try:
            extra['usages'] = api.nova.tenant_absolute_limits(self.request)
            extra['usages_json'] = json.dumps(extra['usages'])
            flavors = json.dumps(
                [f._info for f in instance_utils.flavor_list(self.request)])
            extra['flavors'] = flavors
            images = image_utils.get_available_images(
                self.request, self.initial['project_id'], self._images_cache)
            if images is not None:
                attrs = [{'id': i.id,
                          'min_disk': getattr(i, 'min_disk', 0),
                          'min_ram': getattr(i, 'min_ram', 0)}
                         for i in images]
                extra['images'] = json.dumps(attrs)
        except Exception:
            exceptions.handle(self.request,
                              _("Unable to retrieve quota information."))
        return super(SetInstanceDetailsAction, self).get_help_text(extra)

    def _init_images_cache(self):
        # Lazily create the image cache shared by the populate_* methods.
        if not hasattr(self, '_images_cache'):
            self._images_cache = {}

    def _get_volume_display_name(self, volume):
        """Return a (choice_value, label) pair for a volume or snapshot."""
        if hasattr(volume, "volume_id"):
            # Only snapshot objects carry a volume_id attribute.
            vol_type = "snap"
            visible_label = _("Snapshot")
        else:
            vol_type = "vol"
            visible_label = _("Volume")
        return (("%s:%s" % (volume.id, vol_type)),
                (_("%(name)s - %(size)s GB (%(label)s)")
                 % {'name': volume.name,
                    'size': volume.size,
                    'label': visible_label}))

    def populate_image_id_choices(self, request, context):
        """Build the image choice list and pre-fill volume_size."""
        choices = []
        images = image_utils.get_available_images(request,
                                                  context.get('project_id'),
                                                  self._images_cache)
        for image in images:
            image.bytes = image.size
            image.volume_size = max(
                image.min_disk, functions.bytes_to_gigabytes(image.bytes))
            choices.append((image.id, image))
            if context.get('image_id') == image.id and \
                    'volume_size' not in context:
                context['volume_size'] = image.volume_size
        if choices:
            choices.sort(key=lambda c: c[1].name)
            choices.insert(0, ("", _("Select Image")))
        else:
            choices.insert(0, ("", _("No images available")))
        return choices

    def populate_instance_snapshot_id_choices(self, request, context):
        """Build the instance-snapshot choice list, sorted by name."""
        images = image_utils.get_available_images(request,
                                                  context.get('project_id'),
                                                  self._images_cache)
        choices = [(image.id, image.name)
                   for image in images
                   if image.properties.get("image_type", '') == "snapshot"]
        if choices:
            choices.sort(key=operator.itemgetter(1))
            choices.insert(0, ("", _("Select Instance Snapshot")))
        else:
            choices.insert(0, ("", _("No snapshots available")))
        return choices

    def populate_volume_id_choices(self, request, context):
        """List bootable volumes in the available state."""
        try:
            volumes = [self._get_volume_display_name(v)
                       for v in cinder.volume_list(self.request)
                       if v.status == api.cinder.VOLUME_STATE_AVAILABLE
                       and v.bootable == 'true']
        except Exception:
            volumes = []
            exceptions.handle(self.request,
                              _('Unable to retrieve list of volumes.'))
        if volumes:
            volumes.insert(0, ("", _("Select Volume")))
        else:
            volumes.insert(0, ("", _("No volumes available")))
        return volumes

    def populate_volume_snapshot_id_choices(self, request, context):
        """List volume snapshots in the available state."""
        try:
            snapshots = cinder.volume_snapshot_list(self.request)
            snapshots = [self._get_volume_display_name(s)
                         for s in snapshots
                         if s.status == api.cinder.VOLUME_STATE_AVAILABLE]
        except Exception:
            snapshots = []
            exceptions.handle(self.request,
                              _('Unable to retrieve list of volume '
                                'snapshots.'))
        if snapshots:
            snapshots.insert(0, ("", _("Select Volume Snapshot")))
        else:
            snapshots.insert(0, ("", _("No volume snapshots available")))
        return snapshots
def formatted_file_size(self, obj):
    """Render ``obj.file_size`` as a human-readable string (e.g. "4.1 MB")."""
    raw_bytes = obj.file_size
    return filesizeformat(raw_bytes)
def list_lib_dir(request, repo_id):
    '''
        New ajax API for list library directory.

        Returns a JSON object describing the directory at ?p= inside the
        library *repo_id*: permission/ownership flags plus a "dirent_list"
        of folder and file entries (with thumbnail and lock info for files).
    '''
    content_type = 'application/json; charset=utf-8'
    result = {}

    repo = get_repo(repo_id)
    if not repo:
        err_msg = _('Library does not exist.')
        return HttpResponse(json.dumps({'error': err_msg}),
                            status=400, content_type=content_type)

    username = request.user.username
    path = request.GET.get('p', '/')
    path = normalize_dir_path(path)
    dir_id = seafile_api.get_dir_id_by_path(repo.id, path)
    if not dir_id:
        err_msg = 'Folder not found.'
        return HttpResponse(json.dumps({'error': err_msg}),
                            status=404, content_type=content_type)

    # perm for current dir
    user_perm = check_folder_permission(request, repo_id, path)
    if not user_perm:
        return convert_repo_path_when_can_not_view_folder(request, repo_id,
                                                          path)

    # Encrypted libraries must be unlocked (password set) before listing.
    if repo.encrypted \
            and not seafile_api.is_password_set(repo.id, username):
        err_msg = _('Library is encrypted.')
        return HttpResponse(json.dumps({'error': err_msg,
                                        'lib_need_decrypt': True}),
                            status=403, content_type=content_type)

    head_commit = get_commit(repo.id, repo.version, repo.head_cmmt_id)
    if not head_commit:
        err_msg = _('Error: no head commit id')
        return HttpResponse(json.dumps({'error': err_msg}),
                            status=500, content_type=content_type)

    dir_list = []
    file_list = []

    dirs = seafserv_threaded_rpc.list_dir_with_perm(repo_id, path, dir_id,
                                                    username, -1, -1)
    starred_files = get_dir_starred_files(username, repo_id, path)
    for dirent in dirs:
        dirent.last_modified = dirent.mtime
        if stat.S_ISDIR(dirent.mode):
            dpath = posixpath.join(path, dirent.obj_name)
            if dpath[-1] != '/':
                dpath += '/'
            dir_list.append(dirent)
        else:
            # v0 repos do not store per-dirent sizes; ask the API instead.
            if repo.version == 0:
                file_size = seafile_api.get_file_size(repo.store_id,
                                                      repo.version,
                                                      dirent.obj_id)
            else:
                file_size = dirent.size
            dirent.file_size = file_size if file_size else 0

            dirent.starred = False
            fpath = posixpath.join(path, dirent.obj_name)
            if fpath in starred_files:
                dirent.starred = True

            file_list.append(dirent)

    if is_org_context(request):
        repo_owner = seafile_api.get_org_repo_owner(repo.id)
    else:
        repo_owner = seafile_api.get_repo_owner(repo.id)

    result["repo_owner"] = repo_owner
    result["is_repo_owner"] = False
    result["has_been_shared_out"] = False
    result["is_admin"] = is_repo_admin(username, repo_id)
    if repo_owner == username:
        result["is_repo_owner"] = True
        try:
            result["has_been_shared_out"] = repo_has_been_shared_out(
                request, repo_id)
        except Exception as e:
            # Best-effort flag; keep the default False on failure.
            logger.error(e)

    if result["is_admin"]:
        result["has_been_shared_out"] = True

    result["is_virtual"] = repo.is_virtual
    result["repo_name"] = repo.name
    result["user_perm"] = user_perm
    # check quota for fileupload
    result["no_quota"] = True if seaserv.check_quota(repo.id) < 0 else False
    result["encrypted"] = repo.encrypted

    dirent_list = []
    for d in dir_list:
        d_ = {}
        d_['is_dir'] = True
        d_['obj_name'] = d.obj_name
        d_['last_modified'] = d.last_modified
        d_['last_update'] = translate_seahub_time(d.last_modified)
        d_['p_dpath'] = posixpath.join(path, d.obj_name)
        d_['perm'] = d.permission  # perm for sub dir in current dir
        dirent_list.append(d_)

    size = int(request.GET.get('thumbnail_size', THUMBNAIL_DEFAULT_SIZE))

    for f in file_list:
        f_ = {}
        f_['is_file'] = True
        f_['obj_name'] = f.obj_name
        f_['last_modified'] = f.last_modified
        f_['last_update'] = translate_seahub_time(f.last_modified)
        f_['starred'] = f.starred
        f_['file_size'] = filesizeformat(f.file_size)
        f_['obj_id'] = f.obj_id
        f_['perm'] = f.permission  # perm for file in current dir

        if not repo.encrypted and ENABLE_THUMBNAIL:
            # used for providing a way to determine
            # if send a request to create thumbnail.
            fileExt = os.path.splitext(f.obj_name)[1][1:].lower()
            file_type = FILEEXT_TYPE_MAP.get(fileExt)
            if file_type == IMAGE:
                f_['is_img'] = True
            if file_type == VIDEO and ENABLE_VIDEO_THUMBNAIL:
                f_['is_video'] = True
            if file_type == XMIND:
                f_['is_xmind'] = True
            if file_type in (IMAGE, XMIND) or \
                    file_type == VIDEO and ENABLE_VIDEO_THUMBNAIL:
                # if thumbnail has already been created, return its src.
                # Then web browser will use this src to get thumbnail
                # instead of recreating it.
                thumbnail_file_path = os.path.join(THUMBNAIL_ROOT,
                                                   str(size), f.obj_id)
                thumbnail_exist = os.path.exists(thumbnail_file_path)
                if thumbnail_exist:
                    file_path = posixpath.join(path, f.obj_name)
                    src = get_thumbnail_src(repo_id, size, file_path)
                    f_['encoded_thumbnail_src'] = urlquote(src)

        if is_pro_version():
            f_['is_locked'] = True if f.is_locked else False
            f_['lock_owner'] = f.lock_owner
            f_['lock_owner_name'] = email2nickname(f.lock_owner)

            f_['locked_by_me'] = False
            if f.lock_owner == username:
                f_['locked_by_me'] = True

            # The online-office lock counts as "mine" for read-write users.
            if f.lock_owner == ONLINE_OFFICE_LOCK_OWNER and \
                    user_perm == PERMISSION_READ_WRITE:
                f_['locked_by_me'] = True

        dirent_list.append(f_)

    result["dirent_list"] = dirent_list

    return HttpResponse(json.dumps(result), content_type=content_type)
def handle(self, request, data):
    """Validate quota and source constraints, then create the volume.

    Returns the new volume on success, False on a validation error, and
    redirects to the volumes index on unexpected failure.
    """
    try:
        usages = quotas.tenant_quota_usages(self.request,
                                            targets=('volumes', 'gigabytes'))
        availableGB = usages['gigabytes']['available']
        availableVol = usages['volumes']['available']

        snapshot_id = None
        image_id = None
        volume_id = None
        source_type = data.get('volume_source_type', None)
        az = data.get('availability_zone', None) or None
        volume_type = data.get('type')

        if (data.get("snapshot_source", None) and
                source_type in ['', None, 'snapshot_source']):
            # Create from Snapshot
            snapshot = self.get_snapshot(request, data["snapshot_source"])
            snapshot_id = snapshot.id
            if data['size'] < snapshot.size:
                error_message = (_('The volume size cannot be less than '
                                   'the snapshot size (%sGiB)')
                                 % snapshot.size)
                raise ValidationError(error_message)
            # Snapshot-sourced volumes inherit AZ/type from the snapshot.
            az = None
            volume_type = ""
        elif (data.get("image_source", None) and
                source_type in ['', None, 'image_source']):
            # Create from image
            image = self.get_image(request, data["image_source"])
            image_id = image.id
            image_size = functions.bytes_to_gigabytes(image.size)
            if data['size'] < image_size:
                error_message = (_('The volume size cannot be less than '
                                   'the image size (%s)')
                                 % filesizeformat(image.size))
                raise ValidationError(error_message)
            properties = getattr(image, 'properties', {})
            min_disk_size = (getattr(image, 'min_disk', 0) or
                             properties.get('min_disk', 0))
            if min_disk_size > 0 and data['size'] < min_disk_size:
                error_message = (_('The volume size cannot be less than '
                                   'the image minimum disk size (%sGiB)')
                                 % min_disk_size)
                raise ValidationError(error_message)
        elif (data.get("volume_source", None) and
                source_type in ['', None, 'volume_source']):
            # Create from volume
            volume = self.get_volume(request, data["volume_source"])
            volume_id = volume.id
            volume_type = None
            if data['size'] < volume.size:
                error_message = (_('The volume size cannot be less than '
                                   'the source volume size (%sGiB)')
                                 % volume.size)
                raise ValidationError(error_message)
        else:
            # Empty volume: only quota checks apply.
            if type(data['size']) is str:
                data['size'] = int(data['size'])

            if availableGB < data['size']:
                error_message = _('A volume of %(req)iGiB cannot be created '
                                  'as you only have %(avail)iGiB of your '
                                  'quota available.')
                params = {'req': data['size'],
                          'avail': availableGB}
                raise ValidationError(error_message % params)
            elif availableVol <= 0:
                error_message = _('You are already using all of your available'
                                  ' volumes.')
                raise ValidationError(error_message)

        metadata = {}

        volume = cinder.volume_create(request,
                                      data['size'],
                                      data['name'],
                                      data['description'],
                                      volume_type,
                                      snapshot_id=snapshot_id,
                                      image_id=image_id,
                                      metadata=metadata,
                                      availability_zone=az,
                                      source_volid=volume_id,
                                      group_id=data.get('group') or None)
        message = _('Creating volume "%s"') % volume.name
        messages.info(request, message)
        return volume
    except ValidationError as e:
        self.api_error(e.messages[0])
        return False
    except Exception:
        redirect = reverse("horizon:project:volumes:index")
        exceptions.handle(request,
                          _("Unable to create volume."),
                          redirect=redirect)
def file_size(file, article):
    """Return the human-readable size of *file* within *article*.

    Falls back to 0 when the size cannot be determined (best-effort
    behavior preserved from the original).
    """
    try:
        return filesizeformat(file.get_file_size(article))
    except Exception:
        # Was ``except BaseException``, which also swallowed
        # KeyboardInterrupt/SystemExit; narrowed to Exception so process
        # control signals propagate while lookup errors still yield 0.
        return 0
def __call__(self, value):
    """Validate that ``value.size`` does not exceed ``self.max_size``."""
    if value.size <= self.max_size:
        return
    readable_limit = filesizeformat(self.max_size)
    raise serializers.ValidationError(
        self.message.format(size=readable_limit))
def handle(self, request, data):
    """Create a Cinder volume, pre-checking quotas client-side.

    Creation source may be a snapshot, an image, or nothing (empty
    volume). Returns the created volume, or ``False`` after reporting
    the error via ``self.api_error``.
    """
    try:
        # FIXME(johnp): cinderclient currently returns a useless
        # error message when the quota is exceeded when trying to create
        # a volume, so we need to check for that scenario here before we
        # send it off to try and create.
        usages = cinder.tenant_absolute_limits(self.request)
        volumes = cinder.volume_list(self.request)
        total_size = sum(
            [getattr(volume, 'size', 0) for volume in volumes])
        usages['gigabytesUsed'] = total_size
        usages['volumesUsed'] = len(volumes)
        availableGB = usages['maxTotalVolumeGigabytes'] -\
            usages['gigabytesUsed']
        availableVol = usages['maxTotalVolumes'] - usages['volumesUsed']
        snapshot_id = None
        image_id = None
        source_type = data.get('volume_source_type', None)
        if (data.get("snapshot_source", None) and
                source_type in [None, 'snapshot_source']):
            # Create from Snapshot
            snapshot = self.get_snapshot(request,
                                         data["snapshot_source"])
            snapshot_id = snapshot.id
            if (data['size'] < snapshot.size):
                # Interpolate OUTSIDE _() so the constant msgid is
                # found in the translation catalog (was inside before).
                error_message = _('The volume size cannot be less than '
                                  'the snapshot size (%sGB)') % snapshot.size
                raise ValidationError(error_message)
        elif (data.get("image_source", None) and
              source_type in [None, 'image_source']):
            # Create from Snapshot
            image = self.get_image(request, data["image_source"])
            image_id = image.id
            image_size = bytes_to_gigabytes(image.size)
            if (data['size'] < image_size):
                error_message = _('The volume size cannot be less than '
                                  'the image size (%s)') \
                    % filesizeformat(image.size)
                raise ValidationError(error_message)
        else:
            # Empty volume: coerce string sizes and pre-check quota.
            if type(data['size']) is str:
                data['size'] = int(data['size'])
            if availableGB < data['size']:
                error_message = _('A volume of %(req)iGB cannot be created as '
                                  'you only have %(avail)iGB of your quota '
                                  'available.')
                params = {'req': data['size'],
                          'avail': availableGB}
                raise ValidationError(error_message % params)
            elif availableVol <= 0:
                error_message = _('You are already using all of your available'
                                  ' volumes.')
                raise ValidationError(error_message)
        metadata = {}
        if data['encryption']:
            metadata['encryption'] = data['encryption']
        volume = cinder.volume_create(request,
                                      data['size'],
                                      data['name'],
                                      data['description'],
                                      data['type'],
                                      snapshot_id=snapshot_id,
                                      image_id=image_id,
                                      metadata=metadata)
        message = _('Creating volume "%s"') % data['name']
        messages.info(request, message)
        return volume
    except ValidationError as e:
        self.api_error(e.messages[0])
        return False
    except Exception:
        # Was a bare ``except:``; narrowed so SystemExit and
        # KeyboardInterrupt are no longer swallowed.
        exceptions.handle(request, ignore=True)
        self.api_error(_("Unable to create volume."))
        return False
def __init__(self, request, *args, **kwargs):
    """Initialize form fields based on the request's creation source.

    ``snapshot_id`` or ``image_id`` in the query string pre-seeds the
    form from that source and drops the irrelevant source fields;
    otherwise the available source choices are built dynamically.
    """
    super(CreateForm, self).__init__(request, *args, **kwargs)
    volume_types = cinder.volume_type_list(request)
    self.fields['type'].choices = [("", "")] + \
        [(type.name, type.name) for type in volume_types]
    # Hide the volume encryption field if the hypervisor doesn't support it
    # NOTE: as of Grizzly this is not yet supported in Nova so enabling
    # this setting will not do anything useful
    hypervisor_features = getattr(settings,
                                  "OPENSTACK_HYPERVISOR_FEATURES", {})
    can_encrypt_volumes = hypervisor_features.get("can_encrypt_volumes",
                                                  False)
    if can_encrypt_volumes:
        # TODO(laura-glendenning) get from api call in future
        encryption_options = {"LUKS": "dmcrypt LUKS"}
        self.fields['encryption'].choices = [("", "")] + \
            [(enc, display) for enc, display in encryption_options.items()]
    else:
        self.fields['encryption'].widget = forms.widgets.HiddenInput()
        self.fields['encryption'].required = False
    if ("snapshot_id" in request.GET):
        try:
            snapshot = self.get_snapshot(request,
                                         request.GET["snapshot_id"])
            self.fields['name'].initial = snapshot.display_name
            self.fields['size'].initial = snapshot.size
            self.fields['snapshot_source'].choices = ((snapshot.id,
                                                       snapshot), )
            try:
                # Set the volume type from the original volume
                orig_volume = cinder.volume_get(request,
                                                snapshot.volume_id)
                self.fields['type'].initial = orig_volume.volume_type
            except Exception:
                # Was a bare ``except:``; the type simply stays unset.
                pass
            # Interpolate outside _() so the msgid matches the catalog
            # (interpolation was previously inside the _() call).
            self.fields['size'].help_text = _(
                'Volume size must be equal '
                'to or greater than the snapshot size (%sGB)') % snapshot.size
            del self.fields['image_source']
            del self.fields['volume_source_type']
        except Exception:
            exceptions.handle(request,
                              _('Unable to load the specified snapshot.'))
    elif ('image_id' in request.GET):
        try:
            image = self.get_image(request, request.GET["image_id"])
            image.bytes = image.size
            self.fields['name'].initial = image.name
            self.fields['size'].initial = bytes_to_gigabytes(image.size)
            self.fields['image_source'].choices = ((image.id, image), )
            self.fields['size'].help_text = _(
                'Volume size must be equal '
                'to or greater than the image size (%s)') \
                % filesizeformat(image.size)
            del self.fields['snapshot_source']
            del self.fields['volume_source_type']
        except Exception:
            msg = _('Unable to load the specified image. %s')
            exceptions.handle(request, msg % request.GET['image_id'])
    else:
        source_type_choices = []
        try:
            snapshots = cinder.volume_snapshot_list(request)
            if snapshots:
                source_type_choices.append(
                    ("snapshot_source", _("Snapshot")))
                choices = [('', _("Choose a snapshot"))] + \
                    [(s.id, s) for s in snapshots]
                self.fields['snapshot_source'].choices = choices
            else:
                del self.fields['snapshot_source']
        except Exception:
            exceptions.handle(request, _("Unable to retrieve "
                                         "volume snapshots."))
        images = get_available_images(request,
                                      request.user.tenant_id)
        if images:
            source_type_choices.append(("image_source", _("Image")))
            choices = [('', _("Choose an image"))]
            for image in images:
                image.bytes = image.size
                image.size = bytes_to_gigabytes(image.bytes)
                choices.append((image.id, image))
            self.fields['image_source'].choices = choices
        else:
            del self.fields['image_source']
        if source_type_choices:
            choices = (
                [('no_source_type', _("No source, empty volume."))] +
                source_type_choices)
            self.fields['volume_source_type'].choices = choices
        else:
            del self.fields['volume_source_type']
def test_negative_numbers(self): self.assertEqual(filesizeformat(-100), '-100\xa0bytes') self.assertEqual(filesizeformat(-1024 * 1024 * 50), '-50.0\xa0MB')
def handle(self, request, data):
    """Create a Cinder volume from the form data.

    Supports creation from a snapshot, image, source volume, or as an
    empty volume; quota is only pre-checked for the empty-volume case.
    Returns the new volume, or ``False`` after reporting the error.
    """
    try:
        usages = quotas.tenant_limit_usages(self.request)
        availableGB = usages['maxTotalVolumeGigabytes'] - \
            usages['gigabytesUsed']
        availableVol = usages['maxTotalVolumes'] - usages['volumesUsed']
        snapshot_id = None
        image_id = None
        volume_id = None
        source_type = data.get('volume_source_type', None)
        # Normalize '' to None so "no AZ chosen" is passed as None.
        az = data.get('availability_zone', None) or None
        if (data.get("snapshot_source", None) and
                source_type in [None, 'snapshot_source']):
            # Create from Snapshot
            snapshot = self.get_snapshot(request, data["snapshot_source"])
            snapshot_id = snapshot.id
            if (data['size'] < snapshot.size):
                error_message = _(
                    'The volume size cannot be less than '
                    'the snapshot size (%sGB)') % snapshot.size
                raise ValidationError(error_message)
            # AZ is inherited from the snapshot's origin volume.
            az = None
        elif (data.get("image_source", None) and
              source_type in [None, 'image_source']):
            # Create from Snapshot
            image = self.get_image(request, data["image_source"])
            image_id = image.id
            image_size = functions.bytes_to_gigabytes(image.size)
            if (data['size'] < image_size):
                error_message = _('The volume size cannot be less than '
                                  'the image size (%s)') % filesizeformat(
                    image.size)
                raise ValidationError(error_message)
            min_disk_size = getattr(image, 'min_disk', 0)
            if (min_disk_size > 0 and data['size'] < image.min_disk):
                error_message = _(
                    'The volume size cannot be less than '
                    'the image minimum disk size (%sGB)') % min_disk_size
                raise ValidationError(error_message)
        elif (data.get("volume_source", None) and
              source_type in [None, 'volume_source']):
            # Create from volume
            volume = self.get_volume(request, data["volume_source"])
            volume_id = volume.id
            if data['size'] < volume.size:
                error_message = _(
                    'The volume size cannot be less than '
                    'the source volume size (%sGB)') % volume.size
                raise ValidationError(error_message)
        else:
            # Empty volume: coerce string sizes and pre-check quotas
            # here because cinderclient's own quota error is unhelpful.
            if type(data['size']) is str:
                data['size'] = int(data['size'])
            if availableGB < data['size']:
                error_message = _('A volume of %(req)iGB cannot be created as '
                                  'you only have %(avail)iGB of your quota '
                                  'available.')
                params = {'req': data['size'],
                          'avail': availableGB}
                raise ValidationError(error_message % params)
            elif availableVol <= 0:
                error_message = _('You are already using all of your available'
                                  ' volumes.')
                raise ValidationError(error_message)
        metadata = {}
        volume = cinder.volume_create(request,
                                      data['size'],
                                      data['name'],
                                      data['description'],
                                      data['type'],
                                      snapshot_id=snapshot_id,
                                      image_id=image_id,
                                      metadata=metadata,
                                      availability_zone=az,
                                      source_volid=volume_id)
        message = _('Creating volume "%s"') % data['name']
        messages.info(request, message)
        return volume
    except ValidationError as e:
        self.api_error(e.messages[0])
        return False
    except Exception:
        exceptions.handle(request, ignore=True)
        self.api_error(_("Unable to create volume."))
        return False
def get_memory(instance): return filesizeformat(instance.flavor.ram * 1024 * 1024)
def test_formats(self): self.assertEqual(filesizeformat(1023), '1023\xa0bytes') self.assertEqual(filesizeformat(1024), '1.0\xa0KB') self.assertEqual(filesizeformat(10 * 1024), '10.0\xa0KB') self.assertEqual(filesizeformat(1024 * 1024 - 1), '1024.0\xa0KB') self.assertEqual(filesizeformat(1024 * 1024), '1.0\xa0MB') self.assertEqual(filesizeformat(1024 * 1024 * 50), '50.0\xa0MB') self.assertEqual(filesizeformat(1024 * 1024 * 1024 - 1), '1024.0\xa0MB') self.assertEqual(filesizeformat(1024 * 1024 * 1024), '1.0\xa0GB') self.assertEqual(filesizeformat(1024 * 1024 * 1024 * 1024), '1.0\xa0TB') self.assertEqual(filesizeformat(1024 * 1024 * 1024 * 1024 * 1024), '1.0\xa0PB') self.assertEqual(filesizeformat(1024 * 1024 * 1024 * 1024 * 1024 * 2000), '2000.0\xa0PB') self.assertEqual(filesizeformat(complex(1, -1)), '0\xa0bytes') self.assertEqual(filesizeformat(""), '0\xa0bytes') self.assertEqual(filesizeformat("\N{GREEK SMALL LETTER ALPHA}"), '0\xa0bytes')
class CreateForm(forms.SelfHandlingForm):
    """Self-handling form for creating a Cinder volume.

    The volume may be created empty, from a snapshot, from an image,
    or from an existing volume; query-string parameters
    (``snapshot_id``/``image_id``/``volume_id``) pre-seed the form
    from a specific source and remove the other source fields.
    """
    name = forms.CharField(max_length="255", label=_("Volume Name"))
    description = forms.CharField(
        widget=forms.Textarea(attrs={'class': 'modal-body-fixed-width'}),
        label=_("Description"), required=False)
    type = forms.ChoiceField(label=_("Type"), required=False)
    size = forms.IntegerField(min_value=1, label=_("Size (GB)"))
    # Controls which of the source selectors below is shown ('switchable').
    volume_source_type = forms.ChoiceField(
        label=_("Volume Source"),
        required=False,
        widget=forms.Select(attrs={
            'class': 'switchable',
            'data-slug': 'source'
        }))
    snapshot_source = forms.ChoiceField(
        label=_("Use snapshot as a source"),
        widget=forms.SelectWidget(
            attrs={'class': 'snapshot-selector'},
            data_attrs=('size', 'name'),
            transform=lambda x: "%s (%sGB)" % (x.name, x.size)),
        required=False)
    image_source = forms.ChoiceField(
        label=_("Use image as a source"),
        widget=forms.SelectWidget(
            attrs={'class': 'image-selector'},
            data_attrs=('size', 'name', 'min_disk'),
            transform=lambda x: "%s (%s)" % (x.name,
                                             filesizeformat(x.bytes))),
        required=False)
    volume_source = forms.ChoiceField(
        label=_("Use a volume as source"),
        widget=forms.SelectWidget(
            attrs={'class': 'image-selector'},
            data_attrs=('size', 'name'),
            # Volume sizes are stored in GB; convert to bytes for display.
            transform=lambda x: "%s (%s)" % (x.name,
                                             filesizeformat(x.size * 1024 *
                                                            1024 * 1024))),
        required=False)
    availability_zone = forms.ChoiceField(
        label=_("Availability Zone"),
        required=False,
        widget=forms.Select(
            attrs={
                'class': 'switched',
                'data-switch-on': 'source',
                'data-source-no_source_type': _('Availability Zone'),
                'data-source-image_source': _('Availability Zone')
            }))

    def __init__(self, request, *args, **kwargs):
        """Seed fields from the query string or build source choices."""
        super(CreateForm, self).__init__(request, *args, **kwargs)
        volume_types = cinder.volume_type_list(request)
        self.fields['type'].choices = [("", "")] + \
            [(type.name, type.name) for type in volume_types]
        if "snapshot_id" in request.GET:
            try:
                snapshot = self.get_snapshot(request,
                                             request.GET["snapshot_id"])
                self.fields['name'].initial = snapshot.name
                self.fields['size'].initial = snapshot.size
                self.fields['snapshot_source'].choices = ((snapshot.id,
                                                           snapshot), )
                try:
                    # Set the volume type from the original volume
                    orig_volume = cinder.volume_get(request,
                                                    snapshot.volume_id)
                    self.fields['type'].initial = orig_volume.volume_type
                except Exception:
                    pass
                self.fields['size'].help_text = _('Volume size must be equal '
                    'to or greater than the snapshot size (%sGB)') \
                    % snapshot.size
                del self.fields['image_source']
                del self.fields['volume_source']
                del self.fields['volume_source_type']
                # AZ is fixed by the snapshot's origin volume.
                del self.fields['availability_zone']
            except Exception:
                exceptions.handle(request,
                                  _('Unable to load the specified snapshot.'))
        elif 'image_id' in request.GET:
            self.fields['availability_zone'].choices = \
                self.availability_zones(request)
            try:
                image = self.get_image(request, request.GET["image_id"])
                image.bytes = image.size
                self.fields['name'].initial = image.name
                min_vol_size = functions.bytes_to_gigabytes(image.size)
                size_help_text = _('Volume size must be equal to or greater '
                                   'than the image size (%s)') \
                    % filesizeformat(image.size)
                # The image's min_disk (GB) wins over its byte size when
                # it demands a larger volume.
                min_disk_size = getattr(image, 'min_disk', 0)
                if (min_disk_size > min_vol_size):
                    min_vol_size = min_disk_size
                    size_help_text = _('Volume size must be equal to or '
                                       'greater than the image minimum '
                                       'disk size (%sGB)') \
                        % min_disk_size
                self.fields['size'].initial = min_vol_size
                self.fields['size'].help_text = size_help_text
                self.fields['image_source'].choices = ((image.id, image), )
                del self.fields['snapshot_source']
                del self.fields['volume_source']
                del self.fields['volume_source_type']
            except Exception:
                msg = _('Unable to load the specified image. %s')
                exceptions.handle(request, msg % request.GET['image_id'])
        elif 'volume_id' in request.GET:
            self.fields['availability_zone'].choices = \
                self.availability_zones(request)
            volume = None
            try:
                volume = self.get_volume(request, request.GET["volume_id"])
            except Exception:
                msg = _('Unable to load the specified volume. %s')
                exceptions.handle(request, msg % request.GET['volume_id'])
            if volume is not None:
                self.fields['name'].initial = volume.name
                self.fields['description'].initial = volume.description
                min_vol_size = volume.size
                size_help_text = _('Volume size must be equal to or greater '
                                   'than the origin volume size (%s)') \
                    % filesizeformat(volume.size)
                self.fields['size'].initial = min_vol_size
                self.fields['size'].help_text = size_help_text
                self.fields['volume_source'].choices = ((volume.id,
                                                         volume), )
                self.fields['type'].initial = volume.type
                del self.fields['snapshot_source']
                del self.fields['image_source']
                del self.fields['volume_source_type']
        else:
            # No pre-selected source: offer every source kind that has
            # at least one usable candidate.
            source_type_choices = []
            self.fields['availability_zone'].choices = \
                self.availability_zones(request)
            try:
                snapshot_list = cinder.volume_snapshot_list(request)
                snapshots = [
                    s for s in snapshot_list if s.status == 'available'
                ]
                if snapshots:
                    source_type_choices.append(
                        ("snapshot_source", _("Snapshot")))
                    choices = [('', _("Choose a snapshot"))] + \
                        [(s.id, s) for s in snapshots]
                    self.fields['snapshot_source'].choices = choices
                else:
                    del self.fields['snapshot_source']
            except Exception:
                exceptions.handle(request,
                                  _("Unable to retrieve "
                                    "volume snapshots."))
            images = utils.get_available_images(request,
                                                request.user.tenant_id)
            if images:
                source_type_choices.append(("image_source", _("Image")))
                choices = [('', _("Choose an image"))]
                for image in images:
                    image.bytes = image.size
                    image.size = functions.bytes_to_gigabytes(image.bytes)
                    choices.append((image.id, image))
                self.fields['image_source'].choices = choices
            else:
                del self.fields['image_source']
            volumes = self.get_volumes(request)
            if volumes:
                source_type_choices.append(("volume_source", _("Volume")))
                choices = [('', _("Choose a volume"))]
                for volume in volumes:
                    choices.append((volume.id, volume))
                self.fields['volume_source'].choices = choices
            else:
                del self.fields['volume_source']
            if source_type_choices:
                choices = ([('no_source_type',
                             _("No source, empty volume"))] +
                           source_type_choices)
                self.fields['volume_source_type'].choices = choices
            else:
                del self.fields['volume_source_type']

    # Determine whether the extension for Cinder AZs is enabled
    def cinder_az_supported(self, request):
        """Return True if Cinder's AvailabilityZones extension is enabled."""
        try:
            return cinder.extension_supported(request, 'AvailabilityZones')
        except Exception:
            exceptions.handle(request,
                              _('Unable to determine if '
                                'availability zones extension '
                                'is supported.'))
            return False

    def availability_zones(self, request):
        """Build the AZ choice list (sorted, with a default first entry)."""
        zone_list = []
        if self.cinder_az_supported(request):
            try:
                zones = api.cinder.availability_zone_list(request)
                zone_list = [(zone.zoneName, zone.zoneName)
                             for zone in zones if zone.zoneState['available']]
                zone_list.sort()
            except Exception:
                exceptions.handle(request,
                                  _('Unable to retrieve availability '
                                    'zones.'))
        if not zone_list:
            zone_list.insert(0, ("", _("No availability zones found")))
        elif len(zone_list) > 0:
            zone_list.insert(0, ("", _("Any Availability Zone")))
        return zone_list

    def get_volumes(self, request):
        """Return the tenant's volumes that are in the 'available' state."""
        volumes = []
        try:
            volume_list = cinder.volume_list(self.request)
            if volume_list is not None:
                volumes = [
                    v for v in volume_list
                    if v.status == api.cinder.VOLUME_STATE_AVAILABLE
                ]
        except Exception:
            exceptions.handle(request,
                              _('Unable to retrieve list of volumes.'))
        return volumes

    def handle(self, request, data):
        """Create the volume; returns it, or False on a reported error."""
        try:
            usages = quotas.tenant_limit_usages(self.request)
            availableGB = usages['maxTotalVolumeGigabytes'] - \
                usages['gigabytesUsed']
            availableVol = usages['maxTotalVolumes'] - usages['volumesUsed']
            snapshot_id = None
            image_id = None
            volume_id = None
            source_type = data.get('volume_source_type', None)
            az = data.get('availability_zone', None) or None
            if (data.get("snapshot_source", None) and
                    source_type in [None, 'snapshot_source']):
                # Create from Snapshot
                snapshot = self.get_snapshot(request,
                                             data["snapshot_source"])
                snapshot_id = snapshot.id
                if (data['size'] < snapshot.size):
                    error_message = _(
                        'The volume size cannot be less than '
                        'the snapshot size (%sGB)') % snapshot.size
                    raise ValidationError(error_message)
                # AZ is inherited from the snapshot's origin volume.
                az = None
            elif (data.get("image_source", None) and
                  source_type in [None, 'image_source']):
                # Create from Snapshot
                image = self.get_image(request, data["image_source"])
                image_id = image.id
                image_size = functions.bytes_to_gigabytes(image.size)
                if (data['size'] < image_size):
                    error_message = _('The volume size cannot be less than '
                                      'the image size (%s)') % filesizeformat(
                        image.size)
                    raise ValidationError(error_message)
                min_disk_size = getattr(image, 'min_disk', 0)
                if (min_disk_size > 0 and data['size'] < image.min_disk):
                    error_message = _(
                        'The volume size cannot be less than '
                        'the image minimum disk size (%sGB)') % min_disk_size
                    raise ValidationError(error_message)
            elif (data.get("volume_source", None) and
                  source_type in [None, 'volume_source']):
                # Create from volume
                volume = self.get_volume(request, data["volume_source"])
                volume_id = volume.id
                if data['size'] < volume.size:
                    error_message = _(
                        'The volume size cannot be less than '
                        'the source volume size (%sGB)') % volume.size
                    raise ValidationError(error_message)
            else:
                # Empty volume: quota is only pre-checked here because
                # cinderclient's own quota error is unhelpful.
                if type(data['size']) is str:
                    data['size'] = int(data['size'])
                if availableGB < data['size']:
                    error_message = _('A volume of %(req)iGB cannot be '
                                      'created as '
                                      'you only have %(avail)iGB of your '
                                      'quota '
                                      'available.')
                    params = {'req': data['size'],
                              'avail': availableGB}
                    raise ValidationError(error_message % params)
                elif availableVol <= 0:
                    error_message = _('You are already using all of your '
                                      'available'
                                      ' volumes.')
                    raise ValidationError(error_message)
            metadata = {}
            volume = cinder.volume_create(request,
                                          data['size'],
                                          data['name'],
                                          data['description'],
                                          data['type'],
                                          snapshot_id=snapshot_id,
                                          image_id=image_id,
                                          metadata=metadata,
                                          availability_zone=az,
                                          source_volid=volume_id)
            message = _('Creating volume "%s"') % data['name']
            messages.info(request, message)
            return volume
        except ValidationError as e:
            self.api_error(e.messages[0])
            return False
        except Exception:
            exceptions.handle(request, ignore=True)
            self.api_error(_("Unable to create volume."))
            return False

    @memoized
    def get_snapshot(self, request, id):
        # Memoized so repeated form accesses hit the API only once.
        return cinder.volume_snapshot_get(request, id)

    @memoized
    def get_image(self, request, id):
        return glance.image_get(request, id)

    @memoized
    def get_volume(self, request, id):
        return cinder.volume_get(request, id)
def main(args):
    """Main entry point.

    Report the total and offline data volume of the selected ECMWF
    data requests and, when ``args.create`` is set, create a
    back-dated retrieval request covering them.
    """
    start_year = 1948
    end_year = 2051

    # NOTE: several alternative query blocks (other experiments/models)
    # previously lived here as commented-out code; they were removed as
    # dead code — recover them from version control if needed.
    data_reqs = DataRequest.objects.filter(
        institute__short_name='ECMWF',
        experiment__short_name='spinup-1950',
        variable_request__table_name__startswith='Prim',
        datafile__isnull=False
    ).distinct()

    # filesizeformat() uses non-breaking spaces; swap them for plain
    # spaces so the log line is grep-friendly.
    logger.debug('Total data volume: {} Volume to restore: {}'.format(
        filesizeformat(get_request_size(data_reqs, start_year, end_year)).
        replace('\xa0', ' '),
        filesizeformat(get_request_size(data_reqs, start_year, end_year,
                                        offline=True)).replace('\xa0', ' '),
    ))

    if args.create:
        jon = User.objects.get(username='******')
        rr = RetrievalRequest.objects.create(requester=jon,
                                             start_year=start_year,
                                             end_year=end_year)
        # Back-date the request so it sorts ahead of the normal queue.
        time_zone = datetime.timezone(datetime.timedelta())
        rr.date_created = datetime.datetime(2000, 1, 1, 0, 0,
                                            tzinfo=time_zone)
        rr.save()
        rr.data_request.add(*data_reqs)
        logger.debug('Retrieval request {} created.'.format(rr.id))
def post(self, request): image_file = request.FILES.get('avatar', None) avatar_size = request.data.get('avatar_size', 64) if not image_file: error_msg = 'avatar invalid.' return api_error(status.HTTP_400_BAD_REQUEST, error_msg) try: avatar_size = int(avatar_size) except Exception as e: logger.error(e) error_msg = 'avatar_size invalid.' return api_error(status.HTTP_400_BAD_REQUEST, error_msg) (root, ext) = os.path.splitext(image_file.name.lower()) if AVATAR_ALLOWED_FILE_EXTS and ext not in AVATAR_ALLOWED_FILE_EXTS: error_msg = _( u"%(ext)s is an invalid file extension. Authorized extensions are : %(valid_exts_list)s" ) % { 'ext': ext, 'valid_exts_list': ", ".join(AVATAR_ALLOWED_FILE_EXTS) } return api_error(status.HTTP_400_BAD_REQUEST, error_msg) if image_file.size > AVATAR_MAX_SIZE: error_msg = _( u"Your file is too big (%(size)s), the maximum allowed size is %(max_valid_size)s" ) % { 'size': filesizeformat(image_file.size), 'max_valid_size': filesizeformat(AVATAR_MAX_SIZE) } return api_error(status.HTTP_400_BAD_REQUEST, error_msg) username = request.user.username count = Avatar.objects.filter(emailuser=username).count() if AVATAR_MAX_AVATARS_PER_USER > 1 and count >= AVATAR_MAX_AVATARS_PER_USER: error_msg = _( u"You already have %(nb_avatars)d avatars, and the maximum allowed is %(nb_max_avatars)d." ) % { 'nb_avatars': count, 'nb_max_avatars': AVATAR_MAX_AVATARS_PER_USER } return api_error(status.HTTP_400_BAD_REQUEST, error_msg) try: avatar = Avatar( emailuser=username, primary=True, ) avatar.avatar.save(image_file.name, image_file) avatar.save() avatar_updated.send(sender=Avatar, user=request.user, avatar=avatar) avatar_url, is_default, date_uploaded = api_avatar_url( username, int(avatar_size)) except Exception as e: logger.error(e) error_msg = 'Internal Server Error' return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg) return Response({'avatar_url': request.build_absolute_uri(avatar_url)})
def get_size_error(): return force_text( _("Uploaded file too large ( > %s )") % filesizeformat(get_max_file_size()))
def copy_all_missing(self, mode, archives: Iterable[Archive] = None):
    """Recover archives whose zip file is missing locally.

    Scans the Hath download folder and the torrent download dir for
    content matching each archive (by gallery gid or cleaned torrent
    name), rebuilds the archive zip from the found files, and runs
    post-processing. ``mode == 'local_move'`` moves the source files;
    any other mode copies them.
    """
    files_torrent = []
    files_hath = []
    if not archives:
        found_archives: Iterable[Archive] = list(Archive.objects.filter_by_dl_remote())
    else:
        found_archives = archives
    if not found_archives:
        return
    # Partition missing archives by how they were originally obtained.
    for archive in found_archives:
        if not os.path.isfile(archive.zipped.path):
            if 'torrent' in archive.match_type:
                files_torrent.append(archive)
            elif 'hath' in archive.match_type:
                files_hath.append(archive)
    if len(files_torrent) + len(files_hath) == 0:
        return
    # Hath downloads
    if len(files_hath) > 0:
        files_matched_hath = []
        # Hath folders are named "<title> [<gid>]"; match on the gid.
        for matched_file in os.listdir(self.settings.providers['panda'].local_hath_folder):
            if os.path.isfile(os.path.join(self.settings.providers['panda'].local_hath_folder, matched_file)):
                continue
            m = re.search(r'.*?\[(\d+)\]$', matched_file)
            if m:
                for archive in files_hath:
                    if m.group(1) == archive.gallery.gid:
                        # [folder name, zip path, expected size, archive]
                        files_matched_hath.append(
                            [matched_file, archive.zipped.path,
                             int(archive.filesize), archive])
        for img_dir in files_matched_hath:
            total_remote_size = 0
            remote_files = []
            directory = os.path.join(self.settings.providers['panda'].local_hath_folder, img_dir[0])
            # galleryinfo.txt is Hath metadata, not gallery content.
            for img_file in os.listdir(directory):
                if not os.path.isfile(os.path.join(directory, img_file)) or img_file == 'galleryinfo.txt':
                    continue
                total_remote_size += os.stat(
                    os.path.join(directory, img_file)).st_size
                remote_files.append(
                    os.path.join(directory, img_file))
            # Size mismatch means the Hath download hasn't finished.
            if total_remote_size != img_dir[2]:
                self.logger.info(
                    "For archive: {archive}, folder: {folder} "
                    "has not completed the download ({current}/{total}), skipping".format(
                        archive=img_dir[3],
                        folder=img_dir[0],
                        current=filesizeformat(total_remote_size),
                        total=filesizeformat(img_dir[2])
                    )
                )
                continue
            self.logger.info(
                "For archive: {archive}, creating zip "
                "for folder (unknown), {image_count} images".format(
                    archive=img_dir[3],
                    filename=img_dir[1],
                    image_count=len(remote_files)
                ))
            # Stage files in a temp dir, then zip them flat (no paths).
            dir_path = mkdtemp()
            for img_file_original in remote_files:
                img_file = os.path.split(img_file_original)[1]
                if mode == 'local_move':
                    shutil.move(img_file_original, os.path.join(dir_path, img_file))
                else:
                    shutil.copy(img_file_original, os.path.join(dir_path, img_file))
            with ZipFile(os.path.join(self.settings.MEDIA_ROOT, img_dir[1]), 'w') as archive_file:
                for (root_path, _, file_names) in os.walk(dir_path):
                    for current_file in file_names:
                        archive_file.write(
                            os.path.join(root_path, current_file),
                            arcname=os.path.basename(current_file))
            shutil.rmtree(dir_path, ignore_errors=True)
            self.process_downloaded_archive(img_dir[3])
    # Torrent downloads
    if len(files_torrent) > 0:
        files_matched_torrent = []
        # Torrent payloads match the zip name with the " [gid]" suffix
        # stripped and illegal filename characters normalized.
        for filename in os.listdir(self.settings.torrent['download_dir']):
            for archive in files_torrent:
                if archive.gallery:
                    cleaned_torrent_name = os.path.splitext(
                        os.path.basename(archive.zipped.path))[0].replace(' [' + archive.gallery.gid + ']', '')
                else:
                    cleaned_torrent_name = os.path.splitext(os.path.basename(archive.zipped.path))[0]
                if replace_illegal_name(os.path.splitext(filename)[0]) in cleaned_torrent_name:
                    # [name, is_directory, archive]
                    files_matched_torrent.append([filename, not os.path.isfile(
                        os.path.join(self.settings.torrent['download_dir'], filename)), archive])
        for matched_file in files_matched_torrent:
            target = os.path.join(self.settings.torrent['download_dir'], matched_file[0])
            if matched_file[1]:
                # Directory payload: stage its files and zip them flat.
                self.logger.info(
                    "For archive: {archive}, creating zip for folder: (unknown)".format(
                        archive=matched_file[2],
                        filename=matched_file[0],
                    ))
                dir_path = mkdtemp()
                for img_file in os.listdir(target):
                    if not os.path.isfile(os.path.join(target, img_file)):
                        continue
                    if mode == 'local_move':
                        shutil.move(os.path.join(target, img_file), os.path.join(dir_path, img_file))
                    else:
                        shutil.copy(os.path.join(target, img_file), os.path.join(dir_path, img_file))
                with ZipFile(matched_file[2].zipped.path, 'w') as archive_file:
                    for (root_path, _, file_names) in os.walk(dir_path):
                        for current_file in file_names:
                            archive_file.write(
                                os.path.join(root_path, current_file),
                                arcname=os.path.basename(current_file))
                shutil.rmtree(dir_path, ignore_errors=True)
            else:
                # Single-file payload: move/copy it into place directly.
                self.logger.info(
                    "For archive: {archive}, downloading file: (unknown)".format(
                        archive=matched_file[2],
                        filename=matched_file[0],
                    ))
                if mode == 'local_move':
                    shutil.move(target, matched_file[2].zipped.path)
                else:
                    shutil.copy(target, matched_file[2].zipped.path)
            if self.settings.convert_rar_to_zip and os.path.splitext(matched_file[0])[1].lower() == ".rar":
                self.logger.info(
                    "For archive: {}, converting rar: {} to zip".format(
                        matched_file[2],
                        matched_file[2].zipped.path
                    )
                )
                convert_rar_to_zip(matched_file[2].zipped.path)
            self.process_downloaded_archive(matched_file[2])
def get_maximum_size_display(self): return filesizeformat(bytes_=self.maximum_size)
def check_xpi_info(xpi_info, addon=None, xpi_file=None, user=None):
    """Validate parsed add-on package data before acceptance.

    Checks guid presence/uniqueness/ownership, version string length
    and format, static-theme size limits, trademark usage in
    translated names, and submission permissions. Raises
    ``forms.ValidationError`` on any failure; returns ``xpi_info``
    (possibly with the guid filled in) on success.
    """
    from olympia.addons.models import Addon, DeniedGuid
    guid = xpi_info['guid']
    is_webextension = xpi_info.get('is_webextension', False)

    # If we allow the guid to be omitted we assume that one was generated
    # or existed before and use that one.
    # An example are WebExtensions that don't require a guid but we generate
    # one once they're uploaded. Now, if you update that WebExtension we
    # just use the original guid.
    if addon and not guid and is_webextension:
        xpi_info['guid'] = guid = addon.guid
    if not guid and not is_webextension:
        raise forms.ValidationError(ugettext('Could not find an add-on ID.'))

    if guid:
        current_user = core.get_user()
        if current_user:
            # Guid clashes with the uploader's own deleted add-ons are OK.
            deleted_guid_clashes = Addon.unfiltered.exclude(
                authors__id=current_user.id).filter(guid=guid)
        else:
            deleted_guid_clashes = Addon.unfiltered.filter(guid=guid)
        if addon and addon.guid != guid:
            msg = ugettext(
                'The add-on ID in your manifest.json or install.rdf (%s) '
                'does not match the ID of your add-on on AMO (%s)')
            raise forms.ValidationError(msg % (guid, addon.guid))
        if (not addon and
                # Non-deleted add-ons.
                (Addon.objects.filter(guid=guid).exists() or
                 # DeniedGuid objects for deletions for Mozilla disabled add-ons
                 DeniedGuid.objects.filter(guid=guid).exists() or
                 # Deleted add-ons that don't belong to the uploader.
                 deleted_guid_clashes.exists())):
            raise forms.ValidationError(ugettext('Duplicate add-on ID found.'))
    if len(xpi_info['version']) > 32:
        raise forms.ValidationError(
            ugettext('Version numbers should have fewer than 32 characters.'))
    if not VERSION_RE.match(xpi_info['version']):
        raise forms.ValidationError(
            ugettext('Version numbers should only contain letters, numbers, '
                     'and these punctuation characters: +*.-_.'))

    if is_webextension and xpi_info.get('type') == amo.ADDON_STATICTHEME:
        if not waffle.switch_is_active('allow-static-theme-uploads'):
            raise forms.ValidationError(
                ugettext(
                    'WebExtension theme uploads are currently not supported.'))
        max_size = settings.MAX_STATICTHEME_SIZE
        if xpi_file and os.path.getsize(xpi_file.name) > max_size:
            raise forms.ValidationError(
                ugettext(
                    u'Maximum size for WebExtension themes is {0}.').format(
                        filesizeformat(max_size)))

    if xpi_file:
        # Make sure we pass in a copy of `xpi_info` since
        # `resolve_webext_translations` modifies data in-place
        translations = Addon.resolve_webext_translations(
            xpi_info.copy(), xpi_file)
        verify_mozilla_trademark(translations['name'], core.get_user())

    # Parse the file to get and validate package data with the addon.
    if not acl.submission_allowed(user, xpi_info):
        raise forms.ValidationError(
            ugettext(u'You cannot submit this type of add-on'))

    if not addon and not system_addon_submission_allowed(user, xpi_info):
        guids = ' or '.join(
            '"' + guid + '"' for guid in amo.SYSTEM_ADDON_GUIDS)
        raise forms.ValidationError(
            # NOTE(review): %s is interpolated inside ugettext() here, so
            # the msgid varies with `guids` and won't hit the catalog —
            # confirm whether this string is meant to be translated.
            ugettext(u'You cannot submit an add-on with a guid ending '
                     u'%s' % guids))

    if not mozilla_signed_extension_submission_allowed(user, xpi_info):
        raise forms.ValidationError(
            ugettext(u'You cannot submit a Mozilla Signed Extension'))

    return xpi_info
def main(args):
    """Main entry point.

    Report the total and offline (tape-only) data volume for a fixed set
    of HadGEM3-GC31-HM hist-1950 SImon data requests and, when
    ``args.create`` is set, create a backdated RetrievalRequest covering
    them.

    :param args: parsed command-line arguments; only ``args.create`` is
        read here.
    """
    start_year = 1948
    end_year = 2051

    # Select the data requests to report on / retrieve.  Earlier runs of
    # this script targeted other model/experiment combinations; edit this
    # queryset to change the selection.
    data_reqs = filter_hadgem_stream2(
        DataRequest.objects.filter(
            climate_model__short_name='HadGEM3-GC31-HM',
            experiment__short_name='hist-1950',
            rip_code='r1i2p1f1',
            variable_request__table_name='SImon',
            datafile__isnull=False
        ).distinct()
    )

    # Compute each volume once; filesizeformat() inserts a non-breaking
    # space between number and unit, which we replace for plain-text logs.
    total_volume = get_request_size(data_reqs, start_year, end_year)
    offline_volume = get_request_size(data_reqs, start_year, end_year,
                                      offline=True)
    logger.debug('Total data volume: {} Volume to restore: {}'.format(
        filesizeformat(total_volume).replace('\xa0', ' '),
        filesizeformat(offline_volume).replace('\xa0', ' '),
    ))

    if args.create:
        jon = User.objects.get(username='******')
        rr = RetrievalRequest.objects.create(requester=jon,
                                             start_year=start_year,
                                             end_year=end_year)
        # Backdate the request's creation time to a fixed UTC instant —
        # presumably so it sorts ahead of genuine requests; confirm with
        # the retrieval queue's ordering before changing.
        rr.date_created = datetime.datetime(2000, 1, 1, 0, 0,
                                            tzinfo=datetime.timezone.utc)
        rr.save()
        rr.data_request.add(*data_reqs)
        logger.debug('Retrieval request {} created.'.format(rr.id))
def get_total_size_display(self): return format_lazy( '{} ({:0.1f}%)', filesizeformat(bytes_=self.get_total_size()), self.get_total_size() / self.maximum_size * 100 )