Example #1
 def load(self, file_storage):
     field_dict = self.data.copy()
     tmp_name = field_dict.pop('tmp_name')
     file_obj = UploadedFile(file=file_storage.open(tmp_name), **field_dict)
     file_obj.url = file_storage.url(tmp_name)
     return file_obj
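A minimal usage sketch (not from the original source): assuming self.data holds the metadata dict that was recorded when the upload was stashed, load() rebuilds an UploadedFile from any Django storage. The storage location, holder object, and dict values below are hypothetical.

from django.core.files.storage import FileSystemStorage

storage = FileSystemStorage(location='/tmp/uploads')  # hypothetical location
field.data = {                      # `field` is a hypothetical holder object
    'tmp_name': 'tmp/abc123.txt',   # storage key recorded at upload time
    'name': 'report.txt',           # original filename
    'content_type': 'text/plain',
    'size': 42,
    'charset': None,
}
restored = field.load(storage)      # -> UploadedFile with a .url attribute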
Example #2
    def testFileUpload(self):
        from django.http import QueryDict, HttpRequest
        from tardis.tardis_portal.views import upload
        from django.core.files import File
        from django.core.files.uploadedfile import UploadedFile
        from django.utils.datastructures import MultiValueDict
        from os import path

        # create request.FILES object

        django_file = File(self.f1)
        uploaded_file = UploadedFile(file=django_file)
        uploaded_file.name = self.filename
        uploaded_file.size = self.f1_size

        post_data = [("enctype", "multipart/form-data")]
        post = QueryDict("&".join(["%s=%s" % (k, v) for (k, v) in post_data]))

        files = MultiValueDict({"Filedata": [uploaded_file]})
        request = HttpRequest()
        request.FILES = files
        request.POST = post
        request.method = "POST"
        response = upload(request, self.dataset.id)
        test_files_db = models.Dataset_File.objects.filter(dataset__id=self.dataset.id)

        self.assertTrue(path.exists(path.join(self.dataset_path, self.filename)))
        self.assertEqual(self.dataset.id, 1)
        self.assertEqual(test_files_db[0].url, "tardis://testfile.txt")
Example #3
 def test_unicode_file_names(self):
     """
     Regression test for #8156: files with unicode names I can't quite figure
     out the encoding situation between doctest and this file, but the actual
     repr doesn't matter; it just shouldn't return a unicode object.
     """
     uf = UploadedFile(name=u'¿Cómo?',content_type='text')
     self.assertEqual(type(uf.__repr__()), str)
Example #4
 def __init__(self, path, file_name, content_type, charset):
     UploadedFile.__init__(self)
     self.path = path
     self.name = file_name
     self.content_type = content_type
     self.charset = charset
     self.file_handle = None
     self.mode = None
Example #5
 def stitch_chunks(self):
     f = open(os.path.join(settings.MEDIA_ROOT, cloud_path(self, self.filename)), "wb")
     for chunk in self.chunks.all().order_by("pk"):
         f.write(chunk.chunk.read())
     f.close()
     f = UploadedFile(open(f.name, "rb"))
     self.upload.save(self.filename, f)
     self.state = Upload.STATE_COMPLETE
     self.save()
     f.close()
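A hedged alternative sketch of the same stitching logic (reusing cloud_path, self.chunks, self.upload, and Upload.STATE_COMPLETE from the example above), written with context managers so both handles are closed even if a chunk read fails:

 def stitch_chunks(self):
     target = os.path.join(settings.MEDIA_ROOT, cloud_path(self, self.filename))
     with open(target, "wb") as out:
         for chunk in self.chunks.all().order_by("pk"):
             out.write(chunk.chunk.read())
     with open(target, "rb") as stitched:
         self.upload.save(self.filename, UploadedFile(stitched))
     self.state = Upload.STATE_COMPLETE
     self.save()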
Example #6
def document_validator(filepath, ex_first_row, ex_first_col):
    file = None
    try:
        with open(os.path.join(settings.MEDIA_ROOT, filepath), 'r') as f:
            file = UploadedFile(f)
            dialect = csv.Sniffer().sniff(file.readline(), delimiters=[';', ',', '\t'])
            mimetype = magic.from_buffer(file.readline(), mime=True)
            file.seek(0)

            reader = csv.reader(file, dialect)

            temp_list = []
            for line in reader:
                if reader.line_num == 1:
                    # save first row
                    temp_list.append(line)
            # save last row
            temp_list.append(line)

        # check char in first row and first col
        if not ex_first_row and not float(temp_list[0][-1]):
            raise ValueError
        if not ex_first_col and not float(temp_list[-1][0]):
            raise ValueError
        ncol = (len(temp_list[0]) - 1) if ex_first_col else len(temp_list[0])
        # a header *row* (not column) reduces the row count
        nrow = (reader.line_num - 1) if ex_first_row else reader.line_num

        if nrow <= 2:
            raise ValueError

        is_cubic = (ncol == nrow)
        return_value = {'is_valid': True, 'nrow': nrow, 'ncol': ncol, 'separator': dialect.delimiter,
                        'mimetype': mimetype, 'is_cubic': is_cubic}
    except ValueError:
        # must precede the generic Exception handler, or it would never run;
        # the (closed) file is kept so the caller can still inspect it
        return_value = {'is_valid': False}
    except csv.Error:
        return_value = {'is_valid': False}
        file = None
    except Exception:
        return_value = {'is_valid': False}
        file = None

    return return_value, file
Example #7
    def save(self, *args, **kwargs):
        s = BytesIO()

        data = zipfile.ZipFile(s,'a')
        projectfiles = fileobject.objects.filter(project=self.project)
        for filedata in projectfiles:
            filed = filedata.filename.read()
            # subfolders are added to paths inside the zip file here
            pathAndName = str(self.project.title) + filedata.subfolder + os.path.split(str(filedata.filename))[1]
            data.writestr(pathAndName, filed)
        data.close()
        s.seek(0)
        filedata = UploadedFile(s)
        filedata.name = self.project.title+".zip"
        self.filename = filedata
        super(zippedObjectProxy, self).save(generate=False, *args, **kwargs)
Example #8
    def value_from_datadict(self, data, files, name):
        # we cache the return value of this function, since it is called a
        # bunch of time, and is expensive
        if self._cached_value is self._sentinel:
            upload = super().value_from_datadict(data, files, name)
            if upload != FILE_INPUT_CONTRADICTION:
                self.signed_path = data.get(self.signed_path_field_name(name), None)
                data_uri = data.get(self.data_uri_field_name(name))
                has_file = (upload or data_uri)
                # the path to the cached uploaded file
                path = None
                # if we have a cache key, and no file, fetch the file from the cache
                if self.signed_path and not has_file:
                    try:
                        path = self.signer.unsign(self.signed_path)
                    except BadSignature:
                        # False means the field value should be cleared, which
                        # is the best thing we can do in this case. It
                        # shouldn't happen anyways.
                        self.signed_path = ""
                        self._cached_value = None
                        return self._cached_value
                elif has_file:
                    # we have a file, so write it to disk, just in case form validation fails
                    prefix = "".join(CHOICES[x % 64] for x in os.urandom(16))
                    with NamedTemporaryFile(prefix=prefix, suffix=".jpg", dir=self.tmp_dir, delete=False) as f:
                        # write the uploaded file to disk, or the data from the dataURI
                        try:
                            if upload:
                                f.write(upload.read())
                            else:
                                f.write(b64decode(data_uri[data_uri.find(",")+1:]))
                        except Error:
                            pass
                        else:
                            path = f.name
                            self.signed_path = self.signer.sign(f.name)

                if path:
                    upload = UploadedFile(open(path, "rb"), name=path, size=os.path.getsize(path))
                    # tack on a URL attribute so the parent Widget thinks it
                    # has an initial value
                    upload.url = settings.MEDIA_URL + os.path.relpath(upload.file.name, settings.MEDIA_ROOT)

            self._cached_value = upload

        return self._cached_value
Example #9
def upload_save_process(request):
    """
        save file into local storage
    """
    f = request.FILES["file"]
    wrapper_f = UploadedFile(f)

    name, filetype = split_name(wrapper_f.name)

    obj = ProcessedFile()
    obj.title = name + str(uuid.uuid4()) + "." + filetype
    wrapper_f.name = obj.title
    obj.file_obj = f
    obj.file_type = filetype if filetype != " " else "unknown"
    obj.save()

    return wrapper_f
Example #10
def smart_load_from_upload(classname: str, f: UploadedFile) -> BasePriceList:
    '''
    Attempt to intelligently load the given Django UploadedFile,
    interpreting it as a price list for the given schedule class name.

    If interpreting it under the preferred schedule results in either
    a ValidationError or no valid rows, attempts will be made to
    re-interpret the file under different schedules. The first found
    that yields better interpretations of the data will be returned.

    If no better matches are found, the original result or exception
    (from interpreting the data under the preferred price list) will
    be returned.
    '''

    original_error = None
    pricelist: Optional[BasePriceList] = None

    try:
        pricelist = load_from_upload(classname, f)
    except ValidationError as e:
        original_error = e

    if original_error or (pricelist and not pricelist.valid_rows):
        # See if any of our other registered schedules can make better sense
        # of it.
        next_best_pricelist = None
        for fallback, _ in CHOICES:
            if fallback == classname:
                continue
            try:
                f.seek(0)
                next_best_pricelist = load_from_upload(fallback, f)
                if next_best_pricelist.valid_rows:
                    pricelist = next_best_pricelist
                    break
            except ValidationError:
                pass

    if pricelist is None:
        default_error = ValidationError('Unrecognized price list!')
        raise original_error or default_error

    return pricelist
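A minimal usage sketch (the view, template names, field name, and the 'MAS' schedule class are assumptions, not from the original source):

from django.core.exceptions import ValidationError
from django.shortcuts import render

def import_price_list(request):
    f = request.FILES['price_list']                   # hypothetical field name
    try:
        pricelist = smart_load_from_upload('MAS', f)  # hypothetical class name
    except ValidationError as e:
        return render(request, 'import.html', {'errors': e.messages})
    return render(request, 'review.html', {'rows': pricelist.valid_rows})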
Example #11
 def parse_file(self, file: UploadedFile, _) -> Iterable[Article]:
     data = file.read()
     try:
         for para in self.split_file(data):
             yield self.parse_document(para)
     except ApaError:
         log.error("APA parse attempt failed.")
         if settings.DEBUG:
             log.error("The generated HTML can be found in /tmp/apa_unrtf.html")
         raise
Example #12
def fileupload(request):
    if request.method == 'POST':
        
        cc = request.POST.get('cc')

        myexcel = request.FILES['files[]']
        excel_obj = UploadedFile(myexcel)
        workbook = xlrd.open_workbook(file_contents = excel_obj.read())
        all_worksheets = workbook.sheet_names()
        
        worksheet_name = all_worksheets[0]
        worksheet = workbook.sheet_by_name(worksheet_name)
        
        for rownum in range(worksheet.nrows):
            tmp = []
            for entry in worksheet.row_values(rownum):
                tmp.append(entry)
            print(tmp)
        
        return JsonResponse({'status': 'fileupload_ok'})
Example #13
    def file_complete(self, file_size):
        mp_file_size = sum(part.size for part in self.mp)

        if file_size > self.max_file_size or mp_file_size > self.max_file_size:
            raise FileExceedsSizeLimit

        if file_size != mp_file_size:
            raise Exception("Uploaded file size doesn't match computed file size.")

        self.mp.complete_upload()

        # save local metadata
        self.row.size = file_size
        self.row.save()
        self.row.releaseLock()

        uploaded_file = UploadedFile()
        uploaded_file.download_key = self.row.getKey()
        uploaded_file.name = self.row.filename

        return uploaded_file
Example #14
    def _decode_files(self, files):
        """
        Helper method that when given *files* -- a ``dict`` with the
        structure::

            {
                "<field_name>": {
                    "file_storage_key": "<unicode>",
                    "name": "<unicode>",
                    "content_type": "<unicode>",
                    "size": "<int>",
                    "charset": "<unicode>",
                },
                ...
            }

        a new ``dict`` is returned with the structure::

            {
                "<field_name>": <UploadedFile object>,
                ...
            }

        """
        if files is None:
            return None
        decoded = {}
        for name, data in files.items():
            key = data.pop('file_storage_key')
            uploaded_file = UploadedFile(file=self.file_storage.open(key),
                                         **data)
            # In order to ensure that files aren't repeatedly saved to the file
            # storage, the filename of each file in the file storage is added
            # to ``UploadedFile`` objects as a ``_wizard_file_storage_key``
            # attribute when they're decoded. This acts as a marker to indicate
            # that the file already exists in the file storage.
            uploaded_file._wizard_file_storage_key = key
            decoded[name] = uploaded_file
        return decoded
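A hedged sketch of the inverse operation (hypothetical; the wizard's real encoder may differ): serializing a dict of UploadedFile objects into the structure documented above, saving each file into self.file_storage only once thanks to the _wizard_file_storage_key marker:

    def _encode_files(self, files):
        if files is None:
            return None
        encoded = {}
        for name, uploaded_file in files.items():
            key = getattr(uploaded_file, '_wizard_file_storage_key', None)
            if key is None:
                # not yet in the storage: save once and remember the key
                key = self.file_storage.save(uploaded_file.name, uploaded_file)
            encoded[name] = {
                'file_storage_key': key,
                'name': uploaded_file.name,
                'content_type': uploaded_file.content_type,
                'size': uploaded_file.size,
                'charset': uploaded_file.charset,
            }
        return encoded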
Example #15
def save_document(request_file, content_subdir, related_obj, ashash = True):
	uploadedfile = UploadedFile(request_file)
	file_content = uploadedfile.read()
	doc_obj = Document()
	doc_obj.filehash = md5(file_content).hexdigest()
	doc_obj.urlencfilename = quote(uploadedfile.name)
	doc_obj.filename = uploadedfile.name
	doc_obj.content_type = uploadedfile.file.content_type
	if ashash:
		doc_obj.filepath = settings.BASE_DIR + content_subdir + doc_obj.filehash
	else:
		doc_obj.filepath = settings.BASE_DIR + content_subdir + doc_obj.filename
	if related_obj.__class__.__name__.lower() == "queryset":
		if len(related_obj) == 1:
			setattr(doc_obj, related_obj[0].__class__.__name__.lower(), related_obj[0])
		else:
			print "ERROR: The queryset object had %s elements to it" % str(len(related_obj))
	else:
		setattr(doc_obj, related_obj.__class__.__name__.lower(), related_obj)
	doc_obj.save()

	with open(doc_obj.filepath, "wb") as wfile:
		wfile.write(file_content)
Example #16
def upload(req):
    if req.method == 'POST':
        if 'file' in req.FILES:
            file = req.FILES['file']
            filename = file.name

            data = file.read()
            # cv2.imread expects a filesystem path; decode the in-memory
            # bytes instead (assumes numpy is imported as np)
            image2 = cv2.imdecode(np.frombuffer(data, np.uint8), 0)
            print(image2)
            # apps.ImageTrans.getGrayscale(file)

            file.seek(0)  # rewind before re-reading the chunks
            with open('%s/%s' % ('image/data', filename), 'wb') as fp:
                for chunk in file.chunks():
                    fp.write(chunk)
            return HttpResponse('File Uploaded')
    return HttpResponse('Failed to Upload File')
Example #17
    def seed_instances(self, source_version, form, periods, mapping_version, fixed_instance_count=None):
        for org_unit in source_version.orgunit_set.all():
            instances = []
            for period in periods:
                if fixed_instance_count and "Clinic" in org_unit.name:
                    instance_by_ou_periods = randint(1, fixed_instance_count)
                else:
                    instance_by_ou_periods = 2 if randint(1, 100) == 50 else 1

                with_location = randint(1, 3) == 2
                # print("generating", form.name, org_unit.name, instance_by_ou_periods)
                for instance_count in range(0, instance_by_ou_periods):
                    instance = Instance(project=self.project)
                    instance.created_at = parse_datetime("2018-02-16T11:00:00+00")
                    instance.org_unit = org_unit
                    instance.period = period
                    instance.file_name = "fake_it_until_you_make_it.xml"
                    instance.uuid = str(uuid4())
                    if with_location:
                        instance.location = Point(-11.7868289 + (2 * random()), 8.4494988 + (2 * random()), 0)

                    test_data = {"_version": 1}

                    if mapping_version and "question_mappings" in mapping_version.json:
                        # quality or quantity
                        for key in mapping_version.json["question_mappings"]:
                            test_data[key] = randint(1, 10)
                    else:
                        # CVS
                        test_data["cs_304"] = randint(1, 100)

                    instance.json = test_data
                    instance.form = form

                    if mapping_version and mapping_version.mapping.is_event_tracker():
                        instance.json.clear()
                        instance.json = {
                            "DE_2005736": "2.5",
                            "DE_2006098": "5",
                            "DE_2006101": "1",
                            "DE_2006103": "Exclusive",
                            "DE_2006104": "3",
                            "DE_2008294": "NVP only",
                            "DE_391382": "dsd",
                            "DE_424405": "",
                            "MMD_PER_NAM": "kid " + str(randint(1, 10000)),
                            "gender": "Male" if randint(1, 10) < 5 else "Female",
                            "is_existing": "0",
                            "last_name": "Skywalker",
                            "_version": 1,
                            "households_note": "",
                            "hh_repeat": [
                                {
                                    "hh_name": "household 1",
                                    "hh_gender": "Male" if randint(1, 10) < 5 else "Female",
                                    "hh_age": randint(18, 65),
                                    "hh_street": "streeet 1",
                                    "hh_number": "44b",
                                    "hh_city": "bxl",
                                },
                                {
                                    "hh_name": "household 2",
                                    "hh_gender": "Male" if randint(1, 10) < 5 else "Female",
                                    "hh_age": randint(18, 65),
                                    "hh_street": "street b",
                                    "hh_number": "45",
                                    "hh_city": "Namur",
                                },
                            ],
                            "instanceID": "uuid:" + instance.uuid,
                        }

                        self.generate_xml_file(instance, form.latest_version)
                    else:
                        instance.json["instanceID"] = "uuid:" + str(uuid4())
                        xml_string = (
                            open("./testdata/seed-data-command-instance.xml")
                            .read()
                            .replace("REPLACEUUID", instance.json["instanceID"])
                        )
                        buffer = BytesIO(xml_string.encode("utf-8"))
                        buffer.seek(0, 2)
                        file = InMemoryUploadedFile(
                            file=buffer,
                            field_name="file",
                            name=instance.file_name,
                            content_type="application/xml",
                            size=buffer.tell(),
                            charset="utf-8",
                        )

                        instance.file = file


                    instances.append(instance)
            Instance.objects.bulk_create(instances)
Example #18
 def _save(self, name, content):
     name = self._normalise_name(name)
     name = self._prepend_prefix(name)
     content = UploadedFile(content, name)
     response = self._upload(name, content)
     return response['public_id']
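For context, a hedged sketch of how a backend exposing this _save is normally driven through Django's standard storage API (CloudinaryStorage is a hypothetical stand-in for the surrounding class):

from django.core.files.base import ContentFile

storage = CloudinaryStorage()  # hypothetical backend defining _save above
# Storage.save() normalizes the name, calls _save(), and returns its result,
# so public_id is whatever the upload response reported.
public_id = storage.save('avatars/me.png', ContentFile(b'...png bytes...'))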
Example #19
 def test_clean_valid_mime(self):
     form = ResumableForm()
     f = UploadedFile(file=None, name="sound.ogg", content_type="audio/ogg")
     self.assertEqual(f, form.fields.get('file').clean(None, f))
Example #20
    def test_case(self):
        response = self.client.post(reverse('polygon:revision_case_create',
                                            kwargs=self.kwargs),
                                    data={
                                        "option": "text",
                                        "input_text": "this is input",
                                        "output_text": "this is output",
                                        "in_samples": False,
                                        "output_lock": False,
                                        "case_number": 0,
                                        "activated": True
                                    })
        self.assertEqual(self.revision.cases.count(), 1)
        my_case = self.revision.cases.first()
        self.assertNotEqual(my_case.fingerprint, "invalid")
        self.assertEqual(my_case.input_preview, "this is input")
        self.assertEqual(my_case.output_preview, "this is output")
        self.assertEqual(my_case.case_number, 1)

        response = self.client.post(
            reverse('polygon:revision_case_create', kwargs=self.kwargs),
            data={
                "option": "single",
                "input_file": SimpleUploadedFile("input", b"this is input 3"),
                "output_file": SimpleUploadedFile("output",
                                                  b"this is output 3"),
                "case_number": 3,
                "activated": True
            })
        self.assertEqual(self.revision.cases.count(), 2)
        my_case = self.revision.cases.get(pk=2)
        self.assertNotEqual(my_case.fingerprint, "invalid")
        self.assertEqual(my_case.input_preview, "this is input 3")
        self.assertEqual(my_case.output_preview, "this is output 3")
        self.assertEqual(my_case.case_number, 3)

        zip_location = path.join(BASE_LOCATION, "asset", "d.zip")
        with open(zip_location, "rb") as f:
            response = self.client.post(reverse('polygon:revision_case_create',
                                                kwargs=self.kwargs),
                                        data={
                                            "option": "batch",
                                            "batch_file": UploadedFile(f),
                                            "case_number": 2,
                                            "activated": True
                                        })
        self.assertEqual(self.revision.cases.count(), 4)
        mmap = {1: 1, 2: 4, 3: 2, 4: 3, 5: 4}
        input_map = {1: "this", 2: "this", 3: "1 2", 4: "2 2", 5: "this"}
        for case in self.revision.cases.all():
            self.assertNotEqual(case.pk, 2)
            self.assertEqual(mmap[case.pk], case.case_number)
            self.assertIn(input_map[case.pk], case.input_preview)

        with open(zip_location, "rb") as f:
            response = self.client.post(reverse('polygon:revision_case_create',
                                                kwargs=self.kwargs),
                                        data={
                                            "option": "batch",
                                            "batch_file": UploadedFile(f),
                                            "case_number": 0,
                                            "activated": True
                                        })
        self.assertEqual(self.revision.cases.count(), 6)
        self.assertEqual(
            len(
                set(self.revision.cases.all().values_list("fingerprint",
                                                          flat=True))), 4)

        # update case
        p_kwargs = {"cpk": 1}
        p_kwargs.update(self.kwargs)
        response = self.client.post(reverse(
            'polygon:revision_case_update_file', kwargs=p_kwargs),
                                    data={
                                        "option": "text",
                                        "input_text": "new input",
                                        "output_text": "new output"
                                    })
        # print(response, response.__dict__)
        self.assertEqual(self.revision.cases.count(), 6)
        for case in self.revision.cases.all():
            # print(case.pk, case.parent_id)
            self.assertNotEqual(case.pk, 1)
Example #21
def calculate_checksum(uploaded_file: UploadedFile) -> str:
    md5 = hashlib.md5()
    for chunk in uploaded_file.chunks():
        md5.update(chunk)
    return md5.hexdigest()
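A quick usage sketch: SimpleUploadedFile provides an in-memory UploadedFile, so the checksum can be verified against hashlib directly.

import hashlib
from django.core.files.uploadedfile import SimpleUploadedFile

payload = b'hello world'
uploaded = SimpleUploadedFile('hello.txt', payload)
assert calculate_checksum(uploaded) == hashlib.md5(payload).hexdigest()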
Example #22
def accept_uploaded_photo(request, album_id):
    """
    Main Multiuploader module.
    Parses data from jQuery plugin and makes database changes.
    """
    if request.method == 'POST':
        logid = random.randint(0,1000)
        log.info('[%s] received POST to main multiuploader view' % logid)
        if not request.FILES:
            return HttpResponseBadRequest('Must have files attached!')

        #getting file data for farther manipulations
        file = request.FILES[u'files[]']
        wrapped_file = UploadedFile(file)
        filename = wrapped_file.name
        file_size = wrapped_file.file.size
        log.info ('[%s] Got file: "%s"' % (logid, str(filename)))

        # Write out file to disk as a temp file
        randnumber = logid # use the random number here too
        temp_filename = '%stmp%s_%s' % (settings.TEMP_DIRECTORY,randnumber, filename)
        log.info('[%s] Writing out to: %s' % (logid, temp_filename))
        destination = open(temp_filename, 'wb+')
        if wrapped_file.multiple_chunks():
            for chunk in wrapped_file.chunks():
                destination.write(chunk)
        else:
            destination.write(wrapped_file.read())
        destination.close()

        # Dump out EXIF Tags
#        im = Image.open(temp_filename)
#        if hasattr( im, '_getexif' ):
#            exifinfo = im._getexif()
#            if exifinfo:
#                for tag, value in exifinfo.items():
#                    decoded = ExifTags.TAGS.get(tag, tag)
#                    log.info('Found tag: %s, value: %s' % (decoded,value))

        orientation = None
        date_taken = None
        # Make full size and thumbsize
        try:
            im = Image.open(temp_filename)
        except IOError as e:
            log.info('[%s] Error opening file %s: %s %s' % (logid, temp_filename, e.errno, e))
            return HttpResponseBadRequest('Could not read file')

        if hasattr( im, '_getexif' ):
            exifinfo = im._getexif()
            if exifinfo:
                for tag, value in exifinfo.items():
                    decoded = ExifTags.TAGS.get(tag, tag)
#                    if decoded != 'MakerNote':
#                        if decoded != 'UserComment':
#                            log.info('Found tag: %s, value: %s' % (decoded,value))
                    if decoded == 'Orientation':
                        orientation = value
                        log.info('[%s] Found tag: %s, value: %s' % (logid,decoded,value))
                    elif decoded == 'DateTime':
                        date_taken =  datetime.strptime(value, "%Y:%m:%d %H:%M:%S")
                        log.info('[%s] Found tag: %s, value: %s, date_taken=%s' % (logid,decoded,value,date_taken))

        # We rotate regarding to the EXIF orientation information
        if orientation:
            if orientation == 1:
                # Nothing
                log.info('[%s] Orientation: No rotation necessary' % logid)
                pass
            elif orientation == 2:
                # Vertical Mirror
                log.info('[%s] Orientation: Vertical flip' % logid)
                im = im.transpose(Image.FLIP_LEFT_RIGHT)
            elif orientation == 3:
                # Rotation 180
                log.info('[%s] Orientation: Rotation 180' % logid)
                im = im.transpose(Image.ROTATE_180)
            elif orientation == 4:
                # Horizontal Mirror
                log.info('[%s] Orientation: Horizontal Mirror' % logid)
                im = im.transpose(Image.FLIP_TOP_BOTTOM)
            elif orientation == 5:
                # Horizontal Mirror + Rotation 270
                log.info('[%s] Orientation: Flip top bottom, rot 270' % logid)
                im = im.transpose(Image.FLIP_TOP_BOTTOM).transpose(Image.ROTATE_270)
            elif orientation == 6:
                # Rotation 270
                log.info('[%s] Orientation: Rotate 270' % logid)
                im = im.transpose(Image.ROTATE_270)
            elif orientation == 7:
                # Vertical Mirror + Rotation 270
                log.info('[%s] Orientation: Flip left right, rotate 270' % logid)
                im = im.transpose(Image.FLIP_LEFT_RIGHT).transpose(Image.ROTATE_270)
            elif orientation == 8:
                # Rotation 90
                log.info('[%s] Orientation: Rotate 90' % logid)
                im = im.transpose(Image.ROTATE_90)

        #------------------
        # Save the transposed image to disk
        orig_path = '%stmp%s_mod%s' % (settings.TEMP_DIRECTORY,randnumber, filename)
        # keep 100% fidelity on the image
        try:
            log.info('[%s] Writing corrected photo to path %s' % (logid,orig_path))
            im.save(orig_path, "JPEG", quality=100)
        except IOError as e:
            log.info('[%s] Error saving file %s: %s %s' % (logid, orig_path, e.errno, e))
            return HttpResponseBadRequest('Could not save file')

        #------------------
        # Save the photo object into the database
        album = Album.objects.get(id=album_id)
        photo = Photo()
        photo.album = album

        log.info('[%s] Determining photo order' % logid)
        #------------------
        # Determine where in the photo order this picture needs to be
        photo.order = 0
        if date_taken:
            photo.photodate = date_taken
            log.info('[%s] Date Taken is %s' % (logid,date_taken))
            # Now try to insert the photo by date taken in the order list
            prev_photo = photo.prev_photo_by_photodate()
            if prev_photo:
                log.info('got prev photo.  id=%s, photodate=%s, order=%s' % (prev_photo.id,prev_photo.photodate,prev_photo.order))
                photo.order = prev_photo.order
            else:
                # First in album
                photo.order = 0
        else:
            # Last in album
            photo.order = album.photo_set.count() + 1

        log.info('[%s] Writing photo entry to database' % logid)
        #------------------
        # Now finally write the entry to the db
        photo.save()
        log.info('[%s] Photo object saved.  id = %s, order = %s' % (logid, photo.id,photo.order))
        #album.reorder_photos()

        log.info('[%s] Attempting to save file %s to django model id %s' % (logid, orig_path, photo.id))
        with open(orig_path, 'rb') as f:
            photo.filename.save('%s.jpg' % photo.id, File(f))

        log.info('[%s] Cleaning up files' % logid)
        #clean up temp file
        unlink(temp_filename)
        unlink(orig_path)

        #settings imports
        file_delete_url = 'multi_delete/'

        thumbnail_options = dict(size=(200, 200), crop=True)
        thumb_url = get_thumbnailer(photo.filename).get_thumbnail(thumbnail_options).url

        #generating json response array
        result = []
        result.append({"name":filename,
                       "size":file_size,
                       "url": thumb_url,
                       "thumbnail_url":thumb_url,
                       "delete_url":'/',
                       "delete_type":"POST",})
        response_data = simplejson.dumps(result)

        #checking for json data type
        #big thanks to Guy Shapiro
        if "application/json" in request.META['HTTP_ACCEPT_ENCODING']:
            mimetype = 'application/json'
        else:
            mimetype = 'text/plain'
        return HttpResponse(response_data, mimetype=mimetype)
    else: #GET
        return HttpResponse('Only POST accepted')
Example #23
def handle_files(files, time, board):
    """
    Check and save files.

    :param files: files for handling
    :param time: current time
    :param board: post's board
    :return: json list of files features
    """
    _files = []
    for file in files.items():
        size = file[1].size
        if size > config['max_filesize']:
            return False
        name = file[1].name
        ext = name.split('.')[-1]
        if not ext.lower() in config['allowed_ext']:
            return False

        # file saving
        index = file[0].replace('file', '')  # equal 0 for first file and so on
        path = choose_path(board, 'src', time, ext, index)

        with open(path, 'wb+') as destination:
            for chunk in file[1].chunks():
                destination.write(chunk)

        # TODO: Refactor all this hell

        if ext.lower() == 'webm':
            temp_file = NamedTemporaryFile()
            temp_path = temp_file.name + '.png'
            call(["ffmpeg", "-i", path, "-vframes", "1", temp_path])
            temp_file.close()
            temp_th = open(temp_path, 'rb+')
            preview = UploadedFile(file=temp_th)
            thumb = make_thumb(preview)
            preview.close()
            image = Image.open(temp_path)
        else:
            image = Image.open(path)
            thumb = make_thumb(file[1])

        path = choose_path(board, 'thumb', time, 'jpg', index)

        destination = open(path, 'wb+')
        destination.write(thumb.read())
        destination.close()

        thumb = Image.open(path)

        filename = '{0}-{1}.{2}'.format(time, index, ext)

        file_data = {
            "name": name,
            "type": 0,  # content_type,
            "tmp_name": ".",  # ???
            "error": 0,
            "size": size,
            "filename": name,
            "extension": ext,
            "file_id": time,
            "file": filename,
            "thumb": '{0}-{1}.jpg'.format(time, index),
            "is_an_image": 0,  # content_type.split('/')[0] == 'image',
            "hash": "c5c76d11ff82103d18c3c9767bcb881e",  # TODO hash
            "width": image.width,
            "height": image.height,
            "thumbwidth": thumb.width,
            "thumbheight": thumb.height,
            "file_path": '{0}/src/{1}'.format(board, filename),
            "thumb_path": '{0}/thumb/{1}-{2}.jpg'.format(board, time, index)
        }
        image.close()
        thumb.close()
        _files.append(file_data)
    return _files
Example #24
 def mock_upload(filename):
     with open(get_testing_img_path(filename), 'rb') as img:
         uploaded_img = UploadedFile(img, 'test_img.png')
         return models.Illustration.upload_illustration(uploaded_img)
Example #25
    def test_shp_set_file_type_to_geo_feature_required(self):
        # here we are using a shp file for setting it
        # to Geo Feature file type which includes metadata extraction

        self._create_composite_resource()

        # add the 3 required files to the resource
        files = []
        shp_temp_file = os.path.join(self.temp_dir, self.states_shp_file_name)
        shutil.copy(self.states_shp_file, shp_temp_file)

        shx_temp_file = os.path.join(self.temp_dir, self.states_shx_file_name)
        shutil.copy(self.states_shx_file, shx_temp_file)

        dbf_temp_file = os.path.join(self.temp_dir, self.states_dbf_file_name)
        shutil.copy(self.states_dbf_file, dbf_temp_file)

        files.append(
            UploadedFile(file=open(shp_temp_file, 'rb'),
                         name=self.states_shp_file_name))
        files.append(
            UploadedFile(file=open(shx_temp_file, 'rb'),
                         name=self.states_shx_file_name))
        files.append(
            UploadedFile(file=open(dbf_temp_file, 'rb'),
                         name=self.states_dbf_file_name))
        hydroshare.utils.resource_file_add_process(self.composite_resource,
                                                   files, self.user)

        self.assertEqual(self.composite_resource.files.all().count(), 3)
        res_file = self.composite_resource.files.first()
        expected_folder_name = res_file.file_name[:-4]
        # check that the resource file is associated with GenericLogicalFile
        self.assertEqual(res_file.has_logical_file, True)
        self.assertEqual(res_file.logical_file_type_name, "GenericLogicalFile")
        # check that there are 3 GenericLogicalFile objects
        self.assertEqual(GenericLogicalFile.objects.count(), 3)

        # set the shp file to GeoFeatureFile type
        shp_res_file = [
            f for f in self.composite_resource.files.all()
            if f.extension == '.shp'
        ][0]
        GeoFeatureLogicalFile.set_file_type(self.composite_resource,
                                            shp_res_file.id, self.user)

        # test files in the file type
        self.assertEqual(self.composite_resource.files.count(), 3)
        # check that there is no GenericLogicalFile object
        self.assertEqual(GenericLogicalFile.objects.count(), 0)
        # check that there is one GeoFeatureLogicalFile object
        self.assertEqual(GeoFeatureLogicalFile.objects.count(), 1)
        logical_file = GeoFeatureLogicalFile.objects.first()
        self.assertEqual(logical_file.files.count(), 3)
        # check that the 3 resource files are now associated with GeoFeatureLogicalFile
        for res_file in self.composite_resource.files.all():
            self.assertEqual(res_file.logical_file_type_name,
                             "GeoFeatureLogicalFile")
            self.assertEqual(res_file.has_logical_file, True)
            self.assertTrue(
                isinstance(res_file.logical_file, GeoFeatureLogicalFile))
        # check that we put the 3 files in a new folder
        for res_file in self.composite_resource.files.all():
            file_path, base_file_name, _ = get_resource_file_name_and_extension(
                res_file)
            expected_file_path = "{}/data/contents/{}/{}"
            res_file.file_folder = expected_folder_name
            expected_file_path = expected_file_path.format(
                self.composite_resource.root_path, expected_folder_name,
                base_file_name)
            self.assertEqual(file_path, expected_file_path)
        # test extracted raster file type metadata
        # there should not be any resource level coverage
        self.assertEqual(self.composite_resource.metadata.coverages.count(), 0)
        self.assertNotEqual(logical_file.metadata.geometryinformation, None)
        self.assertEqual(
            logical_file.metadata.geometryinformation.featureCount, 51)
        self.assertEqual(
            logical_file.metadata.geometryinformation.geometryType,
            "MULTIPOLYGON")

        self.assertNotEqual(logical_file.metadata.originalcoverage, None)
        self.assertEqual(logical_file.metadata.originalcoverage.datum,
                         'unknown')
        self.assertEqual(
            logical_file.metadata.originalcoverage.projection_name, 'unknown')
        self.assertGreater(
            len(logical_file.metadata.originalcoverage.projection_string), 0)
        self.assertEqual(logical_file.metadata.originalcoverage.unit,
                         'unknown')
        self.assertEqual(logical_file.metadata.originalcoverage.eastlimit,
                         -66.9692712587578)
        self.assertEqual(logical_file.metadata.originalcoverage.northlimit,
                         71.406235393967)
        self.assertEqual(logical_file.metadata.originalcoverage.southlimit,
                         18.921786345087)
        self.assertEqual(logical_file.metadata.originalcoverage.westlimit,
                         -178.217598362366)

        # there should not be any file level keywords
        self.assertEqual(logical_file.metadata.keywords, [])

        self.composite_resource.delete()
        # there should be no GeoFeatureLogicalFile object at this point
        self.assertEqual(GeoFeatureLogicalFile.objects.count(), 0)
        # there should be no GeoFeatureFileMetaData object at this point
        self.assertEqual(GeoFeatureFileMetaData.objects.count(), 0)
Example #26
def multiuploader(request):
    """
    Main Multiuploader module.
    Parses data from jQuery plugin and makes database changes.
    """
    if request.method == 'POST':
        log.info('received POST to main multiuploader view')
        if not request.FILES:
            return HttpResponseBadRequest('Must have files attached!')

        #getting file data for farther manipulations
        file = request.FILES[u'files[]']
        wrapped_file = UploadedFile(file)
        filename = wrapped_file.name
        file_size = wrapped_file.file.size
        log.info('Got file: "%s"' % str(filename))
        log.info('Content type: "%s"' % file.content_type)

        #writing file manually into model
        #because we don't need form of any type.
        image = MultiuploaderImage()
        image.filename = str(filename)
        image.image = file
        image.key_data = image.key_generate
        image.save()
        log.info('File saving done')

        #getting thumbnail url using sorl-thumbnail
        if 'image' in file.content_type.lower():
            im = get_thumbnail(image, "80x80", quality=50)
            thumb_url = im.url
        else:
            thumb_url = ''

        #settings imports
        try:
            file_delete_url = settings.MULTI_FILE_DELETE_URL+'/'
            file_url = settings.MULTI_IMAGE_URL+'/'+image.key_data+'/'
        except AttributeError:
            file_delete_url = 'multi_delete/'
            file_url = 'multi_image/'+image.key_data+'/'

        """
        is actually: [{"name": "Screenshot from 2012-11-14 16:17:46.png", "url": "multi_image/95925526541943247735000327303075602114185579370918344597903504067450818566531/", "thumbnail_url": "/media/cache/f8/bd/f8bd83aadeba651ff9c040bb394ce117.jpg", "delete_type": "POST", "delete_url": "multi_delete/7/", "size": 38520}]
        should be:   {"files":[{"url":"http://jquery-file-upload.appspot.com/AMIfv9734HSTDGd3tIybbnKVru--IjhjULKvNcIGUL2lvfqA93RNCAizDbvP-RQJNbh-N9m8UXsk-90jFFYSp8TlbZYhEcNN6Vb9HzQVQtdmF83H6sE_XkdnlI2V8lHX5V3Y4AamdX6VMbAt9sNWNx2BVGzhTfAYkRLYmRE1VzzWSe9C8c8Fu8g/Screenshot%20from%202012-11-14%2016%3A17%3A46.png","thumbnail_url":"http://lh5.ggpht.com/fcjVNT6qUGoMDtqqaNDNtU4mghy34qlzfj2GujikLgC7Nj5Bs4LUT_DWG_Q8OWujqvYHsKbeQ9pkvoAW4WiaubmqQxobIPyt=s80","name":"Screenshot from 2012-11-14 16:17:46.png","type":"image/png","size":38520,"delete_url":"http://jquery-file-upload.appspot.com/AMIfv9734HSTDGd3tIybbnKVru--IjhjULKvNcIGUL2lvfqA93RNCAizDbvP-RQJNbh-N9m8UXsk-90jFFYSp8TlbZYhEcNN6Vb9HzQVQtdmF83H6sE_XkdnlI2V8lHX5V3Y4AamdX6VMbAt9sNWNx2BVGzhTfAYkRLYmRE1VzzWSe9C8c8Fu8g/Screenshot%20from%202012-11-14%2016%3A17%3A46.png?delete=true","delete_type":"DELETE"}]}
        """

        #generating json response array
        result = {
            'files': [ {"name":filename, 
                       "size":file_size, 
                       "url":file_url, 
                       "thumbnail_url":thumb_url,
                       "delete_url":file_delete_url+str(image.pk)+'/', 
                       "delete_type":"POST",}
                    ]
        }
        response_data = simplejson.dumps(result)
        
        #checking for json data type
        #big thanks to Guy Shapiro
        if "application/json" in request.META['HTTP_ACCEPT_ENCODING']:
            mimetype = 'application/json'
        else:
            mimetype = 'text/plain'
        return HttpResponse(response_data, mimetype=mimetype)
    else: #GET
        return HttpResponse('Only POST accepted')
Example #27
def multiuploader(request, noajax=False):
    """
    Main Multiuploader module.
    Parses data from jQuery plugin and makes database changes.
    """

    if request.method == 'POST':
        log.info('received POST to main multiuploader view')

        if request.FILES is None:
            response_data = [{"error": _('Must have files attached!')}]
            return HttpResponse(json.dumps(response_data))

        if u'form_type' not in request.POST:
            response_data = [{
                "error":
                _("Error when detecting form type, form_type is missing")
            }]
            return HttpResponse(json.dumps(response_data))

        signer = Signer()

        try:
            form_type = signer.unsign(request.POST.get(u"form_type"))
        except BadSignature:
            response_data = [{"error": _("Tampering detected!")}]
            return HttpResponse(json.dumps(response_data))

        form = MultiUploadForm(request.POST,
                               request.FILES,
                               form_type=form_type)

        if not form.is_valid():
            error = _("Unknown error")

            if "file" in form._errors and len(form._errors["file"]) > 0:
                error = form._errors["file"][0]

            response_data = [{"error": error}]
            return HttpResponse(json.dumps(response_data))

        file = request.FILES[u'file']
        wrapped_file = UploadedFile(file)
        filename = wrapped_file.name
        file_size = wrapped_file.file.size

        log.info('Got file: "%s"' % filename)

        #writing file manually into model
        #because we don't need form of any type.

        fl = MultiuploaderFile()
        fl.filename = filename
        fl.file = file
        fl.save()

        log.info('File saving done')

        thumb_url = ""

        try:
            im = get_thumbnail(fl.file, "80x80", quality=50)
            thumb_url = im.url
        except Exception as e:
            log.error(e)

        #generating json response array
        result = [{
            "id": fl.id,
            "name": filename,
            "size": file_size,
            "url": reverse('multiuploader_file_link', args=[fl.pk]),
            "thumbnail_url": thumb_url,
            "delete_url": reverse('multiuploader_delete', args=[fl.pk]),
            "delete_type": "POST",
        }]

        response_data = json.dumps(result)

        #checking for json data type
        #big thanks to Guy Shapiro

        if noajax:
            if request.META.get('HTTP_REFERER'):
                return redirect(request.META['HTTP_REFERER'])

        if "application/json" in request.META['HTTP_ACCEPT_ENCODING']:
            mimetype = 'application/json'
        else:
            mimetype = 'text/plain'
        return HttpResponse(response_data,
                            content_type="{0}; charset=utf-8".format(mimetype))
    else:  # GET
        return HttpResponse('Only POST accepted')
Example #28
    def partial_update(self, request, *args, **kwargs):
        """
        Transfer file data
        """

        # PATCHED: Validate upload session url kwarg
        upload_session_id = kwargs.get('upload_session_id')
        try:
            UploadSession.objects.get(pk=upload_session_id)
        except (UploadSession.DoesNotExist, ValidationError):
            return Response('Wrong upload session uid.',
                            status=status.HTTP_400_BAD_REQUEST)

        # Validate tus header
        if not has_required_tus_header(request):
            return Response('Missing "{}" header.'.format('Tus-Resumable'),
                            status=status.HTTP_400_BAD_REQUEST)

        # Validate content type
        if not self._is_valid_content_type(request):
            return Response(
                'Invalid value for "Content-Type" header: {}. Expected "{}".'.
                format(request.META['CONTENT_TYPE'],
                       TusUploadStreamParser.media_type),
                status=status.HTTP_400_BAD_REQUEST)

        # Retrieve object
        upload = self.get_object()

        # Get upload_offset
        upload_offset = getattr(request, constants.UPLOAD_OFFSET_NAME)

        # Validate upload_offset
        if upload_offset != upload.upload_offset:
            raise Conflict

        # Make sure there is a tempfile for the upload
        assert upload.get_or_create_temporary_file()

        # Change state
        if upload.state == states.INITIAL:
            upload.start_receiving()
            upload.save()

        # Get chunk from request
        chunk_bytes = self.get_chunk(request)

        # Check for data
        if not chunk_bytes:
            return Response('No data.', status=status.HTTP_400_BAD_REQUEST)

        # Check checksum  (http://tus.io/protocols/resumable-upload.html#checksum)
        upload_checksum = getattr(request,
                                  constants.UPLOAD_CHECKSUM_FIELD_NAME, None)
        if upload_checksum is not None:
            if upload_checksum[0] not in tus_api_checksum_algorithms:
                return Response('Unsupported Checksum Algorithm: {}.'.format(
                    upload_checksum[0]),
                                status=status.HTTP_400_BAD_REQUEST)
            elif not checksum_matches(upload_checksum[0], upload_checksum[1],
                                      chunk_bytes):
                return Response('Checksum Mismatch.', status=460)

        # Run chunk validator
        chunk_bytes = self.validate_chunk(upload_offset, chunk_bytes)

        # Check for data
        if not chunk_bytes:
            return Response(
                'No data. Make sure "validate_chunk" returns data.',
                status=status.HTTP_400_BAD_REQUEST)

        # Write file
        chunk_size = int(request.META.get('CONTENT_LENGTH', 102400))
        try:
            upload.write_data(chunk_bytes, chunk_size)
        except Exception as e:
            return Response(str(e), status=status.HTTP_400_BAD_REQUEST)

        headers = {
            'Upload-Offset': upload.upload_offset,
        }

        response_data = None

        if upload.upload_length == upload.upload_offset:
            # PATCHED: re-send request to our native upload() method
            from apps.project.api.v1 import UploadSessionViewSet

            file = UploadedFile(file=open(upload.temporary_file_path, 'rb'),
                                name=upload.filename,
                                size=upload.upload_length)
            request._files = MultiValueDict()
            request._files['file'] = file

            try:
                directory_path = os.path.dirname(
                    json.loads(upload.upload_metadata)['relativePath'])
            except (KeyError, TypeError, json.JSONDecodeError):
                directory_path = None

            response = UploadSessionViewSet(request=request,
                                            format_kwarg=upload_session_id,
                                            action='upload',
                                            kwargs={
                                                'pk': upload_session_id
                                            }).upload(
                                                request=request,
                                                pk=upload_session_id,
                                                review_file=False,
                                                directory_path=directory_path)
            if response.status_code != 200:
                return response
            response_data = response.data

            # Trigger signal
            signals.received.send(sender=upload.__class__, instance=upload)

        # Add upload expiry to headers
        add_expiry_header(upload, headers)

        return Response(data=response_data,
                        headers=headers,
                        status=status.HTTP_204_NO_CONTENT)
Example #29
    def test_metadata_on_content_file_delete(self):
        # test that some of the metadata is not deleted on content file deletion
        files = [
            UploadedFile(file=self.raster_tif_file_obj,
                         name=self.raster_tif_file_name)
        ]
        utils.resource_file_add_pre_process(resource=self.resRaster,
                                            files=files,
                                            user=self.user,
                                            extract_metadata=False)

        utils.resource_file_add_process(resource=self.resRaster,
                                        files=files,
                                        user=self.user,
                                        extract_metadata=True)
        # there should be 2 content files
        self.assertEqual(self.resRaster.files.all().count(), 2)

        # there should be 2 format elements
        self.assertEqual(self.resRaster.metadata.formats.all().count(), 2)
        self.assertEqual(
            self.resRaster.metadata.formats.all().filter(
                value='application/vrt').count(), 1)

        self.assertEqual(
            self.resRaster.metadata.formats.all().filter(
                value='image/tiff').count(), 1)

        # delete content file that we added above
        hydroshare.delete_resource_file(self.resRaster.short_id,
                                        self.raster_tif_file_name, self.user)

        # there should no content file
        self.assertEqual(self.resRaster.files.all().count(), 0)

        # there should be a title element
        self.assertNotEqual(self.resRaster.metadata.title, None)

        # there should be no abstract element
        self.assertEqual(self.resRaster.metadata.description, None)

        # there should be 1 creator element
        self.assertEqual(self.resRaster.metadata.creators.all().count(), 1)

        # there should be no contributor element
        self.assertEqual(self.resRaster.metadata.contributors.all().count(), 0)

        # there should be no coverage element
        self.assertEqual(self.resRaster.metadata.coverages.all().count(), 0)

        # there should be no format element
        self.assertEqual(self.resRaster.metadata.formats.all().count(), 0)

        # there should be no subject element
        self.assertEqual(self.resRaster.metadata.subjects.all().count(), 0)

        # testing extended metadata elements - there should not be any resource specific metadata
        self.assertEqual(self.resRaster.metadata.originalCoverage, None)

        self.assertEqual(self.resRaster.metadata.cellInformation, None)
        self.assertEqual(self.resRaster.metadata.bandInformations.count(), 0)

        self.resRaster.delete()
Example #30
def netcdf_pre_add_files_to_resource(sender, **kwargs):
    nc_res = kwargs['resource']
    files = kwargs['files']
    validate_files_dict = kwargs['validate_files']
    source_names = kwargs['source_names']

    if __debug__:
        assert(isinstance(source_names, list))

    if len(files) > 1:
        # file number validation
        validate_files_dict['are_files_valid'] = False
        validate_files_dict['message'] = 'Only one file can be uploaded.'

    file_selected = False
    in_file_name = ''
    nc_file_name = ''
    if files:
        file_selected = True
        in_file_name = files[0].file.name
        nc_file_name = os.path.splitext(files[0].name)[0]
    elif source_names:
        nc_file_name = os.path.splitext(os.path.basename(source_names[0]))[0]
        ref_tmpfiles = utils.get_fed_zone_files(source_names)
        if ref_tmpfiles:
            in_file_name = ref_tmpfiles[0]
            file_selected = True

    if file_selected and in_file_name:
        # file type validation and existing metadata update and create new ncdump text file
        nc_dataset = nc_utils.get_nc_dataset(in_file_name)
        if isinstance(nc_dataset, netCDF4.Dataset):
            # delete all existing resource files and metadata related
            for f in ResourceFile.objects.filter(object_id=nc_res.id):
                delete_resource_file_only(nc_res, f)

            # update resource modification info
            user = kwargs['user']
            utils.resource_modified(nc_res, user, overwrite_bag=False)

            # extract metadata
            res_dublin_core_meta, res_type_specific_meta = nc_meta.get_nc_meta_dict(in_file_name)

            # update title info
            if res_dublin_core_meta.get('title'):
                if nc_res.metadata.title:
                    nc_res.metadata.title.delete()
                nc_res.metadata.create_element('title', value=res_dublin_core_meta['title'])

            # update description info
            if res_dublin_core_meta.get('description'):
                if nc_res.metadata.description:
                    nc_res.metadata.description.delete()
                nc_res.metadata.create_element('description',
                                               abstract=res_dublin_core_meta.get('description'))

            # update creator info
            if res_dublin_core_meta.get('creator_name'):
                name = res_dublin_core_meta.get('creator_name')
                email = res_dublin_core_meta.get('creator_email', '')
                url = res_dublin_core_meta.get('creator_url', '')
                arguments = dict(name=name, email=email, homepage=url)
                creator = nc_res.metadata.creators.all().filter(name=name).first()
                if creator:
                    order = creator.order
                    if order != 1:
                        creator.delete()
                        arguments['order'] = order
                        nc_res.metadata.create_element('creator', **arguments)
                else:
                    nc_res.metadata.create_element('creator', **arguments)

            # update contributor info
            if res_dublin_core_meta.get('contributor_name'):
                name_list = res_dublin_core_meta['contributor_name'].split(',')
                existing_contributor_names = [contributor.name
                                              for contributor in nc_res.metadata.contributors.all()]
                for name in name_list:
                    if name not in existing_contributor_names:
                        nc_res.metadata.create_element('contributor', name=name)

            # update subject info
            if res_dublin_core_meta.get('subject'):
                keywords = res_dublin_core_meta['subject'].split(',')
                existing_keywords = [subject.value for subject in nc_res.metadata.subjects.all()]
                for keyword in keywords:
                    if keyword not in existing_keywords:
                        nc_res.metadata.create_element('subject', value=keyword)

            # update source
            if res_dublin_core_meta.get('source'):
                for source in nc_res.metadata.sources.all():
                    source.delete()
                nc_res.metadata.create_element('source',
                                               derived_from=res_dublin_core_meta.get('source'))

            # update license element:
            if res_dublin_core_meta.get('rights'):
                raw_info = res_dublin_core_meta.get('rights')
                b = re.search(r"(?P<url>https?://[^\s]+)", raw_info)
                url = b.group('url') if b else ''
                statement = raw_info.replace(url, '') if url else raw_info
                if nc_res.metadata.rights:
                    nc_res.metadata.rights.delete()
                nc_res.metadata.create_element('rights', statement=statement, url=url)

            # update relation
            if res_dublin_core_meta.get('references'):
                nc_res.metadata.relations.filter(type='cites').all().delete()
                nc_res.metadata.create_element('relation', type='cites',
                                               value=res_dublin_core_meta['references'])

            # update box info
            nc_res.metadata.coverages.all().delete()
            if res_dublin_core_meta.get('box'):
                nc_res.metadata.create_element('coverage', type='box',
                                               value=res_dublin_core_meta['box'])

            # update period info
            if res_dublin_core_meta.get('period'):
                nc_res.metadata.create_element('coverage', type='period',
                                               value=res_dublin_core_meta['period'])

            # update variable info
            nc_res.metadata.variables.all().delete()
            for var_info in res_type_specific_meta.values():
                nc_res.metadata.create_element('variable',
                                               name=var_info['name'],
                                               unit=var_info['unit'],
                                               type=var_info['type'],
                                               shape=var_info['shape'],
                                               missing_value=var_info['missing_value'],
                                               descriptive_name=var_info['descriptive_name'],
                                               method=var_info['method'])

            # update the original spatial coverage meta
            nc_res.metadata.ori_coverage.all().delete()
            if res_dublin_core_meta.get('original-box'):
                if res_dublin_core_meta.get('projection-info'):
                    nc_res.metadata.create_element(
                        'originalcoverage',
                        value=res_dublin_core_meta['original-box'],
                        projection_string_type=res_dublin_core_meta['projection-info']['type'],
                        projection_string_text=res_dublin_core_meta['projection-info']['text'],
                        datum=res_dublin_core_meta['projection-info']['datum'])
                else:
                    nc_res.metadata.create_element('originalcoverage',
                                                   value=res_dublin_core_meta['original-box'])

            # create the ncdump text file
            dump_file = create_header_info_txt_file(in_file_name, nc_file_name)
            dump_file_name = nc_file_name + '_header_info.txt'
            uploaded_file = UploadedFile(file=open(dump_file, 'rb'), name=dump_file_name)
            files.append(uploaded_file)

        else:
            validate_files_dict['are_files_valid'] = False
            validate_files_dict['message'] = 'Please check if the uploaded file is in ' \
                                             'valid NetCDF format.'

        if source_names and in_file_name:
            shutil.rmtree(os.path.dirname(in_file_name))
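
The rights handling above splits a free-text rights string into a license URL and a statement. A minimal, self-contained sketch of just that step, with a strip() added for tidiness (the sample string is made up):

import re

raw_info = 'CC BY 4.0 https://creativecommons.org/licenses/by/4.0/'
match = re.search(r"(?P<url>https?://[^\s]+)", raw_info)
url = match.group('url') if match else ''
statement = raw_info.replace(url, '').strip() if url else raw_info
# statement == 'CC BY 4.0'
# url == 'https://creativecommons.org/licenses/by/4.0/'
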
Example #31
0
    def test_copy_composite_resource(self):
        """Test that logical file type objects gets copied along with the metadata that each
        logical file type object contains. Here we are not testing resource level metadata copy
        as that has been tested in separate unit tests"""

        self.raster_obj = open(self.temp_raster_file, 'rb')
        files = [UploadedFile(file=self.raster_obj, name='cea.tif')]
        self.composite_resource = hydroshare.create_resource(
            resource_type='CompositeResource',
            owner=self.owner,
            title='Test Composite Resource',
            files=files,
            auto_aggregate=False)

        # run the resource post creation signal
        utils.resource_post_create_actions(
            resource=self.composite_resource,
            user=self.owner,
            metadata=self.composite_resource.metadata)

        self.assertEqual(self.composite_resource.files.all().count(), 1)
        res_file = self.composite_resource.files.first()

        # check that the resource file is not associated with file type
        self.assertEqual(res_file.has_logical_file, False)

        # set the tif file to GeoRasterFile type
        GeoRasterLogicalFile.set_file_type(self.composite_resource, self.owner,
                                           res_file.id)

        # ensure a nonowner who does not have permission to view a resource cannot copy it
        with self.assertRaises(PermissionDenied):
            hydroshare.create_empty_resource(self.composite_resource.short_id,
                                             self.nonowner,
                                             action='copy')
        # give nonowner view privilege so nonowner can create a new copy of this resource
        self.owner.uaccess.share_resource_with_user(self.composite_resource,
                                                    self.nonowner,
                                                    PrivilegeCodes.VIEW)

        orig_res_file = self.composite_resource.files.first()
        orig_geo_raster_lfo = orig_res_file.logical_file

        # add some key value metadata
        orig_geo_raster_lfo.metadata.extra_metadata = {
            'key-1': 'value-1',
            'key-2': 'value-2'
        }

        # create a copy of the composite resource
        new_composite_resource = hydroshare.create_empty_resource(
            self.composite_resource.short_id, self.nonowner, action='copy')
        new_composite_resource = hydroshare.copy_resource(
            self.composite_resource, new_composite_resource)
        # check that there are 2 GeoRasterLogicalFile objects
        self.assertEqual(GeoRasterLogicalFile.objects.count(), 2)

        # compare the 2 GeoRasterLogicalFile objects from the original resource and the new one
        orig_res_file = self.composite_resource.files.first()
        orig_geo_raster_lfo = orig_res_file.logical_file
        copy_res_file = new_composite_resource.files.first()
        copy_geo_raster_lfo = copy_res_file.logical_file

        # check that we put the 2 files in a new folder (cea)
        for res_file in self.composite_resource.files.all():
            file_path, base_file_name = res_file.full_path, res_file.file_name
            expected_file_path = "{}/data/contents/cea/{}"
            expected_file_path = expected_file_path.format(
                self.composite_resource.root_path, base_file_name)
            self.assertEqual(file_path, expected_file_path)

        for res_file in new_composite_resource.files.all():
            file_path, base_file_name = res_file.full_path, res_file.file_name
            expected_file_path = "{}/data/contents/cea/{}"
            expected_file_path = expected_file_path.format(
                new_composite_resource.root_path, base_file_name)
            self.assertEqual(file_path, expected_file_path)

        # both logical file objects should have 2 resource files
        self.assertEqual(orig_geo_raster_lfo.files.count(),
                         copy_geo_raster_lfo.files.count())
        self.assertEqual(orig_geo_raster_lfo.files.count(), 2)

        # both logical file objects should have same dataset_name
        self.assertEqual(orig_geo_raster_lfo.dataset_name,
                         copy_geo_raster_lfo.dataset_name)
        # both should have same key/value metadata
        self.assertEqual(orig_geo_raster_lfo.metadata.extra_metadata,
                         copy_geo_raster_lfo.metadata.extra_metadata)

        # both logical file objects should have same coverage metadata
        self.assertEqual(orig_geo_raster_lfo.metadata.coverages.count(),
                         copy_geo_raster_lfo.metadata.coverages.count())

        self.assertEqual(orig_geo_raster_lfo.metadata.coverages.count(), 1)
        org_spatial_coverage = orig_geo_raster_lfo.metadata.spatial_coverage
        copy_spatial_coverage = copy_geo_raster_lfo.metadata.spatial_coverage
        self.assertEqual(org_spatial_coverage.type, copy_spatial_coverage.type)
        self.assertEqual(org_spatial_coverage.type, 'box')
        self.assertEqual(org_spatial_coverage.value['projection'],
                         copy_spatial_coverage.value['projection'])
        self.assertEqual(org_spatial_coverage.value['units'],
                         copy_spatial_coverage.value['units'])
        self.assertEqual(org_spatial_coverage.value['northlimit'],
                         copy_spatial_coverage.value['northlimit'])
        self.assertEqual(org_spatial_coverage.value['eastlimit'],
                         copy_spatial_coverage.value['eastlimit'])
        self.assertEqual(org_spatial_coverage.value['southlimit'],
                         copy_spatial_coverage.value['southlimit'])
        self.assertEqual(org_spatial_coverage.value['westlimit'],
                         copy_spatial_coverage.value['westlimit'])

        # both logical file objects should have same original coverage
        org_orig_coverage = orig_geo_raster_lfo.metadata.originalCoverage
        copy_orig_coverage = copy_geo_raster_lfo.metadata.originalCoverage
        self.assertEqual(org_orig_coverage.value['projection'],
                         copy_orig_coverage.value['projection'])
        self.assertEqual(org_orig_coverage.value['units'],
                         copy_orig_coverage.value['units'])
        self.assertEqual(org_orig_coverage.value['northlimit'],
                         copy_orig_coverage.value['northlimit'])
        self.assertEqual(org_orig_coverage.value['eastlimit'],
                         copy_orig_coverage.value['eastlimit'])
        self.assertEqual(org_orig_coverage.value['southlimit'],
                         copy_orig_coverage.value['southlimit'])
        self.assertEqual(org_orig_coverage.value['westlimit'],
                         copy_orig_coverage.value['westlimit'])

        # both logical file objects should have same cell information metadata
        orig_cell_info = orig_geo_raster_lfo.metadata.cellInformation
        copy_cell_info = copy_geo_raster_lfo.metadata.cellInformation
        self.assertEqual(orig_cell_info.rows, copy_cell_info.rows)
        self.assertEqual(orig_cell_info.columns, copy_cell_info.columns)
        self.assertEqual(orig_cell_info.cellSizeXValue,
                         copy_cell_info.cellSizeXValue)
        self.assertEqual(orig_cell_info.cellSizeYValue,
                         copy_cell_info.cellSizeYValue)
        self.assertEqual(orig_cell_info.cellDataType,
                         copy_cell_info.cellDataType)

        # both logical file objects should have same band information metadata
        self.assertEqual(orig_geo_raster_lfo.metadata.bandInformations.count(),
                         1)
        self.assertEqual(orig_geo_raster_lfo.metadata.bandInformations.count(),
                         copy_geo_raster_lfo.metadata.bandInformations.count())
        orig_band_info = orig_geo_raster_lfo.metadata.bandInformations.first()
        copy_band_info = copy_geo_raster_lfo.metadata.bandInformations.first()
        self.assertEqual(orig_band_info.noDataValue,
                         copy_band_info.noDataValue)
        self.assertEqual(orig_band_info.maximumValue,
                         copy_band_info.maximumValue)
        self.assertEqual(orig_band_info.minimumValue,
                         copy_band_info.minimumValue)

        # make sure to clean up all created resources to clean up iRODS storage
        if self.composite_resource:
            self.composite_resource.delete()
        if new_composite_resource:
            new_composite_resource.delete()
Example #32
0
    def test_metadata_on_content_file_delete(self):
        files = [
            UploadedFile(file=self.text_file_obj, name=self.text_file_obj.name)
        ]
        utils.resource_file_add_pre_process(resource=self.resModelProgram,
                                            files=files,
                                            user=self.user,
                                            extract_metadata=False)

        utils.resource_file_add_process(resource=self.resModelProgram,
                                        files=files,
                                        user=self.user,
                                        extract_metadata=False)

        self.resModelProgram.metadata.create_element('Description',
                                                     abstract="test abstract")
        self.resModelProgram.metadata.create_element('Subject',
                                                     value="test subject")
        release_date = '2016-10-24T21:05:00.315907+00:00'
        self.resModelProgram.metadata.create_element(
            'MpMetadata',
            modelVersion='5.1.011',
            modelProgramLanguage='Fortran',
            modelOperatingSystem='Windows',
            modelReleaseDate=release_date,
            modelWebsite='http://www.hydroshare.org',
            modelCodeRepository='http://www.github.com',
            modelReleaseNotes='releaseNote.pdf',
            modelDocumentation='manual.pdf',
            modelSoftware='utilities.exe',
            modelEngine='sourceCode.zip')

        # there should be one content file
        self.assertEquals(self.resModelProgram.files.all().count(), 1)

        # there should be one format element
        self.assertEquals(self.resModelProgram.metadata.formats.all().count(),
                          1)

        # the short path should just consist of the file name.
        self.assertEquals(self.resModelProgram.files.all()[0].short_path,
                          self.file_name)

        # delete content file that we added above; note that file name is a short_path
        hydroshare.delete_resource_file(self.resModelProgram.short_id,
                                        self.file_name, self.user)

        # there should be no content file
        self.assertEquals(self.resModelProgram.files.all().count(), 0)

        # test the core metadata at this point
        self.assertNotEquals(self.resModelProgram.metadata.title, None)

        # there should be an abstract element
        self.assertNotEquals(self.resModelProgram.metadata.description, None)

        # there should be one creator element
        self.assertEquals(self.resModelProgram.metadata.creators.all().count(),
                          1)

        # testing extended metadata elements
        self.assertNotEqual(self.resModelProgram.metadata.program, None)
Example #33
0
    def test_metadata_delete_on_resource_delete(self):
        # adding a valid netcdf file should generate some core metadata and all extended metadata
        files = [
            UploadedFile(file=self.netcdf_file_obj, name=self.netcdf_file_name)
        ]
        utils.resource_file_add_pre_process(resource=self.resNetcdf,
                                            files=files,
                                            user=self.user,
                                            extract_metadata=False)

        utils.resource_file_add_process(resource=self.resNetcdf,
                                        files=files,
                                        user=self.user,
                                        extract_metadata=True)

        # before resource delete
        # resource core metadata
        core_metadata_obj = self.resNetcdf.metadata
        self.assertEqual(CoreMetaData.objects.all().count(), 1)
        # there should be Creator metadata objects
        self.assertTrue(
            Creator.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be Contributor metadata objects
        self.assertTrue(
            Contributor.objects.filter(
                object_id=core_metadata_obj.id).exists())
        # there should be Identifier metadata objects
        self.assertTrue(
            Identifier.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be Type metadata objects
        self.assertTrue(
            Type.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be Source metadata objects
        self.assertTrue(
            Source.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be Relation metadata objects
        self.assertTrue(
            Relation.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be no Publisher metadata objects
        self.assertFalse(
            Publisher.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be Title metadata objects
        self.assertTrue(
            Title.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be Description (Abstract) metadata objects
        self.assertTrue(
            Description.objects.filter(
                object_id=core_metadata_obj.id).exists())
        # there should be Date metadata objects
        self.assertTrue(
            Date.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be Subject metadata objects
        self.assertTrue(
            Subject.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be Coverage metadata objects
        self.assertTrue(
            Coverage.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be Format metadata objects
        self.assertTrue(
            Format.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be Language metadata objects
        self.assertTrue(
            Language.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be Rights metadata objects
        self.assertTrue(
            Rights.objects.filter(object_id=core_metadata_obj.id).exists())

        # resource specific metadata
        # there should be original coverage metadata objects
        self.assertTrue(
            OriginalCoverage.objects.filter(
                object_id=core_metadata_obj.id).exists())
        # there should be Variable metadata objects
        self.assertTrue(
            Variable.objects.filter(object_id=core_metadata_obj.id).exists())

        # delete resource
        hydroshare.delete_resource(self.resNetcdf.short_id)
        self.assertEqual(CoreMetaData.objects.all().count(), 0)

        # there should be no Creator metadata objects
        self.assertFalse(
            Creator.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be no Contributor metadata objects
        self.assertFalse(
            Contributor.objects.filter(
                object_id=core_metadata_obj.id).exists())
        # there should be no Identifier metadata objects
        self.assertFalse(
            Identifier.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be no Type metadata objects
        self.assertFalse(
            Type.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be no Source metadata objects
        self.assertFalse(
            Source.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be no Relation metadata objects
        self.assertFalse(
            Relation.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be no Publisher metadata objects
        self.assertFalse(
            Publisher.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be no Title metadata objects
        self.assertFalse(
            Title.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be no Description (Abstract) metadata objects
        self.assertFalse(
            Description.objects.filter(
                object_id=core_metadata_obj.id).exists())
        # there should be no Date metadata objects
        self.assertFalse(
            Date.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be no Subject metadata objects
        self.assertFalse(
            Subject.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be no Coverage metadata objects
        self.assertFalse(
            Coverage.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be no Format metadata objects
        self.assertFalse(
            Format.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be no Language metadata objects
        self.assertFalse(
            Language.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be no Rights metadata objects
        self.assertFalse(
            Rights.objects.filter(object_id=core_metadata_obj.id).exists())

        # resource specific metadata
        # there should be no original coverage metadata objects
        self.assertFalse(
            OriginalCoverage.objects.filter(
                object_id=core_metadata_obj.id).exists())
        # there should be no Variable metadata objects
        self.assertFalse(
            Variable.objects.filter(object_id=core_metadata_obj.id).exists())
Example #34
0
    def test_metadata_delete_on_resource_delete(self):
        files = [
            UploadedFile(file=self.text_file_obj, name=self.text_file_obj.name)
        ]
        utils.resource_file_add_pre_process(resource=self.resModelProgram,
                                            files=files,
                                            user=self.user,
                                            extract_metadata=False)

        utils.resource_file_add_process(resource=self.resModelProgram,
                                        files=files,
                                        user=self.user,
                                        extract_metadata=False)

        self.resModelProgram.metadata.create_element('Description',
                                                     abstract="test abstract")
        self.resModelProgram.metadata.create_element('Subject',
                                                     value="test subject")
        release_date = '2016-10-24T21:05:00.315907+00:00'
        self.resModelProgram.metadata.create_element(
            'MpMetadata',
            modelVersion='5.1.011',
            modelProgramLanguage='Fortran',
            modelOperatingSystem='Windows',
            modelReleaseDate=release_date,
            modelWebsite='http://www.hydroshare.org',
            modelCodeRepository='http://www.github.com',
            modelReleaseNotes='releaseNote.pdf',
            modelDocumentation='manual.pdf',
            modelSoftware='utilities.exe',
            modelEngine='sourceCode.zip')
        self.resModelProgram.metadata.create_element('Contributor',
                                                     name="user2")

        # before resource delete
        core_metadata_obj = self.resModelProgram.metadata
        self.assertEqual(CoreMetaData.objects.all().count(), 1)
        # there should be Creator metadata objects
        self.assertTrue(
            Creator.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be Contributor metadata objects
        self.assertTrue(
            Contributor.objects.filter(
                object_id=core_metadata_obj.id).exists())
        # there should be Identifier metadata objects
        self.assertTrue(
            Identifier.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be Type metadata objects
        self.assertTrue(
            Type.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be no Source metadata objects
        self.assertFalse(
            Source.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be no Relation metadata objects
        self.assertFalse(
            Relation.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be no Publisher metadata objects
        self.assertFalse(
            Publisher.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be Title metadata objects
        self.assertTrue(
            Title.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be Description (Abstract) metadata objects
        self.assertTrue(
            Description.objects.filter(
                object_id=core_metadata_obj.id).exists())
        # there should be Date metadata objects
        self.assertTrue(
            Date.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be Subject metadata objects
        self.assertTrue(
            Subject.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be no Coverage metadata objects
        self.assertFalse(
            Coverage.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be Format metadata objects
        self.assertTrue(
            Format.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be Language metadata objects
        self.assertTrue(
            Language.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be Rights metadata objects
        self.assertTrue(
            Rights.objects.filter(object_id=core_metadata_obj.id).exists())

        # resource specific metadata
        # there should be Model Program metadata objects
        self.assertTrue(
            MpMetadata.objects.filter(object_id=core_metadata_obj.id).exists())

        # delete resource
        hydroshare.delete_resource(self.resModelProgram.short_id)
        self.assertEquals(CoreMetaData.objects.all().count(), 0)

        # there should be no Creator metadata objects
        self.assertFalse(
            Creator.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be no Contributor metadata objects
        self.assertFalse(
            Contributor.objects.filter(
                object_id=core_metadata_obj.id).exists())
        # there should be no Identifier metadata objects
        self.assertFalse(
            Identifier.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be no Type metadata objects
        self.assertFalse(
            Type.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be no Source metadata objects
        self.assertFalse(
            Source.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be no Relation metadata objects
        self.assertFalse(
            Relation.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be no Publisher metadata objects
        self.assertFalse(
            Publisher.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be no Title metadata objects
        self.assertFalse(
            Title.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be no Description (Abstract) metadata objects
        self.assertFalse(
            Description.objects.filter(
                object_id=core_metadata_obj.id).exists())
        # there should be no Date metadata objects
        self.assertFalse(
            Date.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be no Subject metadata objects
        self.assertFalse(
            Subject.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be no Coverage metadata objects
        self.assertFalse(
            Coverage.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be no Format metadata objects
        self.assertFalse(
            Format.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be no Language metadata objects
        self.assertFalse(
            Language.objects.filter(object_id=core_metadata_obj.id).exists())
        # there should be no Rights metadata objects
        self.assertFalse(
            Rights.objects.filter(object_id=core_metadata_obj.id).exists())

        # resource specific metadata
        # there should be no Model Program metadata objects
        self.assertFalse(
            MpMetadata.objects.filter(object_id=core_metadata_obj.id).exists())
Example #35
0
    def setUp(self):
        self.files = [open(pwd + "cat1.jpg", "rb"), open(pwd + "cat2.jpg", "rb")]
        self.uploads = [
            UploadedFile(f, f.name, "image/jpeg", os.path.getsize(f.name))
            for f in self.files
        ]
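
A matching tearDown would close the file handles opened above (a minimal sketch):

    def tearDown(self):
        for f in self.files:
            f.close()
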
Example #36
0
def multiuploader(request):
    """
    Main Multiuploader module.
    Parses data from jQuery plugin and makes database changes.
    """
    if request.method == 'POST':
        log.info('received POST to main multiuploader view')
        if not request.FILES:
            return HttpResponseBadRequest('Must have files attached!')

        #getting file data for further manipulation
        file = request.FILES[u'files[]']
        wrapped_file = UploadedFile(file)
        filename = wrapped_file.name
        file_size = wrapped_file.file.size
        log.info('Got file: "%s"' % str(filename))
        log.info('Content type: "%s"' % file.content_type)

        #writing file manually into model,
        #because we don't need a form of any type.
        image = MultiuploaderImage()
        image.filename = str(filename)
        image.image = file
        image.key_data = image.key_generate
        image.save()
        log.info('File saving done')

        #getting thumbnail url using sorl-thumbnail
        if 'image' in file.content_type.lower():
            im = get_thumbnail(image, "80x80", quality=50)
            thumb_url = im.url
        else:
            thumb_url = ''

        #settings imports
        try:
            file_delete_url = settings.MULTI_FILE_DELETE_URL + '/'
            file_url = settings.MULTI_IMAGE_URL + '/' + image.key_data + '/'
        except AttributeError:
            file_delete_url = 'multi_delete/'
            file_url = 'multi_image/' + image.key_data + '/'

        #generating json response array
        result = []
        result.append({
            "name": filename,
            "size": file_size,
            "url": file_url,
            "thumbnail_url": thumb_url,
            "delete_url": file_delete_url + str(image.pk) + '/',
            "delete_type": "POST",
        })
        response_data = simplejson.dumps(result)

        #checking for json data type
        #big thanks to Guy Shapiro
        if "application/json" in request.META['HTTP_ACCEPT_ENCODING']:
            mimetype = 'application/json'
        else:
            mimetype = 'text/plain'
        return HttpResponse(response_data, mimetype=mimetype)
    else:  #GET
        return HttpResponse('Only POST accepted')
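
For reference, a hedged sketch of exercising a view like this with Django's RequestFactory; it assumes the app's models and thumbnail settings are wired up, and the URL path here is illustrative:

from io import BytesIO
from django.test import RequestFactory

factory = RequestFactory()
upload = BytesIO(b'...')   # real PNG bytes are needed for the thumbnail branch
upload.name = 'cat.png'    # the factory derives the multipart filename from .name
request = factory.post('/multiuploader/', {'files[]': upload},
                       HTTP_ACCEPT='application/json')
response = multiuploader(request)  # returns the JSON array built above
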
Example #37
0
    def test_unicode_uploadedfile_name(self):
        uf = UploadedFile(name='¿Cómo?', content_type='text')
        self.assertIs(type(repr(uf)), str)
Example #38
0
    def set_file_type(cls, resource, file_id, user):
        """
            Sets a tif or zip raster resource file to GeoRasterFile type
            :param resource: an instance of resource type CompositeResource
            :param file_id: id of the resource file to be set as GeoRasterFile type
            :param user: user who is setting the file type
            :return:
            """

        # had to import it here to avoid import loop
        from hs_core.views.utils import create_folder, remove_folder

        log = logging.getLogger()

        # get the file from irods
        res_file = utils.get_resource_file_by_id(resource, file_id)

        # base file name (no path included)
        file_name = utils.get_resource_file_name_and_extension(res_file)[1]
        # file name without the extension
        file_name = file_name[:-len(res_file.extension)]
        file_folder = res_file.file_folder
        upload_folder = ''
        if res_file is not None and res_file.has_generic_logical_file:
            # get the file from irods to temp dir
            temp_file = utils.get_file_from_irods(res_file)
            # validate the file
            error_info, files_to_add_to_resource = raster_file_validation(
                raster_file=temp_file)
            if not error_info:
                log.info("Geo raster file type file validation successful.")
                # extract metadata
                temp_dir = os.path.dirname(temp_file)
                temp_vrt_file_path = [
                    os.path.join(temp_dir, f) for f in os.listdir(temp_dir)
                    if '.vrt' == os.path.splitext(f)[1]
                ].pop()
                metadata = extract_metadata(temp_vrt_file_path)
                log.info(
                    "Geo raster file type metadata extraction was successful.")
                with transaction.atomic():
                    # create a geo raster logical file object to be associated with resource files
                    logical_file = cls.create()
                    # by default set the dataset_name attribute of the logical file to the
                    # name of the file selected to set file type
                    logical_file.dataset_name = file_name
                    logical_file.save()

                    try:
                        # create a folder for the raster file type using the base file name as the
                        # name for the new folder
                        new_folder_path = cls.compute_file_type_folder(
                            resource, file_folder, file_name)

                        log.info("Folder created:{}".format(new_folder_path))
                        create_folder(resource.short_id, new_folder_path)

                        new_folder_name = new_folder_path.split('/')[-1]
                        if file_folder is None:
                            upload_folder = new_folder_name
                        else:
                            upload_folder = os.path.join(
                                file_folder, new_folder_name)

                        # add all new files to the resource
                        for f in files_to_add_to_resource:
                            uploaded_file = UploadedFile(
                                file=open(f, 'rb'), name=os.path.basename(f))
                            # the added resource file will be part of a new generic logical file
                            # by default
                            new_res_file = utils.add_file_to_resource(
                                resource, uploaded_file, folder=upload_folder)

                            # delete the generic logical file object
                            if new_res_file.logical_file is not None:
                                # deleting the file level metadata object will delete the associated
                                # logical file object
                                new_res_file.logical_file.metadata.delete()

                            # make each resource file we added as part of the logical file
                            logical_file.add_resource_file(new_res_file)

                        log.info(
                            "Geo raster file type - new files were added to the resource."
                        )

                        # use the extracted metadata to populate file metadata
                        for element in metadata:
                            # here k is the name of the element
                            # v is a dict of all element attributes/field names and field values
                            k, v = element.items()[0]
                            logical_file.metadata.create_element(k, **v)
                        log.info(
                            "Geo raster file type - metadata was saved to DB")
                        # set resource to private if logical file is missing required metadata
                        resource.update_public_and_discoverable()
                        # delete the original resource file
                        delete_resource_file(resource.short_id, res_file.id,
                                             user)
                        log.info("Deleted original resource file.")
                    except Exception as ex:
                        msg = "Geo raster file type. Error when setting file type. Error:{}"
                        msg = msg.format(ex.message)
                        log.exception(msg)
                        if upload_folder:
                            # delete any new files uploaded as part of setting file type
                            folder_to_remove = os.path.join(
                                'data', 'contents', upload_folder)
                            remove_folder(user, resource.short_id,
                                          folder_to_remove)
                            log.info("Deleted newly created file type folder")
                        raise ValidationError(msg)
                    finally:
                        # remove temp dir
                        if os.path.isdir(temp_dir):
                            shutil.rmtree(temp_dir)
            else:
                err_msg = "Geo raster file type file validation failed.{}".format(
                    ' '.join(error_info))
                log.info(err_msg)
                raise ValidationError(err_msg)
        else:
            if res_file is None:
                err_msg = "Failed to set Geo raster file type. " \
                          "Resource doesn't have the specified file."
                log.error(err_msg)
                raise ValidationError(err_msg)
            else:
                err_msg = "Failed to set Geo raster file type." \
                          "The specified file doesn't have a generic logical file type."
                log.error(err_msg)
                raise ValidationError(err_msg)
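
The set_file_type implementations in this listing share a shape: all database writes happen inside transaction.atomic() so an exception rolls them back, while file-system/iRODS side effects are undone by hand before re-raising. A stripped-down sketch of that pattern (the function and callback names are generic, not HydroShare APIs):

from django.core.exceptions import ValidationError
from django.db import transaction

def set_type_safely(make_db_records, undo_side_effects):
    try:
        with transaction.atomic():
            make_db_records()       # DB rows roll back automatically on error
    except Exception as ex:
        undo_side_effects()         # folders/files are not transactional
        raise ValidationError(str(ex))
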
Example #39
0
    def test_update_responses(self):
        formxml = FormSubmissionBuilder(
            form_id='123',
            form_properties={
                'breakfast': 'toast',   # Simple questions
                'lunch': 'sandwich',
                'cell': {               # Simple group
                    'cytoplasm': 'squishy',
                    'organelles': 'grainy',
                },
                'shelves': [            # Simple repeat group
                    {'position': 'top'},
                    {'position': 'middle'},
                    {'position': 'bottom'},
                ],
                'grandparent': [        # Repeat group with child group
                    {'name': 'Haruki'},
                    {'name': 'Sugako'},
                    {
                        'name': 'Emma',
                        'parent': {
                            'name': 'Haruki',
                            'child': {
                                'name': 'Nao',
                            },
                        }
                    },
                ],
                'body': [               # Repeat group with child repeat group
                    {'arm': [
                        {'elbow': '1'},
                        {'finger': '5'},
                    ]},
                    {'leg': [
                        {'knee': '1'},
                        {'toe': '5'},
                    ]},
                ],
            }
        ).as_xml_string()
        pic = UploadedFile(BytesIO(b"fake"),
                           'pic.jpg',
                           content_type='image/jpeg')
        xform = submit_form_locally(formxml,
                                    DOMAIN,
                                    attachments={
                                        "image": pic
                                    }).xform

        updates = {
            'breakfast': 'fruit',
            'cell/organelles': 'bulbous',
            'shelves[1]/position': 'third',
            'shelves[3]/position': 'first',
            'grandparent[1]/name': 'Haruki #1',
            'grandparent[3]/name': 'Ema',
            'grandparent[3]/parent/name': 'Haruki #2',
            'grandparent[3]/parent/child/name': 'Nao-chan',
            'body[1]/arm[1]/elbow': '2',
            'body[2]/leg[2]/toe': '10',
        }
        errors = FormProcessorInterface(DOMAIN).update_responses(
            xform, updates, 'user1')
        form = XFormInstance.objects.get_form(xform.form_id)
        self.assertEqual(0, len(errors))
        self.assertEqual('fruit', form.form_data['breakfast'])
        self.assertEqual('sandwich', form.form_data['lunch'])
        self.assertEqual('squishy', form.form_data['cell']['cytoplasm'])
        self.assertEqual('bulbous', form.form_data['cell']['organelles'])
        self.assertEqual('third', form.form_data['shelves'][0]['position'])
        self.assertEqual('middle', form.form_data['shelves'][1]['position'])
        self.assertEqual('first', form.form_data['shelves'][2]['position'])
        self.assertEqual('Haruki #1', form.form_data['grandparent'][0]['name'])
        self.assertEqual('Sugako', form.form_data['grandparent'][1]['name'])
        self.assertEqual('Ema', form.form_data['grandparent'][2]['name'])
        self.assertEqual('Haruki #2',
                         form.form_data['grandparent'][2]['parent']['name'])
        self.assertEqual(
            'Nao-chan',
            form.form_data['grandparent'][2]['parent']['child']['name'])
        self.assertEqual('2', form.form_data['body'][0]['arm'][0]['elbow'])
        self.assertEqual('5', form.form_data['body'][0]['arm'][1]['finger'])
        self.assertEqual('1', form.form_data['body'][1]['leg'][0]['knee'])
        self.assertEqual('10', form.form_data['body'][1]['leg'][1]['toe'])
        self.assertIn("image", form.attachments)
        self.assertEqual(form.get_attachment("image"), b"fake")
Example #40
0
def get_uploaded_file(filename, content_type=None):
    return UploadedFile(open(filename, 'rb'), os.path.basename(filename),
                        content_type or mimetypes.guess_type(filename)[0])
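
A short usage sketch for the helper above (the path is illustrative):

upload = get_uploaded_file('/tmp/report.pdf')
print(upload.name)          # 'report.pdf'
print(upload.content_type)  # 'application/pdf', guessed from the extension
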
Example #41
0
    def set_file_type(cls, resource, user, file_id=None, folder_path=None):
        """ Creates a NetCDFLogicalFile (aggregation) from a netcdf file (.nc) resource file
        or a folder """

        log = logging.getLogger()
        res_file, folder_path = cls._validate_set_file_type_inputs(
            resource, file_id, folder_path)

        # base file name (no path included)
        file_name = res_file.file_name
        # file name without the extension - needed for naming the new aggregation folder
        nc_file_name = file_name[:-len(res_file.extension)]

        resource_metadata = []
        file_type_metadata = []
        upload_folder = ''
        res_files_to_delete = []
        # get the file from irods to temp dir
        temp_file = utils.get_file_from_irods(res_file)
        temp_dir = os.path.dirname(temp_file)

        # file validation and metadata extraction
        nc_dataset = nc_utils.get_nc_dataset(temp_file)
        if isinstance(nc_dataset, netCDF4.Dataset):
            msg = "NetCDF aggregation. Error when creating aggregation. Error:{}"
            file_type_success = False
            # extract the metadata from netcdf file
            res_dublin_core_meta, res_type_specific_meta = nc_meta.get_nc_meta_dict(
                temp_file)
            # populate resource_metadata and file_type_metadata lists with extracted metadata
            add_metadata_to_list(resource_metadata, res_dublin_core_meta,
                                 res_type_specific_meta, file_type_metadata,
                                 resource)

            # create the ncdump text file
            dump_file = create_header_info_txt_file(temp_file, nc_file_name)
            file_folder = res_file.file_folder
            aggregation_folder_created = False
            create_new_folder = cls._check_create_aggregation_folder(
                selected_res_file=res_file,
                selected_folder=folder_path,
                aggregation_file_count=1)

            with transaction.atomic():
                # create a netcdf logical file object to be associated with
                # resource files
                dataset_title = res_dublin_core_meta.get('title', nc_file_name)
                logical_file = cls.initialize(dataset_title, resource)

                try:
                    if folder_path is None:
                        # we are here means aggregation is being created by selecting a file

                        # create a folder for the netcdf file type using the base file
                        # name as the name for the new folder if the file is not already in a folder
                        if create_new_folder:
                            upload_folder = cls._create_aggregation_folder(
                                resource, file_folder, nc_file_name)
                            aggregation_folder_created = True
                            log.info(
                                "NetCDF Aggregation creation - folder created:{}"
                                .format(upload_folder))
                        else:
                            # selected nc file is already in a folder
                            upload_folder = file_folder

                        # create logical file record in DB
                        logical_file.save()
                        if aggregation_folder_created:
                            # copy the nc file to the new aggregation folder and make it part
                            # of the logical file
                            tgt_folder = upload_folder
                            files_to_copy = [res_file]
                            logical_file.copy_resource_files(
                                resource, files_to_copy, tgt_folder)
                            res_files_to_delete.append(res_file)
                        else:
                            # make the selected nc file as part of the aggregation/file type
                            logical_file.add_resource_file(res_file)

                    else:
                        # logical file record gets created in DB
                        logical_file.save()
                        # folder has been selected to create aggregation
                        upload_folder = folder_path
                        # make the .nc file part of the aggregation
                        logical_file.add_resource_file(res_file)

                    # add the new dump txt file to the resource
                    uploaded_file = UploadedFile(
                        file=open(dump_file, 'rb'),
                        name=os.path.basename(dump_file))

                    new_res_file = utils.add_file_to_resource(
                        resource,
                        uploaded_file,
                        folder=upload_folder,
                        add_to_aggregation=False)

                    # make this new resource file we added part of the logical file
                    logical_file.add_resource_file(new_res_file)
                    log.info(
                        "NetCDF aggregation creation - a new file was added to the resource."
                    )

                    # use the extracted metadata to populate resource metadata
                    for element in resource_metadata:
                        # here k is the name of the element
                        # v is a dict of all element attributes/field names and field values
                        k, v = element.items()[0]
                        if k == 'title':
                            # update title element
                            title_element = resource.metadata.title
                            resource.metadata.update_element(
                                'title', title_element.id, **v)
                        else:
                            resource.metadata.create_element(k, **v)

                    log.info(
                        "NetCDF Aggregation creation - Resource metadata was saved to DB"
                    )

                    # use the extracted metadata to populate file metadata
                    for element in file_type_metadata:
                        # here k is the name of the element
                        # v is a dict of all element attributes/field names and field values
                        k, v = element.items()[0]
                        if k == 'subject':
                            logical_file.metadata.keywords = v
                            logical_file.metadata.save()
                            # update resource level keywords
                            resource_keywords = [
                                subject.value.lower() for subject in
                                resource.metadata.subjects.all()
                            ]
                            for kw in logical_file.metadata.keywords:
                                if kw.lower() not in resource_keywords:
                                    resource.metadata.create_element('subject',
                                                                     value=kw)
                        else:
                            logical_file.metadata.create_element(k, **v)
                    log.info(
                        "NetCDF aggregation - metadata was saved in aggregation"
                    )
                    logical_file._finalize(
                        user,
                        resource,
                        folder_created=aggregation_folder_created,
                        res_files_to_delete=res_files_to_delete)
                    file_type_success = True
                except Exception as ex:
                    msg = msg.format(ex.message)
                    log.exception(msg)
                finally:
                    # remove temp dir
                    if os.path.isdir(temp_dir):
                        shutil.rmtree(temp_dir)

            if not file_type_success:
                aggregation_from_folder = folder_path is not None
                cls._cleanup_on_fail_to_create_aggregation(
                    user, resource, upload_folder, file_folder,
                    aggregation_from_folder)
                raise ValidationError(msg)

        else:
            err_msg = "Not a valid NetCDF file. NetCDF aggregation validation failed."
            log.error(err_msg)
            # remove temp dir
            if os.path.isdir(temp_dir):
                shutil.rmtree(temp_dir)
            raise ValidationError(err_msg)
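
The subject handling above merges aggregation keywords into the resource-level keywords case-insensitively. A tiny sketch of just that merge, using plain lists instead of model objects:

resource_keywords = ['climate', 'snow']
aggregation_keywords = ['Snow', 'temperature']
existing = [kw.lower() for kw in resource_keywords]
new_keywords = [kw for kw in aggregation_keywords if kw.lower() not in existing]
# new_keywords == ['temperature']
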
Example #42
0
def thumbnailify(filebit, sizebit):
  from PIL import Image
  from os.path import splitext
  from io import BytesIO
  from django.core.files.uploadedfile import UploadedFile

  browser_kind = [  ".png",".jpg",".gif" ]
  jsc3d_kind = [  ".stl",".obj" ]
  text_kind = [".md",".txt"]
# text_kind = [ ".txt" ]
  ##ext os the file extension, forced into lowercase becouse people are insane.
  ext = str(splitext(str(filebit.filename))[1].lower())
  response = HttpResponse(mimetype="image/png")

  if ext in browser_kind:
    print("filebit.filename"+str(filebit.filename.name))
    img = Image.open(filebit.filename)
    img.thumbnail(sizebit)
    print(img) 
    # Create a file-like object to write thumb data (thumb data previously created
    # using PIL, and stored in variable 'img')
    # using PIL, and stored in variable 'thumb')
#    thumb_io = BytesIO()
    thumb_io = BytesIO()
    img.save( thumb_io, format='png')
  
    # Wrap the buffer in a Django UploadedFile so it can be assigned to a
    # FileField/ImageField and saved.
    thumb_file = UploadedFile(thumb_io)
    thumb_file.name = str(sizebit) + "-" + str(filebit.filename) + ".png"

    return (thumb_file, "browser")

  if ext in jsc3d_kind:
    from selenium import webdriver
    from django.conf import settings

    driver = webdriver.PhantomJS()
    driver.set_window_size(sizebit[0], sizebit[1])  # not optional
    driver.get(settings.URL + "/thumbs/jsc3d/" + str(filebit.pk))
    # Save a screenshot as a base64 string; the only format PhantomJS
    # supports that isn't written to disk.
    imagedata = driver.get_screenshot_as_base64()

    import base64
    # Convert the base64-encoded image data into a file object.
    thumb_io = BytesIO(base64.b64decode(imagedata))
    thumb_file = UploadedFile(thumb_io)
    thumb_file.name = str(sizebit) + "-" + str(filebit.filename) + ".png"

    return (thumb_file, "jsc3d")

  if ext in text_kind:
    return(False, "text")


  return(False, "norender") 
Example #43
0
def editOrCreateStuff(project, request, creating):

## Note: if creating == True, this is a post being created.
# Creating a post and editing a post are so similar that both share this
# function; we branch on `creating` wherever the two flows differ.

  ## POST mode! We are getting post data from the user!!!
    if request.method == 'POST':
    ## get the forms and check that they are valid
        formValid=False
        if creating:
            form = createForm(request.POST, project)
            form2 = defaulttag(request.POST)
            if form.is_valid() and form2.is_valid() and request.user.is_authenticated():
                formValid=True
                # If we are creating the post we need to set the author and title.
                project.author = request.user
                project.title = form.cleaned_data["title"]
        else:
            form = ProjectForm(request.POST, project)
            if form.is_valid() and str(project.author) == str(request.user):
                formValid=True
        ## If the form is valid, make the changes to the project!
        if formValid:

            # Editing the README.md file stuff.

            if not creating:
                # Delete the old body text file... 'cause I'm a bad person and
                # I don't know how to just open and write to the old one easily.
                try:
                    readme = project.bodyFile
                    readmename = path.split(str(readme.filename))[1]
                    readme.delete()
                except Exception:
                    pass

            # Save the body as a file.
            bodyText = fileobject()
            bodyText.parent = project

            from django.core.files.uploadedfile import UploadedFile
            from io import StringIO

            body_io = StringIO(form.cleaned_data["body"])
            txfl = UploadedFile(body_io)


            # editfield may be renaming your readme to README.md every time.
            # That's not good.
            try:
                bodyText.filename.save(readmename, txfl)
            except NameError:
                # No previous readme name (e.g. when creating): use the default.
                bodyText.filename.save('README.md', txfl)

            txfl.close()
            body_io.close()

            bodyText.save()
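
            # A more direct alternative (a sketch, not what this project does):
            # Django's ContentFile wraps a plain string, which would avoid the
            # StringIO + UploadedFile dance above.
            #
            #   from django.core.files.base import ContentFile
            #   bodyText.filename.save('README.md',
            #                          ContentFile(form.cleaned_data["body"]))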

            #### This did not appear to be happening in the create... but I think it should have been?
            project.bodyFile = bodyText

            # Done with editing the README.md text file.

            list_to_tags(form.cleaned_data["tags"], project.tags)
            if creating:
                for i in form2.cleaned_data["categories"]:
                    project.tags.add(i)

            # This may be redundant, but either way, this post is not a draft past this point.
            project.draft=False

            project.save()

            return HttpResponseRedirect('/project/'+str(project.pk))

        #### If the form data was NOT valid
        else:
            if creating:
                return render_to_response('create.html', dict(user=request.user,  form=form, form2=form2, project=project))
            else:
                if str(project.author) == str(request.user):
                    return render_to_response('edit.html', dict(project=project, user=request.user, form=form, ))
                else:
                    return HttpResponse(status=403)

    #### Not POST mode! We are setting up the form for the user to fill in.
    #### We are not getting form data from the user.

##### CREATE
    elif creating and request.user.is_authenticated():
        form = createForm("",project)
        form2 = defaulttag()
        return render_to_response('create.html', dict(user=request.user, form=form, form2=form2, project=project))

##### EDIT
    elif (not creating) and str(project.author) == str(request.user):
        if project.bodyFile:
            readme = project.bodyFile.filename.read()
        else:
            readme = project.body

        taglist = ",".join(project.tags.names())

        thumbnailstring = "/"+path.split(project.thumbnail.filename.url)[1]
        form = ProjectForm({'body': readme, 'thumbnail': thumbnailstring, 'tags' : str(taglist)}, project)
        return render_to_response('edit.html', dict(project=project, user=request.user, form=form,))
    else:
        return HttpResponse(status=403)
Example #44
0
def _read(file: UploadedFile, n=None):
    """Read the file, guessing encoding if needed"""
    binary_content = file.read(n)
    encoding = _get_encoding(file.encoding, binary_content)
    return binary_content.decode(encoding)
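
_get_encoding is defined elsewhere and not shown. A hypothetical sketch,
assuming the chardet package, consistent with how it is called above:

def _get_encoding(declared_encoding, binary_content):
    """Hypothetical helper: trust a declared encoding, else detect one."""
    import chardet
    if declared_encoding:
        return declared_encoding
    guess = chardet.detect(binary_content)  # e.g. {'encoding': 'utf-8', ...}
    return guess.get('encoding') or 'utf-8'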
Example #45
0
    def test_bulk_metadata_update(self):
        # here we are testing the update() method of the RasterMetaData class

        # update of resource specific metadata should fail when the resource does not have content
        # files
        self.assertEqual(self.resRaster.files.all().count(), 0)
        self.assertEqual(
            self.resRaster.metadata.bandInformations.all().count(), 0)
        band_data = {
            'original_band_name': 'bandinfo',
            'name': 'Band_1',
            'variableName': 'digital elevation',
            'variableUnit': 'meter',
            'method': 'this is method',
            'comment': 'this is comment',
            'maximumValue': 1000,
            'minimumValue': 0,
            'noDataValue': -9999
        }
        metadata = []
        metadata.append({'bandinformation': band_data})
        with self.assertRaises(ValidationError):
            self.resRaster.metadata.update(metadata)
        self.assertEqual(
            self.resRaster.metadata.bandInformations.all().count(), 0)
        del metadata[:]
        # adding a valid tiff file should generate some core metadata and all extended metadata
        files = [
            UploadedFile(file=self.raster_tif_file_obj,
                         name=self.raster_tif_file_name)
        ]
        utils.resource_file_add_process(resource=self.resRaster,
                                        files=files,
                                        user=self.user,
                                        extract_metadata=False)

        # testing extended metadata element: band information
        self.assertEqual(self.resRaster.metadata.bandInformations.count(), 1)
        band_info = self.resRaster.metadata.bandInformations.first()
        self.assertEqual(band_info.noDataValue, '-3.40282346639e+38')
        self.assertEqual(band_info.maximumValue, '3031.44311523')
        self.assertEqual(band_info.minimumValue, '1358.33459473')
        self.assertEqual(band_info.name, 'Band_1')
        # updating of bandinformation using a name that does not exist (band-name) should fail
        band_data = {
            'original_band_name': 'band-name',
            'name': 'Band_1',
            'variableName': 'digital elevation',
            'variableUnit': 'meter',
            'method': 'this is method',
            'comment': 'this is comment',
            'maximumValue': 1000,
            'minimumValue': 0,
            'noDataValue': -9999
        }

        metadata.append({'bandinformation': band_data})
        with self.assertRaises(ValidationError):
            self.resRaster.metadata.update(metadata)
        self.assertEqual(
            self.resRaster.metadata.bandInformations.all().count(), 1)
        # updating of bandinformation using a valid band lookup name (Band_1) should be successful
        band_data = {
            'original_band_name': 'Band_1',
            'name': 'Band_2',
            'variableName': 'digital elevation',
            'variableUnit': 'meter',
            'method': 'this is method',
            'comment': 'this is comment',
            'maximumValue': 1000,
            'minimumValue': 0,
            'noDataValue': -9999
        }
        del metadata[:]
        metadata.append({'bandinformation': band_data})
        self.resRaster.metadata.update(metadata)
        self.assertEqual(
            self.resRaster.metadata.bandInformations.all().count(), 1)
        band_info = self.resRaster.metadata.bandInformations.first()
        self.assertEqual(band_info.name, 'Band_2')
        self.assertEqual(band_info.variableName, 'digital elevation')
        self.assertEqual(band_info.variableUnit, 'meter')
        self.assertEqual(band_info.method, 'this is method')
        self.assertEqual(band_info.comment, 'this is comment')
        self.assertEqual(band_info.maximumValue, '1000')
        self.assertEqual(band_info.minimumValue, '0')
        self.assertEqual(band_info.noDataValue, '-9999')

        # test updating only one attribute of bandinformation
        band_data = {'original_band_name': 'Band_2', 'name': 'Band_1'}
        del metadata[:]
        metadata.append({'bandinformation': band_data})
        self.resRaster.metadata.update(metadata)
        self.assertEqual(
            self.resRaster.metadata.bandInformations.all().count(), 1)
        band_info = self.resRaster.metadata.bandInformations.first()
        self.assertEqual(band_info.name, 'Band_1')

        # test updating both core and resource specific metadata
        # there should be 1 creator
        self.assertEqual(self.resRaster.metadata.creators.all().count(), 1)
        # there should be no contributor
        self.assertEqual(self.resRaster.metadata.contributors.all().count(), 0)
        del metadata[:]
        metadata.append({'creator': {'name': 'creator one'}})
        metadata.append({'creator': {'name': 'creator two'}})
        metadata.append({'contributor': {'name': 'contributor one'}})
        metadata.append({'contributor': {'name': 'contributor two'}})
        band_data = {'original_band_name': 'Band_1', 'name': 'Band_3'}
        metadata.append({'bandinformation': band_data})
        self.resRaster.metadata.update(metadata)
        band_info = self.resRaster.metadata.bandInformations.first()
        self.assertEqual(band_info.name, 'Band_3')
        # there should be 2 creators
        self.assertEqual(self.resRaster.metadata.creators.all().count(), 2)
        # there should be 2 contributor
        self.assertEqual(self.resRaster.metadata.contributors.all().count(), 2)

        self.resRaster.delete()
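
For reference, the update() payload exercised above is a list of single-key
dicts, each keyed by the name of the metadata element being updated:

metadata = [
    {'creator': {'name': 'creator one'}},
    {'contributor': {'name': 'contributor one'}},
    {'bandinformation': {'original_band_name': 'Band_1', 'name': 'Band_3'}},
]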
Example #46
0
def migrate_tif_file(apps, schema_editor):
    log = logging.getLogger()
    istorage = IrodsStorage()

    copy_res_fail = []
    vrt_update_fail = []
    vrt_update_success = []
    meta_update_fail = []
    meta_update_success = []

    # start migration for each raster resource that has raster files
    for res in RasterResource.objects.all():
        if res.files.all():
            # copy all the resource files to temp dir
            try:
                temp_dir = tempfile.mkdtemp()
                for res_file in res.files.all():
                    shutil.copy(
                        res_file.resource_file.file.name,
                        os.path.join(
                            temp_dir,
                            os.path.basename(res_file.resource_file.name)))

                vrt_file_path = [
                    os.path.join(temp_dir, f) for f in os.listdir(temp_dir)
                    if '.vrt' == f[-4:]
                ].pop()

            except Exception as e:
                log.exception(str(e))
                copy_res_fail.append('{}:{}'.format(res.short_id,
                                                    res.metadata.title.value))
                continue

            # update vrt file if the raster resource that has a single tif file
            try:
                if len(os.listdir(temp_dir)) == 2:
                    # create new vrt file
                    tif_file_path = [
                        os.path.join(temp_dir, f) for f in os.listdir(temp_dir)
                        if '.tif' == f[-4:]
                    ].pop()
                    with open(os.devnull, 'w') as fp:
                        subprocess.Popen(
                            [
                                'gdal_translate', '-of', 'VRT', tif_file_path,
                                vrt_file_path
                            ],
                            stdout=fp,
                            stderr=fp).wait()  # remember to add .wait()

                    # modify the vrt file contents
                    tree = ET.parse(vrt_file_path)
                    root = tree.getroot()
                    for element in root.iter('SourceFilename'):
                        element.attrib['relativeToVRT'] = '1'
                    tree.write(vrt_file_path)

                    # delete vrt res file
                    for f in res.files.all():
                        if 'vrt' == f.resource_file.name[-3:]:
                            f.resource_file.delete()
                            f.delete()

                    # add new vrt file to resource
                    new_file = UploadedFile(
                        file=open(vrt_file_path, 'r'),
                        name=os.path.basename(vrt_file_path))
                    hydroshare.add_resource_files(res.short_id, new_file)

                    # update the bag
                    bag_name = 'bags/{res_id}.zip'.format(res_id=res.short_id)
                    if istorage.exists(bag_name):
                        # delete the resource bag as the old bag is not valid
                        istorage.delete(bag_name)
                    resource_modified(res, res.creator)
                    vrt_update_success.append('{}:{}'.format(
                        res.short_id, res.metadata.title.value))

            except Exception as e:
                log.exception(str(e))
                vrt_update_fail.append('{}:{}'.format(
                    res.short_id, res.metadata.title.value))

            # update the metadata for the band information of all the raster resources
            try:
                meta_updated = False

                # extract meta
                ori_dir = os.getcwd()
                os.chdir(temp_dir)
                res_md_dict = raster_meta_extract.get_raster_meta_dict(
                    vrt_file_path)
                os.chdir(ori_dir)
                shutil.rmtree(temp_dir)

                # update band information metadata in django
                if res_md_dict['band_info']:
                    for i, band_meta in list(res_md_dict['band_info'].items()):
                        band_obj = res.metadata.bandInformation.filter(
                            name='Band_{}'.format(i)).first()
                        if band_obj:
                            res.metadata.update_element(
                                'bandInformation',
                                band_obj.id,
                                maximumValue=band_meta['maximumValue'],
                                minimumValue=band_meta['minimumValue'],
                                noDataValue=band_meta['noDataValue'],
                            )
                            meta_updated = True

                # update the bag if meta is updated
                if meta_updated:
                    bag_name = 'bags/{res_id}.zip'.format(res_id=res.short_id)
                    if istorage.exists(bag_name):
                        # delete the resource bag as the old bag is not valid
                        istorage.delete(bag_name)
                    resource_modified(res, res.creator)
                    meta_update_success.append('{}:{}'.format(
                        res.short_id, res.metadata.title.value))

            except Exception as e:
                log.exception(str(e))
                meta_update_fail.append('{}:{}'.format(
                    res.short_id, res.metadata.title.value))

    # Print migration results
    print('Copy resource to temp folder failure: Number: {} List: {}'.format(
        len(copy_res_fail), copy_res_fail))
    print('VRT file update success: Number: {} List: {}'.format(
        len(vrt_update_success), vrt_update_success))
    print('VRT file update fail: Number: {} List: {}'.format(
        len(vrt_update_fail), vrt_update_fail))
    print('Meta update success: Number: {} List: {}'.format(
        len(meta_update_success), meta_update_success))
    print('Meta update fail: Number: {} List: {}'.format(
        len(meta_update_fail), meta_update_fail))
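
A sketch of the same gdal_translate call using subprocess.run (Python 3.5+),
which raises on a non-zero exit instead of silently continuing:

import subprocess

subprocess.run(
    ['gdal_translate', '-of', 'VRT', tif_file_path, vrt_file_path],
    stdout=subprocess.DEVNULL,
    stderr=subprocess.DEVNULL,
    check=True,  # raises CalledProcessError if gdal_translate fails
)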
Example #47
0
def get_minecraft_avatar(minecraft_user, geometry_string, force=True):
    """
    This method uses the sorl-thumbnail cache backend to
    prevent images from being downloaded every time.
    It requires an username.
    """
    avatar_file = UploadedFile(
        file=StringIO(),
        name='%s/%s.png' % (settings.MEDIA_ROOT, tokey(minecraft_user)))

    try:
        # Check if the avatar is cached
        thumbnail = get_thumbnail(
            avatar_file, '100x100', quality=100, format='PNG')
    except IOError:
        download_thumbnail = True

    else:
        is_dummy = not hasattr(thumbnail, 'storage')
        if not is_dummy and not thumbnail.storage.exists(thumbnail.name):
            # It seems we have the avatar on cache (kvstore)
            # but it's not present on the storage
            download_thumbnail = True
            # Force remove thumbnail from kvstore
            sorl_default.kvstore.delete(thumbnail)
            # Log
            logger.warning('Avatar cache mismatch: %s (resetting)' % (
                minecraft_user,))
        else:
            logger.debug('Avatar fetched from cache: %s' % minecraft_user)
            download_thumbnail = False

    if download_thumbnail:
        logger.debug('Downloading avatar: %s' % minecraft_user)

        # Otherwise download avatar
        thumbnail = None

        try:
            skin_bin = requests.get(
                'http://s3.amazonaws.com/MinecraftSkins/%s.png' % (
                    minecraft_user
                )
            ).content
        except ConnectionError:
            return None

        try:
            skin = Image.open(StringIO(skin_bin)).convert('RGBA')
        except IOError:
            # Image not found or some permission error with S3
            if minecraft_user != 'char':
                if not force:
                    return None
                # Return default user avatar
                return settings.STATIC_URL + settings.DEFAULT_USER_AVATAR
        else:
            face = skin.crop((8, 8, 16, 16))
            accessories = skin.crop((40, 8, 48, 16))

            r, g, b, a = accessories.split()

            accessories = Image.merge('RGB', (r, g, b))
            mask = Image.merge('L', (a,))

            face.paste(accessories, (0, 0), mask)

            avatar = face.resize((135, 135))

            avatar.save(avatar_file, 'PNG')

            avatar_file.seek(0)

            # Save through sorl backend
            thumbnail = get_thumbnail(avatar_file, '100x100',
                                      quality=100, format='PNG')

    # Use the cached file
    return get_thumbnail(thumbnail, geometry_string,
                         quality=100, format='PNG').url
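
The skin download above has no timeout and ignores HTTP error codes. A hedged
sketch of a more defensive fetch (same URL as the original):

resp = requests.get(
    'http://s3.amazonaws.com/MinecraftSkins/%s.png' % minecraft_user,
    timeout=5,             # don't hang the request thread indefinitely
)
resp.raise_for_status()    # surface 4xx/5xx instead of parsing an error page
skin_bin = resp.content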
Example #48
0
    def handle_noargs(self,
                      users=None,
                      review_requests=None,
                      diffs=None,
                      reviews=None,
                      diff_comments=None,
                      password=None,
                      verbosity=NORMAL,
                      **options):
        num_of_requests = None
        num_of_diffs = None
        num_of_reviews = None
        num_of_diff_comments = None
        random.seed()

        if review_requests:
            num_of_requests = self.parseCommand("review_requests",
                                                review_requests)

            # Setup repository.
            repo_dir = os.path.abspath(
                os.path.join(sys.argv[0], "..", "scmtools", "testdata",
                             "git_repo"))

            # Throw exception on error so transaction reverts.
            if not os.path.exists(repo_dir):
                raise CommandError("No path to the repository")

            self.repository = Repository.objects.create(
                name="Test Repository",
                path=repo_dir,
                tool=Tool.objects.get(name="Git"))

        if diffs:
            num_of_diffs = self.parseCommand("diffs", diffs)

            # Create the diff directory locations.
            diff_dir_tmp = os.path.abspath(
                os.path.join(sys.argv[0], "..", "reviews", "management",
                             "commands", "diffs"))

            # Throw exception on error so transaction reverts.
            if not os.path.exists(diff_dir_tmp):
                raise CommandError("Diff dir does not exist")

            diff_dir = diff_dir_tmp + '/'  # Add trailing slash.

            # Get a list of the appropriate files.
            files = [f for f in os.listdir(diff_dir) if f.endswith('.diff')]

            # Check for any diffs in the files.
            if len(files) == 0:
                raise CommandError("No diff files in this directory")

        if reviews:
            num_of_reviews = self.parseCommand("reviews", reviews)

        if diff_comments:
            num_of_diff_comments = self.parseCommand("diff-comments",
                                                     diff_comments)

        # Users is required for any other operation.
        if not users:
            raise CommandError("At least one user must be added")

        # Start adding data to the database.
        for i in range(1, users + 1):
            new_user = User.objects.create(
                username=self.randUsername(),  # Avoids having to flush db.
                first_name=random.choice(NAMES),
                last_name=random.choice(NAMES),
                email="*****@*****.**",
                is_staff=False,
                is_active=True,
                is_superuser=False)

            if password:
                new_user.set_password(password)
                new_user.save()
            else:
                new_user.set_password("test1")
                new_user.save()

            Profile.objects.create(user=new_user,
                                   first_time_setup_done=True,
                                   collapsed_diffs=True,
                                   wordwrapped_diffs=True,
                                   syntax_highlighting=True,
                                   show_submitted=True)

            # Review Requests.
            req_val = self.pickRandomValue(num_of_requests)

            if int(verbosity) > NORMAL:
                print "For user %s:%s" % (i, new_user.username)
                print "============================="

            for j in range(0, req_val):
                if int(verbosity) > NORMAL:
                    print "Request #%s:" % j

                review_request = ReviewRequest.objects.create(new_user, None)
                review_request.public = True
                review_request.summary = self.lorem_ipsum("summary")
                review_request.description = self.lorem_ipsum("description")
                review_request.shipit_count = 0
                review_request.repository = self.repository
                # Set the targeted reviewer to superuser or 1st defined.
                if j == 0:
                    review_request.target_people.add(User.objects.get(pk=1))
                review_request.save()

                # Add the diffs if any to add.
                diff_val = self.pickRandomValue(num_of_diffs)

                # If adding diffs add history.
                if diff_val > 0:
                    diffset_history = DiffSetHistory.objects.create(
                        name='testDiffFile' + str(i))
                    diffset_history.save()

                # Won't execute if diff_val is 0, ie: no diffs requested.
                for k in range(0, diff_val):
                    if int(verbosity) > NORMAL:
                        print "%s:\tDiff #%s" % (i, k)

                    random_number = random.randint(0, len(files) - 1)
                    file_to_open = diff_dir + files[random_number]
                    f = UploadedFile(open(file_to_open, 'r'))
                    form = UploadDiffForm(review_request.repository, f)
                    cur_diff = form.create(f, None, diffset_history)
                    review_request.diffset_history = diffset_history
                    review_request.save()
                    review_request.publish(new_user)
                    f.close()

                    # Add the reviews if any.
                    review_val = self.pickRandomValue(num_of_reviews)

                    for l in range(0, review_val):
                        if int(verbosity) > NORMAL:
                            print "%s:%s:\t\tReview #%s:" % (i, j, l)

                        reviews = Review.objects.create(
                            review_request=review_request, user=new_user)

                        reviews.publish(new_user)

                        # Add comments if any.
                        comment_val = self.pickRandomValue(
                            num_of_diff_comments)

                        for m in range(0, comment_val):
                            if int(verbosity) > NORMAL:
                                print "%s:%s:\t\t\tComments #%s" % (i, j, m)

                            if m == 0:
                                file_diff = cur_diff.files.order_by('id')[0]

                            # Choose random lines to comment.
                            # Max lines: should be mod'd in future to read
                            # diff.
                            max_lines = 220
                            first_line = random.randrange(1, max_lines - 1)
                            remain_lines = max_lines - first_line
                            num_lines = random.randrange(1, remain_lines)

                            diff_comment = Comment.objects.create(
                                filediff=file_diff,
                                text="comment number %s" % (m + 1),
                                first_line=first_line,
                                num_lines=num_lines)

                            review_request.publish(new_user)

                            reviews.comments.add(diff_comment)
                            reviews.save()
                            reviews.publish(new_user)

                            db.reset_queries()

                        # No comments, so have previous layer clear queries.
                        if comment_val == 0:
                            db.reset_queries()

                    if review_val == 0:
                        db.reset_queries()

                if diff_val == 0:
                    db.reset_queries()

            if req_val == 0:
                db.reset_queries()

            # Generate output as users & data is created.
            if req_val != 0:
                print "user %s created with %s requests" % (new_user.username,
                                                            req_val)
            else:
                print "user %s created successfully" % new_user.username
Example #49
0
    def handle_noargs(self, users=None, review_requests=None, diffs=None,
                      reviews=None, diff_comments=None, password=None,
                      verbosity=NORMAL, **options):
        num_of_requests = None
        num_of_diffs = None
        num_of_reviews = None
        num_of_diff_comments = None
        random.seed()

        if review_requests:
            num_of_requests = self.parseCommand("review_requests",
                                                review_requests)

            # Setup repository.
            repo_dir = os.path.abspath(
                os.path.join(sys.argv[0], "..", "scmtools", "testdata",
                             "git_repo"))

            # Throw exception on error so transaction reverts.
            if not os.path.exists(repo_dir):
                raise CommandError("No path to the repository")

            self.repository = Repository.objects.create(
                name="Test Repository", path=repo_dir,
                tool=Tool.objects.get(name="Git"))

        if diffs:
            num_of_diffs = self.parseCommand("diffs", diffs)

            # Create the diff directory locations.
            diff_dir_tmp = os.path.abspath(
                os.path.join(sys.argv[0], "..", "reviews", "management",
                             "commands", "diffs"))

            # Throw exception on error so transaction reverts.
            if not os.path.exists(diff_dir_tmp):
                raise CommandError("Diff dir does not exist")

            diff_dir = diff_dir_tmp + '/'  # Add trailing slash.

            # Get a list of the appropriate files.
            files = [f for f in os.listdir(diff_dir)
                     if f.endswith('.diff')]

            # Check for any diffs in the files.
            if len(files) == 0:
                raise CommandError("No diff files in this directory")

        if reviews:
            num_of_reviews = self.parseCommand("reviews", reviews)

        if diff_comments:
            num_of_diff_comments = self.parseCommand("diff-comments",
                                                     diff_comments)

        # Users is required for any other operation.
        if not users:
            raise CommandError("At least one user must be added")

        # Start adding data to the database.
        for i in range(1, users + 1):
            new_user = User.objects.create(
                username=self.randUsername(),  # Avoids having to flush db.
                first_name=random.choice(NAMES),
                last_name=random.choice(NAMES),
                email="*****@*****.**",
                is_staff=False,
                is_active=True,
                is_superuser=False)

            if password:
                new_user.set_password(password)
                new_user.save()
            else:
                new_user.set_password("test1")
                new_user.save()

            Profile.objects.create(
                user=new_user,
                first_time_setup_done=True,
                collapsed_diffs=True,
                wordwrapped_diffs=True,
                syntax_highlighting=True,
                show_submitted=True)

            # Review Requests.
            req_val = self.pickRandomValue(num_of_requests)

            if int(verbosity) > NORMAL:
                print "For user %s:%s" % (i, new_user.username)
                print "============================="

            for j in range(0, req_val):
                if int(verbosity) > NORMAL:
                    print "Request #%s:" % j

                review_request = ReviewRequest.objects.create(new_user, None)
                review_request.public = True
                review_request.summary = self.lorem_ipsum("summary")
                review_request.description = self.lorem_ipsum("description")
                review_request.shipit_count = 0
                review_request.repository = self.repository
                # Set the targeted reviewer to superuser or 1st defined.
                if j == 0:
                    review_request.target_people.add(User.objects.get(pk=1))
                review_request.save()

                # Add the diffs if any to add.
                diff_val = self.pickRandomValue(num_of_diffs)

                # If adding diffs add history.
                if diff_val > 0:
                    diffset_history = DiffSetHistory.objects.create(
                        name='testDiffFile' + str(i))
                    diffset_history.save()

                # Won't execute if diff_val is 0, ie: no diffs requested.
                for k in range(0, diff_val):
                    if int(verbosity) > NORMAL:
                        print "%s:\tDiff #%s" % (i, k)

                    random_number = random.randint(0, len(files) - 1)
                    file_to_open = diff_dir + files[random_number]
                    f = UploadedFile(open(file_to_open, 'r'))
                    form = UploadDiffForm(review_request.repository, f)
                    cur_diff = form.create(f, None, diffset_history)
                    review_request.diffset_history = diffset_history
                    review_request.save()
                    review_request.publish(new_user)
                    f.close()

                    # Add the reviews if any.
                    review_val = self.pickRandomValue(num_of_reviews)

                    for l in range(0, review_val):
                        if int(verbosity) > NORMAL:
                            print "%s:%s:\t\tReview #%s:" % (i, j, l)

                        reviews = Review.objects.create(
                            review_request=review_request,
                            user=new_user)

                        reviews.publish(new_user)

                        # Add comments if any.
                        comment_val = self.pickRandomValue(
                            num_of_diff_comments)

                        for m in range(0, comment_val):
                            if int(verbosity) > NORMAL:
                                print "%s:%s:\t\t\tComments #%s" % (i, j, m)

                            if m == 0:
                                file_diff = cur_diff.files.order_by('id')[0]

                            # Choose random lines to comment.
                            # Max lines: should be mod'd in future to read
                            # diff.
                            max_lines = 220
                            first_line = random.randrange(1, max_lines - 1)
                            remain_lines = max_lines - first_line
                            num_lines = random.randrange(1, remain_lines)

                            diff_comment = Comment.objects.create(
                                filediff=file_diff,
                                text="comment number %s" % (m + 1),
                                first_line=first_line,
                                num_lines=num_lines)

                            review_request.publish(new_user)

                            reviews.comments.add(diff_comment)
                            reviews.save()
                            reviews.publish(new_user)

                            db.reset_queries()

                        # No comments, so have previous layer clear queries.
                        if comment_val == 0:
                            db.reset_queries()

                    if review_val == 0:
                        db.reset_queries()

                if diff_val == 0:
                    db.reset_queries()

            if req_val == 0:
                db.reset_queries()

            # Generate output as users & data is created.
            if req_val != 0:
                print "user %s created with %s requests" % (
                    new_user.username, req_val)
            else:
                print "user %s created successfully" % new_user.username
Example #50
0
def multiuploader(request, patient_id=None):
    """
    Main Multiuploader module.
    Parses data from jQuery plugin and makes database changes.
    """
    if request.method == 'POST':
        log.info('received POST to main multiuploader view')
        if not request.FILES:
            return HttpResponseBadRequest('Must have files attached!')

        # getting file data for further manipulation
        file = request.FILES[u'files[]']

        wrapped_file = UploadedFile(file)
        filename = wrapped_file.name
        file_size = wrapped_file.file.size
        log.info('Got file: "%s"' % str(filename))
        log.info('Content type: "%s"' % file.content_type)

        # writing the file manually into the model
        # because we don't need a form of any type.
        print request.POST
        if patient_id=='new':
            print wrapped_file.read()
            return HttpResponse(status=204)
        
        image = MultiuploaderImage()
        patient = Patient.objects.get(pk=patient_id)
        image.filename = str(filename)
        image.image = file
        image.key_data = image.key_generate
        patient.multiuploaderimage_set.add(image)
        log.info('File saving done')

        # getting thumbnail URL using sorl-thumbnail
        if 'image' in file.content_type.lower():
            im = get_thumbnail(image, "80x80", quality=50)
            thumb_url = im.url
        else:
            thumb_url = ''

        #settings imports
        try:
            file_delete_url = settings.MULTI_FILE_DELETE_URL+'/'
            file_url = settings.MULTI_IMAGE_URL+'/'+image.key_data+'/'
        except AttributeError:
            file_delete_url = 'multi_delete/'
            file_url = 'multi_image/'+image.key_data+'/'

        """
        is actually: [{"name": "Screenshot from 2012-11-14 16:17:46.png", "url": "multi_image/95925526541943247735000327303075602114185579370918344597903504067450818566531/", "thumbnail_url": "/media/cache/f8/bd/f8bd83aadeba651ff9c040bb394ce117.jpg", "delete_type": "POST", "delete_url": "multi_delete/7/", "size": 38520}]
        should be:   {"files":[{"url":"http://jquerey-file-upload.appspot.com/AMIfv9734HSTDGd3tIybbnKVru--IjhjULKvNcIGUL2lvfqA93RNCAizDbvP-RQJNbh-N9m8UXsk-90jFFYSp8TlbZYhEcNN6Vb9HzQVQtdmF83H6sE_XkdnlI2V8lHX5V3Y4AamdX6VMbAt9sNWNx2BVGzhTfAYkRLYmRE1VzzWSe9C8c8Fu8g/Screenshot%20from%202012-11-14%2016%3A17%3A46.png","thumbnail_url":"http://lh5.ggpht.com/fcjVNT6qUGoMDtqqaNDNtU4mghy34qlzfj2GujikLgC7Nj5Bs4LUT_DWG_Q8OWujqvYHsKbeQ9pkvoAW4WiaubmqQxobIPyt=s80","name":"Screenshot from 2012-11-14 16:17:46.png","type":"image/png","size":38520,"delete_url":"http://jquery-file-upload.appspot.com/AMIfv9734HSTDGd3tIybbnKVru--IjhjULKvNcIGUL2lvfqA93RNCAizDbvP-RQJNbh-N9m8UXsk-90jFFYSp8TlbZYhEcNN6Vb9HzQVQtdmF83H6sE_XkdnlI2V8lHX5V3Y4AamdX6VMbAt9sNWNx2BVGzhTfAYkRLYmRE1VzzWSe9C8c8Fu8g/Screenshot%20from%202012-11-14%2016%3A17%3A46.png?delete=true","delete_type":"DELETE"}]}
        """

        #generating json response array
        result = {
            'files': [ {"name":filename, 
                       "size":file_size, 
                       "url":file_url, 
                       "thumbnail_url":thumb_url,
                       "delete_url":file_delete_url+str(image.pk)+'/', 
                       "delete_type":"POST",}
                    ]
        }
        response_data = simplejson.dumps(result)
        
        # checking for json data type
        # big thanks to Guy Shapiro
        if "application/json" in request.META['HTTP_ACCEPT']:
            mimetype = 'application/json'
        else:
            mimetype = 'text/plain'
        return HttpResponse(response_data, mimetype=mimetype)
    else:  # GET
        return HttpResponse('Only POST accepted')
Example #51
0
def get_form(request, form_class, field_order, bound=False):
    """
    Generic function. Used for all submission types. Specify the ``form_class``
    that's given in ``forms.py``. The ``field_order`` is a list of strings that
    indicates the linear order of the fields in the form. A ``bound`` form
    is a function of the object assigned to ``bound`` (see below). An unbound
    form is simply an empty form.
    """
    if bound:
        if isinstance(bound, models.Revision):
            tags = ','.join([str(tag) for tag in bound.tags.all()])
            fields = {'item_url': bound.item_url,
                       'title': bound.title,
                       'description': bound.description,
                       'sub_tags': tags,
                       'snippet_code': bound.item_code,
                       'sub_type': 'snippet',
                       'sub_license': bound.sub_license_id,
                       }
            if bound.entry.sub_type == 'link':
                fields['sub_type'] = 'link'
            elif bound.entry.sub_type == 'package':
                fields['sub_type'] = 'package'
            elif bound.entry.sub_type == 'code':
                fields['sub_type'] = 'snippet'

            form_output = form_class(fields)
        else:
            if request.POST['sub_type'] == 'package':
                # Create a fake "UploadedFile" object, so the user can resume
                # editing or finish their submission, without being told
                # they have to reenter this field.
                zip_hash = request.POST.get('package_hash', '')
                zip_file = models.ZipFile.objects.filter(zip_hash=zip_hash)
                if zip_file:
                    zip_name = zip_file[0].raw_zip_file.name
                    uploaded_file = UploadedFile(zip_name, name=zip_name,
                                        content_type='application/zip',
                                        size=zip_file[0].raw_zip_file.size)
                    uploaded_file.skip_validation = True # see ``forms.py``
                    request.FILES['package_file'] = uploaded_file
            form_output = form_class(request.POST, request.FILES)

            if request.POST['sub_type'] == 'package' and zip_file:
                form_output.fields['package_file'].initial = uploaded_file

    else:
        form_output = form_class()

    # Rearrange the form order
    form_output.fields.keyOrder = field_order
    index = 1
    for field_name, field in form_output.fields.iteritems():
        field.widget.attrs['tabindex'] = str(index)
        index += 1

    if request.user.is_authenticated():
        # Email field not required for signed-in users
        form_output.fields.pop('email')

    return form_output
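
The skip_validation flag set above is checked in this project's forms.py (not
shown). A hypothetical sketch of a clean method honoring it, with validate_zip
standing in for whatever real validation the form performs:

def clean_package_file(self):
    f = self.cleaned_data['package_file']
    if getattr(f, 'skip_validation', False):
        # The zip was already validated when first uploaded (matched by hash).
        return f
    return validate_zip(f)  # hypothetical validator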
Example #52
0
    def upload_file_view(self, request):
        try:
            if not self.has_add_permission(request):
                raise PermissionDenied

            FILE_PARAM_NAME = 'qqfile'
            self.init_parent_folder(request)

            if request.method == 'POST':

                if request.is_ajax() and request.GET.get(
                        FILE_PARAM_NAME, None):
                    from django.core.files.base import ContentFile
                    from django.core.files.uploadedfile import UploadedFile
                    content_file = ContentFile(request.raw_post_data)
                    uploaded_file = UploadedFile(
                        content_file, request.GET.get(FILE_PARAM_NAME), None,
                        content_file.size)
                    form = UploadForm(request.POST, {'file': uploaded_file})
                else:
                    form = UploadForm(request.POST, request.FILES)

                if form.is_valid():
                    node = FileNode(file=form.cleaned_data['file'],
                                    node_type=FileNode.FILE)
                    parent_folder = self.get_parent_folder(request)
                    if not parent_folder.is_top_node():
                        node.parent = parent_folder
                    self.save_model(request, node, None, False)
                    # Respond with 'ok' for the client to verify that the upload
                    # was successful, since sometimes a failed request would not
                    # result in an HTTP error and would look like a successful
                    # upload. For instance: when requesting the admin view
                    # without authentication, there is a redirect to the login
                    # form, which to SWFUpload looks like a successful upload
                    # request.
                    if request.is_ajax():
                        return HttpResponse('{"success": true}',
                                            mimetype="application/json")
                    else:
                        messages.info(
                            request,
                            _('Successfully uploaded file %s.') % node.name)
                        return HttpResponseRedirect(
                            reverse('admin:media_tree_filenode_changelist'))
                else:
                    # invalid form data
                    if request.is_ajax():
                        return HttpResponse('{"error": "%s"}' % ' '.join([
                            item for sublist in form.errors.values()
                            for item in sublist
                        ]),
                                            mimetype="application/json")

            # Form is rendered for troubleshooting SWFUpload. If this form
            # works, the problem is not server-side.
            if not settings.DEBUG:
                raise ViewDoesNotExist
            if request.method == 'GET':
                form = UploadForm()
            return render_to_response(
                'admin/media_tree/filenode/upload_form.html', {'form': form})

        except Exception as e:
            if request.is_ajax():
                return HttpResponse('{"error": "%s"}' %
                                    ugettext('Server Error'),
                                    mimetype="application/json")
            else:
                raise
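
request.raw_post_data was removed in Django 1.6 in favor of request.body; on a
newer Django the same construction would read (a sketch, behavior otherwise
unchanged):

content_file = ContentFile(request.body)
uploaded_file = UploadedFile(
    content_file, request.GET.get(FILE_PARAM_NAME), None,
    content_file.size)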
Example #53
0
    def set_file_type(cls, resource, file_id, user):
        """
        Sets a NetCDF (.nc) resource file to NetCDF file type.
        :param resource: an instance of resource type CompositeResource
        :param file_id: id of the resource file to be set as NetCDF file type
        :param user: user who is setting the file type
        :return:
        """

        # had to import it here to avoid import loop
        from hs_core.views.utils import create_folder, remove_folder

        log = logging.getLogger()

        # get the file from irods
        res_file = utils.get_resource_file_by_id(resource, file_id)

        if res_file is None:
            raise ValidationError("File not found.")

        if res_file.extension != '.nc':
            raise ValidationError("Not a NetCDF file.")

        # base file name (no path included)
        file_name = res_file.file_name
        # file name without the extension
        nc_file_name = file_name[:-len(res_file.extension)]

        resource_metadata = []
        file_type_metadata = []
        files_to_add_to_resource = []
        upload_folder = ''
        if res_file.has_generic_logical_file:
            # get the file from irods to temp dir
            temp_file = utils.get_file_from_irods(res_file)
            temp_dir = os.path.dirname(temp_file)
            files_to_add_to_resource.append(temp_file)
            # file validation and metadata extraction
            nc_dataset = nc_utils.get_nc_dataset(temp_file)
            if isinstance(nc_dataset, netCDF4.Dataset):
                # Extract the metadata from netcdf file
                res_dublin_core_meta, res_type_specific_meta = nc_meta.get_nc_meta_dict(
                    temp_file)
                # populate resource_metadata and file_type_metadata lists with extracted metadata
                add_metadata_to_list(resource_metadata, res_dublin_core_meta,
                                     res_type_specific_meta,
                                     file_type_metadata, resource)

                # create the ncdump text file
                dump_file = create_header_info_txt_file(
                    temp_file, nc_file_name)
                files_to_add_to_resource.append(dump_file)
                file_folder = res_file.file_folder
                with transaction.atomic():
                    # create a netcdf logical file object to be associated with
                    # resource files
                    logical_file = cls.create()

                    # by default set the dataset_name attribute of the logical file to the
                    # name of the file selected to set file type unless the extracted metadata
                    # has a value for title
                    dataset_title = res_dublin_core_meta.get('title', None)
                    if dataset_title is not None:
                        logical_file.dataset_name = dataset_title
                    else:
                        logical_file.dataset_name = nc_file_name
                    logical_file.save()

                    try:
                        # create a folder for the netcdf file type using the base file
                        # name as the name for the new folder
                        new_folder_path = cls.compute_file_type_folder(
                            resource, file_folder, nc_file_name)

                        create_folder(resource.short_id, new_folder_path)
                        log.info("Folder created:{}".format(new_folder_path))

                        new_folder_name = new_folder_path.split('/')[-1]
                        if file_folder is None:
                            upload_folder = new_folder_name
                        else:
                            upload_folder = os.path.join(
                                file_folder, new_folder_name)
                        # add all new files to the resource
                        for f in files_to_add_to_resource:
                            uploaded_file = UploadedFile(
                                file=open(f, 'rb'), name=os.path.basename(f))
                            # the added resource file will be part of a new generic logical file
                            # by default
                            new_res_file = utils.add_file_to_resource(
                                resource, uploaded_file, folder=upload_folder)

                            # delete the generic logical file object
                            if new_res_file.logical_file is not None:
                                # deleting the file level metadata object will delete the associated
                                # logical file object
                                new_res_file.logical_file.metadata.delete()

                            # make each resource file we added part of the logical file
                            logical_file.add_resource_file(new_res_file)

                        log.info(
                            "NetCDF file type - new files were added to the resource."
                        )

                        # use the extracted metadata to populate resource metadata
                        for element in resource_metadata:
                            # here k is the name of the element
                            # v is a dict of all element attributes/field names and field values
                            k, v = element.items()[0]
                            if k == 'title':
                                # update title element
                                title_element = resource.metadata.title
                                resource.metadata.update_element(
                                    'title', title_element.id, **v)
                            else:
                                resource.metadata.create_element(k, **v)

                        log.info("Resource - metadata was saved to DB")

                        # use the extracted metadata to populate file metadata
                        for element in file_type_metadata:
                            # here k is the name of the element
                            # v is a dict of all element attributes/field names and field values
                            k, v = element.items()[0]
                            if k == 'subject':
                                logical_file.metadata.keywords = v
                                logical_file.metadata.save()
                                # update resource level keywords
                                resource_keywords = [
                                    subject.value.lower() for subject in
                                    resource.metadata.subjects.all()
                                ]
                                for kw in logical_file.metadata.keywords:
                                    if kw.lower() not in resource_keywords:
                                        resource.metadata.create_element(
                                            'subject', value=kw)
                            else:
                                logical_file.metadata.create_element(k, **v)
                        log.info("NetCDF file type - metadata was saved to DB")
                        # set resource to private if logical file is missing required metadata
                        resource.update_public_and_discoverable()
                        # delete the original resource file
                        delete_resource_file(resource.short_id, res_file.id,
                                             user)
                        log.info("Deleted original resource file.")
                    except Exception as ex:
                        msg = "NetCDF file type. Error when setting file type. Error:{}"
                        msg = msg.format(ex.message)
                        log.exception(msg)
                        if upload_folder:
                            # delete any new files uploaded as part of setting file type
                            folder_to_remove = os.path.join(
                                'data', 'contents', upload_folder)
                            remove_folder(user, resource.short_id,
                                          folder_to_remove)
                            log.info("Deleted newly created file type folder")
                        raise ValidationError(msg)
                    finally:
                        # remove temp dir
                        if os.path.isdir(temp_dir):
                            shutil.rmtree(temp_dir)
            else:
                err_msg = "Not a valid NetCDF file. File type file validation failed."
                log.error(err_msg)
                # remove temp dir
                if os.path.isdir(temp_dir):
                    shutil.rmtree(temp_dir)
                raise ValidationError(err_msg)
Example #54
0
    def setUp(self):
        super(TestCopyResource, self).setUp()
        self.group, _ = Group.objects.get_or_create(name='Hydroshare Author')

        # create a user who is the owner of the resource to be copied
        self.owner = hydroshare.create_account('*****@*****.**',
                                               username='******',
                                               first_name='owner_firstname',
                                               last_name='owner_lastname',
                                               superuser=False,
                                               groups=[])

        # create a user who is NOT the owner of the resource to be copied
        self.nonowner = hydroshare.create_account(
            '*****@*****.**',
            username='******',
            first_name='nonowner_firstname',
            last_name='nonowner_lastname',
            superuser=False,
            groups=[])

        # create a generic resource
        self.res_generic = hydroshare.create_resource(
            resource_type='GenericResource',
            owner=self.owner,
            title='Test Generic Resource')

        test_file1 = open('test1.txt', 'w')
        test_file1.write("Test text file in test1.txt")
        test_file1.close()
        test_file2 = open('test2.txt', 'w')
        test_file2.write("Test text file in test2.txt")
        test_file2.close()
        self.test_file1 = open('test1.txt', 'r')
        self.test_file2 = open('test2.txt', 'r')

        hydroshare.add_resource_files(self.res_generic.short_id,
                                      self.test_file1, self.test_file2)

        # create a generic empty resource with one license that prohibits derivation
        statement = 'This resource is shared under the Creative Commons Attribution-NoDerivs CC ' \
                    'BY-ND.'
        url = 'http://creativecommons.org/licenses/by-nd/4.0/'
        metadata = []
        metadata.append({'rights': {'statement': statement, 'url': url}})
        self.res_generic_lic_nd = hydroshare.create_resource(
            resource_type='GenericResource',
            owner=self.owner,
            title='Test Generic Resource',
            metadata=metadata)

        # create a generic empty resource with another license that prohibits derivation
        statement = 'This resource is shared under the Creative Commons ' \
                    'Attribution-NoCommercial-NoDerivs CC BY-NC-ND.'
        url = 'http://creativecommons.org/licenses/by-nc-nd/4.0/'
        metadata = []
        metadata.append({'rights': {'statement': statement, 'url': url}})
        self.res_generic_lic_nc_nd = hydroshare.create_resource(
            resource_type='GenericResource',
            owner=self.owner,
            title='Test Generic Resource',
            metadata=metadata)

        # create a raster resource that represents a specific resource type
        raster_file = 'hs_core/tests/data/cea.tif'
        temp_dir = tempfile.mkdtemp()
        self.temp_raster_file = os.path.join(temp_dir, 'cea.tif')
        shutil.copy(raster_file, self.temp_raster_file)
        self.raster_obj = open(self.temp_raster_file, 'r')
        files = [UploadedFile(file=self.raster_obj, name='cea.tif')]
        self.res_raster = hydroshare.create_resource(
            resource_type='RasterResource',
            owner=self.owner,
            title='Test Raster Resource',
            files=files,
            metadata=[])
        # call the post creation process here for the metadata to be
        # extracted
        utils.resource_post_create_actions(resource=self.res_raster,
                                           user=self.owner,
                                           metadata=[])