Example #1
def task_join_catalogs(job_id):
    """Union of unique rows in two catalogs"""
    job = JoinCatalogs.objects.get(pk=job_id)
    job.job_status = 'started'
    job.save()

    # Fail fast if either input catalog is missing; otherwise the readers
    # below would be left unbound.
    if not default_storage.exists(job.left_table.handle.name):
        raise ValueError('missing catalog: %s' % job.left_table.handle.name)
    if not default_storage.exists(job.right_table.handle.name):
        raise ValueError('missing catalog: %s' % job.right_table.handle.name)
    left = csv.DictReader(default_storage.open(job.left_table.handle.name))
    right = csv.DictReader(default_storage.open(job.right_table.handle.name))
    keys = set(left.fieldnames + right.fieldnames)
    left = list(left)
    right = list(right)

    joined_catalogs, columns = join(left, right, job.fk_field)
    path = os.path.join(BASE_DIR, MEDIA_ROOT, 'catalogs', '{}.csv'.format(job.results_label))
    with open(path, 'w') as fh:
        writer = csv.DictWriter(fh, fieldnames=keys)
        writer.writeheader()
        writer.writerows(joined_catalogs)

    job.completed = 1
    job.results_handle = 'catalogs/{}.csv'.format(job.results_label)

    result = Catalog(
        name=job.results_label,
        handle='catalogs/{}.csv'.format(job.results_label)
    )
    result.save()
    job.complete = True
    job.completed_date = timezone.now()
    job.job_status = 'complete'
    job.save()
    return True
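The `join` helper is not shown in this example. A minimal sketch consistent with the call site above, assuming `fk_field` names the shared key column, might look like:

def join(left, right, fk_field):
    # Hypothetical helper: union of unique rows from two lists of dicts,
    # merged on their fk_field value. Returns (rows, columns) as used above.
    columns = sorted({key for row in left + right for key in row})
    merged = {}
    for row in left + right:
        merged.setdefault(row.get(fk_field), {}).update(row)
    return list(merged.values()), columns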
Example #2
def create_persona_preview_images(src, full_dst, **kw):
    """
    Creates a 680x100 thumbnail used for the Persona preview and
    a 32x32 thumbnail used for search suggestions/detail pages.
    """
    log.info("[1@None] Resizing persona images: %s" % full_dst)
    preview, full = amo.PERSONA_IMAGE_SIZES["header"]
    preview_w, preview_h = preview
    orig_w, orig_h = full
    with storage.open(src) as fp:
        i_orig = i = Image.open(fp)

        # Crop image from the right.
        i = i.crop((orig_w - (preview_w * 2), 0, orig_w, orig_h))

        # Resize preview.
        i = i.resize(preview, Image.ANTIALIAS)
        i.load()
        with storage.open(full_dst[0], "wb") as fp:
            i.save(fp, "png")

        _, icon_size = amo.PERSONA_IMAGE_SIZES["icon"]
        icon_w, icon_h = icon_size

        # Resize icon.
        i = i_orig
        i.load()
        i = i.crop((orig_w - (preview_h * 2), 0, orig_w, orig_h))
        i = i.resize(icon_size, Image.ANTIALIAS)
        i.load()
        with storage.open(full_dst[1], "wb") as fp:
            i.save(fp, "png")
    return True
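The size table unpacked above is not shown. Based on the docstring, it presumably maps names to (preview, full) pairs; a sketch with illustrative full sizes:

# Hypothetical shape of amo.PERSONA_IMAGE_SIZES (full sizes illustrative).
PERSONA_IMAGE_SIZES = {
    "header": ((680, 100), (3000, 200)),  # (preview, full)
    "icon": (None, (32, 32)),             # only the full size is unpacked above
}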
Example #3
def sign_app(src, dest, reviewer=False):
    """
    Generate a manifest and signature and send signature to signing server to
    be signed.
    """
    active_endpoint = _get_endpoint(reviewer)
    timeout = settings.SIGNED_APPS_SERVER_TIMEOUT

    if not active_endpoint:
        _no_sign(src, dest)
        return

    # Extract necessary info from the archive
    try:
        jar = JarExtractor(
            storage.open(src, 'r'), storage.open(dest, 'w'),
            omit_signature_sections=settings.SIGNED_APPS_OMIT_PER_FILE_SIGS)
    except Exception:
        log.error('Archive extraction failed. Bad archive?', exc_info=True)
        raise SigningError('Archive extraction failed. Bad archive?')

    log.info('App signature contents: %s' % jar.signatures)

    log.info('Calling service: %s' % active_endpoint)
    try:
        with statsd.timer('services.sign.app'):
            response = requests.post(active_endpoint, timeout=timeout,
                                     files={'file': ('zigbert.sf',
                                                     str(jar.signatures))})
    except requests.exceptions.HTTPError as error:
        # Will occur when a 3xx or greater code is returned.
        log.error('Posting to app signing failed: %s, %s' % (
            error.response.status_code, error))
        raise SigningError('Posting to app signing failed: %s, %s' % (
            error.response.status_code, error))
Example #4
def rezip_file(response, pk):
    # An .xpi does not have a directory inside the zip, yet zips from github
    # do, so we'll need to rezip the file before passing it through to the
    # validator.
    loc = os.path.join(user_media_path('addons'), 'temp', uuid.uuid4().hex)
    old_filename = '{}_github_webhook.zip'.format(pk)
    old_path = os.path.join(loc, old_filename)

    with storage.open(old_path, 'wb') as old:
        old.write(response.content)

    new_filename = '{}_github_webhook.xpi'.format(pk)
    new_path = os.path.join(loc, new_filename)

    old_zip = SafeZip(old_path)
    if not old_zip.is_valid:
        # The original had a bare `raise` here, which fails with no active
        # exception; raise something concrete instead.
        raise ValueError('Invalid zip file: {}'.format(old_path))

    with storage.open(new_path, 'w') as new:
        with zipfile.ZipFile(new, 'w') as new_zip:
            for obj in old_zip.filelist:
                # Basically strip off the leading directory.
                stripped = obj.filename.partition('/')[-1]
                if not stripped:
                    continue
                new_zip.writestr(stripped, old_zip.read(obj.filename))

    old_zip.close()
    return new_path
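A hedged usage sketch for `rezip_file` (the URL and pk are illustrative): it takes the `requests` response for a GitHub zipball plus an identifier, and returns the path of the rebuilt .xpi:

response = requests.get('https://api.github.com/repos/owner/repo/zipball')  # illustrative URL
xpi_path = rezip_file(response, pk=1234)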
Example #5
def _uploader(resize_size, final_size):
    img = get_image_path('mozilla.png')
    original_size = (339, 128)

    for rsize, fsize in zip(resize_size, final_size):
        dest_name = os.path.join(settings.ADDON_ICONS_PATH, '1234')
        src = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png',
                                          delete=False)
        # resize_icon removes the original, copy it to a tempfile and use that.
        shutil.copyfile(img, src.name)
        # Sanity check.
        with storage.open(src.name) as fp:
            src_image = Image.open(fp)
            src_image.load()
        eq_(src_image.size, original_size)

        val = tasks.resize_icon(src.name, dest_name, resize_size, locally=True)
        eq_(val, {'icon_hash': 'bb362450'})
        with storage.open('%s-%s.png' % (dest_name, rsize)) as fp:
            dest_image = Image.open(fp)
            dest_image.load()

        # Assert that the width is always identical.
        eq_(dest_image.size[0], fsize[0])
        # Assert that the height can be a wee bit fuzzy.
        assert -1 <= dest_image.size[1] - fsize[1] <= 1, (
            'Got height %d, expected %d' % (
                dest_image.size[1], fsize[1]))

        if os.path.exists(dest_image.filename):
            os.remove(dest_image.filename)
        assert not os.path.exists(dest_image.filename)

    assert not os.path.exists(src.name)
Example #6
    def delete_or_disable_related_content_exclude_addons_with_other_devs(self):
        addon = Addon.objects.latest('pk')
        user = UserProfile.objects.get(pk=55021)
        user.update(picture_type='image/png')
        AddonUser.objects.create(addon=addon, user=user_factory())

        # Create a photo so that we can test deletion.
        with storage.open(user.picture_path, 'wb') as fobj:
            fobj.write(b'test data\n')

        with storage.open(user.picture_path_original, 'wb') as fobj:
            fobj.write(b'original test data\n')

        assert user.addons.count() == 1
        rating = Rating.objects.create(
            user=user, addon=addon, version=addon.current_version)
        Rating.objects.create(
            user=user, addon=addon, version=addon.current_version,
            reply_to=rating)
        Collection.objects.create(author=user)

        # Now that everything is set up, disable/delete related content.
        user.delete_or_disable_related_content()

        # The add-on should not have been touched, it has another dev.
        assert user.addons.exists()
        addon.reload()
        assert addon.status == amo.STATUS_PUBLIC

        assert not user._ratings_all.exists()  # Even replies.
        assert not user.collections.exists()

        assert not storage.exists(user.picture_path)
        assert not storage.exists(user.picture_path_original)
Example #7
    def save_model(self, request, obj, form, change):
        if not change and '_saveasnew' in request.POST:
            path_info = request.META['HTTP_REFERER']
            old_id = path_info.split('/')[-2:-1]    # old image id - list of size 1
            s_file = StaticFile.objects.get(pk=int(old_id[0]))
            path = generate_file_path(None, request.POST['filename'])
            old_path = s_file.static_file
            img_path = 'uploads/' + str(old_path)   # uploads/folder/filename.ext

            if request.POST['crop_coords'] != "":
                crop_coords = [int(c) for c in request.POST['crop_coords'].split(',')]
                file = default_storage.open(old_path)
                img = Image.open(file)
                cropped_img = img.crop((
                    crop_coords[0], crop_coords[1],
                    crop_coords[0] + crop_coords[2],
                    crop_coords[1] + crop_coords[3]))
                cropped_img.save(default_storage.path(path))
                obj.width, obj.height = cropped_img.size
                obj.crop_coords = ''
            else:
                file = default_storage.open(old_path)
                img = Image.open(file)
                img.save(default_storage.path(path))
                obj.width, obj.height = img.size
            obj.static_file.save(path, File(open(default_storage.path(path))), save=True)

            obj.user = request.user
            obj.save()

        else:
            return super(FileAdmin, self).save_model(request, obj, form, change)
Example #8
def dump_project(self, user, project, dump_format):
    try:
        if dump_format == "gzip":
            path = "exports/{}/{}-{}.json.gz".format(project.pk, project.slug, self.request.id)
            with default_storage.open(path, mode="wb") as outfile:
                services.render_project(project, gzip.GzipFile(fileobj=outfile))
        else:
            path = "exports/{}/{}-{}.json".format(project.pk, project.slug, self.request.id)
            with default_storage.open(path, mode="wb") as outfile:
                services.render_project(project, outfile)

        url = default_storage.url(path)

    except Exception:
        # Error
        ctx = {
            "user": user,
            "error_subject": _("Error generating project dump"),
            "error_message": _("Error generating project dump"),
            "project": project,
        }
        email = mail_builder.export_error(user, ctx)
        email.send()
        logger.error("Error generating dump %s (by %s)", project.slug, user, exc_info=sys.exc_info())
    else:
        # Success
        deletion_date = timezone.now() + datetime.timedelta(seconds=settings.EXPORTS_TTL)
        ctx = {"url": url, "project": project, "user": user, "deletion_date": deletion_date}
        email = mail_builder.dump_project(user, ctx)
        email.send()
Example #9
def ik_chsmoel(request):
    if 'smoel' not in request.FILES:
        raise ValueError("Missing `smoel' in FILES")
    if 'id' not in request.POST:
        raise ValueError("Missing `id' in POST")
    user = Es.by_id(request.POST['id'])
    if not request.user.may_upload_smoel_for(request.user):
        raise PermissionDenied
    original = default_storage.open(path.join(settings.SMOELEN_PHOTOS_PATH,
            str(user.name)) + ".orig", 'wb+')
    for chunk in request.FILES['smoel'].chunks():
        original.write(chunk)
    original.seek(0)
    img = Image.open(original)
    if img._getexif() is not None:
        orientation = int(img._getexif().get(274, '1')) # Orientation
        if orientation == 3:
            img = img.transpose(Image.ROTATE_180)
        elif orientation == 6:
            img = img.transpose(Image.ROTATE_270)
        elif orientation == 8:
            img = img.transpose(Image.ROTATE_90)
    width, height = resize_proportional(img.size[0], img.size[1],
                                        settings.SMOELEN_WIDTH*2,
                                        settings.SMOELEN_HEIGHT*2)
    img = img.resize((width, height), Image.ANTIALIAS)
    img.save(default_storage.open(path.join(settings.SMOELEN_PHOTOS_PATH,
            str(user.name)) + ".jpg", 'w'), "JPEG")
    Es.notify_informacie('set_smoel', request.user, entity=user)
    return redirect_to_referer(request)
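The EXIF orientation handling above (tag 274) recurs in Examples #20, #31 and #40. A small reusable helper covering the same three rotation cases, under the same assumptions, could be:

from PIL import Image

def apply_exif_orientation(img):
    # Hypothetical helper: rotate a PIL image per its EXIF Orientation tag.
    exif = img._getexif() if hasattr(img, '_getexif') else None
    if exif is not None:
        orientation = int(exif.get(274, 1))
        if orientation == 3:
            img = img.transpose(Image.ROTATE_180)
        elif orientation == 6:
            img = img.transpose(Image.ROTATE_270)
        elif orientation == 8:
            img = img.transpose(Image.ROTATE_90)
    return img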
Example #10
def thumbnail(file, size="200x200"):
    # defining the size
    x, y = [int(x) for x in size.split("x")]
    # defining the filename and the miniature filename
    filehead, filetail = os.path.split(file.name)
    basename, format = os.path.splitext(filetail)
    miniature = basename + "_" + size + format
    filename = file.name
    miniature_filename = os.path.join(filehead, miniature)
    filehead, filetail = os.path.split(file.url)
    miniature_url = filehead + "/" + miniature

    thumbnail_exist = False
    if default_storage.exists(miniature_filename):
        mt_filename = default_storage.modified_time(filename)
        mt_miniature_filename = default_storage.modified_time(miniature_filename)
        if mt_filename > mt_miniature_filename:
            # remove the miniature
            default_storage.delete(miniature_filename)
        else:
            thumbnail_exist = True

    # if the image wasn't already resized, resize it
    if not thumbnail_exist:
        # image = Image.open(filename)
        image = Image.open(default_storage.open(filename))
        image.thumbnail([x, y], Image.ANTIALIAS)

        f = default_storage.open(miniature_filename, "w")
        image.save(f, image.format, quality=90, optimize=1)
        f.close()

    return miniature_url
Example #11
def create_perseus_zip(ccnode, exercise_data, write_to_path):
    # ZipFile is a context manager, so the explicit try/finally close from
    # the original is redundant and has been removed.
    with zipfile.ZipFile(write_to_path, "w") as zf:
        exercise_context = {
            'exercise': json.dumps(exercise_data, sort_keys=True, indent=4)
        }
        exercise_result = render_to_string('perseus/exercise.json', exercise_context)
        write_to_zipfile("exercise.json", exercise_result, zf)

        for question in ccnode.assessment_items.prefetch_related('files').all().order_by('order'):
            try:
                for image in question.files.filter(preset_id=format_presets.EXERCISE_IMAGE).order_by('checksum'):
                    image_name = "images/{}.{}".format(image.checksum, image.file_format_id)
                    if image_name not in zf.namelist():
                        with storage.open(ccmodels.generate_object_storage_name(image.checksum, str(image)), 'rb') as content:
                            write_to_zipfile(image_name, content.read(), zf)

                for image in question.files.filter(preset_id=format_presets.EXERCISE_GRAPHIE).order_by('checksum'):
                    svg_name = "images/{0}.svg".format(image.original_filename)
                    json_name = "images/{0}-data.json".format(image.original_filename)
                    if svg_name not in zf.namelist() or json_name not in zf.namelist():
                        with storage.open(ccmodels.generate_object_storage_name(image.checksum, str(image)), 'rb') as content:
                            parts = content.read().split(exercises.GRAPHIE_DELIMITER)
                            write_to_zipfile(svg_name, parts[0], zf)
                            write_to_zipfile(json_name, parts[1], zf)
                write_assessment_item(question, zf)
            except Exception as e:
                logging.error("Publishing error: {}".format(str(e)))
Example #12
    def test_doesnt_remove_non_empty_directories(self):
        # Add an extra disabled file. The approved one should move, but not the
        # other, so the directory should be left intact.
        self.disabled_file = file_factory(
            version=self.version, status=amo.STATUS_DISABLED)
        self.addon.update(status=amo.STATUS_APPROVED)
        self.file_.update(status=amo.STATUS_APPROVED)
        with storage.open(self.file_.guarded_file_path, 'wb') as fp:
            fp.write(b'content')
        assert not storage.exists(self.file_.file_path)
        assert storage.exists(self.file_.guarded_file_path)
        with storage.open(self.disabled_file.guarded_file_path, 'wb') as fp:
            fp.write(b'disabled content')
        assert not storage.exists(self.disabled_file.file_path)
        assert storage.exists(self.disabled_file.guarded_file_path)

        cron.unhide_disabled_files()

        assert storage.exists(self.file_.file_path)
        assert not storage.exists(self.file_.guarded_file_path)

        # The disabled file shouldn't have moved.
        assert not storage.exists(self.disabled_file.file_path)
        assert storage.exists(self.disabled_file.guarded_file_path)
        # The directory in guarded file path should still exist.
        assert storage.exists(os.path.dirname(self.file_.guarded_file_path))
Example #13
	def process_image(self):
		# Check the EXIF orientation before doing this!
		with Image.open(storage.open(self.source.name)) as source:
			source.thumbnail((settings.THUMBNAIL_WIDTH, settings.THUMBNAIL_HEIGHT))
			f = BytesIO()
			source.save(f, format='png')
			self.thumbnail.save(self.source.name, ContentFile(f.getvalue()))
			self.save()
			f.close()

		with Image.open(storage.open(self.source.name)) as source:
			source.thumbnail((settings.RATIONALIZED_WIDTH, settings.RATIONALIZED_HEIGHT))
			f = BytesIO()
			source.save(f, format='png')
			self.main.save(self.source.name, ContentFile(f.getvalue()))
			self.save()
			f.close()

		with Image.open(storage.open(self.source.name)) as source:
			try:
				self.date = source._getexif()[306]  # EXIF DateTime tag
			except (TypeError, KeyError):
				pass

#		storage.delete(self.source.name)
		self.source = None
		self.save()
Example #14
def sign_app(src, dest):
    if settings.SIGNED_APPS_SERVER_ACTIVE:
        # At some point this will be implemented, but not now.
        raise NotImplementedError

    if not os.path.exists(settings.SIGNED_APPS_KEY):
        # TODO: blocked on bug 793876
        # This is a temporary copy that will be unsigned and ignores storage
        # etc.
        # raise ValueError('The signed apps key cannot be found.')
        dest_dir = os.path.dirname(dest)
        if not os.path.exists(dest_dir):
            os.makedirs(dest_dir)
        shutil.copy(src, dest)
        return

    # TODO: stop doing this and use the signing server.
    try:
        # Not sure this will work too well on S3.
        xpisign(storage.open(src, 'r'), settings.SIGNED_APPS_KEY,
                storage.open(dest, 'w'), optimize_signatures=True,
                omit_sf_entry_sections=True, omit_created_by=True)
    except Exception:
        # TODO: figure out some likely errors that can occur.
        log.error('Signing failed', exc_info=True)
        raise
Example #15
    def retrieve(self, request, pk, *args, **kwargs):
        throttle = throttling.ImportDumpModeRateThrottle()

        if not throttle.allow_request(request, self):
            self.throttled(request, throttle.wait())

        project = get_object_or_404(self.get_queryset(), pk=pk)
        self.check_permissions(request, 'export_project', project)

        dump_format = request.QUERY_PARAMS.get("dump_format", "plain")

        if settings.CELERY_ENABLED:
            task = tasks.dump_project.delay(request.user, project, dump_format)
            tasks.delete_project_dump.apply_async((project.pk, project.slug, task.id, dump_format),
                                                  countdown=settings.EXPORTS_TTL)
            return response.Accepted({"export_id": task.id})

        if dump_format == "gzip":
            path = "exports/{}/{}-{}.json.gz".format(project.pk, project.slug, uuid.uuid4().hex)
            storage_path = default_storage.path(path)
            with default_storage.open(storage_path, mode="wb") as outfile:
                services.render_project(project, gzip.GzipFile(fileobj=outfile))
        else:
            path = "exports/{}/{}-{}.json".format(project.pk, project.slug, uuid.uuid4().hex)
            storage_path = default_storage.path(path)
            with default_storage.open(storage_path, mode="wb") as outfile:
                services.render_project(project, outfile)

        response_data = {
            "url": default_storage.url(path)
        }
        return response.Ok(response_data)
Example #16
	def generateFile(self):
		wav = default_storage.open('songs/' + str(self.pk) + '.wav', 'wb')

		final = None

		pitches = map(int, self.pitches.split(','))
		durations = map(int, self.durations.split(','))
		for pitch, duration in zip(pitches, durations):
			fn = 'pitches/' + pitchTable[pitch] + '.wav'
			pf = default_storage.open(fn)
			if final is None:
				final = AudioSegment(pf)[0:durationTable[duration]]
			else:
				final += AudioSegment(pf)[0:durationTable[duration]]

		# Copied from the AudioSegment source. Ideally AudioSegment would
		# expose this directly (a getWaveFileContents() method or similar);
		# packing the data into a string and using ContentFile instead of
		# File below would also work.
		wave_data = wave.open(wav, 'wb')
		wave_data.setnchannels(final.channels)
		wave_data.setsampwidth(final.sample_width)
		wave_data.setframerate(final.frame_rate)
		wave_data.setnframes(int(final.frame_count()))
		wave_data.writeframesraw(final._data)
		wave_data.close()
		wav.close()

		wav_rb = default_storage.open('songs/' + str(self.pk) + '.wav', 'rb')
		self.wav.save('songs/' + str(self.pk) + '.wav', File(wav_rb))
		wav_rb.close()
Example #17
    def delete_or_disable_related_content_actually_delete(self):
        addon = Addon.objects.latest('pk')
        user = UserProfile.objects.get(pk=55021)
        user.update(picture_type='image/png')

        # Create a photo so that we can test deletion.
        with storage.open(user.picture_path, 'wb') as fobj:
            fobj.write(b'test data\n')

        with storage.open(user.picture_path_original, 'wb') as fobj:
            fobj.write(b'original test data\n')

        assert user.addons.count() == 1
        rating = Rating.objects.create(
            user=user, addon=addon, version=addon.current_version)
        Rating.objects.create(
            user=user, addon=addon, version=addon.current_version,
            reply_to=rating)
        Collection.objects.create(author=user)

        # Now that everything is set up, delete related content.
        user.delete_or_disable_related_content(delete=True)

        assert not user.addons.exists()

        assert not user._ratings_all.exists()  # Even replies.
        assert not user.collections.exists()

        assert not storage.exists(user.picture_path)
        assert not storage.exists(user.picture_path_original)
Example #18
def get_file(fileorpath):
    """Get a file-like object, whether given a FileUpload object or a path."""
    if hasattr(fileorpath, 'path'):  # FileUpload
        return storage.open(fileorpath.path)
    if hasattr(fileorpath, 'name'):
        return fileorpath
    return storage.open(fileorpath)
Example #19
    def test_upload_sign_error_existing(self, sign_app_mock):
        sign_app_mock.side_effect = SigningError
        langpack = self.create_langpack()
        eq_(LangPack.objects.count(), 1)
        original_uuid = langpack.uuid
        original_file_path = langpack.file_path
        original_file_version = langpack.file_version
        original_version = langpack.version
        # create_langpack() doesn't create a fake file, let's add one.
        storage.open(langpack.file_path, 'w').close()

        upload = self.upload('langpack')
        with self.assertRaises(SigningError):
            LangPack.from_upload(upload, instance=langpack)
        # Test that we didn't delete the upload file
        ok_(storage.exists(upload.path))
        # Test that we didn't delete the existing filename or alter the
        # existing langpack in the database.
        eq_(LangPack.objects.count(), 1)
        langpack.reload()
        eq_(original_uuid, langpack.uuid)
        eq_(langpack.file_path, original_file_path)
        eq_(original_file_version, langpack.file_version)
        eq_(original_version, langpack.version)
        ok_(storage.exists(langpack.file_path))

        # Cleanup
        storage.delete(langpack.file_path)
Example #20
def ik_chsmoel(request):
    if not "smoel" in request.FILES:
        raise ValueError, _("Missende `smoel' in FILES")
    if not "id" in request.POST:
        raise ValueError, _("Missende `id' in POST")
    user = Es.by_id(request.POST["id"])
    if not user.name:
        raise ValueError, _("Entiteit heeft geen naam")
    if not request.user.may_upload_smoel_for(request.user):
        raise PermissionDenied
    original = default_storage.open(path.join(settings.SMOELEN_PHOTOS_PATH, str(user.name)) + ".orig", "wb+")
    for chunk in request.FILES["smoel"].chunks():
        original.write(chunk)
    original.seek(0)
    img = Image.open(original)
    if hasattr(img, "_getexif") and img._getexif() is not None:
        orientation = int(img._getexif().get(274, "1"))  # Orientation
        if orientation == 3:
            img = img.transpose(Image.ROTATE_180)
        elif orientation == 6:
            img = img.transpose(Image.ROTATE_270)
        elif orientation == 8:
            img = img.transpose(Image.ROTATE_90)
    width, height = resize_proportional(
        img.size[0], img.size[1], settings.SMOELEN_WIDTH * 2, settings.SMOELEN_HEIGHT * 2
    )
    img = img.resize((width, height), Image.ANTIALIAS)
    img.save(default_storage.open(path.join(settings.SMOELEN_PHOTOS_PATH, str(user.name)) + ".jpg", "w"), "JPEG")
    Es.notify_informacie("set_smoel", request.user, entity=user)
    return redirect_to_referer(request)
Example #21
 def test_unhide_disabled_files(self):
     f = File.objects.get()
     f.status = mkt.STATUS_PUBLIC
     with storage.open(f.guarded_file_path, 'wb') as fp:
         fp.write(b'some data\n')
     f.unhide_disabled_file()
     assert storage.exists(f.file_path)
     assert storage.open(f.file_path).size
Example #22
def _check_storage():
    filename = 'django-watchman-{}.txt'.format(uuid.uuid4())
    content = 'django-watchman test file'
    path = default_storage.save(filename, ContentFile(content))
    default_storage.size(path)
    default_storage.open(path).read()
    default_storage.delete(path)
    return {"ok": True}
Example #23
 def test_get(self):
     img = base64.b64decode(
         'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABAQMAAAAl21bKAAAAA1BMVEUAAACnej'
         '3aAAAAAXRSTlMAQObYZgAAAApJREFUCNdjYAAAAAIAAeIhvDMAAAAASUVORK5C'
         'YII=')
     path = self.collection.image_path()
     with storage.open(path, 'wb') as fp:
         fp.write(img)
     res = self.client.get(self.url)
     eq_(res['x-sendfile'], path)
Example #24
 def test_unhide_disabled_files(self):
     f = File.objects.get(pk=67442)
     f.status = amo.STATUS_PUBLIC
     with storage.open(f.guarded_file_path, "wb") as fp:
         fp.write("some data\n")
     f.unhide_disabled_file()
     assert storage.exists(f.file_path)
     assert storage.open(f.file_path).size
Example #25
 def test_inject_ids_replace(self, sign):
     zf = zipfile.ZipFile(self.file.file_path, mode='a')
     zf.writestr('META-INF/ids.json', '{}')
     zf.close()
     storage.open(self.file.signed_file_path, 'w').close()  # create an empty signed file
     packaged.sign(self.version.pk, resign=True)
     zf = zipfile.ZipFile(self.file.file_path, mode='r')
     ids_data = zf.read('META-INF/ids.json')
     eq_(sorted(json.loads(ids_data).keys()), ['id', 'version'])
Example #26
def test_put_and_delete():
    f = ContentFile(b'This is test content')
    filename = default_storage.save('deletion.txt', f)
    assert filename
    with default_storage.open(filename, 'r') as f:
        assert f.read() == 'This is test content'
    default_storage.delete(filename)
    with pytest.raises(HTTPError):
        default_storage.open(filename)
Example #27
    def load_cdx(self, query):
        """
            This function accepts a standard CDX request, except with a GUID instead of date, and returns a standard CDX 11 response.
        """
        guid = query.params['guid']
        url = query.url

        # We'll first check the key-value store to see if we cached the lookup for this guid on a previous request.
        # This will be common, since each playback triggers lots of requests for the same .warc file.
        cache_key = guid + '-surts'
        url_key = guid + '-url'
        surt_lookup = django_cache.get(cache_key)
        url = url or django_cache.get(url_key)
        if surt_lookup and url:
            surt_lookup = json.loads(surt_lookup)

        else:
            # nothing in cache; find requested link in database
            try:
                link = Link.objects.select_related().get(pk=guid)
            except Link.DoesNotExist:
                return []

            # cache url, which may be blank if this is the first request
            if not url:
                url = link.submitted_url
            django_cache.set(url_key, url, timeout=60*60)

            # get warc file
            for asset in link.assets.all():
                if '.warc' in asset.warc_capture:
                    warc_path = os.path.join(asset.base_storage_path, asset.warc_capture)
                    break
            else:
                return []  # no .warc file -- do something to handle this?

            # now we have to get an index of all the URLs in this .warc file
            # first try fetching it from a .cdx file on disk
            cdx_path = warc_path.replace('.gz', '').replace('.warc', '.cdx')

            if not default_storage.exists(cdx_path):
                # there isn't a .cdx file on disk either -- let's create it
                with default_storage.open(warc_path, 'rb') as warc_file, default_storage.open(cdx_path, 'wb') as cdx_file:
                    write_cdx_index(cdx_file, warc_file, warc_path, sort=True)

            # now load the URL index from disk and stick it in the cache
            cdx_lines = (line.strip() for line in default_storage.open(cdx_path, 'rb'))
            surt_lookup = dict((key, list(val)) for key, val in groupby(cdx_lines, key=lambda line: line.split(' ', 1)[0]))
            django_cache.set(cache_key, json.dumps(surt_lookup), timeout=60*60)

        # find cdx lines for url
        sorted_url = surt(url)
        if sorted_url in surt_lookup:
            return (str(i) for i in surt_lookup[sorted_url])

        # didn't find requested url in this archive
        return []
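For reference, the surt() call near the end canonicalizes a URL into SURT form, which is the key format CDX indexes are sorted by:

from surt import surt

surt('http://example.com/page')  # -> 'com,example)/page'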
Example #28
    def get_thumbnail(self, image_data=None):
        if self.thumbnail_status == 'failed' or self.thumbnail_status == 'generating':
            return None

        thumbnail_path = os.path.join(settings.THUMBNAIL_STORAGE_PATH, self.guid_as_path(), 'thumbnail.png')

        if self.thumbnail_status == 'generated' and default_storage.exists(thumbnail_path):
            return default_storage.open(thumbnail_path)

        try:

            warc_url = None
            image = None

            if image_data:
                image = Image(blob=image_data)
            else:

                if self.screenshot_capture and self.screenshot_capture.status == 'success':
                    warc_url = self.screenshot_capture.url
                else:
                    pdf_capture = self.captures.filter(content_type__startswith='application/pdf').first()
                    if pdf_capture:
                        warc_url = pdf_capture.url

                if warc_url:
                    self.thumbnail_status = 'generating'
                    self.save(update_fields=['thumbnail_status'])

                    headers, data = self.replay_url(warc_url)
                    temp_file = tempfile.NamedTemporaryFile(suffix='.' + warc_url.rsplit('.', 1)[-1])
                    for chunk in data:
                        temp_file.write(chunk)
                    temp_file.flush()
                    image = Image(filename=temp_file.name + "[0]")  # [0] limits ImageMagick to first page of PDF

            if image:
                with imagemagick_temp_dir():
                    with image as opened_img:
                        opened_img.transform(resize='600')
                        # opened_img.resize(600,600)
                        with Image(width=600, height=600) as dst_image:
                            dst_image.composite(opened_img, 0, 0)
                            dst_image.compression_quality = 60
                            default_storage.store_data_to_file(dst_image.make_blob('png'), thumbnail_path, overwrite=True)

                self.thumbnail_status = 'generated'
                self.save(update_fields=['thumbnail_status'])

                return default_storage.open(thumbnail_path)

        except Exception as e:
            print "Thumbnail generation failed for %s: %s" % (self.guid, e)

        self.thumbnail_status = 'failed'
        self.save(update_fields=['thumbnail_status'])
Example #29
 def result(self):
     content1 = default_storage.open(self.output).read()
     content2 = default_storage.open(self.standard_output).read()
     content1 = content1.replace("\n", "").replace(" ", "")
     content2 = content2.replace("\n", "").replace(" ", "")
     print "Context =>", content1, content2, "<="
     if content1 == content2:
         return 0
     else:
         return -1
Example #30
 def test_get(self):
     if not settings.XSENDFILE:
         raise SkipTest
     img = base64.b64decode(
         'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABAQMAAAAl21bKAAAAA1BMVEUAAACnej'
         '3aAAAAAXRSTlMAQObYZgAAAApJREFUCNdjYAAAAAIAAeIhvDMAAAAASUVORK5C'
         'YII=')
     path = self.collection.image_path()
     with storage.open(path, 'wb') as fp:
         fp.write(img)
     res = self.client.get(self.url)
     eq_(res[settings.XSENDFILE_HEADER], path)
Example #31
def LoadUserProject(userproject_file, ownerid):
    """ given an uploaded Geopaparazzi UserProject
    extract the useful bits and load them to the database
    :param userproject_file: name of the sqlite3 file to be read
    :param ownerid: id of the file owner
    :type arg1: string
    :type arg2: int
    :rtype: None

    Since the userproject file and the images extracted from it may be managed by the Django-storages module (Boto)
    we have to take care to make local copies of all files accessed.

    Also, since this task is intended for asynchronous execution via Celery, the calling parameters cannot be
    model instances (they are not JSON serializable!), so any model references have to be passed using primary keys
    """
    # before we can open the database file, it must be copied locally!
    document = default_storage.open(userproject_file, 'rb')
    userproject = tempfile.NamedTemporaryFile(delete=False)
    # this might be a memory problem!
    data = document.read()
    userproject.write(data)
    userproject.close()

    # get the owner from the ownerid
    User = get_user_model()
    owner = User.objects.get(id=ownerid)

    # connect to the database
    conn = sqlite3.connect(userproject.name)
    conn.row_factory = sqlite3.Row
    c = conn.cursor()

    if tableAvailability(c, 'gpslogs'):
        # import gpstracks if any
        for gpslog in c.execute('SELECT * FROM gpslogs;'):
            log_dict = dict(gpslog)
            rcd = TrackFeature(owner=owner, text=log_dict['text'])
            rcd.timestamp_start = datetime.utcfromtimestamp(log_dict['startts']/1000).replace(tzinfo=timezone.utc)
            rcd.timestamp_end = datetime.utcfromtimestamp(log_dict['endts']/1000).replace(tzinfo=timezone.utc)
            rcd.lengthm = log_dict['lengthm']
            d = conn.cursor()
            plist = []
            for pt in d.execute('SELECT * FROM gpslogsdata WHERE logid=? ORDER BY ts ASC', (log_dict['_id'],)):
                pt_dict = dict(pt)
                plist.append(Point(pt_dict['lon'], pt_dict['lat']))
            rcd.linestring = LineString(plist)
            rcd.save()
            d.close()

    if tableAvailability(c, 'notes'):
        # import notes and images together in order to preserve relationships
        for nt in c.execute('SELECT * FROM notes;'):
            nt_dict = dict(nt)
            rcd = Note(owner=owner, text=nt_dict['text'], form=nt_dict['form'])
            rcd.timestamp = datetime.utcfromtimestamp(nt_dict['ts']/1000).replace(tzinfo=timezone.utc)
            rcd.description = nt_dict['description']
            rcd.lat = nt_dict['lat']
            rcd.lon = nt_dict['lon']
            rcd.location = Point(rcd.lon, rcd.lat)
            rcd.altitude = nt_dict['altim']
            rcd.save()  # save the Note here so that we can refer to it when creating ImageNote records
            d = conn.cursor()
            # Import all Images linked to the current Note
            # Design Note:  presumes ImageNote records are _always_ referenced by a Note
            #               unreferenced records will not be imported
            if tableAvailability(d, 'images'):
                for im in d.execute('SELECT * FROM images WHERE note_id=?;', (nt_dict['_id'],)):
                    im_dict = dict(im)
                    imgrcd = ImageNote(owner=owner, note=rcd, azimuth=im_dict['azim'])
                    # Note that ImageNote records have time and location distinct from the Note
                    imgrcd.timestamp = datetime.utcfromtimestamp(im_dict['ts']/1000).replace(tzinfo=timezone.utc)
                    imgrcd.lat = im_dict['lat']
                    imgrcd.lon = im_dict['lon']
                    imgrcd.location = Point(imgrcd.lon, imgrcd.lat)
                    imgrcd.altitude = im_dict['altim']
                    e = conn.cursor()
                    e.execute('SELECT * FROM imagedata WHERE _id=?;', (im_dict['_id'],))
                    img = e.fetchone()
                    img_dict = dict(img)
                    # save the full image locally - this should probably be put in a temp directory
                    blob = img_dict['data']
                    local_filename = im_dict['text']
                    with open(local_filename, 'wb') as output_file:
                        output_file.write(blob)

                    # Rotate the image if an orientation tag is available
                    try:
                        image = Image.open(local_filename)
                        for orientation in ExifTags.TAGS.keys():
                            if ExifTags.TAGS[orientation] == 'Orientation':
                                break
                        exif = dict(image._getexif().items())

                        if exif[orientation] == 3:
                            image = image.rotate(180, expand=True)
                        elif exif[orientation] == 6:
                            image = image.rotate(270, expand=True)
                        elif exif[orientation] == 8:
                            image = image.rotate(90, expand=True)
                        image.save(local_filename)
                        image.close()

                    except (AttributeError, KeyError, IndexError):
                        # cases: image has no EXIF data
                        pass

                    qf = open(local_filename, 'rb')
                    imgrcd.image = File(qf)
                    # the thumbnail - also should be placed in a temp directory
                    blob = img_dict['thumbnail']
                    thmname = 'thm_{0}'.format(local_filename)
                    with open(thmname, 'wb') as output_file:
                        output_file.write(blob)
                    qt = open(thmname, 'rb')
                    imgrcd.thumbnail = File(qt)
                    # save the newly created image record
                    imgrcd.save()
                    # clean up temporary image files
                    qf.close()
                    try:
                        os.remove(local_filename)
                    except OSError as err:
                        pass

                    qt.close()
                    try:
                        os.remove(thmname)
                    except OSError as err:
                        pass

    # clean up the temporary sqlite3 file
    userproject.close()
    try:
        os.remove(userproject.name)
    except OSError as err:
        pass
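`tableAvailability` is referenced but not defined in this example. A minimal sketch matching how it is called (a cursor plus a table name) would be:

def tableAvailability(cursor, table_name):
    # Hypothetical helper: True if the named table exists in the database.
    cursor.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name=?;",
        (table_name,))
    return cursor.fetchone() is not None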
Example #32
def upload_to_internet_archive(self, link_guid):
    link = Link.objects.get(guid=link_guid)

    if not settings.UPLOAD_TO_INTERNET_ARCHIVE:
        return

    if not link.can_upload_to_internet_archive():
        print "Not eligible for upload."
        return

    metadata = {
        "collection": settings.INTERNET_ARCHIVE_COLLECTION,
        "title": '%s: %s' % (link_guid, truncatechars(link.submitted_title, 50)),
        "mediatype": 'web',
        "description": 'Perma.cc archive of %s created on %s.' % (
            link.submitted_url,
            link.creation_timestamp,
        ),
        "contributor": 'Perma.cc',
        "submitted_url": link.submitted_url,
        "perma_url": "http://%s/%s" % (settings.HOST, link_guid),
        "external-identifier": 'urn:X-perma:%s' % link_guid,
    }

    # set sponsor if organization exists
    if link.organization:
        metadata["sponsor"] = "%s - %s" % (link.organization,
                                           link.organization.registrar)

    identifier = settings.INTERNET_ARCHIVE_IDENTIFIER_PREFIX + link_guid
    with default_storage.open(link.warc_storage_file(), 'rb') as warc_file:
        success = internetarchive.upload(
            identifier,
            warc_file,
            access_key=settings.INTERNET_ARCHIVE_ACCESS_KEY,
            secret_key=settings.INTERNET_ARCHIVE_SECRET_KEY,
            retries=10,
            retries_sleep=60,
            verbose=True,
        )

        if success:
            internetarchive.modify_metadata(
                identifier,
                metadata=metadata,
            )

            link.uploaded_to_internet_archive = True
            link.save()

        else:
            # self.retry() raises, so log before calling it.
            print("Failed.")
            self.retry(
                exc=Exception("Internet Archive reported upload failure."))

        return success
Example #33
 def test_from_post_write_file(self):
     assert storage.open(self.upload().path).read() == self.data
Example #34
    def test_cachedir_tag(self):
        self.assertTrue(default_storage.exists(PHOTOLOGUE_CACHEDIRTAG))

        content = default_storage.open(PHOTOLOGUE_CACHEDIRTAG).read()
        self.assertEqual(content,
                         b"Signature: 8a477f597d28d172789f06886806bc55")
Example #35
 def read(self, read_mode='rb'):
     with default_storage.open(self.get_full_path(), mode=read_mode) as f:
         return f.read()
Example #36
 def stash_json(self):
     with storage.open(self._stash_path, 'r') as json_file:
         return json.load(json_file)
Example #37
def process_export(export_fields='all_fields', export_status_detail='',
                   identifier=u'', user_id=0):
    from tendenci.apps.perms.models import TendenciBaseModel

    if export_fields == 'main_fields':
        field_list = [
            'headline',
            'slug',
            'summary',
            'body',
            'source',
            'first_name',
            'last_name',
            'address',
            'address2',
            'city',
            'state',
            'zip_code',
            'country',
            'phone',
            'phone2',
            'fax',
            'email',
            'email2',
            'website',
            'list_type',
            'requested_duration',
            'activation_dt',
            'expiration_dt',
            'tags',
            'enclosure_url',
            'enclosure_type',
            'enclosure_length',
            'status',
            'status_detail']
    else:
        # base ------------
        base_field_list = [
            smart_str(field.name) for field in TendenciBaseModel._meta.fields
            if not field.__class__ == AutoField]

        field_list = [
            smart_str(field.name) for field in Directory._meta.fields
            if not field.__class__ == AutoField]
        field_list = [
            name for name in field_list
            if name not in base_field_list]
        field_list.remove('guid')
        # append base fields at the end
        field_list = field_list + base_field_list

    identifier = identifier or int(ttime.time())
    file_name_temp = 'export/directories/%s_temp.csv' % identifier

    with default_storage.open(file_name_temp, 'wb') as csvfile:
        csv_writer = UnicodeWriter(csvfile, encoding='utf-8')
        fields_names = list(field_list)
        for i, item in enumerate(fields_names):
            if item == 'headline':
                fields_names[i] = 'name'
            if item == 'body':
                fields_names[i] = 'description'
        csv_writer.writerow(fields_names)

        directories = Directory.objects.all()
        if export_status_detail:
            directories = directories.filter(status_detail__icontains=export_status_detail)
        for directory in directories:
            items_list = []
            for field_name in field_list:
                item = getattr(directory, field_name)
                if item is None:
                    item = ''
                if item:
                    if isinstance(item, datetime):
                        item = item.strftime('%Y-%m-%d %H:%M:%S')
                    elif isinstance(item, date):
                        item = item.strftime('%Y-%m-%d')
                    elif isinstance(item, time):
                        item = item.strftime('%H:%M:%S')
                    elif isinstance(item, basestring):
                        item = item.encode("utf-8")
                    elif field_name == 'invoice':
                        # display total vs balance
                        item = 'Total: %d / Balance: %d' % (item.total, item.balance)
                item = smart_str(item).decode('utf-8')
                items_list.append(item)
            csv_writer.writerow(items_list)

    # rename the file name
    file_name = 'export/directories/%s.csv' % identifier
    default_storage.save(file_name, default_storage.open(file_name_temp, 'rb'))

    # delete the temp file
    default_storage.delete(file_name_temp)

    # notify user that export is ready to download
    [user] = User.objects.filter(pk=user_id)[:1] or [None]
    if user and user.email:
        download_url = reverse('directory.export_download', args=[identifier])

        site_url = get_setting('site', 'global', 'siteurl')
        site_display_name = get_setting('site', 'global', 'sitedisplayname')
        parms = {
            'download_url': download_url,
            'user': user,
            'site_url': site_url,
            'site_display_name': site_display_name,
            'export_status_detail': export_status_detail,
            'export_fields': export_fields}

        subject = render_to_string(
            'directories/notices/export_ready_subject.html', parms)
        subject = subject.strip('\n').strip('\r')

        body = render_to_string(
            'directories/notices/export_ready_body.html', parms)

        email = Email(
            recipient=user.email,
            subject=subject,
            body=body)
        email.send()
Example #38
def call_signing(file_obj):
    """Sign `file_obj` via autographs /sign/file endpoint.

    :returns: The certificates serial number.
    """
    conf = settings.AUTOGRAPH_CONFIG

    with storage.open(file_obj.current_file_path) as fobj:
        input_data = force_str(b64encode(fobj.read()))

    signing_data = {
        'input': input_data,
        'keyid': conf['signer'],
        'options': {
            'id': get_id(file_obj.version.addon),
            # "Add-on variant A params (PKCS7 SHA1 and COSE ES256) work in
            # Fx <57, so we can switch to that without breaking backwards
            # compatibility"
            # https://github.com/mozilla/addons-server/issues/9308
            # This means, the pkcs7 sha1 signature is used for backwards
            # compatibility and cose sha256 will be used for newer
            # Firefox versions.
            # The relevant pref in Firefox is
            # "security.signed_app_signatures.policy"
            # where it's set to COSEAndPKCS7WithSHA1OrSHA256 to match
            # these settings.
            'pkcs7_digest': 'SHA1',
            'cose_algorithms': ['ES256'],
        },
    }

    hawk_auth = HawkAuth(id=conf['user_id'], key=conf['key'])

    # We are using a separate signer that adds the mozilla-recommendation.json
    # file.
    promo_group = file_obj.addon.promoted_group(currently_approved=False)
    if use_promoted_signer(file_obj, promo_group):
        signing_states = {
            promo_group.autograph_signing_states.get(app.short)
            for app in file_obj.addon.promotedaddon.all_applications
        }

        signing_data['keyid'] = conf['recommendation_signer']
        signing_data['options']['recommendations'] = list(signing_states)
        hawk_auth = HawkAuth(
            id=conf['recommendation_signer_user_id'],
            key=conf['recommendation_signer_key'],
        )

    with statsd.timer('services.sign.addon.autograph'):
        response = requests.post(
            '{server}/sign/file'.format(server=conf['server_url']),
            json=[signing_data],
            auth=hawk_auth,
        )

    if response.status_code != requests.codes.CREATED:
        msg = f'Posting to add-on signing failed ({response.status_code})'
        log.error(msg,
                  extra={
                      'reason': response.reason,
                      'text': response.text
                  })
        raise SigningError(msg)

    # Save the returned file in our storage.
    with storage.open(file_obj.current_file_path, 'wb') as fobj:
        fobj.write(b64decode(response.json()[0]['signed_file']))

    # Now fetch the certificate's serial number. Future versions of
    # autograph may return this in the response.
    # https://github.com/mozilla-services/autograph/issues/214
    # Now extract the file and fetch the pkcs signature
    with zipfile.ZipFile(file_obj.current_file_path, mode='r') as zip_fobj:
        return get_signer_serial_number(
            zip_fobj.read(os.path.join('META-INF', 'mozilla.rsa')))
Example #39
def process_export(identifier, user_id):
    field_list = [
        'guid',
        'slug',
        'timezone',
        'headline',
        'summary',
        'body',
        'source',
        'first_name',
        'last_name',
        'phone',
        'fax',
        'email',
        'website',
        'release_dt',
        'syndicate',
        'featured',
        'design_notes',
        'tags',
        'enclosure_url',
        'enclosure_type',
        'enclosure_length',
        'not_official_content',
        'entity',
    ]

    identifier = identifier or int(ttime.time())
    file_name_temp = 'export/articles/%s_temp.csv' % (identifier)

    with default_storage.open(file_name_temp, 'wb') as csvfile:
        csv_writer = UnicodeWriter(csvfile, encoding='utf-8')
        csv_writer.writerow(field_list)

        articles = Article.objects.filter(status_detail='active')

        for article in articles:
            items_list = []
            for field_name in field_list:
                item = getattr(article, field_name)

                if isinstance(item, datetime):
                    item = item.strftime('%Y-%m-%d %H:%M:%S')
                elif isinstance(item, date):
                    item = item.strftime('%Y-%m-%d')
                elif isinstance(item, time):
                    item = item.strftime('%H:%M:%S')
                elif isinstance(item, basestring):
                    item = item.encode("utf-8")
                item = smart_str(item).decode('utf-8')
                items_list.append(item)
            csv_writer.writerow(items_list)

    # rename the file name
    file_name = 'export/articles/%s.csv' % identifier
    default_storage.save(file_name, default_storage.open(file_name_temp, 'rb'))

    # delete the temp file
    default_storage.delete(file_name_temp)

    # notify user that export is ready to download
    [user] = User.objects.filter(pk=user_id)[:1] or [None]
    if user and user.email:
        download_url = reverse('article.export_download', args=[identifier])

        site_url = get_setting('site', 'global', 'siteurl')
        site_display_name = get_setting('site', 'global', 'sitedisplayname')
        parms = {
            'download_url': download_url,
            'user': user,
            'site_url': site_url,
            'site_display_name': site_display_name
        }

        subject = render_to_string(
            'articles/notices/export_ready_subject.html', parms)
        subject = subject.strip('\n').strip('\r')

        body = render_to_string('articles/notices/export_ready_body.html',
                                parms)

        email = Email(recipient=user.email, subject=subject, body=body)
        email.send()
Example #40
def thumbnail(image_url,
              width,
              height,
              upscale=True,
              quality=95,
              left=.5,
              top=.5,
              padding=False,
              padding_color="#fff"):
    """
    Given the URL to an image, resizes the image using the given width
    and height on the first time it is requested, and returns the URL
    to the new resized image. If width or height are zero then original
    ratio is maintained. When ``upscale`` is False, images smaller than
    the given size will not be grown to fill that size. The given width
    and height thus act as maximum dimensions.
    """

    if not image_url:
        return ""
    try:
        from PIL import Image, ImageFile, ImageOps
    except ImportError:
        return ""

    image_url = unquote(str(image_url)).split("?")[0]
    if image_url.startswith(settings.MEDIA_URL):
        image_url = image_url.replace(settings.MEDIA_URL, "", 1)
    image_dir, image_name = os.path.split(image_url)
    image_prefix, image_ext = os.path.splitext(image_name)
    filetype = {".png": "PNG", ".gif": "GIF"}.get(image_ext, "JPEG")
    thumb_name = "%s-%sx%s" % (image_prefix, width, height)
    if not upscale:
        thumb_name += "-no-upscale"
    if left != .5 or top != .5:
        left = min(1, max(0, left))
        top = min(1, max(0, top))
        thumb_name = "%s-%sx%s" % (thumb_name, left, top)
    thumb_name += "-padded-%s" % padding_color if padding else ""
    thumb_name = "%s%s" % (thumb_name, image_ext)

    # `image_name` is used here for the directory path, as each image
    # requires its own sub-directory using its own name - this is so
    # we can consistently delete all thumbnails for an individual
    # image, which is something we do in filebrowser when a new image
    # is written, allowing us to purge any previously generated
    # thumbnails that may match a new image name.
    thumb_dir = os.path.join(settings.MEDIA_ROOT, image_dir,
                             settings.THUMBNAILS_DIR_NAME, image_name)
    if not os.path.exists(thumb_dir):
        try:
            os.makedirs(thumb_dir)
        except OSError:
            pass

    thumb_path = os.path.join(thumb_dir, thumb_name)
    thumb_url = "%s/%s/%s" % (settings.THUMBNAILS_DIR_NAME,
                              quote(image_name.encode("utf-8")),
                              quote(thumb_name.encode("utf-8")))
    image_url_path = os.path.dirname(image_url)
    if image_url_path:
        thumb_url = "%s/%s" % (image_url_path, thumb_url)

    try:
        thumb_exists = os.path.exists(thumb_path)
    except UnicodeEncodeError:
        # The image that was saved to a filesystem with utf-8 support,
        # but somehow the locale has changed and the filesystem does not
        # support utf-8.
        from mezzanine.core.exceptions import FileSystemEncodingChanged
        raise FileSystemEncodingChanged()
    if thumb_exists:
        # Thumbnail exists, don't generate it.
        return thumb_url
    elif not default_storage.exists(image_url):
        # Requested image does not exist, just return its URL.
        return image_url

    f = default_storage.open(image_url)
    try:
        image = Image.open(f)
    except Exception:
        # Invalid image format.
        return image_url

    image_info = image.info

    # Transpose to align the image to its orientation if necessary.
    # If the image is transposed, delete the exif information as
    # not all browsers support the CSS image-orientation:
    # - http://caniuse.com/#feat=css-image-orientation
    try:
        orientation = image._getexif().get(0x0112)
    except Exception:
        orientation = None
    if orientation:
        methods = {
            2: (Image.FLIP_LEFT_RIGHT, ),
            3: (Image.ROTATE_180, ),
            4: (Image.FLIP_TOP_BOTTOM, ),
            5: (Image.FLIP_LEFT_RIGHT, Image.ROTATE_90),
            6: (Image.ROTATE_270, ),
            7: (Image.FLIP_LEFT_RIGHT, Image.ROTATE_270),
            8: (Image.ROTATE_90, )
        }.get(orientation, ())
        if methods:
            image_info.pop('exif', None)
            for method in methods:
                image = image.transpose(method)

    to_width = int(width)
    to_height = int(height)
    from_width = image.size[0]
    from_height = image.size[1]

    if not upscale:
        to_width = min(to_width, from_width)
        to_height = min(to_height, from_height)

    # Set dimensions.
    if to_width == 0:
        to_width = from_width * to_height // from_height
    elif to_height == 0:
        to_height = from_height * to_width // from_width
    if image.mode not in ("P", "L", "RGBA") \
            and filetype not in ("JPG", "JPEG"):
        try:
            image = image.convert("RGBA")
        except Exception:
            return image_url
    # Required for progressive jpgs.
    ImageFile.MAXBLOCK = 2 * (max(image.size)**2)

    # Padding.
    if padding and to_width and to_height:
        from_ratio = float(from_width) / from_height
        to_ratio = float(to_width) / to_height
        pad_size = None
        if to_ratio < from_ratio:
            pad_height = int(to_height * (float(from_width) / to_width))
            pad_size = (from_width, pad_height)
            pad_top = (pad_height - from_height) // 2
            pad_left = 0
        elif to_ratio > from_ratio:
            pad_width = int(to_width * (float(from_height) / to_height))
            pad_size = (pad_width, from_height)
            pad_top = 0
            pad_left = (pad_width - from_width) // 2
        if pad_size is not None:
            pad_container = Image.new("RGBA", pad_size, padding_color)
            pad_container.paste(image, (pad_left, pad_top))
            image = pad_container

    # Create the thumbnail.
    to_size = (to_width, to_height)
    to_pos = (left, top)
    try:
        image = ImageOps.fit(image, to_size, Image.ANTIALIAS, 0, to_pos)
        # Image.save() returns None, so don't assign its result.
        image.save(thumb_path, filetype, quality=quality, **image_info)
        # Push a remote copy of the thumbnail if MEDIA_URL is
        # absolute.
        if "://" in settings.MEDIA_URL:
            with open(thumb_path, "rb") as f:
                default_storage.save(unquote(thumb_url), File(f))
    except Exception:
        # If an error occurred, a corrupted image may have been saved,
        # so remove it, otherwise the check for it existing will just
        # return the corrupted image next time it's requested.
        try:
            os.remove(thumb_path)
        except Exception:
            pass
        return image_url
    return thumb_url
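A minimal usage sketch for the helper above (this assumes the common
call form of Mezzanine's thumbnail helper, taking the image URL plus
target width and height; the file path and variable names are
hypothetical):

# Hypothetical call: 100px-wide thumbnail; height follows the
# original aspect ratio because height is 0.
thumb_url = thumbnail("uploads/photo.jpg", 100, 0)

# Fit into an exact 200x200 box, padding with the default white
# background instead of cropping.
square_url = thumbnail("uploads/photo.jpg", 200, 200, padding=True)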
Example #41
0
def process_export(
        group_id,
        export_target='all',
        identifier=u'', user_id=0):
    """
    Process export for group members and/or group subscribers.
    """

    [group] = Group.objects.filter(id=group_id)[:1] or [None]
    if not group:
        return

    # pull 100 rows per query
    # be careful of the memory usage
    rows_per_batch = 100

    identifier = identifier or str(time.time())
    file_dir = 'export/groups/'

    file_path_temp = '%sgroup_%d_%s_%s_temp.csv' % (file_dir,
                                                    group.id,
                                                    export_target,
                                                    identifier)

    # labels
    user_fields = ['id',
                   'first_name',
                   'last_name',
                   'email',
                   'username',
                   'is_active',
                   'is_staff',
                   'is_superuser']
    profile_fields = ['direct_mail',
                      'company',
                      'department',
                      'position_title',
                      'address',
                      'address2',
                      'city',
                      'state',
                      'zipcode',
                      'country',
                      'region',
                      'phone',
                      'notes',
                      'referral_source',
                      'create_dt']
    labels = user_fields + profile_fields

    field_dict = OrderedDict(
        [(label.lower().replace(" ", "_"), '') for label in labels])

    with default_storage.open(file_path_temp, 'w') as csvfile:
        csv_writer = csv.DictWriter(csvfile, fieldnames=list(field_dict.keys()))
        csv_writer.writeheader()

        # process regular group members
        count_members = group.members.filter(
            group_member__status=True,
            group_member__status_detail='active').count()
        num_rows_processed = 0
        while num_rows_processed < count_members:
            users = group.members.filter(
                group_member__status=True,
                group_member__status_detail='active'
                ).select_related('profile'
                ).order_by('group_member__member_id'
                )[num_rows_processed:num_rows_processed + rows_per_batch]
            num_rows_processed += rows_per_batch
            for user in users:
                # Use a fresh copy per user so a field missing on one
                # user can't leak the previous row's value into this row.
                row_dict = field_dict.copy()
                if hasattr(user, 'profile'):
                    profile = user.profile
                else:
                    profile = Profile.objects.create_profile(user)
                for field_name in user_fields:
                    if hasattr(user, field_name):
                        row_dict[field_name] = getattr(user, field_name)
                for field_name in profile_fields:
                    if hasattr(profile, field_name):
                        row_dict[field_name] = getattr(profile, field_name)
                for k, v in row_dict.items():
                    if not isinstance(v, str):
                        if isinstance(v, datetime):
                            row_dict[k] = v.strftime('%Y-%m-%d %H:%M:%S')
                        elif isinstance(v, date):
                            row_dict[k] = v.strftime('%Y-%m-%d')
                        else:
                            row_dict[k] = smart_str(v)
                    else:
                        row_dict[k] = escape_csv(v)

                csv_writer.writerow(row_dict)

    # copy to the final file name (the storage API has no rename)
    file_path = '%sgroup_%d_%s_%s.csv' % (file_dir,
                                          group.id,
                                          export_target,
                                          identifier)
    default_storage.save(file_path, default_storage.open(file_path_temp, 'rb'))

    # delete the temp file
    default_storage.delete(file_path_temp)

    # notify user that export is ready to download
    [user] = User.objects.filter(id=user_id)[:1] or [None]
    if user and user.email:
        download_url = reverse('group.members_export_download',
                               args=[group.slug, export_target, identifier])
        site_url = get_setting('site', 'global', 'siteurl')
        site_display_name = get_setting('site', 'global', 'sitedisplayname')
        parms = {
            'group': group,
            'download_url': download_url,
            'user': user,
            'site_url': site_url,
            'site_display_name': site_display_name}

        subject = render_to_string(
            template_name='user_groups/exports/export_ready_subject.html', context=parms)
        subject = subject.strip('\n').strip('\r')

        body = render_to_string(
            template_name='user_groups/exports/export_ready_body.html', context=parms)

        email = Email(
            recipient=user.email,
            subject=subject,
            body=body)

        email.send()
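escape_csv() is referenced above but not shown in this listing. A
minimal sketch of such a helper, assuming its job is to neutralize
spreadsheet formula injection in exported cells:

def escape_csv(value):
    # Hypothetical sketch: spreadsheet apps treat cells starting
    # with =, +, - or @ as formulas, so prefix them with a quote
    # to keep exported data inert when opened.
    if value and value[0] in ('=', '+', '-', '@'):
        return "'" + value
    return value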
Example #42
0
def submit_csv_async(username, xform, file_path):
    with default_storage.open(file_path) as csv_file:
        return submit_csv(username, xform, csv_file)
Example #43
0
 def save(self):
     with default_storage.open(self._filename, "w+") as f:
         f.write(json.dumps(self._dict))
Example #44
0
 def packaged_copy_over(self, dest, name):
     # Open the source in binary mode to match the binary destination.
     with storage.open(dest, 'wb') as f:
         with open(self.packaged_app_path(name), 'rb') as src:
             copyfileobj(src, f)
Example #45
0
def _renderer(event, layout):
    if isinstance(layout.background, File) and layout.background.name:
        bgf = default_storage.open(layout.background.name, "rb")
    else:
        bgf = open(finders.find('pretixplugins/badges/badge_default_a6l.pdf'), "rb")
    return Renderer(event, json.loads(layout.layout), bgf)
Example #46
0
 def testOpenMissing(self):
     self.assertRaises(OSError, lambda: default_storage.open("foo.txt"))
Example #47
0
 def not_blocked_json(self):
     with storage.open(self._not_blocked_path, 'r') as json_file:
         return json.load(json_file)
Example #48
0
 def manifest_copy_over(self, dest, name):
     # Open the source in binary mode to match the binary destination.
     with storage.open(dest, 'wb') as f:
         with open(self.manifest_path(name), 'rb') as src:
             copyfileobj(src, f)
Example #49
0
def run_validator(path, for_appversions=None, test_all_tiers=False,
                  overrides=None, compat=False, listed=True):
    """A pre-configured wrapper around the addon validator.

    *path*
        Path to the addon / extension file to validate.

    *for_appversions=None*
        An optional dict of application versions to validate this addon
        for. The key is an application GUID and its value is a list of
        versions.

    *test_all_tiers=False*
        When False (default) the validator will not continue if it
        encounters fatal errors.  When True, all tests in all tiers are run.
        See bug 615426 for discussion on this default.

    *overrides=None*
        Normally the validator gets info from the manifest but there are a
        few things we need to override. See validator for supported overrides.
        Example: {'targetapp_maxVersion': {'<app guid>': '<version>'}}

    *compat=False*
        Set this to `True` when performing a bulk validation. This allows the
        validator to ignore certain tests that should not be run during bulk
        validation (see bug 735841).

    *listed=True*
        If the addon is unlisted, treat it as if it were self-hosted
        (don't fail on the presence of an updateURL).

    To validate the addon for compatibility with Firefox 5 and 6,
    you'd pass in::

        for_appversions={amo.FIREFOX.guid: ['5.0.*', '6.0.*']}

    Not all application versions will have a set of registered
    compatibility tests.
    """
    from validator.validate import validate

    apps = dump_apps.Command.JSON_PATH
    if not os.path.exists(apps):
        call_command('dump_apps')

    with NamedTemporaryFile(suffix='_' + os.path.basename(path)) as temp:
        if path and not os.path.exists(path) and storage.exists(path):
            # This file doesn't exist locally. Write it to our
            # currently-open temp file and switch to that path.
            copyfileobj(storage.open(path), temp.file)
            path = temp.name

        with statsd.timer('devhub.validator'):
            json_result = validate(
                path,
                for_appversions=for_appversions,
                format='json',
                # When False, this flag says to stop testing after one
                # tier fails.
                determined=test_all_tiers,
                approved_applications=apps,
                overrides=overrides,
                compat_test=compat,
                listed=listed
            )

        track_validation_stats(json_result)

        return json_result
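A hypothetical invocation, reusing the for_appversions form from the
docstring (the file path is illustrative):

# Validate an add-on for compatibility with Firefox 5 and 6 only.
results = run_validator(
    '/tmp/addon.xpi',
    for_appversions={amo.FIREFOX.guid: ['5.0.*', '6.0.*']})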
Example #50
0
    def put(self, request, path, pk=None):
        """
        Responds with the actual manifest
        :param request:
        :param path:
        :param pk:
        :return:
        """
        _, repository = self.get_dr_push(request, path)
        # Read the uploaded manifest, then create the manifest record,
        # its blob relations, and the tag.
        chunk = request.META["wsgi.input"]
        artifact = self.receive_artifact(chunk)
        with storage.open(artifact.file.name) as artifact_file:
            raw_data = artifact_file.read()
        content_data = json.loads(raw_data)
        config_layer = content_data.get("config")
        config_blob = models.Blob.objects.get(
            digest=config_layer.get("digest"))

        manifest = models.Manifest(
            digest="sha256:{id}".format(id=artifact.sha256),
            schema_version=2,
            media_type=request.content_type,
            config_blob=config_blob,
        )
        try:
            manifest.save()
        except IntegrityError:
            manifest = models.Manifest.objects.get(digest=manifest.digest)
        ca = ContentArtifact(artifact=artifact,
                             content=manifest,
                             relative_path=manifest.digest)
        try:
            ca.save()
        except IntegrityError:
            pass
        layers = content_data.get("layers")
        blobs = []
        for layer in layers:
            blobs.append(layer.get("digest"))
        blobs_qs = models.Blob.objects.filter(digest__in=blobs)
        thru = []
        for blob in blobs_qs:
            thru.append(
                models.BlobManifest(manifest=manifest, manifest_blob=blob))
        models.BlobManifest.objects.bulk_create(objs=thru,
                                                ignore_conflicts=True,
                                                batch_size=1000)
        tag = models.Tag(name=pk, tagged_manifest=manifest)
        try:
            tag.save()
        except IntegrityError:
            pass
        with repository.new_version() as new_version:
            new_version.add_content(
                models.Manifest.objects.filter(digest=manifest.digest))
            new_version.remove_content(
                models.Tag.objects.filter(name=tag.name))
            new_version.add_content(
                models.Tag.objects.filter(name=tag.name,
                                          tagged_manifest=manifest))
        return ManifestResponse(manifest, path, request, status=201)
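For reference, a sketch of the schema-2 manifest body this handler
parses; the digest values are placeholders, not values from the
source:

# Illustrative manifest shape: "config" carries the config blob
# digest looked up above, "layers" lists the layer blob digests.
example_manifest = {
    "schemaVersion": 2,
    "config": {"digest": "sha256:<config-digest>"},
    "layers": [
        {"digest": "sha256:<layer-1-digest>"},
        {"digest": "sha256:<layer-2-digest>"},
    ],
}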
Example #51
0
 def testSaveTextMode(self):
     with self.save_file(content="foo"):
         self.assertEqual(default_storage.open("foo.txt").read(), b"foo")
Example #52
0
 def create_paths(self):
     if not storage.exists(self.file.file_path):
         with storage.open(self.file.file_path, 'w') as f:
             f.write('test data\n')
Example #53
0
 def testOpenWriteMode(self):
     self.assertRaises(ValueError,
                       lambda: default_storage.open("foo.txt", "wb"))
Example #54
0
def has_null_byte(file_path):
    with default_storage.open(file_path, 'r') as f:
        return '\0' in f.read()
Example #55
0
def read_entry(title):
    try:
        with default_storage.open(f"storage/{title}.md") as f:
            return f.read().decode("utf-8")
    except FileNotFoundError:
        return None
Example #56
0
 def webapp(self, data=None, contents='', suffix='.webapp'):
     tmp = tempfile.mktemp(suffix=suffix)
     self.tmp_files.append(tmp)
     # Open in text mode: json.dumps() and `contents` are both str.
     with storage.open(tmp, 'w') as f:
         f.write(json.dumps(data) if data else contents)
     return tmp
Example #57
0
 def testEndpointUrl(self):
     with self.settings(AWS_S3_ENDPOINT_URL="https://s3.amazonaws.com"), \
             self.save_file() as name:
         self.assertEqual(name, "foo.txt")
         self.assertEqual(default_storage.open(name).read(), b"foo")
Example #58
0
 def fake_xpi(self, filename=None):
     """Any useless file that has a name property (for Django)."""
     if not filename:
         return open(get_image_path('non-animated.gif'), 'rb')
     return storage.open(filename, 'rb')
Example #59
0
def get_exif_for_file(file_obj):
    im = PILImage.open(storage.open(file_obj.name), 'r')
    return get_exif(im)
Example #60
0
 def clean_files(self, f):
     if f.mirror_file_path and storage.exists(f.mirror_file_path):
         storage.delete(f.mirror_file_path)
     if not storage.exists(f.file_path):
         with storage.open(f.file_path, 'w') as fp:
             fp.write('sample data\n')